import datetime
import copy
import json
import os
import threading
import time
import pkg_resources
from sqlalchemy.exc import IntegrityError
import anchore_engine.clients.services.common
import anchore_engine.common
# anchore modules
import anchore_engine.common.helpers
import anchore_engine.common.images
import anchore_engine.configuration.localconfig
import anchore_engine.subsys.events as events
import anchore_engine.subsys.metrics
import anchore_engine.subsys.servicestatus
from anchore_engine import db
from anchore_engine.clients import docker_registry
from anchore_engine.clients.services import internal_client_for
from anchore_engine.clients.services import simplequeue
from anchore_engine.clients.services.simplequeue import SimpleQueueClient
from anchore_engine.common.helpers import make_policy_record
from anchore_engine.db import (
db_catalog_image,
db_policybundle,
db_queues,
db_registries,
db_subscriptions,
db_anchore,
db_services,
AccountStates,
AccountTypes,
)
from anchore_engine.service import ApiService, LifeCycleStages
from anchore_engine.services.catalog import archiver
from anchore_engine.services.catalog import catalog_impl
from anchore_engine.services.catalog.exceptions import (
TagManifestParseError,
TagManifestNotFoundError,
PolicyBundleValidationError,
)
from anchore_engine.services.catalog.image_content.get_image_content import (
ImageManifestContentGetter,
ImageDockerfileContentGetter,
ImageContentGetter,
)
from anchore_engine.db.entities.catalog import (
ImageImportContent,
ImageImportOperation,
ImportState,
)
from anchore_engine.subsys import (
notifications,
taskstate,
logger,
archive,
object_store,
)
from anchore_engine.subsys.identities import manager_factory
from anchore_engine.services.catalog import archiver
from anchore_engine.subsys.object_store.config import (
DEFAULT_OBJECT_STORE_MANAGER_ID,
ANALYSIS_ARCHIVE_MANAGER_ID,
ALT_OBJECT_STORE_CONFIG_KEY,
)
from anchore_engine.common.schemas import (
QueueMessage,
AnalysisQueueMessage,
ImportQueueMessage,
ImportManifest,
)
from anchore_engine.subsys.object_store.config import (
DEFAULT_OBJECT_STORE_MANAGER_ID,
ALT_OBJECT_STORE_CONFIG_KEY,
)
from anchore_engine.utils import AnchoreException
##########################################################
# monitor section
def do_user_resources_delete(userId):
    """
    Delete all catalog-owned resources for the given account.

    Walks each resource type (subscriptions, registries, evaluations,
    policy bundles, images, archive metadata), fetching up to a fixed batch
    limit per type and force-deleting each record. Failures for one resource
    type are logged and skipped so the remaining types are still processed;
    a later cycle can retry.

    :param userId: account name whose resources should be removed
    :return: tuple of (summary dict with per-type and overall counts, 200)
    """
    # (resource label, per-account fetch function, per-record delete function)
    cleanup_plan = [
        (
            "subscriptions",
            db.db_subscriptions.get_all_byuserId,
            catalog_impl.do_subscription_delete,
        ),
        ("registries", db.db_registries.get_byuserId, catalog_impl.do_registry_delete),
        (
            "evaluations",
            db.db_policyeval.get_all_byuserId,
            catalog_impl.do_evaluation_delete,
        ),
        (
            "policybundles",
            db.db_policybundle.get_all_byuserId,
            catalog_impl.do_policy_delete,
        ),
        ("images", db.db_catalog_image.get_all_byuserId, catalog_impl.do_image_delete),
        (
            "archive",
            db.db_archivemetadata.list_all_byuserId,
            catalog_impl.do_archive_delete,
        ),
    ]

    # cap the number of records handled per type per invocation
    batch_limit = 2048

    summary = {}
    grand_total = 0
    grand_deleted = 0

    for label, fetch, remove in cleanup_plan:
        removed_count = 0
        record_count = 0
        try:
            with db.session_scope() as dbsession:
                matches = fetch(userId, session=dbsession, limit=batch_limit)
                record_count = len(matches)
                for match in matches:
                    remove(userId, match, dbsession, force=True)
                    removed_count += 1

            # only recorded when the whole type succeeded without raising
            summary["total_{}".format(label)] = record_count
            summary["total_{}_deleted".format(label)] = removed_count
            grand_total += record_count
            grand_deleted += removed_count
            if record_count or removed_count:
                logger.debug(
                    "deleted {} / {} {} records for user {}".format(
                        removed_count, record_count, label, userId
                    )
                )
        except Exception as err:
            # best-effort: keep going with the next resource type
            logger.warn(
                "failed to delete resources in {} for user {}, will continue and try again - exception: {}".format(
                    label, userId, err
                )
            )

    summary["all_total"] = grand_total
    summary["all_deleted"] = grand_deleted
    return summary, 200
def handle_account_resource_cleanup(*args, **kwargs):
    """
    Monitor task: garbage-collect resources for accounts pending deletion.

    Iterates over all accounts in the 'deleting' state and deletes the
    resources they own (via do_user_resources_delete). Once an account has
    zero remaining resources, the account record itself is deleted.

    Expects kwargs['mythread'] to be the monitor thread-state dict with a
    'taskType' key; the handler result is stored back in
    mythread['last_return'] and a runtime metric is emitted.

    :return: True always (the monitor framework keeps the task scheduled)
    """
    watcher = str(kwargs["mythread"]["taskType"])
    handler_success = True
    timer = time.time()
    logger.debug("FIRING: " + str(watcher))
    try:
        # iterate over all deleted account records, and perform resource cleanup for that account. If there are no longer any resources associated with the account id, then finally delete the account record itself
        with db.session_scope() as dbsession:
            mgr = manager_factory.for_session(dbsession)
            accounts = mgr.list_accounts(
                with_state=AccountStates.deleting, include_service=False
            )

        for account in accounts:
            userId = account["name"]
            logger.debug(
                "Inspecting account {} for resource cleanup tasks".format(userId)
            )
            try:
                return_object, httpcode = do_user_resources_delete(userId)
                logger.debug(
                    "Resources for deleted account cleaned-up: {} - {}".format(
                        return_object, httpcode
                    )
                )
                if (
                    return_object.get("all_total", None) == 0
                    and return_object.get("all_deleted", None) == 0
                ):
                    # nothing left owned by this account - safe to drop the account record
                    logger.debug(
                        "Resources for pending deleted user {} cleared - deleting account".format(
                            userId
                        )
                    )
                    with db.session_scope() as session:
                        mgr = manager_factory.for_session(session)
                        mgr.delete_account(userId)
                else:
                    # some resources remain (or deletes failed); retry next cycle
                    logger.debug(
                        "resources for pending deleted user {} not entirely cleared this cycle".format(
                            userId
                        )
                    )
            except Exception as err:
                raise Exception(
                    "failed to delete user {} resources - exception: {}".format(
                        userId, err
                    )
                )
    except Exception as err:
        # BUGFIX: mark the handler as failed so last_return and the metric
        # below report the failure instead of always claiming success
        handler_success = False
        logger.warn("failure in handler - exception: " + str(err))

    logger.debug("FIRING DONE: " + str(watcher))
    try:
        kwargs["mythread"]["last_return"] = handler_success
    except Exception:
        pass

    # emit a runtime metric labeled with the actual handler outcome
    if anchore_engine.subsys.metrics.is_enabled() and handler_success:
        anchore_engine.subsys.metrics.summary_observe(
            "anchore_monitor_runtime_seconds",
            time.time() - timer,
            function=watcher,
            status="success",
        )
    else:
        anchore_engine.subsys.metrics.summary_observe(
            "anchore_monitor_runtime_seconds",
            time.time() - timer,
            function=watcher,
            status="fail",
        )

    return True
def handle_vulnerability_scan(*args, **kwargs):
    """
    Monitor task: run vulnerability scans for actively subscribed tags.

    For each enabled account, collects active 'vuln_update' subscriptions,
    resolves each subscription to a (registry/repo/tag) filter plus the set
    of digests recorded in the subscription value, and triggers a
    vulnerability scan for every fully-analyzed image matching the filter
    whose digest is in that set (plus the latest digest for the tag).

    Expects kwargs['mythread'] (monitor thread-state dict) with 'taskType';
    writes the handler result to mythread['last_return'] and emits a
    runtime metric. Always returns True.
    """
    global feed_sync_updated
    watcher = str(kwargs["mythread"]["taskType"])
    handler_success = True
    timer = time.time()
    logger.debug("FIRING: " + str(watcher))
    try:
        # skip the whole cycle if the policy engine is not up yet
        all_ready = anchore_engine.clients.services.common.check_services_ready(
            ["policy_engine"]
        )
        if not all_ready:
            logger.debug(
                "FIRING DONE: feed syncer (skipping due to required services not being available)"
            )
            try:
                kwargs["mythread"]["last_return"] = False
            except:
                pass
            return True

        with db.session_scope() as dbsession:
            mgr = manager_factory.for_session(dbsession)
            accounts = mgr.list_accounts(
                with_state=AccountStates.enabled, include_service=False
            )

        for account in accounts:
            userId = account["name"]

            # vulnerability scans
            doperform = False
            vuln_subs = []
            for subscription_type in ["vuln_update"]:
                dbfilter = {"subscription_type": subscription_type}
                with db.session_scope() as dbsession:
                    subscription_records = db_subscriptions.get_byfilter(
                        userId, session=dbsession, **dbfilter
                    )

                for subscription_record in subscription_records:
                    if subscription_record["active"]:
                        # resolve the subscription key to registry/repo/tag parts
                        # (no registry round-trip: registry_lookup=False)
                        image_info = anchore_engine.common.images.get_image_info(
                            userId,
                            "docker",
                            subscription_record["subscription_key"],
                            registry_lookup=False,
                            registry_creds=(None, None),
                        )
                        dbfilter = {
                            "registry": image_info["registry"],
                            "repo": image_info["repo"],
                            "tag": image_info["tag"],
                        }
                        # de-duplicate (filter, subscription_value) pairs
                        if (
                            dbfilter,
                            subscription_record["subscription_value"],
                        ) not in vuln_subs:
                            vuln_subs.append(
                                (dbfilter, subscription_record["subscription_value"])
                            )

            for (dbfilter, value) in vuln_subs:
                with db.session_scope() as dbsession:
                    image_records = db_catalog_image.get_byimagefilter(
                        userId,
                        "docker",
                        dbfilter=dbfilter,
                        onlylatest=False,
                        session=dbsession,
                    )

                # the subscription value may carry an explicit digest list
                if value:
                    try:
                        subscription_value = json.loads(value)
                        digests = set(subscription_value["digests"])
                    except Exception as err:
                        digests = set()
                else:
                    digests = set()

                # always add latest version of the image
                if len(image_records) > 0:
                    digests.add(image_records[0]["imageDigest"])
                    current_imageDigest = image_records[0]["imageDigest"]

                for image_record in image_records:
                    # only scan images that completed analysis and are active
                    if image_record["analysis_status"] == taskstate.complete_state(
                        "analyze"
                    ) and image_record["image_status"] == taskstate.base_state(
                        "image_status"
                    ):
                        imageDigest = image_record["imageDigest"]
                        if imageDigest not in digests:
                            continue

                        fulltag = (
                            dbfilter["registry"]
                            + "/"
                            + dbfilter["repo"]
                            + ":"
                            + dbfilter["tag"]
                        )

                        doperform = True
                        if doperform:
                            logger.debug(
                                "calling vuln scan perform: "
                                + str(fulltag)
                                + " : "
                                + str(imageDigest)
                            )
                            with db.session_scope() as dbsession:
                                try:
                                    rc = catalog_impl.perform_vulnerability_scan(
                                        userId,
                                        imageDigest,
                                        dbsession,
                                        scantag=fulltag,
                                        force_refresh=False,
                                        is_current=(imageDigest == current_imageDigest),
                                    )
                                except Exception as err:
                                    logger.warn(
                                        "vulnerability scan failed - exception: "
                                        + str(err)
                                    )

    except Exception as err:
        logger.warn("failure in feed sync handler - exception: " + str(err))

    logger.debug("FIRING DONE: " + str(watcher))
    try:
        kwargs["mythread"]["last_return"] = handler_success
    except:
        pass

    # record runtime metric for this monitor cycle
    if anchore_engine.subsys.metrics.is_enabled() and handler_success:
        anchore_engine.subsys.metrics.summary_observe(
            "anchore_monitor_runtime_seconds",
            time.time() - timer,
            function=watcher,
            status="success",
        )
    else:
        anchore_engine.subsys.metrics.summary_observe(
            "anchore_monitor_runtime_seconds",
            time.time() - timer,
            function=watcher,
            status="fail",
        )

    return True
def handle_service_watcher(*args, **kwargs):
    """
    Long-running monitor loop that tracks health of all registered services.

    On each cycle, every service record in the DB is inspected:
      - this catalog instance's own record is refreshed directly;
      - other services are evaluated by heartbeat age, transitioning them
        through down (> max_service_heartbeat_timer), orphaned
        (> max_service_orphaned_timer), and removed
        (> max_service_cleanup_timer), emitting ServiceDowned /
        ServiceOrphaned / ServiceRemoved events on state transitions.

    Runs forever, sleeping kwargs['mythread']['cycle_timer'] seconds between
    passes; the trailing `return True` is unreachable in practice.
    """
    # global latest_service_records

    cycle_timer = kwargs["mythread"]["cycle_timer"]
    # thresholds (seconds) for down / orphaned / removed transitions
    max_service_heartbeat_timer = 300
    max_service_orphaned_timer = 3600
    max_service_cleanup_timer = 86400

    while True:
        logger.debug("FIRING: service watcher")

        localconfig = anchore_engine.configuration.localconfig.get_config()
        verify = localconfig["internal_ssl_verify"]  # NOTE(review): appears unused here

        with db.session_scope() as dbsession:
            mgr = manager_factory.for_session(dbsession)
            event_account = anchore_engine.configuration.localconfig.ADMIN_ACCOUNT_NAME
            anchore_services = db_services.get_all(session=dbsession)
            # update the global latest service record dict in services.common
            # latest_service_records.update({"service_records": copy.deepcopy(anchore_services)})

            # fields to update each tick:
            #
            # heartbeat (current time)
            # status (true/false)
            # status_message (state of service)
            # short_description(api return)
            #
            for service in anchore_services:
                event = None
                service_update_record = {}

                if (
                    service["servicename"] == "catalog"
                    and service["hostid"] == localconfig["host_id"]
                ):
                    # this is our own record - refresh it from local state
                    status = anchore_engine.subsys.servicestatus.get_status(service)
                    service_update_record.update(
                        {
                            "heartbeat": int(time.time()),
                            "status": True,
                            "status_message": taskstate.complete_state(
                                "service_status"
                            ),
                            "short_description": json.dumps(status),
                        }
                    )
                else:
                    try:
                        try:
                            status = json.loads(service["short_description"])
                        except:
                            status = {"up": False, "available": False}

                        # set to down until the response can be parsed
                        service_update_record["status"] = False
                        service_update_record["status_message"] = taskstate.fault_state(
                            "service_status"
                        )
                        service_update_record[
                            "short_description"
                        ] = "could not get service status description"

                        try:
                            # NOTE: this is where any service-specific decisions based on the 'status' record could happen - now all services are the same
                            if status["up"] and status["available"]:
                                if (
                                    time.time() - service["heartbeat"]
                                    > max_service_heartbeat_timer
                                ):
                                    logger.warn(
                                        "no service heartbeat within allowed time period ({}) for service ({}/{}) - disabling service".format(
                                            max_service_heartbeat_timer,
                                            service["hostid"],
                                            service["servicename"],
                                        )
                                    )
                                    service_update_record[
                                        "short_description"
                                    ] = "no heartbeat from service in ({}) seconds".format(
                                        max_service_heartbeat_timer
                                    )

                                    # Trigger an event to log the down service
                                    event = events.ServiceDowned(
                                        user_id=event_account,
                                        name=service["servicename"],
                                        host=service["hostid"],
                                        url=service["base_url"],
                                        cause="no heartbeat from service in ({}) seconds".format(
                                            max_service_heartbeat_timer
                                        ),
                                    )
                                else:
                                    # healthy: mark up and carry forward status detail
                                    service_update_record["status"] = True
                                    service_update_record[
                                        "status_message"
                                    ] = taskstate.complete_state("service_status")
                                    try:
                                        service_update_record[
                                            "short_description"
                                        ] = json.dumps(status)
                                    except:
                                        service_update_record[
                                            "short_description"
                                        ] = str(status)
                            else:
                                # handle the down state transitions
                                if (
                                    time.time() - service["heartbeat"]
                                    > max_service_cleanup_timer
                                ):
                                    # remove the service entirely
                                    logger.warn(
                                        "no service heartbeat within allowed time period ({}) for service ({}/{}) - removing service".format(
                                            max_service_cleanup_timer,
                                            service["hostid"],
                                            service["servicename"],
                                        )
                                    )
                                    try:
                                        # remove the service record from DB
                                        removed_hostid = service["hostid"]
                                        removed_servicename = service["servicename"]
                                        removed_base_url = service["base_url"]
                                        db_services.delete(
                                            removed_hostid,
                                            removed_servicename,
                                            session=dbsession,
                                        )
                                        # no further update for a deleted record
                                        service_update_record = None

                                        # Trigger an event to log the orphaned service, only on transition
                                        event = events.ServiceRemoved(
                                            user_id=event_account,
                                            name=removed_servicename,
                                            host=removed_hostid,
                                            url=removed_base_url,
                                            cause="no heartbeat from service in ({}) seconds".format(
                                                max_service_cleanup_timer
                                            ),
                                        )
                                    except Exception as err:
                                        logger.warn(
                                            "attempt to remove service {}/{} failed - exception: {}".format(
                                                service.get("hostid"),
                                                service.get("servicename"),
                                                err,
                                            )
                                        )
                                elif (
                                    time.time() - service["heartbeat"]
                                    > max_service_orphaned_timer
                                ):
                                    # transition down service to orphaned
                                    logger.warn(
                                        "no service heartbeat within allowed time period ({}) for service ({}/{}) - orphaning service".format(
                                            max_service_orphaned_timer,
                                            service["hostid"],
                                            service["servicename"],
                                        )
                                    )
                                    service_update_record["status"] = False
                                    service_update_record[
                                        "status_message"
                                    ] = taskstate.orphaned_state("service_status")
                                    service_update_record[
                                        "short_description"
                                    ] = "no heartbeat from service in ({}) seconds".format(
                                        max_service_orphaned_timer
                                    )
                                    if service[
                                        "status_message"
                                    ] != taskstate.orphaned_state("service_status"):
                                        # Trigger an event to log the orphaned service, only on transition
                                        event = events.ServiceOrphaned(
                                            user_id=event_account,
                                            name=service["servicename"],
                                            host=service["hostid"],
                                            url=service["base_url"],
                                            cause="no heartbeat from service in ({}) seconds".format(
                                                max_service_orphaned_timer
                                            ),
                                        )
                        except Exception as err:
                            logger.warn(
                                "could not get/parse service status record for service: - exception: "
                                + str(err)
                            )
                    except Exception as err:
                        logger.warn(
                            "could not get service status: "
                            + str(service)
                            + " : exception: "
                            + str(err)
                            + " : "
                            + str(err.__dict__)
                        )
                        if service_update_record:
                            service_update_record["status"] = False
                            service_update_record[
                                "status_message"
                            ] = taskstate.fault_state("service_status")
                            service_update_record[
                                "short_description"
                            ] = "could not get service status"
                    finally:
                        # persist any transition event in the same session
                        if event:
                            catalog_impl.add_event(event, dbsession)

                if service_update_record:
                    service.update(service_update_record)
                    try:
                        db_services.update_record(service, session=dbsession)
                    except Exception as err:
                        logger.warn("could not update DB: " + str(err))

        logger.debug("FIRING DONE: service watcher")
        try:
            kwargs["mythread"]["last_return"] = True
        except:
            pass

        time.sleep(cycle_timer)
    return True
def handle_repo_watcher(*args, **kwargs):
    """
    Monitor task: discover new tags in repositories with 'repo_update'
    subscriptions and add them to the catalog.

    For each enabled account and each active repo_update subscription, lists
    the repository's current tags from the registry, diffs them against the
    tags recorded in the subscription value, and for each new tag: looks up
    the image in the registry, serializes its manifest, adds/updates the
    catalog image record, and creates per-tag subscriptions (activating the
    types in `autosubscribes`). The subscription value is then updated with
    the successfully-added tag list. Failures raise events (ListTagsFailed,
    ImageRegistryLookupFailed, TagManifestParseFailed) persisted in the
    `finally` block.

    Always returns True; results/metrics are reported via
    kwargs['mythread']['last_return'] and the runtime metric.
    """
    global system_user_auth

    watcher = str(kwargs["mythread"]["taskType"])
    handler_success = True
    timer = time.time()
    logger.debug("FIRING: " + str(watcher))

    with db.session_scope() as dbsession:
        mgr = manager_factory.for_session(dbsession)
        accounts = mgr.list_accounts(
            with_state=AccountStates.enabled, include_service=False
        )

    for account in accounts:
        userId = account["name"]

        dbfilter = {}
        with db.session_scope() as dbsession:
            dbfilter["subscription_type"] = "repo_update"
            subscription_records = db_subscriptions.get_byfilter(
                userId, session=dbsession, **dbfilter
            )

            registry_creds = db_registries.get_byuserId(userId, session=dbsession)
            try:
                catalog_impl.refresh_registry_creds(registry_creds, dbsession)
            except Exception as err:
                logger.warn(
                    "failed to refresh registry credentials - exception: " + str(err)
                )

        for subscription_record in subscription_records:
            if not subscription_record["active"]:
                continue

            event = None
            subscription_id = subscription_record["subscription_id"]
            try:
                regrepo = subscription_record["subscription_key"]
                # normalize the subscription value, filling in defaults
                if subscription_record["subscription_value"]:
                    subscription_value = json.loads(
                        subscription_record["subscription_value"]
                    )
                    if "autosubscribe" not in subscription_value:
                        subscription_value["autosubscribe"] = False
                    if "lookuptag" not in subscription_value:
                        subscription_value["lookuptag"] = "latest"
                else:
                    subscription_value = {"autosubscribe": False, "lookuptag": "latest"}

                stored_repotags = subscription_value.get("repotags", [])
                fulltag = regrepo + ":" + subscription_value.get("lookuptag", "latest")
                image_info = anchore_engine.common.images.get_image_info(
                    userId,
                    "docker",
                    fulltag,
                    registry_lookup=False,
                    registry_creds=(None, None),
                )

                # List tags
                try:
                    curr_repotags = docker_registry.get_repo_tags(
                        userId, image_info, registry_creds=registry_creds
                    )
                except AnchoreException as e:
                    event = events.ListTagsFailed(
                        user_id=userId,
                        registry=image_info.get("registry", None),
                        repository=image_info.get("repo", None),
                        error=e.to_dict(),
                    )
                    raise e

                # subscription types to auto-activate for newly found tags
                autosubscribes = ["analysis_update"]
                if subscription_value["autosubscribe"]:
                    autosubscribes.append("tag_update")

                # tags present in the registry but not yet recorded
                repotags = set(curr_repotags).difference(set(stored_repotags))
                if repotags:
                    logger.debug(
                        "new tags to watch in repo ("
                        + str(regrepo)
                        + "): "
                        + str(repotags)
                    )
                    added_repotags = stored_repotags

                    for repotag in repotags:
                        try:
                            fulltag = (
                                image_info["registry"]
                                + "/"
                                + image_info["repo"]
                                + ":"
                                + repotag
                            )
                            logger.debug("found new tag in repo: " + str(fulltag))
                            try:
                                new_image_info = (
                                    anchore_engine.common.images.get_image_info(
                                        userId,
                                        "docker",
                                        fulltag,
                                        registry_lookup=True,
                                        registry_creds=registry_creds,
                                    )
                                )
                            except Exception as err:
                                event = events.ImageRegistryLookupFailed(
                                    user_id=userId,
                                    image_pull_string=fulltag,
                                    data=err.__dict__,
                                )
                                raise err

                            manifest = None
                            try:
                                if "manifest" in new_image_info:
                                    try:
                                        manifest = json.dumps(
                                            new_image_info["manifest"]
                                        )
                                    except Exception as err:
                                        raise TagManifestParseError(
                                            cause=err,
                                            tag=fulltag,
                                            manifest=new_image_info["manifest"],
                                            msg="Failed to serialize manifest into JSON formatted string",
                                        )
                                else:
                                    raise TagManifestNotFoundError(
                                        tag=fulltag,
                                        msg="No manifest from get_image_info",
                                    )
                            except AnchoreException as e:
                                event = events.TagManifestParseFailed(
                                    user_id=userId, tag=fulltag, error=e.to_dict()
                                )
                                raise

                            with db.session_scope() as dbsession:
                                # One last check for repo subscription status before adding image
                                if not db_subscriptions.is_active(
                                    userId, subscription_id, dbsession
                                ):
                                    logger.debug(
                                        "Aborting repo scan for %s for account %s, subscription is no longer active"
                                        % (regrepo, userId)
                                    )
                                    # abandons the remaining new tags for this repo
                                    break

                                logger.debug(
                                    "adding/updating image from repo scan "
                                    + str(new_image_info["fulltag"])
                                )

                                # add the image
                                image_records = catalog_impl.add_or_update_image(
                                    dbsession,
                                    userId,
                                    new_image_info["imageId"],
                                    tags=[new_image_info["fulltag"]],
                                    digests=[new_image_info["fulldigest"]],
                                    parentdigest=new_image_info.get(
                                        "parentdigest", None
                                    ),
                                    manifest=manifest,
                                )
                                # add the subscription records with the configured default activations
                                for stype in anchore_engine.common.subscription_types:
                                    activate = False
                                    if stype == "repo_update":
                                        continue
                                    elif stype in autosubscribes:
                                        activate = True
                                    db_subscriptions.add(
                                        userId,
                                        new_image_info["fulltag"],
                                        stype,
                                        {"active": activate},
                                        session=dbsession,
                                    )

                            added_repotags.append(repotag)
                        except Exception as err:
                            # per-tag best effort: keep processing remaining tags
                            logger.warn(
                                "could not add discovered tag from repo ("
                                + str(fulltag)
                                + ") - exception: "
                                + str(err)
                            )

                    # update the subscription record with the latest successfully added image tags
                    with db.session_scope() as dbsession:
                        subscription_value["repotags"] = added_repotags
                        subscription_value["tagcount"] = len(added_repotags)
                        db_subscriptions.update_subscription_value(
                            account=userId,
                            subscription_id=subscription_id,
                            subscription_value=json.dumps(subscription_value),
                            session=dbsession,
                        )
                else:
                    logger.debug(
                        "no new images in watched repo (" + str(regrepo) + "): skipping"
                    )
            except Exception as err:
                logger.warn(
                    "failed to process repo_update subscription - exception: "
                    + str(err)
                )
            finally:
                # persist any event raised during this subscription's processing
                if event:
                    with db.session_scope() as dbsession:
                        catalog_impl.add_event(event, dbsession)

    logger.debug("FIRING DONE: " + str(watcher))
    try:
        kwargs["mythread"]["last_return"] = handler_success
    except:
        pass

    # record runtime metric for this monitor cycle
    if anchore_engine.subsys.metrics.is_enabled() and handler_success:
        anchore_engine.subsys.metrics.summary_observe(
            "anchore_monitor_runtime_seconds",
            time.time() - timer,
            function=watcher,
            status="success",
        )
    else:
        anchore_engine.subsys.metrics.summary_observe(
            "anchore_monitor_runtime_seconds",
            time.time() - timer,
            function=watcher,
            status="fail",
        )

    return True
def handle_image_watcher(*args, **kwargs):
    """
    Monitor task: detect updated digests behind watched tags ('tag_update'
    subscriptions) and queue new images for analysis.

    For each enabled (non-service) account, builds the list of subscribed
    tags, pings the account's registries, and for each tag: fetches the
    latest registry info, persists manifest/parent-manifest documents to the
    object store if missing, and checks whether the tag's current digest is
    already in the catalog. When it is not, the prior digests/annotations
    for the tag are collected, the new image is added via
    catalog_impl.add_or_update_image, and a 'tag_update' notification is
    queued. Failure events are persisted in the `finally` block.

    Always returns True; result is reported via
    kwargs['mythread']['last_return'] and the runtime metric.
    """
    global system_user_auth

    watcher = str(kwargs["mythread"]["taskType"])
    handler_success = True
    timer = time.time()
    logger.debug("FIRING: " + str(watcher))

    obj_mgr = object_store.get_manager()

    with db.session_scope() as dbsession:
        mgr = manager_factory.for_session(dbsession)
        accounts = mgr.list_accounts(
            with_state=AccountStates.enabled, include_service=False
        )

    for account in accounts:
        userId = account["name"]

        if account["type"] == AccountTypes.service:  # userId == 'anchore-system':
            continue

        with db.session_scope() as dbsession:
            dbfilter = {}
            dbfilter["subscription_type"] = "tag_update"
            subscription_records = db_subscriptions.get_byfilter(
                userId, session=dbsession, **dbfilter
            )

            registry_creds = db_registries.get_byuserId(userId, session=dbsession)
            try:
                catalog_impl.refresh_registry_creds(registry_creds, dbsession)
            except Exception as err:
                logger.warn(
                    "failed to refresh registry credentials - exception: " + str(err)
                )

        # unique list of tags with an active tag_update subscription
        alltags = []
        for subscription_record in subscription_records:
            if not subscription_record["active"]:
                continue

            try:
                fulltag = subscription_record["subscription_key"]
                if fulltag not in alltags:
                    alltags.append(fulltag)
            except Exception as err:
                logger.warn(
                    "problem creating taglist for image watcher - exception: "
                    + str(err)
                )

        # mark unreachable registries so later lookups can account for it
        for registry_record in registry_creds:
            try:
                registry_status = docker_registry.ping_docker_registry(registry_record)
            except Exception as err:
                registry_record["record_state_key"] = "auth_failure"
                registry_record["record_state_val"] = str(int(time.time()))
                logger.warn("registry ping failed - exception: " + str(err))

        logger.debug("checking tags for update: " + str(userId) + " : " + str(alltags))
        for fulltag in alltags:
            event = None
            try:
                logger.debug("checking image latest info from registry: " + fulltag)

                try:
                    image_info = anchore_engine.common.images.get_image_info(
                        userId,
                        "docker",
                        fulltag,
                        registry_lookup=True,
                        registry_creds=registry_creds,
                    )
                except Exception as err:
                    event = events.ImageRegistryLookupFailed(
                        user_id=userId, image_pull_string=fulltag, data=err.__dict__
                    )
                    raise err

                logger.spew("checking image: got registry info: " + str(image_info))

                manifest = None
                try:
                    if "manifest" in image_info:
                        try:
                            manifest = json.dumps(image_info["manifest"])
                        except Exception as err:
                            raise TagManifestParseError(
                                cause=err,
                                tag=fulltag,
                                manifest=image_info["manifest"],
                                msg="Failed to serialize manifest into JSON formatted string",
                            )
                    else:
                        raise TagManifestNotFoundError(
                            tag=fulltag, msg="No manifest from get_image_info"
                        )
                except AnchoreException as e:
                    event = events.TagManifestParseFailed(
                        user_id=userId, tag=fulltag, error=e.to_dict()
                    )
                    raise

                parent_manifest = json.dumps(image_info.get("parentmanifest", {}))

                try:
                    dbfilter = {
                        "registry": image_info["registry"],
                        "repo": image_info["repo"],
                        "tag": image_info["tag"],
                        "digest": image_info["digest"],
                    }
                except Exception as err:
                    raise Exception(
                        "could not prepare db filter for complete lookup check - exception: "
                        + str(err)
                    )

                # ensure the manifest document exists in the object store
                try:
                    stored_manifest = json.loads(
                        obj_mgr.get_document(
                            userId, "manifest_data", image_info["digest"]
                        )
                    )
                    if not stored_manifest:
                        raise Exception("stored manifest is empty")
                except Exception as err:
                    logger.debug(
                        "found empty/invalid stored manifest, storing new: " + str(err)
                    )
                    rc = obj_mgr.put_document(
                        userId, "manifest_data", image_info["digest"], manifest
                    )

                # ensure the parent manifest document exists in the object store
                try:
                    stored_parent_manifest = json.loads(
                        obj_mgr.get_document(
                            userId, "parent_manifest_data", image_info["digest"]
                        )
                    )
                    if not stored_parent_manifest:
                        raise Exception("stored parent manifest is empty")
                except Exception as err:
                    logger.debug(
                        "found empty/invalid stored parent manifest, storing new: "
                        + str(err)
                    )
                    rc = obj_mgr.put_document(
                        userId,
                        "parent_manifest_data",
                        image_info["digest"],
                        parent_manifest,
                    )

                logger.debug(
                    "checking image: looking up image in db using dbfilter: "
                    + str(dbfilter)
                )
                with db.session_scope() as dbsession:
                    record = db_catalog_image.get_byimagefilter(
                        userId, "docker", dbfilter, session=dbsession
                    )
                if record:
                    logger.debug(
                        "checking image: found match, no update, nothing to do: "
                        + str(fulltag)
                    )
                else:
                    logger.info(
                        "checking image: found latest digest for tag is not in DB: should update and queue for analysis: tag="
                        + str(fulltag)
                        + " latest_digest="
                        + str(dbfilter["digest"])
                    )

                    # get the set of existing digests
                    try:
                        last_dbfilter = {}
                        last_dbfilter.update(dbfilter)
                        last_dbfilter.pop("digest", None)

                        last_digests = []
                        last_annotations = {}
                        is_latest = True
                        with db.session_scope() as dbsession:
                            last_image_records = db_catalog_image.get_byimagefilter(
                                userId, "docker", last_dbfilter, session=dbsession
                            )
                            if last_image_records:
                                for last_image_record in last_image_records:
                                    imageDigest = last_image_record["imageDigest"]
                                    for image_detail in last_image_record["image_detail"]:
                                        last_digests.append(image_detail["digest"])

                                    # only do this (bring forward annotations) for the first found digest (last digest associated with tag)
                                    if is_latest:
                                        if (
                                            not last_annotations
                                            and last_image_record["annotations"]
                                        ):
                                            try:
                                                if last_image_record.get(
                                                    "annotations", "{}"
                                                ):
                                                    last_annotations.update(
                                                        json.loads(
                                                            last_image_record.get(
                                                                "annotations", "{}"
                                                            )
                                                        )
                                                    )
                                            except:
                                                pass
                                        is_latest = False
                    except Exception as err:
                        logger.error(str(err))

                    # add and store the new image
                    with db.session_scope() as dbsession:
                        logger.debug(
                            "adding new image from tag watcher " + str(image_info)
                        )
                        image_records = catalog_impl.add_or_update_image(
                            dbsession,
                            userId,
                            image_info["imageId"],
                            tags=[image_info["fulltag"]],
                            digests=[image_info["fulldigest"]],
                            parentdigest=image_info.get("parentdigest", None),
                            manifest=manifest,
                            parent_manifest=parent_manifest,
                            annotations=last_annotations,
                        )
                        if image_records:
                            image_record = image_records[0]
                        else:
                            image_record = {}

                    logger.info("checking image: added new image: " + str(image_record))
                    new_digests = [image_info["digest"]]

                    # construct the notification and queue
                    try:
                        npayload = {
                            "last_eval": last_digests,
                            "curr_eval": new_digests,
                        }
                        if last_annotations:
                            npayload["annotations"] = last_annotations

                        rc = notifications.queue_notification(
                            userId, fulltag, "tag_update", npayload
                        )
                        logger.debug("queued image tag update notification: " + fulltag)

                        # inobj = {
                        #    'userId': userId,
                        #    'subscription_key':fulltag,
                        #    'notificationId': str(uuid.uuid4()),
                        #    'last_eval':last_digests,
                        #    'curr_eval':new_digests,
                        # }
                        # if not simplequeue.is_inqueue(system_user_auth, 'tag_update', inobj):
                        #    qobj = simplequeue.enqueue(system_user_auth, 'tag_update', inobj)
                        #    logger.debug("queued image tag update notification: " + fulltag)

                    except Exception as err:
                        logger.error(
                            "failed to queue tag update notification - exception: "
                            + str(err)
                        )
                        raise err

            except Exception as err:
                logger.error("failed to check/update image - exception: " + str(err))
            finally:
                # persist any event raised during this tag's processing
                if event:
                    with db.session_scope() as dbsession:
                        catalog_impl.add_event(event, dbsession)

    logger.debug("FIRING DONE: " + str(watcher))
    try:
        kwargs["mythread"]["last_return"] = handler_success
    except:
        pass

    # record runtime metric for this monitor cycle
    if anchore_engine.subsys.metrics.is_enabled() and handler_success:
        anchore_engine.subsys.metrics.summary_observe(
            "anchore_monitor_runtime_seconds",
            time.time() - timer,
            function=watcher,
            status="success",
        )
    else:
        anchore_engine.subsys.metrics.summary_observe(
            "anchore_monitor_runtime_seconds",
            time.time() - timer,
            function=watcher,
            status="fail",
        )

    return True
def check_feedmeta_update(dbsession):
    """
    Return the module-global `feed_sync_updated` flag.

    The `dbsession` parameter is accepted for interface symmetry with other
    check_* helpers but is not used here.
    """
    global feed_sync_updated
    return feed_sync_updated
def check_policybundle_update(userId, dbsession):
    """
    Report whether the account's active policy bundle changed since the
    last check.

    Tracks the last-seen bundle timestamp per account in the module-global
    `bundle_user_last_updated` dict. Returns True (i.e. "treat as updated")
    when the bundle's last_updated differs from the cached value, when the
    account has no active bundle, or when the lookup/parsing fails; returns
    False only when the timestamp is unchanged.

    :param userId: account name to check
    :param dbsession: active DB session used for the policy lookup
    :return: bool - True when a (re-)evaluation should be queued
    """
    global bundle_user_last_updated

    is_updated = True

    try:
        last_bundle_update = 0
        active_policy_record = db_policybundle.get_active_policy(
            userId, session=dbsession
        )
        if not active_policy_record:
            # no bundle to compare against - err on the side of queueing
            logger.warn(
                "user has no active policy - queueing just in case" + str(userId)
            )
            return is_updated

        last_bundle_update = active_policy_record["last_updated"]

        # seed the cache on first sight of this account
        if userId not in bundle_user_last_updated:
            bundle_user_last_updated[userId] = last_bundle_update

        if last_bundle_update == bundle_user_last_updated[userId]:
            logger.debug("no bundle update detected since last cycle")
            is_updated = False
        else:
            logger.debug("bundle update detected since last cycle")
            bundle_user_last_updated[userId] = last_bundle_update
            is_updated = True
    except Exception as err:
        logger.warn(
            "failed to get/parse active policy bundle for user ("
            + str(userId)
            + ") - exception: "
            + str(err)
        )
        # reset the cache so the next successful lookup registers as updated
        bundle_user_last_updated[userId] = 0
        is_updated = True

    return is_updated
def handle_policyeval(*args, **kwargs):
    """
    Monitor task: run policy evaluations for actively subscribed tags.

    For each enabled account, collects active 'policy_eval' subscriptions,
    resolves each to a (registry/repo/tag) filter plus the digest set in the
    subscription value, and performs a policy evaluation for every
    fully-analyzed, active image matching the filter whose digest is in that
    set (plus the latest digest for the tag). Structure mirrors
    handle_vulnerability_scan.

    Expects kwargs['mythread'] with 'taskType'; writes the result to
    mythread['last_return'] and emits a runtime metric. Always returns True.
    """
    global system_user_auth, bundle_user_is_updated, feed_sync_updated
    watcher = str(kwargs["mythread"]["taskType"])
    handler_success = True
    timer = time.time()
    logger.debug("FIRING: " + str(watcher))
    try:
        # skip the whole cycle if required services are not up yet
        all_ready = anchore_engine.clients.services.common.check_services_ready(
            ["policy_engine", "simplequeue"]
        )
        if not all_ready:
            logger.debug(
                "FIRING DONE: policy eval (skipping due to required services not being available)"
            )
            try:
                kwargs["mythread"]["last_return"] = False
            except:
                pass
            return True

        with db.session_scope() as dbsession:
            feed_updated = check_feedmeta_update(dbsession)
            mgr = manager_factory.for_session(dbsession)
            accounts = mgr.list_accounts(
                with_state=AccountStates.enabled, include_service=False
            )

        for account in accounts:
            userId = account["name"]

            # policy evaluations
            doperform = False
            policy_subs = []
            for subscription_type in ["policy_eval"]:
                dbfilter = {"subscription_type": subscription_type}
                with db.session_scope() as dbsession:
                    subscription_records = db_subscriptions.get_byfilter(
                        userId, session=dbsession, **dbfilter
                    )

                for subscription_record in subscription_records:
                    if subscription_record["active"]:
                        # resolve the subscription key to registry/repo/tag parts
                        # (no registry round-trip: registry_lookup=False)
                        image_info = anchore_engine.common.images.get_image_info(
                            userId,
                            "docker",
                            subscription_record["subscription_key"],
                            registry_lookup=False,
                            registry_creds=(None, None),
                        )
                        dbfilter = {
                            "registry": image_info["registry"],
                            "repo": image_info["repo"],
                            "tag": image_info["tag"],
                        }
                        # de-duplicate (filter, subscription_value) pairs
                        if (
                            dbfilter,
                            subscription_record["subscription_value"],
                        ) not in policy_subs:
                            policy_subs.append(
                                (dbfilter, subscription_record["subscription_value"])
                            )

            for (dbfilter, value) in policy_subs:
                with db.session_scope() as dbsession:
                    image_records = db_catalog_image.get_byimagefilter(
                        userId,
                        "docker",
                        dbfilter=dbfilter,
                        onlylatest=False,
                        session=dbsession,
                    )

                # the subscription value may carry an explicit digest list
                if value:
                    try:
                        subscription_value = json.loads(value)
                        digests = set(subscription_value["digests"])
                    except Exception as err:
                        digests = set()
                else:
                    digests = set()

                # always add latest version of the image
                if len(image_records) > 0:
                    digests.add(image_records[0]["imageDigest"])

                for image_record in image_records:
                    # only evaluate images that completed analysis and are active
                    if image_record["analysis_status"] == taskstate.complete_state(
                        "analyze"
                    ) and image_record["image_status"] == taskstate.base_state(
                        "image_status"
                    ):
                        imageDigest = image_record["imageDigest"]
                        if imageDigest not in digests:
                            continue

                        fulltag = (
                            dbfilter["registry"]
                            + "/"
                            + dbfilter["repo"]
                            + ":"
                            + dbfilter["tag"]
                        )

                        # TODO - checks to avoid performing eval if nothing has changed
                        doperform = True
                        if doperform:
                            logger.debug(
                                "calling policy eval perform: "
                                + str(fulltag)
                                + " : "
                                + str(imageDigest)
                            )
                            with db.session_scope() as dbsession:
                                try:
                                    rc = catalog_impl.perform_policy_evaluation(
                                        userId, imageDigest, dbsession, evaltag=fulltag
                                    )
                                except Exception as err:
                                    logger.warn(
                                        "policy evaluation failed - exception: "
                                        + str(err)
                                    )

    except Exception as err:
        logger.warn(
            "failure in policy eval / vuln scan handler - exception: " + str(err)
        )

    logger.debug("FIRING DONE: " + str(watcher))
    try:
        kwargs["mythread"]["last_return"] = handler_success
    except:
        pass

    # record runtime metric for this monitor cycle
    if anchore_engine.subsys.metrics.is_enabled() and handler_success:
        anchore_engine.subsys.metrics.summary_observe(
            "anchore_monitor_runtime_seconds",
            time.time() - timer,
            function=watcher,
            status="success",
        )
    else:
        anchore_engine.subsys.metrics.summary_observe(
            "anchore_monitor_runtime_seconds",
            time.time() - timer,
            function=watcher,
            status="fail",
        )

    return True
def handle_analyzer_queue(*args, **kwargs):
    """
    Periodic watcher handler that feeds the 'images_to_analyze' simplequeue.

    Per account it (1) resets images stuck in the 'analyzing' working state
    longer than the configured timeout, (2) enqueues images in the base
    'not_analyzed' state that are not already queued, and (3) optionally
    rebalances queue ids so one task per account is prioritized ('fair
    share' scheduling across accounts).

    :param kwargs: expects kwargs["mythread"] with the watcher's bookkeeping dict
    :return: True always (monitor loop convention)
    """
    global system_user_auth

    watcher = str(kwargs["mythread"]["taskType"])
    handler_success = True

    timer = time.time()
    logger.debug("FIRING: " + str(watcher))

    localconfig = anchore_engine.configuration.localconfig.get_config()
    obj_mgr = object_store.get_manager()

    # max seconds an image may sit in the working state before being requeued
    max_working_time = 36000
    try:
        max_working_time = int(localconfig["image_analyze_timeout_seconds"])
    except:
        max_working_time = 36000

    # fair-share queueing is on unless the config value is explicitly "false"
    fair_share_enabled = True
    try:
        if (
            str(
                localconfig.get("services", {})
                .get("catalog", {})
                .get("fair_share_image_analysis_queueing", "True")
            ).lower()
            == "false"
        ):
            fair_share_enabled = False
    except:
        fair_share_enabled = True

    # skip the whole cycle (recording last_return=False) until dependencies are up
    all_ready = anchore_engine.clients.services.common.check_services_ready(
        ["policy_engine", "simplequeue"]
    )
    if not all_ready:
        logger.debug(
            "FIRING DONE: analyzer queuer (skipping due to required services not being available)"
        )
        try:
            kwargs["mythread"]["last_return"] = False
        except:
            pass
        return True

    with db.session_scope() as dbsession:
        mgr = manager_factory.for_session(dbsession)
        accounts = mgr.list_accounts(include_service=False)

    q_client = internal_client_for(SimpleQueueClient, userId=None)

    # per-account bookkeeping consumed by _perform_queue_rebalance() below
    queue_rebalance = {}
    highest_neg_queueId = -1 * (
        1024 * 1000
    )  # choose a high value in the negative space as a starting point - this needs to be a value that fits when stored as 'big integer' SQL type
    for account in accounts:
        userId = account["name"]
        if account["type"] == AccountTypes.service:
            continue

        if userId not in queue_rebalance:
            queue_rebalance[userId] = {}

        # do this in passes, for each analysis_status state
        with db.session_scope() as dbsession:
            dbfilter = {
                "analysis_status": taskstate.working_state("analyze"),
                "image_status": taskstate.base_state("image_status"),
            }
            workingstate_image_records = db_catalog_image.get_byfilter(
                userId, session=dbsession, **dbfilter
            )

        # first, evaluate images looking for those that have been in working state for too long and reset
        for image_record in workingstate_image_records:
            imageDigest = image_record["imageDigest"]
            if image_record["image_status"] == taskstate.complete_state("image_status"):
                state_time = int(time.time()) - image_record["last_updated"]
                logger.debug(
                    "image in working state for ("
                    + str(state_time)
                    + ")s - "
                    + str(imageDigest)
                )
                if state_time > max_working_time:
                    logger.warn(
                        "image has been in working state ("
                        + str(taskstate.working_state("analyze"))
                        + ") for over ("
                        + str(max_working_time)
                        + ") seconds - resetting and requeueing for analysis"
                    )
                    image_record["analysis_status"] = taskstate.reset_state("analyze")
                    with db.session_scope() as dbsession:
                        db_catalog_image.update_record(image_record, session=dbsession)

        # next, look for any image in base state (not_analyzed) for queuing
        with db.session_scope() as dbsession:
            dbfilter = {
                "analysis_status": taskstate.base_state("analyze"),
                "image_status": taskstate.base_state("image_status"),
            }
            basestate_image_records = db_catalog_image.get_byfilter(
                userId, session=dbsession, **dbfilter
            )

        for basestate_image_record in basestate_image_records:
            imageDigest = basestate_image_record["imageDigest"]
            image_record = basestate_image_record

            if image_record["image_status"] == taskstate.complete_state("image_status"):
                logger.debug(
                    "image check of queue status for digest {}".format(imageDigest)
                )
                if image_record["analysis_status"] == taskstate.base_state("analyze"):
                    logger.debug("image in base state - " + str(imageDigest))
                    # TODO: This is expensive once the queue gets longer... need to find a more efficient way to check status
                    # The right way is keep a msg/task ID in the db record so we can do a quick lookup in the queue for the id rather than full content match
                    try:
                        manifest = obj_mgr.get_document(
                            userId, "manifest_data", image_record["imageDigest"]
                        )
                    except Exception as err:
                        logger.debug("failed to get manifest - {}".format(str(err)))
                        manifest = {}
                    try:
                        parent_manifest = obj_mgr.get_document(
                            userId, "parent_manifest_data", image_record["imageDigest"]
                        )
                    except Exception as err:
                        # parent manifest is optional; queue with an empty one
                        parent_manifest = {}

                    # the queue message itself; full-content match is used for dedup
                    qobj = {}
                    qobj["userId"] = userId
                    qobj["imageDigest"] = image_record["imageDigest"]
                    qobj["manifest"] = manifest
                    qobj["parent_manifest"] = parent_manifest

                    try:
                        q_record = q_client.is_inqueue("images_to_analyze", qobj)
                        if not q_record:
                            # queue image for analysis
                            priority = False
                            logger.debug(
                                "queued image for analysis (priority={}): {}".format(
                                    priority, str(imageDigest)
                                )
                            )
                            qobj = q_client.enqueue(
                                "images_to_analyze", qobj, forcefirst=priority
                            )
                        else:
                            logger.debug("image already queued")
                            # track and store the account's lowest queueId in the task queue, as well as the global highest negative space queueId across all accounts
                            try:
                                lowest_queueId = queue_rebalance[userId].get(
                                    "lowest_queueId", None
                                )
                                if (
                                    not lowest_queueId
                                    or q_record.get("queueId") < lowest_queueId
                                ):
                                    queue_rebalance[userId][
                                        "lowest_queueId"
                                    ] = q_record.get("queueId")
                                if (
                                    q_record.get("queueId") < 0
                                    and q_record.get("queueId") >= highest_neg_queueId
                                ):
                                    highest_neg_queueId = q_record.get("queueId")
                            except Exception as err:
                                logger.error(
                                    "failed to store image current queueID - excpetion: {}".format(
                                        err
                                    )
                                )
                    except Exception as err:
                        logger.error(
                            "failed to check/queue image for analysis - exception: "
                            + str(err)
                        )

    # promote queued tasks into the analysis queue such that one image from each account is prioritized, to implement a simple 'fair share' across accounts
    if fair_share_enabled:
        try:
            queue_id_updates = _perform_queue_rebalance(
                queue_rebalance, highest_neg_queueId
            )
            for src, dst in queue_id_updates:
                q_client.update_queueid(
                    "images_to_analyze", src_queueId=src, dst_queueId=dst
                )
        except:
            logger.exception("Ignoring errors rebalancing analysis queue")

    logger.debug("FIRING DONE: " + str(watcher))
    try:
        kwargs["mythread"]["last_return"] = handler_success
    except:
        pass

    # record handler runtime; handler_success is always True on this path
    if anchore_engine.subsys.metrics.is_enabled() and handler_success:
        anchore_engine.subsys.metrics.summary_observe(
            "anchore_monitor_runtime_seconds",
            time.time() - timer,
            function=watcher,
            status="success",
        )
    else:
        anchore_engine.subsys.metrics.summary_observe(
            "anchore_monitor_runtime_seconds",
            time.time() - timer,
            function=watcher,
            status="fail",
        )

    return True
def _perform_queue_rebalance(queue_rebalance, highest_neg_queueId):
ret = []
for userId in queue_rebalance.keys():
user_lowest_queueId = queue_rebalance[userId].get("lowest_queueId", None)
if user_lowest_queueId and user_lowest_queueId > 0:
# shuffle the task into neg space
highest_neg_queueId += 1
if highest_neg_queueId <= -1:
logger.spew(
"prioritizing user {} image in image analysis queue for fair-share (queueId={}, new_queueId={})".format(
userId, user_lowest_queueId, highest_neg_queueId
)
)
ret.append((user_lowest_queueId, highest_neg_queueId))
return ret
def handle_notifications(*args, **kwargs):
    """
    Periodic watcher handler for notification delivery.

    Phase 1 drains each subscription-type simplequeue into db-backed queue
    records for matching enabled accounts (including 'event_log' messages
    gated by the configured notification level filter). Phase 2 attempts
    delivery of every stored record, retrying failures until the record's
    deadline passes, at which point it is dropped.

    :param kwargs: expects kwargs["mythread"] with the watcher's bookkeeping dict
    :return: True always (monitor loop convention)
    """
    global system_user_auth

    watcher = str(kwargs["mythread"]["taskType"])
    handler_success = True
    timer = time.time()
    logger.debug("FIRING: " + str(watcher))

    q_client = internal_client_for(SimpleQueueClient, userId=None)

    with db.session_scope() as dbsession:
        mgr = manager_factory.for_session(dbsession)
        localconfig = anchore_engine.configuration.localconfig.get_config()

        # seconds from enqueue until an undeliverable notification is dropped
        try:
            notification_timeout = int(
                localconfig["webhooks"]["notification_retry_timeout"]
            )
        except:
            notification_timeout = 30

        logger.debug("notification timeout: " + str(notification_timeout))

        # get the event log notification config
        try:
            event_log_config = (
                localconfig.get("services", {})
                .get("catalog", {})
                .get("event_log", None)
            )
            if event_log_config and "notification" in event_log_config:
                notify_events = event_log_config.get("notification").get(
                    "enabled", False
                )
                # event_levels None means "notify for all levels"
                if notify_events and "level" in event_log_config.get("notification"):
                    event_levels = event_log_config.get("notification").get("level")
                    event_levels = [level.lower() for level in event_levels]
                else:
                    event_levels = None
            else:
                notify_events = False
                event_levels = None
        except:
            logger.exception("Ignoring errors parsing for event_log configuration")
            notify_events = False
            event_levels = None

        # regular event queue notifications + event log notification
        event_log_type = "event_log"
        for subscription_type in anchore_engine.common.subscription_types + [
            event_log_type
        ]:
            logger.debug("notifier: " + subscription_type)

            accounts = mgr.list_accounts(
                with_state=AccountStates.enabled, include_service=False
            )

            try:
                qlen = q_client.qlen(subscription_type)
            except Exception as err:
                logger.debug(
                    "problem looking for notifications in queue: "
                    + str(subscription_type)
                    + " - exception: "
                    + str(err)
                )
                qlen = 0

            # phase 1: drain simplequeue messages into db queue records
            while qlen > 0:
                pupdate_record = q_client.dequeue(subscription_type)
                if pupdate_record:
                    logger.debug(
                        "got notification from queue: "
                        + json.dumps(pupdate_record, indent=4)
                    )
                    notification = pupdate_record["data"]
                    userId = notification["userId"]
                    subscription_key = notification["subscription_key"]
                    notificationId = notification["notificationId"]
                    for account in accounts:
                        try:
                            if userId == account["name"]:
                                notification_record = None

                                # new handling: events that embed a known
                                # subscription_type are matched against the
                                # account's stored (active) subscriptions
                                subscription_type_actual = subscription_type
                                if (
                                    notification.get("event", {})
                                    .get("details", {})
                                    .get("subscription_type", None)
                                    in anchore_engine.common.subscription_types
                                ):
                                    subscription_type_actual = (
                                        notification.get("event", {})
                                        .get("details", {})
                                        .get("subscription_type")
                                    )
                                    subscription_key_actual = (
                                        notification.get("event", {})
                                        .get("resource", {})
                                        .get("id")
                                    )
                                    dbfilter = {
                                        "subscription_type": subscription_type_actual,
                                        "subscription_key": subscription_key_actual,
                                    }
                                    subscription_records = (
                                        db_subscriptions.get_byfilter(
                                            account["name"],
                                            session=dbsession,
                                            **dbfilter
                                        )
                                    )
                                    if subscription_records:
                                        subscription = subscription_records[0]
                                        if subscription and subscription["active"]:
                                            # flatten the event details into the
                                            # outgoing notification payload
                                            notification_transform = {
                                                "notificationId": notification.get(
                                                    "notificationId"
                                                ),
                                                "userId": notification.get("userId"),
                                                "subscription_key": subscription_key_actual,
                                            }
                                            notification_transform.update(
                                                notification.get("event", {}).get(
                                                    "details", {}
                                                )
                                            )
                                            notification_record = (
                                                notifications.make_notification(
                                                    account,
                                                    subscription_type_actual,
                                                    notification_transform,
                                                )
                                            )
                                else:
                                    # plain event-log notification, gated by
                                    # config enablement and level filter
                                    if notify_events and (
                                        event_levels is None
                                        or subscription_key.lower() in event_levels
                                    ):
                                        notification.pop(
                                            "subscription_key", None
                                        )  # remove subscription_key property from notification
                                        notification_record = (
                                            notifications.make_notification(
                                                account, subscription_type, notification
                                            )
                                        )

                                if notification_record:
                                    logger.spew(
                                        "Storing NOTIFICATION: {} - {} - {}".format(
                                            account,
                                            notification_record,
                                            subscription_type,
                                        )
                                    )
                                    # tries=0; the last arg is the drop deadline
                                    db_queues.add(
                                        subscription_type_actual,
                                        userId,
                                        notificationId,
                                        notification_record,
                                        0,
                                        int(time.time() + notification_timeout),
                                        session=dbsession,
                                    )
                        except Exception as err:
                            import traceback

                            traceback.print_exc()
                            logger.warn(
                                "cannot store notification to DB - exception: "
                                + str(err)
                            )

                qlen = q_client.qlen(subscription_type)

            # phase 2: attempt delivery of every stored notification record
            for account in accounts:
                notification_records = db_queues.get_all(
                    subscription_type, account["name"], session=dbsession
                )
                for notification_record in notification_records:
                    logger.spew("drained to send: " + json.dumps(notification_record))
                    try:
                        rc = notifications.notify(account, notification_record)
                        if rc:
                            db_queues.delete_record(
                                notification_record, session=dbsession
                            )
                    except Exception as err:
                        logger.debug(
                            "failed to send notification, storing for retry - exception: "
                            + str(err)
                        )
                        # 'tries' holds the current timestamp, compared against
                        # the 'max_tries' deadline set at enqueue time
                        notification_record["tries"] = int(time.time())
                        if (
                            notification_record["tries"]
                            > notification_record["max_tries"]
                        ):
                            logger.error(
                                "hit max notification timeout: dropping notificaion"
                            )
                            db_queues.delete_record(
                                notification_record, session=dbsession
                            )
                        else:
                            db_queues.update_record(
                                notification_record, session=dbsession
                            )

    logger.debug("FIRING DONE: " + str(watcher))
    try:
        kwargs["mythread"]["last_return"] = handler_success
    except:
        pass

    if anchore_engine.subsys.metrics.is_enabled() and handler_success:
        anchore_engine.subsys.metrics.summary_observe(
            "anchore_monitor_runtime_seconds",
            time.time() - timer,
            function=watcher,
            status="success",
        )
    else:
        anchore_engine.subsys.metrics.summary_observe(
            "anchore_monitor_runtime_seconds",
            time.time() - timer,
            function=watcher,
            status="fail",
        )

    return True
def handle_metrics(*args, **kwargs):
    """
    Long-lived looping watcher thread (never returns) that periodically emits
    health-probe metrics: DB read, write, and read/write latencies plus the
    available bytes in the configured tmp dir. Sleeps for the watcher's
    cycle_timer between passes.
    """
    cycle_timer = kwargs["mythread"]["cycle_timer"]

    while True:
        # perform some DB read/writes for metrics gathering
        if anchore_engine.subsys.metrics.is_enabled():
            # DB probes
            anchore_record = None
            try:
                with anchore_engine.subsys.metrics.get_summary_obj(
                    "anchore_db_read_seconds"
                ).time() as mtimer:
                    with db.session_scope() as dbsession:
                        anchore_record = db_anchore.get(session=dbsession)
            except Exception as err:
                logger.warn("unable to perform DB read probe - exception: " + str(err))

            # the write probe reuses the record fetched by the read probe
            if anchore_record:
                try:
                    with anchore_engine.subsys.metrics.get_summary_obj(
                        "anchore_db_write_seconds"
                    ).time() as mtimer:
                        with db.session_scope() as dbsession:
                            anchore_record["record_state_val"] = str(time.time())
                            rc = db_anchore.update_record(
                                anchore_record, session=dbsession
                            )
                except Exception as err:
                    logger.warn(
                        "unable to perform DB write probe - exception: " + str(err)
                    )

            # combined probe: fetch and update within one timed window
            try:
                with anchore_engine.subsys.metrics.get_summary_obj(
                    "anchore_db_readwrite_seconds"
                ).time() as mtimer:
                    with db.session_scope() as dbsession:
                        anchore_record = db_anchore.get(session=dbsession)
                        anchore_record["record_state_val"] = str(time.time())
                        rc = db_anchore.update_record(anchore_record, session=dbsession)
            except Exception as err:
                logger.warn(
                    "unable to perform DB read/write probe - exception: " + str(err)
                )

            # FS probes
            localconfig = anchore_engine.configuration.localconfig.get_config()
            try:
                tmpdir = localconfig["tmp_dir"]
                svfs = os.statvfs(tmpdir)
                available_bytes = svfs.f_bsize * svfs.f_bavail
                anchore_engine.subsys.metrics.gauge_set(
                    "anchore_tmpspace_available_bytes", available_bytes
                )
            except Exception as err:
                logger.warn(
                    "unable to detect available bytes probe - exception: " + str(err)
                )

        time.sleep(cycle_timer)
def handle_archive_tasks(*args, **kwargs):
    """
    Handles periodic scan tasks for archive rule processing: runs an
    ArchiveTransitionTask for every enabled account, logging per-task
    progress and total execution time.

    :param args: unused
    :param kwargs: expects kwargs["mythread"] with the watcher's bookkeeping dict
    :return: True always (monitor loop convention)
    """
    watcher = str(kwargs["mythread"]["taskType"])
    start_time = time.time()
    logger.debug("FIRING: " + str(watcher))

    task_id = None  # id of the most recently started task, for the final log line
    account_names = []
    try:
        logger.info("Starting analysis archive transition rule processor")

        with db.session_scope() as session:
            # Get all enabled accounts
            mgr = manager_factory.for_session(session)
            accounts = mgr.list_accounts(
                with_state=AccountStates.enabled, include_service=False
            )
            if accounts:
                account_names = [x["name"] for x in accounts]
            logger.debug("Found accounts {} with transition rules".format(accounts))

        # run the transition tasks outside the listing session; each task
        # manages its own db access
        for account in account_names:
            task = archiver.ArchiveTransitionTask(account)
            task_id = task.task_id
            logger.info(
                "Starting archive transition task {} for account {}".format(
                    task.task_id, account
                )
            )
            task.run()
            logger.info("Archive transition task {} complete".format(task.task_id))
    except Exception:
        logger.exception("Caught unexpected exception")
    finally:
        logger.debug(
            "Analysis archive task {} execution time: {} seconds".format(
                task_id, time.time() - start_time
            )
        )

    logger.debug("Sleeping until next cycle since no messages to process")
    return True
def handle_image_gc(*args, **kwargs):
    """
    Periodic handler for cleaning up images that are marked for deletion, can be
    extended to cover other states in the future.
    Serializes image deletion across the board to minimize the load on database.

    :param args: unused
    :param kwargs: expects kwargs["mythread"] with the watcher's bookkeeping dict
    :return: True always (monitor loop convention)
    """
    watcher = str(kwargs["mythread"]["taskType"])
    handler_success = True
    timer = time.time()
    logger.debug("FIRING: " + str(watcher))

    try:
        # iterate over all images marked for deletion
        with db.session_scope() as dbsession:
            dbfilter = {"image_status": taskstate.queued_state("image_status")}
            queued_images = db_catalog_image.get_all_by_filter(
                session=dbsession, **dbfilter
            )

        for to_be_deleted in queued_images:
            try:
                account = to_be_deleted["userId"]
                digest = to_be_deleted["imageDigest"]
                logger.debug(
                    "Starting image gc for account id: %s, digest: %s"
                    % (account, digest)
                )
                with db.session_scope() as dbsession:
                    # re-check the status under a fresh session in case it
                    # changed between the listing above and now
                    logger.debug("Checking image status one final time")
                    expected_status = taskstate.queued_state("image_status")
                    current_status = db_catalog_image.get_image_status(
                        account, digest, dbsession
                    )
                    if current_status and current_status == expected_status:
                        # set force to true since all deletion checks should be cleared at this point
                        retobj, httpcode = catalog_impl.do_image_delete(
                            account, to_be_deleted, dbsession, force=True
                        )
                        if httpcode != 200:
                            logger.warn(
                                "Image deletion failed with error: {}".format(retobj)
                            )
                    else:
                        logger.warn(
                            "Skipping image gc due to status check mismatch. account id: %s, digest: %s, current status: %s, expected status: %s"
                            % (account, digest, current_status, expected_status)
                        )
                # not necessary to state transition to deleted as the records should have gone
            except:
                logger.exception("Error deleting image, may retry on next cycle")
                # TODO state transition to faulty to avoid further usage?
    except Exception as err:
        logger.warn("failure in handler - exception: " + str(err))

    logger.debug("FIRING DONE: " + str(watcher))
    try:
        kwargs["mythread"]["last_return"] = handler_success
    except:
        pass

    if anchore_engine.subsys.metrics.is_enabled() and handler_success:
        anchore_engine.subsys.metrics.summary_observe(
            "anchore_monitor_runtime_seconds",
            time.time() - timer,
            function=watcher,
            status="success",
        )
    else:
        anchore_engine.subsys.metrics.summary_observe(
            "anchore_monitor_runtime_seconds",
            time.time() - timer,
            function=watcher,
            status="fail",
        )

    return True
# ---- module-level monitor/watcher state ----
click = 0  # startup countdown used by monitor_func before the first real pass
running = False  # True while a monitor pass is executing
last_run = 0  # epoch time of the last completed monitor pass
system_user_auth = ("anchore-system", "")  # overwritten from config by monitor_func

# policy update check data
feed_sync_updated = False
bundle_user_last_updated = {}
bundle_user_is_updated = {}

# lease TTL handed to simplequeue.run_target_with_lease(); NOTE(review): the
# original comment claimed "1 hour ttl" but the value is 60 - presumably
# seconds; confirm against simplequeue lease semantics
default_lease_ttl = 60
def watcher_func(*args, **kwargs):
    """
    Generic task-watcher thread loop (never returns): repeatedly dequeues
    watcher task messages from the 'watcher_tasks' queue and runs the matching
    handler from the module-level `watchers` registry, under a simplequeue
    lease when one is configured so only one catalog instance executes a
    given task type at a time.
    """
    global system_user_auth

    while True:
        logger.debug("starting generic watcher")
        all_ready = anchore_engine.clients.services.common.check_services_ready(
            ["simplequeue"]
        )
        if not all_ready:
            logger.info("simplequeue service not yet ready, will retry")
        else:
            q_client = internal_client_for(SimpleQueueClient, userId=None)
            lease_id = None
            try:
                logger.debug("attempting dequeue")
                qobj = q_client.dequeue("watcher_tasks", max_wait_seconds=30)
                logger.debug("dequeue complete")

                if qobj:
                    logger.debug("got task from queue: " + str(qobj))
                    watcher = qobj["data"]["watcher"]
                    handler = watchers[watcher]["handler"]
                    # NOTE: rebinds this function's own args/kwargs for the
                    # handler invocation below
                    args = []
                    kwargs = {"mythread": watchers[watcher]}
                    lease_id = watchers[watcher]["task_lease_id"]

                    # Old way
                    timer = time.time()
                    if not lease_id:
                        # watcher opted out of lease protection; run directly
                        logger.debug(
                            "No task lease defined for watcher {}, initiating without lock protection".format(
                                watcher
                            )
                        )
                        rc = handler(*args, **kwargs)
                    else:
                        # run under a distributed lease so concurrent catalog
                        # instances do not execute the same task type at once
                        rc = simplequeue.run_target_with_lease(
                            None,
                            lease_id,
                            handler,
                            ttl=default_lease_ttl,
                            *args,
                            **kwargs
                        )
                else:
                    logger.debug("nothing in queue")
            except (
                simplequeue.LeaseAcquisitionFailedError,
                simplequeue.LeaseUnavailableError,
            ) as e:
                # another instance holds the lease; not an error condition
                logger.debug(
                    "Lease acquisition could not complete, but this is probably due to another process with the lease: {}".format(
                        e
                    )
                )
            except Exception as err:
                logger.warn("failed to process task this cycle: " + str(err))
        logger.debug("generic watcher done")
        time.sleep(5)
def schedule_watcher(watcher):
    """
    Enqueue a one-shot task message for *watcher* on the 'watcher_tasks'
    queue, unless an identical message is already queued.

    :param watcher: name of an entry in the module-level `watchers` registry
    :return: False when the watcher name is unknown, True otherwise
             (enqueue failures are logged and swallowed)
    """
    global watchers, watcher_task_template, system_user_auth

    if watcher not in watchers:
        logger.warn(
            "input watcher {} not in list of available watchers {}".format(
                watcher, list(watchers.keys())
            )
        )
        return False

    task_type = watchers[watcher]["taskType"]
    if task_type:
        logger.debug("should queue job: " + watcher)

        # build the task message from the shared template
        task_msg = copy.deepcopy(watcher_task_template)
        task_msg["watcher"] = watcher
        task_msg["taskType"] = task_type

        try:
            queue_client = internal_client_for(SimpleQueueClient, userId=None)
            if queue_client.is_inqueue("watcher_tasks", task_msg):
                logger.debug(str(task_msg) + ": init task already queued")
            else:
                queued_obj = queue_client.enqueue("watcher_tasks", task_msg)
                logger.debug(str(task_msg) + ": init task queued: " + str(queued_obj))
            # record the attempt time whether or not a new message was added
            watchers[watcher]["last_queued"] = time.time()
        except Exception as err:
            logger.warn("failed to enqueue watcher task: " + str(err))

    return True
def monitor_func(**kwargs):
    """
    One pass of the catalog monitor. Counts down a short startup delay on the
    first few calls, then — unless a pass is already running or the kick
    timer has not elapsed — initializes each configured watcher (applying any
    'cycle_timers' overrides from config), starts its thread if not yet
    running, and schedules due watcher tasks onto the simplequeue.
    """
    global click, running, last_queued, system_user_auth, watchers, last_run

    # startup grace period: skip the first 5 invocations
    if click < 5:
        click = click + 1
        logger.debug("Catalog monitor starting in: " + str(5 - click))
        return True

    if running or ((time.time() - last_run) < kwargs["kick_timer"]):
        return True

    logger.debug("FIRING: catalog_monitor")
    try:
        localconfig = anchore_engine.configuration.localconfig.get_config()
        system_user_auth = localconfig["system_user_auth"]

        for watcher in list(watchers.keys()):
            if not watchers[watcher]["initialized"]:
                # first time
                if "cycle_timers" in kwargs and watcher in kwargs["cycle_timers"]:
                    try:
                        the_cycle_timer = watchers[watcher]["cycle_timer"]
                        min_cycle_timer = watchers[watcher]["min_cycle_timer"]
                        max_cycle_timer = watchers[watcher]["max_cycle_timer"]

                        config_cycle_timer = int(kwargs["cycle_timers"][watcher])
                        if config_cycle_timer < 0:
                            # a negative configured value bypasses the min/max clamp
                            the_cycle_timer = abs(int(config_cycle_timer))
                        elif config_cycle_timer == 0:
                            # zero disables the watcher entirely
                            watchers[watcher]["enabled"] = False
                            logger.debug(
                                "watcher '{}' has been explicitly disabled in config".format(
                                    watcher
                                )
                            )
                        elif config_cycle_timer < min_cycle_timer:
                            logger.warn(
                                "configured cycle timer for handler ("
                                + str(watcher)
                                + ") is less than the allowed min ("
                                + str(min_cycle_timer)
                                + ") - using allowed min"
                            )
                            the_cycle_timer = min_cycle_timer
                        elif config_cycle_timer > max_cycle_timer:
                            logger.warn(
                                "configured cycle timer for handler ("
                                + str(watcher)
                                + ") is greater than the allowed max ("
                                + str(max_cycle_timer)
                                + ") - using allowed max"
                            )
                            the_cycle_timer = max_cycle_timer
                        else:
                            the_cycle_timer = config_cycle_timer

                        watchers[watcher]["cycle_timer"] = the_cycle_timer
                    except Exception as err:
                        logger.warn(
                            "exception setting custom cycle timer for handler ("
                            + str(watcher)
                            + ") - using default"
                        )

                watchers[watcher]["initialized"] = True

            if watchers[watcher].get("enabled", True):
                if watcher not in watcher_threads:
                    if watchers[watcher]["taskType"]:
                        # spin up a generic task watcher
                        logger.debug("starting generic task thread")
                        watcher_threads[watcher] = threading.Thread(
                            target=watcher_func, args=[watcher], kwargs={}
                        )
                        watcher_threads[watcher].start()
                    else:
                        # spin up a specific looping watcher thread
                        watcher_threads[watcher] = threading.Thread(
                            target=watchers[watcher]["handler"],
                            args=watchers[watcher]["args"],
                            kwargs={"mythread": watchers[watcher]},
                        )
                        watcher_threads[watcher].start()

                all_ready = anchore_engine.clients.services.common.check_services_ready(
                    ["simplequeue"]
                )
                if not all_ready:
                    logger.info("simplequeue service not yet ready, will retry")
                elif (
                    time.time() - watchers[watcher]["last_queued"]
                    > watchers[watcher]["cycle_timer"]
                ):
                    rc = schedule_watcher(watcher)
    except Exception as err:
        logger.error(str(err))
    finally:
        logger.debug("FIRING DONE: catalog_monitor")
        running = False
        last_run = time.time()

    logger.debug("exiting monitor thread")
# handle to the single monitor_func thread managed by monitor()
monitor_thread = None


def monitor(*args, **kwargs):
    """
    Ensure exactly one monitor_func thread is running: if the previous thread
    has exited, join it and start a fresh one; otherwise leave it alone.
    Called periodically by the service framework (see __monitor_fn__).
    """
    global monitor_thread
    try:
        donew = False
        if monitor_thread:
            # BUGFIX: Thread.isAlive() was removed in Python 3.9; the PEP 8
            # name is_alive() has been available since Python 2.6.
            if monitor_thread.is_alive():
                logger.spew("MON: thread still running")
            else:
                logger.spew("MON: thread stopped running")
                donew = True
                monitor_thread.join()
                logger.spew(
                    "MON: thread joined: isAlive=" + str(monitor_thread.is_alive())
                )
        else:
            logger.spew("MON: no thread")
            donew = True

        if donew:
            logger.spew("MON: starting")
            monitor_thread = threading.Thread(target=monitor_func, kwargs=kwargs)
            monitor_thread.start()
        else:
            logger.spew("MON: skipping")
    except Exception as err:
        logger.warn("MON thread start exception: " + str(err))
class CatalogService(ApiService):
    """Catalog API service: wires object-storage and default-policy
    initialization into the service lifecycle and exposes image content
    retrieval."""

    __service_name__ = "catalog"
    __spec_dir__ = pkg_resources.resource_filename(__name__, "swagger")
    __monitor_fn__ = monitor  # module-level monitor() drives the watcher threads

    def _register_instance_handlers(self):
        # lifecycle hooks: object storage once the DB is up, default policy
        # installation after service registration
        super()._register_instance_handlers()
        self.register_handler(LifeCycleStages.post_db, self._init_object_storage, {})
        self.register_handler(LifeCycleStages.post_register, self._init_policies, {})

    def _init_object_storage(self):
        """Initialize the primary object store manager and the analysis
        archive from service configuration; re-raises on any failure so the
        service does not come up misconfigured."""
        try:
            did_init = object_store.initialize(
                self.configuration,
                manager_id=DEFAULT_OBJECT_STORE_MANAGER_ID,
                config_keys=[
                    DEFAULT_OBJECT_STORE_MANAGER_ID,
                    ALT_OBJECT_STORE_CONFIG_KEY,
                ],
                allow_legacy_fallback=True,
            )
            if not did_init:
                logger.warn(
                    "Unexpectedly found the object store already initialized. This is not an expected condition. Continuting with driver: {}".format(
                        object_store.get_manager().primary_client.__config_name__
                    )
                )
        except Exception as err:
            logger.exception(
                "Error initializing the object store: check catalog configuration"
            )
            raise err

        try:
            archive.initialize(self.configuration)
        except Exception as err:
            logger.exception(
                "Error initializing analysis archive: check catalog configuration"
            )
            raise err

    def _init_policies(self):
        """
        Ensure all accounts have a default policy in place

        :return:
        """
        obj_mgr = object_store.get_manager()

        with db.session_scope() as dbsession:
            mgr = manager_factory.for_session(dbsession)
            for account_dict in mgr.list_accounts(include_service=False):
                try:
                    logger.info("Initializing a new account")
                    userId = account_dict[
                        "name"
                    ]  # Old keys are userId, now that maps to account name
                    bundle_records = db_policybundle.get_all_byuserId(
                        userId, session=dbsession
                    )
                    if not bundle_records:
                        logger.debug(
                            "Account {} has no policy bundle - installing default".format(
                                userId
                            )
                        )

                        config = self.global_configuration

                        # What to do with each policy bundle
                        def process_bundle(policy_bundle, bundle):
                            # persist the bundle document, then record it in
                            # the policy bundle table
                            bundle_url = obj_mgr.put_document(
                                userId, "policy_bundles", bundle["id"], bundle
                            )
                            policy_record = make_policy_record(
                                userId, bundle, policy_bundle["active"]
                            )
                            rc = db_policybundle.add(
                                policy_record["policyId"],
                                userId,
                                policy_bundle["active"],
                                policy_record,
                                session=dbsession,
                            )
                            if not rc:
                                raise Exception("policy bundle DB add failed")

                        # How to handle any exceptions form opening the bundle file or converting
                        # its contents to json
                        def process_exception(exception):
                            # IntegrityError means another catalog instance won
                            # the initialization race; safe to continue
                            if isinstance(exception, IntegrityError):
                                logger.warn(
                                    "another process has already initialized, continuing"
                                )
                            else:
                                logger.error(
                                    "could not load up default bundle for user - exception: "
                                    + str(exception)
                                )

                        anchore_engine.configuration.localconfig.load_policy_bundles(
                            config, process_bundle, process_exception
                        )
                except Exception as err:
                    if isinstance(err, IntegrityError):
                        logger.warn(
                            "another process has already initialized, continuing"
                        )
                    else:
                        raise Exception(
                            "unable to initialize default user data - exception: "
                            + str(err)
                        )

    @staticmethod
    def get_image_content(account_id, content_type, image_digest):
        """Return image content of the requested type for the given digest,
        dispatching to the specialized getter for 'manifest' and 'dockerfile'
        and the generic getter for everything else."""
        if content_type == "manifest":
            getter = ImageManifestContentGetter(account_id, content_type, image_digest)
        elif content_type == "dockerfile":
            getter = ImageDockerfileContentGetter(
                account_id, content_type, image_digest
            )
        else:
            getter = ImageContentGetter(account_id, content_type, image_digest)

        return getter.get()
def delete_import_operation(dbsession, operation: ImageImportOperation):
    """
    Execute the deletion path for an import operation: delete each content
    record's stored document and db row, then the operation row itself.

    :param dbsession: active db session used for the row deletes
    :param operation: the ImageImportOperation to garbage collect
    :return: None on full success; the operation itself if any content could
             not be deleted, so a later gc cycle can retry it
    """
    logger.info("garbage collecting import operation: %s", operation.uuid)
    obj_mgr = object_store.get_manager()

    failed = False
    for content in operation.contents:
        try:
            logger.debug(
                "deleting import content digest %s of type %s for operation %s",
                content.digest,
                content.content_type,
                operation.uuid,
            )
            obj_mgr.delete_document(
                userId=operation.account,
                bucket=content.content_storage_bucket,
                archiveid=content.content_storage_key,
            )
            dbsession.delete(content)
            logger.debug(
                "deleted import content digest %s of type %s for operation %s successfully",
                content.digest,
                content.content_type,
                operation.uuid,
            )
        except Exception:
            # narrowed from a bare 'except:' so system-exiting exceptions
            # (KeyboardInterrupt, SystemExit) are not swallowed
            logger.debug_exception(
                "could not delete import content of type %s for operation %s with digest %s",
                content.content_type,
                operation.uuid,
                content.digest,
            )
            failed = True

    if failed:
        # leave the operation row in place so a later cycle can retry
        return operation

    # capture the uuid before deleting so the completion log does not read
    # attributes off an ORM object already marked for deletion
    uuid = operation.uuid
    dbsession.delete(operation)

    logger.info("garbage collection of import operation %s complete", uuid)
    return None
def garbage_collect_imports():
    """
    Flush all imports that are in a state ready for collection

    :return:
    """
    # states whose import operations are eligible for garbage collection
    collectible_states = [
        ImportState.invalidated,
        ImportState.complete,
        ImportState.failed,
        ImportState.expired,
    ]

    # iterate over all imports ready for GC
    with db.session_scope() as dbsession:
        candidates = dbsession.query(ImageImportOperation).filter(
            ImageImportOperation.status.in_(collectible_states)
        )
        for operation in candidates:
            try:
                logger.debug(
                    "Starting import operation gc for account id: %s, operation id: %s"
                    % (operation.account, operation.uuid)
                )
                delete_import_operation(dbsession, operation)
            except:
                logger.exception("Error deleting image, may retry on next cycle")
def expire_imports():
    """
    Mark all pending/processing import operations whose deadline has passed
    as expired, making them eligible for garbage collection on a later pass.

    :return:
    """
    in_flight_states = [ImportState.pending, ImportState.processing]

    with db.session_scope() as dbsession:
        stale_ops = dbsession.query(ImageImportOperation).filter(
            ImageImportOperation.status.in_(in_flight_states),
            ImageImportOperation.expires_at < datetime.datetime.utcnow(),
        )
        for stale_op in stale_ops:
            stale_op.status = ImportState.expired
def handle_import_gc(*args, **kwargs):
    """
    Cleanup import operations that are expired or complete and reclaim resources.

    Runs two independent passes: garbage collection of finished/invalidated
    imports, then expiration of stale pending/processing imports. A failure
    in either pass is logged and does not abort the handler.

    :param args: unused
    :param kwargs: expects kwargs["mythread"] with the watcher's bookkeeping dict
    :return: True always (monitor loop convention)
    """
    watcher = str(kwargs["mythread"]["taskType"])
    handler_success = True
    timer = time.time()
    logger.debug("FIRING: " + str(watcher))

    try:
        garbage_collect_imports()
    except Exception as err:
        logger.warn("failure in handler - exception: " + str(err))

    try:
        expire_imports()
    except Exception as err:
        logger.warn("failure in handler - exception: " + str(err))

    logger.debug("FIRING DONE: " + str(watcher))
    try:
        kwargs["mythread"]["last_return"] = handler_success
    except:
        pass

    # CONSISTENCY FIX: emit the same runtime metric and final return value as
    # the sibling watcher handlers (e.g. handle_image_gc), which this handler
    # previously omitted
    if anchore_engine.subsys.metrics.is_enabled() and handler_success:
        anchore_engine.subsys.metrics.summary_observe(
            "anchore_monitor_runtime_seconds",
            time.time() - timer,
            function=watcher,
            status="success",
        )
    else:
        anchore_engine.subsys.metrics.summary_observe(
            "anchore_monitor_runtime_seconds",
            time.time() - timer,
            function=watcher,
            status="fail",
        )

    return True
# Registry of all catalog watchers, keyed by watcher name. Per-entry fields:
#   handler         - callable that performs the watcher's work
#   task_lease_id   - simplequeue lease name (False => run without a lease)
#   taskType        - queued-task name; None => the handler runs as its own
#                     long-lived looping thread instead of via the task queue
#   args            - positional args for looping handlers
#   cycle_timer     - default seconds between runs; config 'cycle_timers' may
#                     override, clamped by monitor_func to
#                     [min_cycle_timer, max_cycle_timer]
#   last_queued     - epoch of the last enqueue, set by schedule_watcher
#   last_return     - last recorded handler return value
#   initialized     - set True by monitor_func after first-time setup
watchers = {
    "image_watcher": {
        "handler": handle_image_watcher,
        "task_lease_id": "image_watcher",
        "taskType": "handle_image_watcher",
        "args": [],
        "cycle_timer": 600,
        "min_cycle_timer": 300,
        "max_cycle_timer": 86400 * 7,
        "last_queued": 0,
        "last_return": False,
        "initialized": False,
    },
    "repo_watcher": {
        "handler": handle_repo_watcher,
        "task_lease_id": "repo_watcher",
        "taskType": "handle_repo_watcher",
        "args": [],
        "cycle_timer": 60,
        "min_cycle_timer": 60,
        "max_cycle_timer": 86400 * 7,
        "last_queued": 0,
        "last_return": False,
        "initialized": False,
    },
    "policy_eval": {
        "handler": handle_policyeval,
        "task_lease_id": "policy_eval",
        "taskType": "handle_policyeval",
        "args": [],
        "cycle_timer": 300,
        "min_cycle_timer": 60,
        "max_cycle_timer": 86400 * 2,
        "last_queued": 0,
        "last_return": False,
        "initialized": False,
    },
    "analyzer_queue": {
        "handler": handle_analyzer_queue,
        "task_lease_id": "analyzer_queue",
        "taskType": "handle_analyzer_queue",
        "args": [],
        "cycle_timer": 5,
        "min_cycle_timer": 1,
        "max_cycle_timer": 7200,
        "last_queued": 0,
        "last_return": False,
        "initialized": False,
    },
    "notifications": {
        "handler": handle_notifications,
        "task_lease_id": "notifications",
        "taskType": "handle_notifications",
        "args": [],
        "cycle_timer": 10,
        "min_cycle_timer": 10,
        "max_cycle_timer": 86400 * 2,
        "last_queued": 0,
        "last_return": False,
        "initialized": False,
    },
    "vulnerability_scan": {
        "handler": handle_vulnerability_scan,
        "task_lease_id": "vulnerability_scan",
        "taskType": "handle_vulnerability_scan",
        "args": [],
        "cycle_timer": 300,
        "min_cycle_timer": 60,
        "max_cycle_timer": 86400 * 2,
        "last_queued": 0,
        "last_return": False,
        "initialized": False,
    },
    "account_resource_cleanup": {
        "handler": handle_account_resource_cleanup,
        "task_lease_id": "account_resource_cleanup",
        "taskType": "handle_account_resource_cleanup",
        "args": [],
        "cycle_timer": 30,
        "min_cycle_timer": 30,
        "max_cycle_timer": 30,
        "last_queued": 0,
        "last_return": False,
        "initialized": False,
    },
    # looping watcher (taskType None): runs in its own dedicated thread
    "service_watcher": {
        "handler": handle_service_watcher,
        "task_lease_id": False,
        "taskType": None,
        "args": [],
        "cycle_timer": 10,
        "min_cycle_timer": 1,
        "max_cycle_timer": 300,
        "last_queued": 0,
        "last_return": False,
        "initialized": False,
    },
    # looping watcher (taskType None): runs in its own dedicated thread
    "service_heartbeat": {
        "handler": anchore_engine.subsys.servicestatus.handle_service_heartbeat,
        "task_lease_id": False,
        "taskType": None,
        "args": [CatalogService.__service_name__],
        "cycle_timer": 60,
        "min_cycle_timer": 60,
        "max_cycle_timer": 60,
        "last_queued": 0,
        "last_return": False,
        "initialized": False,
    },
    # looping watcher (taskType None): runs in its own dedicated thread
    "handle_metrics": {
        "handler": handle_metrics,
        "task_lease_id": False,
        "taskType": None,
        "args": [],
        "cycle_timer": 60,
        "min_cycle_timer": 60,
        "max_cycle_timer": 60,
        "last_queued": 0,
        "last_return": False,
        "initialized": False,
    },
    "archive_tasks": {
        "handler": handle_archive_tasks,
        "task_lease_id": "archive_transitions",
        "taskType": "handle_archive_tasks",
        "args": [],
        "cycle_timer": 43200,
        "min_cycle_timer": 60,
        "max_cycle_timer": 86400 * 5,
        "last_queued": 0,
        "last_return": False,
        "initialized": False,
    },
    "image_gc": {
        "handler": handle_image_gc,
        "task_lease_id": "image_gc",
        "taskType": "handle_image_gc",
        "args": [],
        "cycle_timer": 60,
        "min_cycle_timer": 60,
        "max_cycle_timer": 86400,
        "last_queued": 0,
        "last_return": False,
        "initialized": False,
    },
    "import_gc": {
        "handler": handle_import_gc,
        "task_lease_id": "import_gc",
        "taskType": "handle_import_gc",
        "args": [],
        "cycle_timer": 60,
        "min_cycle_timer": 60,
        "max_cycle_timer": 86400,
        "last_queued": 0,
        "last_return": False,
        "initialized": False,
    },
}
# skeleton message placed on the 'watcher_tasks' queue by schedule_watcher();
# 'taskType' and 'watcher' are filled in per-watcher before enqueueing
watcher_task_template = {
    "taskType": None,
    "watcher": None,
}
# watcher name -> running Thread object, populated by monitor_func()
watcher_threads = {}
|
client.py | from socketIO_client import SocketIO, LoggingNamespace
#import webbrowser
import webview
import threading
socketIO = SocketIO('f237c8a0.ngrok.io', 80, LoggingNamespace)
def on_connect():
    """Log that the socket.io connection to the relay server is up."""
    print("Connected.")
def on_thing(data):
    """Handle a 'thing' event from the relay server.

    Only reacts when the payload's ``devHand`` flag equals True; a payload
    without the key is treated as not-for-us instead of raising KeyError
    (the original crashed the handler on such payloads).
    """
    if data.get("devHand") == True:  # noqa: E712 — keep the original's exact comparison
        print("Received message from Google Assistant")
def on_stackoverflow(data):
    """Render an incoming StackOverflow answer in a webview window.

    Expects ``data`` with keys: ``devHand`` (flag), ``query`` (question
    title), ``score`` (answer score) and ``html`` (answer body, already
    HTML and inserted unescaped on purpose).
    """
    if data["devHand"] == True:
        try:
            # Drop any previously opened answer window.
            webview.destroy_window()
        except Exception:
            pass
        print("Received StackOverflow question:", data["query"])
        with open("style.css") as css_file:
            css = css_file.read()
        import html as _htmllib  # stdlib; escape untrusted text in <title>
        safe_query = _htmllib.escape(data["query"])
        html = f"""<!DOCTYPE html>
<html>
<head>
<title>{safe_query}</title>
<style>
{css}
</style>
</head>
<body>
<div id="answer">
<h3 class="answer-heading">BEST ANSWER ({data['score']})</h3>
{data['html']}
</div>
</body>
</html>
"""
        # BUG FIX: the heading previously used style="answer-heading", which
        # is not valid CSS and did nothing; "answer-heading" is a class name.
        def load_html():
            webview.load_html(html)
        # create_window blocks until the window closes, so the HTML is loaded
        # from a helper thread once the window exists.
        t = threading.Thread(target=load_html)
        t.start()
        webview.create_window(data["query"], width=500, height=700)
# Register the event handlers, then block forever servicing socket.io events.
socketIO.on('connect', on_connect)
socketIO.on("thing", on_thing)
socketIO.on("stackoverflow", on_stackoverflow)
socketIO.wait()
|
main.py | import time
import asyncio
import threading
import click
import sys
import os
# Make the raccoon_src package importable when this file is run directly by
# adding the repository root (this file's parent directory) to sys.path.
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
from raccoon_src.utils.coloring import COLOR, COLORED_COMBOS
from raccoon_src.utils.exceptions import RaccoonException, HostHandlerException
from raccoon_src.utils.request_handler import RequestHandler
from raccoon_src.utils.logger import SystemOutLogger
from raccoon_src.utils.help_utils import HelpUtilities
from raccoon_src.lib.fuzzer import URLFuzzer
from raccoon_src.lib.host import Host
from raccoon_src.lib.scanner import Scanner, NmapScan
from raccoon_src.lib.sub_domain import SubDomainEnumerator
from raccoon_src.lib.dns_handler import DNSHandler
from raccoon_src.lib.waf import WAF
from raccoon_src.lib.tls import TLSHandler
from raccoon_src.lib.web_app import WebApplicationScanner
# Absolute directory of this file; used to resolve the bundled wordlists
# regardless of the current working directory.
MY_PATH = os.path.abspath(os.path.dirname(__file__))
def intro(logger):
    """Print the Raccoon ASCII-art banner (art in gray, trailer uncolored)."""
    logger.info("""{}
 _____                                             
 | __ \ /\ / ____| / ____| / __ \ / __ \ | \ | |
 | |__) | / \ | | | | | | | | | | | | | \| |
 | _ / / /\ \ | | | | | | | | | | | | | . ` |
 | | \ \ / ____ \ | |____ | |____ | |__| | | |__| | | |\ |
 |_| \_\ /_/ \_\ \_____| \_____| \____/ \____/ |_| \_|
{}
4841434b544845504c414e4554
https://github.com/evyatarmeged/Raccoon
-------------------------------------------------------------------
""".format(COLOR.GRAY, COLOR.RESET))
@click.command()
@click.version_option("0.0.8")
@click.option("-t", "--target", required=True, help="Target to scan")
@click.option("-d", "--dns-records", default="A,MX,NS,CNAME,SOA,TXT",
              help="Comma separated DNS records to query. Defaults to: A,MX,NS,CNAME,SOA,TXT")
@click.option("--tor-routing", is_flag=True, help="Route HTTP traffic through Tor (uses port 9050)."
              " Slows total runtime significantly")
@click.option("--proxy-list", help="Path to proxy list file that would be used for routing HTTP traffic."
              " A proxy from the list will be chosen at random for each request."
              " Slows total runtime")
@click.option("-c", "--cookies", help="Comma separated cookies to add to the requests. "
              "Should be in the form of key:value\n"
              "Example: PHPSESSID:12345,isMobile:false")
@click.option("--proxy", help="Proxy address to route HTTP traffic through. Slows total runtime")
@click.option("-w", "--wordlist", default=os.path.join(MY_PATH, "wordlists/fuzzlist"),
              help="Path to wordlist that would be used for URL fuzzing")
@click.option("-T", "--threads", default=25,
              help="Number of threads to use for URL Fuzzing/Subdomain enumeration. Default: 25")
@click.option("--ignored-response-codes", default="302,400,401,402,403,404,503,504",
              help="Comma separated list of HTTP status code to ignore for fuzzing."
              " Defaults to: 302,400,401,402,403,404,503,504")
@click.option("--subdomain-list", default=os.path.join(MY_PATH, "wordlists/subdomains"),
              help="Path to subdomain list file that would be used for enumeration")
@click.option("-sc", "--scripts", is_flag=True, help="Run Nmap scan with -sC flag")
@click.option("-sv", "--services", is_flag=True, help="Run Nmap scan with -sV flag")
@click.option("-f", "--full-scan", is_flag=True, help="Run Nmap scan with both -sV and -sC")
@click.option("-p", "--port", help="Use this port range for Nmap scan instead of the default")
@click.option("-fr", "--follow-redirects", is_flag=True, default=False,
              help="Follow redirects when fuzzing. Default: False (will not follow redirects)")
@click.option("--tls-port", default=443, help="Use this port for TLS queries. Default: 443")
@click.option("--skip-health-check", is_flag=True, help="Do not test for target host availability")
@click.option("--no-url-fuzzing", is_flag=True, help="Do not fuzz URLs")
@click.option("--no-sub-enum", is_flag=True, help="Do not bruteforce subdomains")
@click.option("--skip-nmap-scan", is_flag=True, help="Do not perform an Nmap scan")
# @click.option("-d", "--delay", default="0.25-1",
#               help="Min and Max number of seconds of delay to be waited between requests\n"
#                    "Defaults to Min: 0.25, Max: 1. Specified in the format of Min-Max")
@click.option("-q", "--quiet", is_flag=True, help="Do not output to stdout")
@click.option("-o", "--outdir", default="Raccoon_scan_results",
              help="Directory destination for scan output")
def main(target,
         tor_routing,
         proxy_list,
         proxy,
         cookies,
         dns_records,
         wordlist,
         threads,
         ignored_response_codes,
         subdomain_list,
         full_scan,
         scripts,
         services,
         port,
         tls_port,
         skip_health_check,
         follow_redirects,
         no_url_fuzzing,
         no_sub_enum,
         skip_nmap_scan,
         # delay,
         outdir,
         quiet):
    """Run a full Raccoon reconnaissance scan against *target*.

    Phases: argument validation and routing setup (Tor/proxy), host parsing
    and health check, a background Nmap scan, concurrent TLS/WAF/whois/web
    checks, then URL fuzzing and subdomain enumeration. Exits with a
    distinct non-zero code on each fatal validation/connectivity error.
    """
    try:
        # ------ Arg validation ------
        # Set logging level and Logger instance
        log_level = HelpUtilities.determine_verbosity(quiet)
        logger = SystemOutLogger(log_level)
        intro(logger)
        target = target.lower()
        try:
            HelpUtilities.validate_executables()
        except RaccoonException as e:
            # exit 9: a required external executable (e.g. nmap) is missing
            logger.critical(str(e))
            exit(9)
        HelpUtilities.validate_wordlist_args(proxy_list, wordlist, subdomain_list)
        HelpUtilities.validate_proxy_args(tor_routing, proxy, proxy_list)
        HelpUtilities.create_output_directory(outdir)
        if tor_routing:
            logger.info("{} Testing that Tor service is up...".format(COLORED_COMBOS.NOTIFY))
        elif proxy_list:
            if proxy_list and not os.path.isfile(proxy_list):
                raise FileNotFoundError("Not a valid file path, {}".format(proxy_list))
            else:
                logger.info("{} Routing traffic using proxies from list {}\n".format(
                    COLORED_COMBOS.NOTIFY, proxy_list))
        elif proxy:
            logger.info("{} Routing traffic through proxy {}\n".format(COLORED_COMBOS.NOTIFY, proxy))
        # TODO: Sanitize delay argument
        dns_records = tuple(dns_records.split(","))
        ignored_response_codes = tuple(int(code) for code in ignored_response_codes.split(","))
        if port:
            HelpUtilities.validate_port_range(port)
        # ------ /Arg validation ------
        if cookies:
            try:
                cookies = HelpUtilities.parse_cookie_arg(cookies)
            except RaccoonException as e:
                logger.critical("{}{}{}".format(COLOR.RED, str(e), COLOR.RESET))
                exit(2)
        # Set Request Handler instance
        # NOTE(review): the instance is never used below; RequestHandler
        # presumably stores shared/singleton routing state — confirm.
        request_handler = RequestHandler(
            proxy_list=proxy_list,
            tor_routing=tor_routing,
            single_proxy=proxy,
            cookies=cookies
        )
        if tor_routing:
            try:
                HelpUtilities.confirm_traffic_routs_through_tor()
                logger.info("{} Validated Tor service is up. Routing traffic anonymously\n".format(
                    COLORED_COMBOS.NOTIFY))
            except RaccoonException as err:
                print("{}{}{}".format(COLOR.RED, str(err), COLOR.RESET))
                exit(3)
        main_loop = asyncio.get_event_loop()
        logger.info("{}### Raccoon Scan Started ###{}\n".format(COLOR.GRAY, COLOR.RESET))
        logger.info("{} Trying to gather information about host: {}".format(COLORED_COMBOS.INFO, target))
        # TODO: Populate array when multiple targets are supported
        # hosts = []
        try:
            host = Host(target=target, dns_records=dns_records)
            host.parse()
        except HostHandlerException as e:
            logger.critical("{}{}{}".format(COLOR.RED, str(e), COLOR.RESET))
            exit(11)
        if not skip_health_check:
            try:
                HelpUtilities.validate_target_is_up(host)
            except RaccoonException as err:
                logger.critical("{}{}{}".format(COLOR.RED, str(err), COLOR.RESET))
                exit(42)
        if not skip_nmap_scan:
            logger.info("\n{} Setting Nmap scan to run in the background".format(COLORED_COMBOS.INFO))
            nmap_scan = NmapScan(host, full_scan, scripts, services, port)
            # # # TODO: Populate array when multiple targets are supported
            # nmap_threads = []
            nmap_thread = threading.Thread(target=Scanner.run, args=(nmap_scan,))
            # Run Nmap scan in the background. Can take some time
            nmap_thread.start()
        # Run first set of checks - TLS, Web/WAF Data, DNS data
        waf = WAF(host)
        tls_info_scanner = TLSHandler(host, tls_port)
        web_app_scanner = WebApplicationScanner(host)
        tasks = (
            asyncio.ensure_future(tls_info_scanner.run()),
            asyncio.ensure_future(waf.detect()),
            asyncio.ensure_future(DNSHandler.grab_whois(host)),
            asyncio.ensure_future(web_app_scanner.run_scan()),
            asyncio.ensure_future(DNSHandler.generate_dns_dumpster_mapping(host, logger))
        )
        main_loop.run_until_complete(asyncio.wait(tasks))
        # Second set of checks - URL fuzzing, Subdomain enumeration
        if not no_url_fuzzing:
            fuzzer = URLFuzzer(host, ignored_response_codes, threads, wordlist, follow_redirects)
            main_loop.run_until_complete(fuzzer.fuzz_all())
        if not host.is_ip:
            sans = tls_info_scanner.sni_data.get("SANs")
            subdomain_enumerator = SubDomainEnumerator(
                host,
                domain_list=subdomain_list,
                sans=sans,
                ignored_response_codes=ignored_response_codes,
                num_threads=threads,
                follow_redirects=follow_redirects,
                no_sub_enum=no_sub_enum
            )
            main_loop.run_until_complete(subdomain_enumerator.run())
        if not skip_nmap_scan:
            if nmap_thread.is_alive():
                logger.info("{} All scans done. Waiting for Nmap scan to wrap up. "
                            "Time left may vary depending on scan type and port range".format(COLORED_COMBOS.INFO))
                while nmap_thread.is_alive():
                    time.sleep(15)
        logger.info("\n{}### Raccoon scan finished ###{}\n".format(COLOR.GRAY, COLOR.RESET))
        os.system("stty sane")
    except KeyboardInterrupt:
        print("{}Keyboard Interrupt detected. Exiting{}".format(COLOR.RED, COLOR.RESET))
        # Fix F'd up terminal after CTRL+C
        os.system("stty sane")
        exit(42)
if __name__ == "__main__":
    main()  # click parses sys.argv and injects the option values
|
pyserver.pyw | # -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Oct 26 2018)
## http://www.wxformbuilder.org/
##
## PLEASE DO *NOT* EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
import os #get curdir
import subprocess #for receive-trigger
import json
import socket as sk
import threading as th
from time import sleep
from datetime import datetime
# --- Settings (defaults; overridden from config.json when run as a script) ---
MAX_BUF_SIZE = 0xFFFF  # maximum bytes read per recv/recvfrom call
TIMEOUT = 5.0          # TCP connection timeout in seconds
DELIMITER = " "        # token separator for hex/decimal display and hex input
STR_ENCODE = "utf-8"   # text encoding for string-mode send/receive
def hexstring_bytes(hexstr: str, delimiter=" "):
    """Convert a delimiter-separated hex string (e.g. "0a 1f") to bytes.

    Raises ValueError when any token is not valid hexadecimal.
    """
    return bytes(int(token, 16) for token in hexstr.split(delimiter))
#communication init function
def sock_init(protocol: str, port: int) -> sk.socket:
    """Create and bind a server socket on all interfaces.

    Args:
        protocol: "UDP" for a datagram socket, "TCP" for a stream socket.
        port: local port to bind (0 lets the OS pick one).

    Returns:
        The bound socket.

    Raises:
        ValueError: for any other protocol string. (The original silently
            returned None here, deferring the failure to the first use.)
    """
    if protocol == "UDP":
        sock = sk.socket(sk.AF_INET, sk.SOCK_DGRAM)
    elif protocol == "TCP":
        sock = sk.socket(sk.AF_INET, sk.SOCK_STREAM)
    else:
        raise ValueError("unsupported protocol: {!r}".format(protocol))
    sock.bind(("", port))
    return sock
###########################################################################
## Class MyFrame1
###########################################################################
class MyFrame1 ( wx.Frame ):
    """Main PyServer window: a simple UDP/TCP listening server whose received
    data can be piped through an optional 'receive-trigger' Python script."""

    def __init__( self, parent ):
        """Build the wxFormBuilder-generated widget tree, bind events and
        start the two daemonized server-loop threads (UDP and TCP; each one
        idles until its protocol radio button plus Start arm it)."""
        # NOTE(review): name=u"PySerer" looks like a typo for "PyServer".
        wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"PyServer", pos = wx.DefaultPosition, size = wx.Size( 600,350 ), style = wx.CAPTION|wx.CLOSE_BOX|wx.MINIMIZE_BOX|wx.TAB_TRAVERSAL, name = u"PySerer" )
        self.data = b""          # last received payload
        self.CreateStatusBar()
        self.SetStatusText("")
        self.client = None       # (host, port) of the most recent peer
        self.udpsock = None      # bound UDP socket while in UDP mode
        self.tcpsock = None      # listening TCP socket while in TCP mode
        self.tcpsock_c = None    # accepted TCP connection socket
        self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
        self.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_3DFACE ) )
        # Left half: receive log; right half: settings/controls.
        mainSizer = wx.GridSizer( 1, 2, 0, 0 )
        self.m_textCtrl9 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_MULTILINE|wx.TE_RICH2|wx.TE_READONLY )
        mainSizer.Add( self.m_textCtrl9, 0, wx.ALL|wx.EXPAND, 5 )
        settingSizer = wx.BoxSizer( wx.VERTICAL )
        SendbackSizer = wx.BoxSizer( wx.VERTICAL )
        #self.m_checkBox1 = wx.CheckBox( self, wx.ID_ANY, u"Enable receive-triger program", wx.DefaultPosition, wx.DefaultSize, 0 )
        #SendbackSizer.Add( self.m_checkBox1, 0, wx.ALL, 5 )
        #SendbackSizer.Add( ( 0, 0), 1, wx.EXPAND, 5 )
        self.m_staticText2 = wx.StaticText( self, wx.ID_ANY, u"receive-trigger Program path", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText2.Wrap( -1 )
        SendbackSizer.Add( self.m_staticText2, 0, wx.ALL, 5 )
        scriptSizer = wx.BoxSizer( wx.HORIZONTAL )
        # Path of the script run for every received packet (see *_server).
        self.m_textCtrl3 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 170,25 ), 0 )
        scriptSizer.Add( self.m_textCtrl3, 0, wx.ALL, 5 )
        self.m_button1 = wx.Button( self, wx.ID_ANY, u"Refer", wx.DefaultPosition, wx.DefaultSize, 0 )
        scriptSizer.Add( self.m_button1, 0, wx.ALL, 5 )
        SendbackSizer.Add( scriptSizer, 1, wx.EXPAND, 5 )
        settingSizer.Add( SendbackSizer, 1, wx.EXPAND, 5 )
        # Protocol selection (UDP default).
        self.m_radioBtn6 = wx.RadioButton( self, wx.ID_ANY, u"UDP", wx.DefaultPosition, wx.DefaultSize, wx.RB_GROUP )
        self.m_radioBtn6.SetValue(True)
        settingSizer.Add( self.m_radioBtn6, 0, wx.ALL, 5 )
        self.m_radioBtn7 = wx.RadioButton( self, wx.ID_ANY, u"TCP", wx.DefaultPosition, wx.DefaultSize, 0 )
        settingSizer.Add( self.m_radioBtn7, 0, wx.ALL, 5 )
        portSizer = wx.BoxSizer( wx.HORIZONTAL )
        self.m_staticText1 = wx.StaticText( self, wx.ID_ANY, u"port", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText1.Wrap( -1 )
        portSizer.Add( self.m_staticText1, 0, wx.ALL, 5 )
        self.m_spinCtrl1 = wx.SpinCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.SP_ARROW_KEYS, 0, 65535, 8080 )
        portSizer.Add( self.m_spinCtrl1, 0, wx.ALL, 5 )
        #portSizer.Add( ( 0, 0), 1, wx.EXPAND, 5 )
        #self.m_button9 = wx.Button( self, wx.ID_ANY, u"reflesh", wx.DefaultPosition, wx.Size( -1,-1 ), 0 )
        #portSizer.Add( self.m_button9, 0, wx.ALL, 5 )
        settingSizer.Add( portSizer, 1, wx.EXPAND, 5 )
        # Receive-log display mode: utf-8 text / decimal / hex.
        windowSizer = wx.BoxSizer( wx.HORIZONTAL )
        self.m_button4 = wx.Button( self, wx.ID_ANY, u"clear", wx.DefaultPosition, wx.Size( 50,25 ), 0 )
        windowSizer.Add( self.m_button4, 0, wx.ALL, 5 )
        self.m_radioBtn71 = wx.RadioButton( self, wx.ID_ANY, STR_ENCODE, wx.DefaultPosition, wx.DefaultSize, wx.RB_GROUP )
        self.m_radioBtn71.SetValue(True)
        windowSizer.Add( self.m_radioBtn71, 0, wx.ALL, 5 )
        self.m_radioBtn10 = wx.RadioButton( self, wx.ID_ANY, u"deci", wx.DefaultPosition, wx.DefaultSize, 0 )
        windowSizer.Add( self.m_radioBtn10, 0, wx.ALL, 5 )
        self.m_radioBtn8 = wx.RadioButton( self, wx.ID_ANY, u"hex", wx.DefaultPosition, wx.DefaultSize, 0 )
        windowSizer.Add( self.m_radioBtn8, 0, wx.ALL, 5 )
        settingSizer.Add( windowSizer, 1, wx.EXPAND, 5 )
        # Send box with its own encoding mode (utf-8 text / hex).
        sendSizer = wx.BoxSizer( wx.HORIZONTAL )
        self.m_textCtrl6 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_MULTILINE )
        sendSizer.Add( self.m_textCtrl6, 1, wx.ALL, 5 )
        sendcfgSizer = wx.BoxSizer(wx.VERTICAL)
        self.m_radioBtn1x = wx.RadioButton( self, wx.ID_ANY, STR_ENCODE, wx.DefaultPosition, wx.DefaultSize, wx.RB_GROUP )
        sendcfgSizer.Add( self.m_radioBtn1x, 0, wx.ALL, 5 )
        self.m_radioBtn2x = wx.RadioButton( self, wx.ID_ANY, u"hex", wx.DefaultPosition, wx.DefaultSize, 0 )
        sendcfgSizer.Add( self.m_radioBtn2x, 0, wx.ALL, 5 )
        sendSizer.Add(sendcfgSizer, 1, wx.EXPAND, 5)
        self.m_button91 = wx.Button( self, wx.ID_ANY, u"Send", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_button91.Enable(False)  # enabled only while the server runs
        sendSizer.Add( self.m_button91, 0, wx.ALL, 5 )
        settingSizer.Add( sendSizer, 1, wx.EXPAND, 5 )
        ControlSizer = wx.BoxSizer( wx.HORIZONTAL )
        self.m_button2 = wx.Button( self, wx.ID_ANY, u"Start", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.actionflg = False  # True while the server is running
        ControlSizer.Add( self.m_button2, 0, wx.ALL, 5 )
        self.m_button3 = wx.Button( self, wx.ID_ANY, u"Save", wx.DefaultPosition, wx.DefaultSize, 0 )
        ControlSizer.Add( self.m_button3, 0, wx.ALL, 5 )
        settingSizer.Add( ControlSizer, 1, wx.EXPAND, 5 )
        mainSizer.Add( settingSizer, 1, wx.EXPAND, 5 )
        self.SetSizer( mainSizer )
        self.Layout()
        self.Centre( wx.BOTH )
        self.refleshflg = False
        # Connect Events
        self.m_button1.Bind( wx.EVT_BUTTON, self.fileWindow )
        #self.m_button9.Bind( wx.EVT_BUTTON, self.reflesh )
        self.m_button4.Bind( wx.EVT_BUTTON, self.clear )
        self.m_button2.Bind( wx.EVT_BUTTON, self.action )
        self.m_button3.Bind( wx.EVT_BUTTON, self.showsave )
        self.m_button91.Bind( wx.EVT_BUTTON, self.send_btn)
        #my code
        # Both server loops run permanently as daemon threads; each idles
        # until its protocol radio button + Start state select it.
        # NOTE(review): Thread.setDaemon() is deprecated in favor of the
        # `daemon` attribute; left unchanged in this documentation pass.
        self.looptask_udp = th.Thread(target=self.UDP_server)
        self.looptask_tcp = th.Thread(target=self.TCP_server)
        self.looptask_tcp.setDaemon(True)
        self.looptask_udp.setDaemon(True)
        self.looptask_udp.start()
        self.looptask_tcp.start()
        self.Bind(wx.EVT_CLOSE, self.ExitHandler)
    # wxFormBuilder-generated destructor stub; nothing extra to clean up.
    def __del__( self ):
        pass
    def ExitHandler(self, event):
        """Close handler: destroy the window and terminate the process,
        which also ends the daemonized server threads."""
        event.Skip()
        self.Destroy()
        exit(0)
    # Virtual event handlers, overide them in your derived class
    def fileWindow( self, event ):
        """Let the user pick the receive-trigger Python script path."""
        event.Skip()
        pathwdw = wx.FileDialog(None,wildcard="python Files(*.py)|*.py", defaultDir=os.curdir)
        if pathwdw.ShowModal() == wx.ID_OK:
            self.m_textCtrl3.Value = pathwdw.GetPath()
            pathwdw.Close()
        else:
            pass
    def reflesh( self, event ):
        # Handler for the commented-out "reflesh" button; intentionally a no-op.
        event.Skip()
    def clear( self, event ):
        """Empty both the receive log and the send input box."""
        event.Skip()
        self.m_textCtrl9.SetValue("")
        self.m_textCtrl6.SetValue("")
    def action( self, event ):
        """Toggle the server between running and stopped (Start/Stop button).

        Stopping re-enables the protocol/port controls, disarms the worker
        threads via actionflg and closes the active socket so the receive
        loop falls out; starting locks the controls and arms the threads.
        """
        if self.actionflg:
            self.m_radioBtn6.Enable(True)
            self.m_radioBtn7.Enable(True)
            self.m_spinCtrl1.Enable(True)
            self.m_button91.Enable(False)
            self.actionflg = False
            try:
                if self.m_radioBtn6.Value:#UDP
                    self.udpsock.close()
                    self.SetStatusText("UDP socket is closed.")
                elif self.m_radioBtn7.Value:#TCP
                    self.tcpsock.close()
                    self.SetStatusText("TCP socket is closed.")
            except OSError:
                pass
            self.m_button2.SetLabelText("Start")
        else:
            self.m_radioBtn6.Enable(False)
            self.m_radioBtn7.Enable(False)
            self.m_spinCtrl1.Enable(False)
            self.m_button91.Enable(True)
            self.actionflg = True
            self.m_button2.SetLabelText("Stop")
        event.Skip()
def showsave( self, event ):
#dialog = MyDialog1(self, self.m_textCtrl9.Value)
#dialog.Show()
event.Skip()
now_ts = datetime.now()
timestr = str(now_ts.year)+"_"+str(now_ts.day)+"_"+str(now_ts.hour)+str(now_ts.minute)+str(now_ts.second)
dialog = wx.FileDialog(self, defaultDir=os.curdir, defaultFile="pyserverlog"+timestr+".txt")
if dialog.ShowModal() == wx.ID_OK:
with open(dialog.GetPath(), "w") as f:
f.write(self.m_textCtrl9.Value)
wx.MessageBox("Save successfull.")
def send_btn(self, event):
if self.actionflg:
try:
if self.m_radioBtn6.GetValue():#UDP
if self.m_radioBtn1x.GetValue():#UTF-8
senddata = bytes(self.m_textCtrl6.GetValue(), STR_ENCODE)
sendlen = self.udpsock.sendto(senddata, self.client)
elif self.m_radioBtn2x.GetValue():#hex mode
sendtxtdata:str = self.m_textCtrl6.GetValue()
senddata = hexstring_bytes(sendtxtdata, DELIMITER)
sendlen = self.udpsock.sendto(senddata, self.client)
elif self.m_radioBtn7.GetValue():#TCP
if self.m_radioBtn1x.GetValue():#UTF-8
senddata = bytes(self.m_textCtrl6.GetValue(), STR_ENCODE)
sendlen = self.udpsock.sendto(senddata, self.client)
elif self.m_radioBtn2x.GetValue():#hex mode
sendtxtdata:str = self.m_textCtrl6.GetValue()
senddata = hexstring_bytes(sendtxtdata, DELIMITER)
sendlen = self.udpsock.sendto(senddata, self.client)
except TypeError or AttributeError:
wx.MessageBox("Sending failed. (No client)")
return
except ValueError:
wx.MessageBox("Text convert failed. (delimiter=\"{}\")".format(DELIMITER))
return
#wx.MessageBox("{} bytes have been sent.".format(sendlen))
self.SetStatusText("send: {}".format(senddata))
else:
wx.MessageBox("First of all, click \"Start\".")
    def UDP_server(self):
        """Background thread: while UDP mode is active, receive datagrams,
        pipe each payload through the optional receive-trigger script and
        echo the script's stdout back to the sender.

        NOTE(review): this thread touches wx widgets directly; wx is not
        documented as thread-safe — wx.CallAfter would be the safe route.
        Left unchanged in this documentation pass.
        """
        while True:
            if self.m_radioBtn6.GetValue() and self.actionflg:#is UDP mode
                try:
                    # Release any TCP sockets left over from TCP mode.
                    self.tcpsock_c.close()
                    self.tcpsock.close()
                except AttributeError:
                    pass
                print(self.m_spinCtrl1.GetValue())
                self.udpsock = sock_init("UDP", self.m_spinCtrl1.GetValue())
                self.SetStatusText("Inirialize UDP socket.")
                while self.m_radioBtn6.GetValue() and self.actionflg:#while UDP-mode&Act-mode
                    try:
                        recvinfo = self.udpsock.recvfrom(MAX_BUF_SIZE)
                    except OSError:
                        # Socket closed by action() -> leave the receive loop.
                        break
                    self.data = recvinfo[0]
                    self.client = recvinfo[1]
                    # Run the trigger script with a hidden console window
                    # (Windows-only subprocess STARTUPINFO flags).
                    startupinfo = subprocess.STARTUPINFO()
                    startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
                    startupinfo.wShowWindow = subprocess.SW_HIDE
                    with subprocess.Popen(["python", self.m_textCtrl3.GetValue(), str(self.data)],startupinfo=startupinfo , stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) as sp:
                        sp.wait()
                        try:
                            # SECURITY: eval() on the script's stdout executes
                            # arbitrary expressions — only run trusted scripts.
                            ifget = eval(sp.communicate()[0])
                        except SyntaxError:
                            ifget = None
                    #$python Arbitrary_code. bytes(data) -> response-read from stdout
                    if not(ifget == b"" or ifget == None):
                        self.udpsock.sendto(ifget, self.client)
                        self.SetStatusText("Send back: {}".format(ifget))
                    else:
                        self.SetStatusText("No send back data.")
                    #Put data
                    if self.m_radioBtn71.GetValue():#string-mode
                        self.m_textCtrl9.Value += self.data.decode(STR_ENCODE, "replace")
                    elif self.m_radioBtn10.GetValue():#decimal-mode
                        for _char in self.data:
                            self.m_textCtrl9.Value += str(_char)+DELIMITER
                    elif self.m_radioBtn8.GetValue():#hex-mode
                        for _char in self.data:
                            self.m_textCtrl9.Value += hex(_char)+DELIMITER
                    sleep(0.001)#1ms
                self.udpsock.close()
                self.udpsock = None
                self.client = None
            else:
                sleep(0.01)#10ms
def TCP_server(self):
while True:
if self.actionflg and self.m_radioBtn7.GetValue():
try:
self.udpsock.close()
except AttributeError:
pass
self.tcpsock = sock_init("TCP", self.m_spinCtrl1.GetValue())
self.SetStatusText("Inirialize TCP socket.")
try:
self.tcpsock.listen(5)
accinfo = self.tcpsock.accept()
self.tcpsock_c = accinfo[0]
self.client = accinfo[1]
self.tcpsock_c.settimeout(TIMEOUT)
except OSError:
break
while self.actionflg and self.m_radioBtn7.GetValue():
try:
recvinfo = self.udpsock.recvfrom(MAX_BUF_SIZE)
except OSError:
break
self.data = recvinfo[0]
self.client = recvinfo[1]
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
with subprocess.Popen(["python", self.m_textCtrl3.GetValue(), str(self.data)],startupinfo=startupinfo , stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) as sp:
sp.wait()
try:
ifget = eval(sp.communicate()[0])
except SyntaxError:
ifget = None
#$python Arbitrary_code. bytes(data) -> response-read from stdout
if not(ifget == b"" or ifget == None):
self.udpsock.sendto(ifget, self.client)
self.SetStatusText("Send back: {}".format(ifget))
else:
self.SetStatusText("No send back data.")
#Put data
if self.m_radioBtn71.GetValue():#string-mode
self.m_textCtrl9.Value += self.data.decode(STR_ENCODE, "replace")
elif self.m_radioBtn10.GetValue():#decimal-mode
for _char in self.data:
self.m_textCtrl9.Value += str(_char)+DELIMITER
elif self.m_radioBtn8.GetValue():#hex-mode
for _char in self.data:
self.m_textCtrl9.Value += hex(_char)+DELIMITER
sleep(0.001)
self.tcpsock_c.close()
self.client = None
self.tcpsock.close()
self.tcpsock = None
sleep(0.01)
    def frameLoop(self):
        """Legacy single-thread server loop handling both UDP and TCP.

        NOTE(review): never started anywhere in this file — __init__ spawns
        UDP_server/TCP_server threads instead. Kept verbatim; likely dead
        code that should eventually be removed.
        """
        while True:
            self.udpsock = sock_init("UDP", self.m_spinCtrl1.GetValue())
            while self.actionflg:
                if self.m_radioBtn6.GetValue():#UDP
                    recvinfo = self.udpsock.recvfrom(MAX_BUF_SIZE)
                    self.data = recvinfo[0]
                    self.client = recvinfo[1]
                    startupinfo = subprocess.STARTUPINFO()
                    startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
                    startupinfo.wShowWindow = subprocess.SW_HIDE
                    with subprocess.Popen(["python", self.m_textCtrl3.GetValue(), str(self.data)],startupinfo=startupinfo , stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) as sp:
                        sp.wait()
                        try:
                            # SECURITY: eval() of child stdout — trusted scripts only.
                            ifget = eval(sp.communicate()[0])
                        except SyntaxError:
                            ifget = None
                    #$python Arbitrary_code. bytes(data) -> response-read from stdout
                    if not(ifget == b"" or ifget == None):
                        self.udpsock.sendto(ifget, self.client)
                        self.SetStatusText("Send back: {}".format(ifget))
                    else:
                        self.SetStatusText("No send back data.")
                    if self.m_radioBtn71.GetValue():#string
                        self.m_textCtrl9.Value += self.data.decode(STR_ENCODE, "replace")
                    elif self.m_radioBtn10.GetValue():#decimal
                        for _char in self.data:
                            self.m_textCtrl9.Value += str(_char)+DELIMITER
                    elif self.m_radioBtn8.GetValue():#hex
                        for _char in self.data:
                            self.m_textCtrl9.Value += hex(_char)+DELIMITER
                elif self.m_radioBtn7.GetValue(): #TCP*********************************************************
                    self.tcpsock = sock_init("TCP", self.m_spinCtrl1.GetValue())
                    self.tcpsock.listen(1)
                    accinfo = self.tcpsock.accept()
                    self.tcpsock_c = accinfo[0]
                    self.client = accinfo[1]
                    self.tcpsock_c.settimeout(TIMEOUT)
                    while True:
                        try:
                            self.data = self.tcpsock_c.recv(MAX_BUF_SIZE)
                            startupinfo = subprocess.STARTUPINFO()
                            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
                            startupinfo.wShowWindow = subprocess.SW_HIDE
                            with subprocess.Popen(["python", self.m_textCtrl3.GetValue(), str(self.data)],startupinfo=startupinfo , stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) as sp:
                                sp.wait()
                                try:
                                    ifget = eval(sp.communicate()[0])
                                except SyntaxError:
                                    ifget = None
                            #$python Arbitrary_code. bytes(data) -> response-read from stdout
                            if not(ifget == b"" or ifget == None):
                                # NOTE(review): replies on the UDP socket even
                                # in the TCP branch — looks wrong; confirm.
                                self.udpsock.sendto(ifget, self.client)
                                self.SetStatusText("Send back: {}".format(ifget))
                            else:
                                self.SetStatusText("No send back data.")
                        except sk.timeout or sk.error or self.refleshflg:
                            # NOTE(review): `A or B or C` in an except clause
                            # catches only sk.timeout — likely a bug.
                            print("TCP connection end")
                            break
                        if self.m_radioBtn71.GetValue():#string
                            self.m_textCtrl9.Value += self.data.decode(STR_ENCODE, "replace")
                        elif self.m_radioBtn10.GetValue():#decimal
                            for _char in self.data:
                                self.m_textCtrl9.Value += str(_char)+DELIMITER
                        elif self.m_radioBtn8.GetValue():#hex
                            for _char in self.data:
                                self.m_textCtrl9.Value += hex(_char)+DELIMITER
                        sleep(0.001)
                    self.tcpsock_c.close()
                    self.tcpsock.close()
                    self.client = None
                    sleep(0.001)
                sleep(0.001)
            self.udpsock.close()
            self.client = None
###########################################################################
## Class MyDialog1
###########################################################################
class MyDialog1(wx.Dialog):
    """Save dialog offering an output-format choice (CSV/TSV shown but
    disabled) and a destination path; writes the given text on OK.

    NOTE(review): only instantiated from a commented-out line in
    MyFrame1.showsave — currently unused.
    """

    def __init__( self, parent, txtdata ):
        """Lay out the dialog; *txtdata* is the text written on OK."""
        wx.Dialog.__init__ ( self, parent, id = wx.ID_ANY, title = u"Save", pos = wx.DefaultPosition, size = wx.DefaultSize, style = wx.DEFAULT_DIALOG_STYLE )
        self.data = txtdata
        self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
        bSizer6 = wx.BoxSizer( wx.VERTICAL )
        bSizer7 = wx.BoxSizer( wx.HORIZONTAL )
        bSizer6.Add( bSizer7, 1, wx.EXPAND, 5 )
        bSizer8 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_radioBtn5 = wx.RadioButton( self, wx.ID_ANY, u"text", wx.DefaultPosition, wx.DefaultSize, 0 )
        bSizer8.Add( self.m_radioBtn5, 0, wx.ALL, 5 )
        self.m_radioBtn6 = wx.RadioButton( self, wx.ID_ANY, u"CSV", wx.DefaultPosition, wx.DefaultSize, 0 )
        bSizer8.Add( self.m_radioBtn6, 0, wx.ALL, 5 )
        # CSV/TSV export not implemented — options visible but disabled.
        self.m_radioBtn6.Enable(enable=False)
        self.m_radioBtn11 = wx.RadioButton( self, wx.ID_ANY, u"TSV", wx.DefaultPosition, wx.DefaultSize, 0 )
        bSizer8.Add( self.m_radioBtn11, 0, wx.ALL, 5 )
        self.m_radioBtn11.Enable(enable=False)
        bSizer6.Add( bSizer8, 1, wx.EXPAND, 5 )
        self.m_staticText3 = wx.StaticText( self, wx.ID_ANY, u"File path", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText3.Wrap( -1 )
        bSizer6.Add( self.m_staticText3, 0, wx.ALL, 5 )
        bSizer9 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_textCtrl3 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 150,30 ), 0 )
        bSizer9.Add( self.m_textCtrl3, 0, wx.ALL, 5 )
        self.m_button7 = wx.Button( self, wx.ID_ANY, u"Refer", wx.DefaultPosition, wx.Size( 50,30 ), 0 )
        bSizer9.Add( self.m_button7, 0, wx.ALL, 5 )
        bSizer6.Add( bSizer9, 1, wx.EXPAND, 5 )
        bSizer10 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_button5 = wx.Button( self, wx.ID_ANY, u"OK", wx.DefaultPosition, wx.DefaultSize, 0 )
        bSizer10.Add( self.m_button5, 0, wx.ALL, 5 )
        self.m_button6 = wx.Button( self, wx.ID_ANY, u"Cancel", wx.DefaultPosition, wx.DefaultSize, 0 )
        bSizer10.Add( self.m_button6, 0, wx.ALL, 5 )
        bSizer6.Add( bSizer10, 1, wx.EXPAND, 5 )
        self.m_button5.Bind(wx.EVT_BUTTON, self.bt_OK)
        self.m_button6.Bind(wx.EVT_BUTTON, self.bt_Cancel)
        self.m_button7.Bind(wx.EVT_BUTTON, self.refer)
        self.SetSizer( bSizer6 )
        self.Layout()
        bSizer6.Fit( self )
        self.Centre( wx.BOTH )

    def __del__( self ):
        pass

    def refer(self, event):
        """Open a file chooser and copy the chosen path into the text box."""
        event.Skip()
        pathwdw = wx.FileDialog(None,defaultDir=os.curdir)
        if pathwdw.ShowModal() == wx.ID_OK:
            self.m_textCtrl3.SetValue(pathwdw.GetPath())
            pathwdw.Close()
        else:
            pass

    def bt_OK(self, event):
        """Write the stored text to the entered path and close the dialog."""
        with open(self.m_textCtrl3.Value, "w") as sf:
            sf.write(self.data)
        event.Skip()
        self.Close()

    def bt_Cancel(self, event):
        self.Close()
        event.Skip()
if __name__ == "__main__":
    # Override the module-level defaults from the JSON config file.
    with open("config.json", "r") as conf:
        cfgdata: dict = json.load(conf)
    MAX_BUF_SIZE = cfgdata["MAX_BUF_SIZE"]
    TIMEOUT = cfgdata["TCP_TIMEOUT"]
    DELIMITER = cfgdata["DELIMITER"]
    STR_ENCODE = cfgdata["STR_ENCODE"]
    app = wx.App(0, useBestVisual=True)
    try:
        frame = MyFrame1(None)
        app.SetTopWindow(frame)
        frame.Show()
    except Exception:
        # BUG FIX: the original bare `except:` called frame.Destroy() even
        # when MyFrame1(None) itself raised (frame unbound -> NameError) and
        # then exit(0)-ed, hiding the real error. Clean up and re-raise.
        app.Destroy()
        raise
    app.MainLoop()
|
hdf_to_influxdb.py | from datetime import datetime
import time
import h5py
import pandas as pd
from pathlib import Path
from influxdb_client.client.write_api import SYNCHRONOUS, PointSettings
from influxdb_client import InfluxDBClient, WritePrecision
from config_secret import bucket, org, token
from multiprocessing import Process
df_header = ["voltage", "current"]
def ds_to_phys(dataset: "h5py.Dataset"):
    """Convert a raw HDF5 dataset to physical units using the dataset's
    own 'gain' and 'offset' attributes (value = raw * gain + offset)."""
    scale = dataset.attrs["gain"]
    shift = dataset.attrs["offset"]
    return dataset[:] * scale + shift
def extract_hdf(hdf_file: Path) -> pd.DataFrame:
    """Load the voltage/current traces from an HDF5 recording into a
    time-indexed DataFrame.

    All series are truncated to the shortest of (time, voltage, current)
    so the index and columns always have equal length.
    """
    with h5py.File(hdf_file, "r") as recording:
        signals = {name: ds_to_phys(recording["data"][name]) for name in df_header}
        timestamps = recording["data"]["time"][:]
    usable = min(len(timestamps), *(len(series) for series in signals.values()))
    frame = pd.DataFrame(
        data={name: series[:usable] for name, series in signals.items()},
        columns=df_header,
        index=timestamps[:usable],
    )
    print("HDF extracted..")
    return frame
def put_in_influx(data: pd.DataFrame, client_id: int):
    """Write *data* to InfluxDB in batches, tagged with host=client_id.

    Runs as one worker process; bucket/org/token come from config_secret.
    """
    # take load off, this can also come from a toml-file
    point_setting = PointSettings()
    point_setting.add_default_tag("host", str(client_id))
    point_setting.add_default_tag("location", "roomX")
    client = InfluxDBClient(url="http://10.0.0.39:8086", token=token, timeout=10000, enable_gzip=True)
    write_client = client.write_api(point_settings=point_setting, write_options=SYNCHRONOUS)  # Asynch makes no big difference
    batch_size = 50000  # link states optimum at 5k lines, https://docs.influxdata.com/influxdb/v2.0/write-data/best-practices/optimize-writes/
    sample_size = data.shape[0]
    for start in range(0, sample_size, batch_size):
        # BUG FIX: the original clamped the stop index to sample_size - 1,
        # and since iloc's stop is exclusive the final row was never
        # written (the wrapping max() was a no-op).
        stop = min(start + batch_size, sample_size)
        print(f"writing part: id{client_id} {start}:{stop}")
        write_client.write(bucket, org, record=data.iloc[start:stop, :], data_frame_measurement_name='my_meas5', write_precision=WritePrecision.NS)
    write_client.close()
    client.close()
if __name__ == "__main__":
    # Benchmark: fan the same DataFrame out to proc_num writer processes.
    proc_num = 16
    sample_size = 1000000
    data = extract_hdf(Path("./rec.6.h5"))
    data = data.head(sample_size)
    print(f"Dataset data: {datetime.fromtimestamp(data.index[0]/1e9)}")
    # NOTE(review): this prints the same timestamp twice — the second line
    # was perhaps meant to show data.index[-1]; preserved as-is.
    print(f"Dataset data: {datetime.fromtimestamp(data.index[0]/1e9)}")
    print(f"Writing Batch of: {data.shape} entries, {data.shape[0]/1e5} sec\n {data.dtypes}")
    print(data.iloc[0:5, :])
    time_start = time.time()
    workers = []
    for worker_id in range(proc_num):
        worker = Process(target=put_in_influx, args=(data, worker_id))
        worker.start()
        workers.append(worker)
    print("Processes created")
    for worker in workers:
        worker.join()
    duration = round(time.time() - time_start, 2)
    insertsps = round(proc_num * sample_size / duration / 1000)
    print(f"Insertion took {duration} seconds, {insertsps} k/s for {proc_num} * {sample_size} items")
# results:
# - inserting 200s data takes ~ 190s (1 node), with almost no load on VM or system
# - ram usage seems to be ok << 1 GB
# - query's with ns resolution can get very slow. ~3s for averaging windows
# - influx can almost naturally import hdf5, numpy-arrays, pandas Dataframes
# - asyncio does not work (threads)
# - multiprocessing-lib (one vServer, 1 host that inserts data) [8cores,HT]
# - 1x 1M, 12.7 s, 79 k/s
# - 4x 1M, 15.48, 258 k/s (~25 % cpu in vm)
# - 8x 1M, 19.53, 410 k/s (~50 % cpu in vm)
# - 16x 1M, 29.96, 534 k/s (~80 % cpu in vm, 1.8 GB ram usage of VM, 100% cpu of host)
# - 32x 1M, 55.157, 580 k/s (~90 % cpu in vm, 1.8 GB ram usage of VM, 100% cpu of host)
# - batch size (16x 1M):
# - 05 k, 31.33 s, 511 k/s
# - 10 k, 30.20 s, 530 k/s
# - 20 k, 29.69 s, 539 k/s
# - 40 k, 29.10 s, 550 k/s
# - 50 k, 29.09 s, 550 k/s
|
test_orm_symbols_facts.py | #------------------------------------------------------------------------------
# Unit tests for Clorm ORM SymbolPredicateUnifer and unify function.
#
# Note: I'm trying to clearly separate tests of the official Clorm API from
# tests of the internal implementation. Tests for the API have names
# "test_api_XXX" while non-API tests are named "test_nonapi_XXX". This is still
# to be completed.
# ------------------------------------------------------------------------------
import unittest,os
import tempfile
from .support import check_errmsg, add_program_string
from clingo import Control, Number, String, Function, SymbolType
# Official Clorm API imports
from clorm.orm import \
BaseField, Raw, RawField, IntegerField, StringField, ConstantField, SimpleField, \
Predicate, ComplexTerm, path, hashable_path, FactBase
# Official Clorm API imports
from clorm import SymbolPredicateUnifier, unify, \
control_add_facts, symbolic_atoms_to_facts, \
parse_fact_string, parse_fact_files, \
UnifierNoMatchError, NonFactError, define_nested_list_field
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Test cases exported when this module is imported with a wildcard.
__all__ = [
    'UnifyTestCase',
    'ClingoControlConvTestCase',
    'ParseTestCase'
    ]
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
def hpaths(paths):
    """Return the hashable form of each clorm path in *paths*."""
    result = []
    for p in paths:
        result.append(hashable_path(p))
    return result
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
class UnifyTestCase(unittest.TestCase):
    """Tests for unifying raw clingo symbols against clorm Predicate
    definitions: the unify() function, SymbolPredicateUnifier, signed
    literals, and exception propagation during field conversion."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    #--------------------------------------------------------------------------
    # Simple test to make sure that raw terms unify correctly
    #--------------------------------------------------------------------------
    def test_predicate_instance_raw_term(self):
        """RawField should unify with arbitrary symbols and preserve them."""
        raw1 = Function("func",[Number(1)])
        raw2 = Function("bob",[String("no")])
        rf1 = RawField()
        rt1 = Function("tmp", [Number(1), raw1])
        rt2 = Function("tmp", [Number(1), raw2])
        self.assertTrue(rf1.unifies(raw1))
        class Tmp(Predicate):
            n1 = IntegerField()
            r1 = RawField()
        self.assertTrue(Tmp._unifies(rt1))
        self.assertTrue(Tmp._unifies(rt2))
        t1 = Tmp(1,Raw(raw1))
        t2 = Tmp(1,Raw(raw2))
        self.assertEqual(set([f for f in unify([Tmp], [rt1,rt2])]),set([t1,t2]))
        self.assertEqual(t1.r1.symbol, raw1)
        self.assertEqual(t2.r1.symbol, raw2)
    #--------------------------------------------------------------------------
    # Test a generator that takes n-1 Predicate types and a list of raw symbols
    # as the last parameter, then tries to unify the raw symbols with the
    # predicate types.
    # --------------------------------------------------------------------------
    def test_unify(self):
        """unify() should match each raw symbol against the right predicate class."""
        raws = [
            Function("afact",[Number(1),String("test")]),
            Function("afact",[Number(2),Number(3),String("test")]),
            Function("afact",[Number(1),Function("fun",[Number(1)])]),
            Function("bfact",[Number(3),String("test")])
        ]
        class Afact1(Predicate):
            anum=IntegerField()
            astr=StringField()
            class Meta: name = "afact"
        class Afact2(Predicate):
            anum1=IntegerField()
            anum2=IntegerField()
            astr=StringField()
            class Meta: name = "afact"
        class Afact3(Predicate):
            class Fun(ComplexTerm):
                fnum=IntegerField()
            anum=IntegerField()
            afun=Fun.Field()
            # afun=ComplexField(Fun)
            class Meta: name = "afact"
        class Bfact(Predicate):
            anum=IntegerField()
            astr=StringField()
        af1_1=Afact1(anum=1,astr="test")
        af2_1=Afact2(anum1=2,anum2=3,astr="test")
        af3_1=Afact3(anum=1,afun=Afact3.Fun(fnum=1))
        bf_1=Bfact(anum=3,astr="test")
        g1=list(unify([Afact1],raws))
        g2=list(unify([Afact2],raws))
        g3=list(unify([Afact3],raws))
        g4=list(unify([Bfact],raws))
        g5=list(unify([Afact1,Bfact],raws))
        self.assertEqual([af1_1], g1)
        self.assertEqual([af2_1], g2)
        self.assertEqual([af3_1], g3)
        self.assertEqual([bf_1], g4)
        self.assertEqual([af1_1,bf_1], g5)
        # Test the ordered option that returns a list of facts that preserves
        # the order of the original symbols.
        g1=unify([Afact1,Afact2,Bfact], raws, ordered=True)
        self.assertEqual(g1, [af1_1,af2_1,bf_1])
    #--------------------------------------------------------------------------
    # Test unification with nullary predicates
    # --------------------------------------------------------------------------
    def test_unify_nullary(self):
        """Zero-arity predicates should unify with their matching atoms."""
        raws = [
            Function("afact",[Number(1),String("test")]),
            Function("nullary1",[]),
            Function("nullary2",[]),
            Function("afact",[Number(2),String("test")]),
        ]
        class Afact(Predicate):
            anum=IntegerField()
            astr=StringField()
            class Meta: name = "afact"
        class Nullary1(Predicate):
            class Meta: name = "nullary1"
        class Nullary2(Predicate):
            class Meta: name = "nullary2"
        af_1=Afact(anum=1,astr="test")
        af_2=Afact(anum=2,astr="test")
        u_1=Nullary1()
        u_2=Nullary2()
        self.assertEqual(list(unify([Nullary1],raws)),[u_1])
        self.assertEqual(list(unify([Nullary2],raws)),[u_2])
        self.assertEqual(set(unify([Afact,Nullary1,Nullary2],raws)),
                         set([af_1,af_2,u_1,u_2]))
    #--------------------------------------------------------------------------
    # Test unifying between predicates which have the same name-arity
    # signature. There was a bug in the unify() function where only of the
    # unifying classes was ignored leading to failed unification.
    # --------------------------------------------------------------------------
    def test_unify_same_sig(self):
        """Both classes sharing a name/arity should get a chance to unify."""
        class ATuple(ComplexTerm):
            aconst=ConstantField()
            bint = IntegerField()
            class Meta: is_tuple = True
        class Fact1(Predicate):
            aint = IntegerField()
            aconst = ConstantField()
            class Meta: name = "fact"
        class Fact2(Predicate):
            aint = IntegerField()
            atuple = ATuple.Field()
            class Meta: name = "fact"
        r1 = Function("fact",[Number(1), Function("bob",[])])
        r2 = Function("fact",[Number(1), Function("", [Function("bob",[]),Number(1)])])
        # r1 only unifies with Fact1 and r2 only unifies with Fact2
        f1 = Fact1(raw=r1)
        self.assertEqual(f1.raw, r1)
        with self.assertRaises(ValueError) as ctx:
            f2 = Fact1(raw=r2)
        f2 = Fact2(raw=r2)
        self.assertEqual(f2.raw, r2)
        with self.assertRaises(ValueError) as ctx:
            f1 = Fact2(raw=r1)
        # The unify() function should correctly unify both facts
        res = unify([Fact1,Fact2],[r1,r2])
        self.assertEqual(len(res), 2)
    #--------------------------------------------------------------------------
    # Test unifying between predicates which have the same name-arity
    # signature to make sure the order of the predicate classes correctly
    # corresponds to the order in which the facts are unified.
    # --------------------------------------------------------------------------
    def test_unify_same_sig2(self):
        """Class order decides which predicate wins an ambiguous unification."""
        class Fact1(Predicate):
            aint = IntegerField()
            aconst = ConstantField()
            class Meta: name = "fact"
        class Fact2(Predicate):
            aint = IntegerField()
            araw = RawField()
            class Meta: name = "fact"
        r1 = Function("fact",[Number(1), Function("bob",[])])
        r2 = Function("fact",[Number(1), Function("", [Function("bob",[]),Number(1)])])
        # r1 only unifies with Fact1 but both r1 and r2 unify with Fact2
        f1 = Fact1(raw=r1)
        self.assertEqual(f1.raw, r1)
        with self.assertRaises(ValueError) as ctx:
            f2 = Fact1(raw=r2)
        f1_alt = Fact2(raw=r1)
        self.assertEqual(f1_alt.raw, r1)
        f2 = Fact2(raw=r2)
        self.assertEqual(f2.raw, r2)
        # unify() unifies r1 with Fact1 (f1) and r2 with Fact2 (f2)
        res = unify([Fact1,Fact2],[r1,r2])
        self.assertEqual(len(res), 2)
        self.assertTrue(f1 in res)
        self.assertTrue(f2 in res)
        # unify() unifies r1 and r2 with Fact2 (f1_alt and f2)
        res = unify([Fact2,Fact1],[r1,r2])
        self.assertEqual(len(res), 2)
        self.assertTrue(f1_alt in res)
        self.assertTrue(f2 in res)
    #--------------------------------------------------------------------------
    # Test unifying with negative facts
    #--------------------------------------------------------------------------
    def test_unify_signed_literals(self):
        """Predicates with fixed sign should only unify with matching literals."""
        class F1(Predicate):
            a = IntegerField
            class Meta:
                name = "f"
                sign = True
        class F2(Predicate):
            a = IntegerField
            class Meta:
                name = "f"
                sign = False
        pos_raw1 = Function("f",[Number(1)])
        pos_raw2 = Function("f",[Number(2)])
        neg_raw1 = Function("f",[Number(1)],False)
        neg_raw2 = Function("f",[Number(2)],False)
        pos1 = F1(a=1)
        pos2 = F1(a=2)
        neg1 = F2(a=1,sign=False)
        neg2 = F2(a=2,sign=False)
        # unify with all raw
        fb = unify([F1,F2], [ pos_raw1, pos_raw2, neg_raw1, neg_raw2])
        self.assertEqual(len(fb), 4)
        self.assertEqual(set(fb.query(F1).all()), set([pos1,pos2]))
        self.assertEqual(set(fb.query(F2).all()), set([neg1,neg2]))
        fb = unify([F1], [ pos_raw1, pos_raw2, neg_raw1, neg_raw2])
        self.assertEqual(len(fb), 2)
        self.assertEqual(fb.query(F1).count(), 2)
        fb = unify([F2], [ pos_raw1, pos_raw2, neg_raw1, neg_raw2])
        self.assertEqual(len(fb), 2)
        self.assertEqual(fb.query(F2).count(), 2)
        with self.assertRaises(ValueError) as ctx:
            bad1 = F1(a=1,sign=False)
    #--------------------------------------------------------------------------
    # Test unify catching exceptions. When failing to convert a symbol to a
    # python object we need to catch some exceptions. But we shouldn't catch all
    # exceptions, otherwise genuine errors (like missing modules) will not be
    # caught. Thanks to Susana Hahn for finding this problem.
    # --------------------------------------------------------------------------
    def test_unify_catch_exceptions(self):
        """Genuine errors (NameError, ModuleNotFoundError) must propagate."""
        # Define a class that converts strings but makes bad exceptions for any
        # other input
        class TmpField(BaseField):
            def cltopy(raw):
                if raw.type == SymbolType.String:
                    return raw.string
                return blah.blah.error1(raw)
            def pytocl(v):
                if isinstance(v,str): return String(v)
                import blah
                return blah.error2(v)
        # This is good
        self.assertEqual(TmpField.cltopy(String("blah")), "blah")
        self.assertEqual(TmpField.pytocl("blah"), String("blah"))
        # Some things that should throw an exception
        with self.assertRaises(AttributeError) as ctx:
            r=TmpField.cltopy(1)
        check_errmsg("'int' object has no attribute 'type'",ctx)
        with self.assertRaises(NameError) as ctx:
            r=TmpField.cltopy(Number(1))
        check_errmsg("name 'blah' is not defined",ctx)
        with self.assertRaises(ModuleNotFoundError) as ctx:
            r=TmpField.pytocl(1)
        check_errmsg("No module named 'blah'",ctx)
        class F(Predicate):
            v=TmpField
        # Ok
        raw=Function("f",[String("astring")])
        unify([F],[raw])
        # Bad
        with self.assertRaises(NameError) as ctx:
            raw=Function("f",[Number(1)])
            unify([F],[raw])
        check_errmsg("name 'blah' is not defined",ctx)
    #--------------------------------------------------------------------------
    # Test the factbasehelper with double decorators
    #--------------------------------------------------------------------------
    def test_symbolpredicateunifier(self):
        """Registering the same predicate with several unifiers via decorators."""
        # Using the SymbolPredicateUnifier as a decorator
        spu1 = SymbolPredicateUnifier()
        spu2 = SymbolPredicateUnifier()
        spu3 = SymbolPredicateUnifier(suppress_auto_index=True)
        # decorator both
        @spu3.register
        @spu2.register
        @spu1.register
        class Afact(Predicate):
            num1=IntegerField(index=True)
            num2=IntegerField()
            str1=StringField()
        # decorator without argument
        @spu1.register
        class Bfact(Predicate):
            num1=IntegerField(index=True)
            str1=StringField()
        self.assertEqual(spu1.predicates, (Afact,Bfact))
        self.assertEqual(spu2.predicates, (Afact,))
        self.assertEqual(spu3.predicates, (Afact,))
        self.assertEqual(set(hpaths(spu1.indexes)),
                         set(hpaths([Afact.num1,Bfact.num1])))
        self.assertEqual(hpaths(spu2.indexes), hpaths([Afact.num1]))
        self.assertEqual(spu3.indexes, ())
    #--------------------------------------------------------------------------
    # Test the symbolpredicateunifier when there are subfields defined
    #--------------------------------------------------------------------------
    def test_symbolpredicateunifier_with_subfields(self):
        """Indexes declared on complex-term subfields should be picked up."""
        spu = SymbolPredicateUnifier()
        class CT(ComplexTerm):
            a = IntegerField
            b = StringField(index=True)
            c = (IntegerField(index=True),ConstantField)
        @spu.register
        class P(Predicate):
            d = CT.Field(index=True)
            e = CT.Field()
        expected=set([hashable_path(P.d),
                      hashable_path(P.d.b), hashable_path(P.d.c.arg1),
                      hashable_path(P.e.b), hashable_path(P.e.c.arg1)])
        self.assertEqual(spu.predicates, (P,))
        self.assertEqual(set([hashable_path(p) for p in spu.indexes]), set(expected))
        ct_func=Function("ct",[Number(1),String("aaa"),
                               Function("",[Number(1),Function("const",[])])])
        p1=Function("p",[ct_func,ct_func])
        fb=spu.unify(symbols=[p1],raise_on_empty=True)
        self.assertEqual(len(fb),1)
        self.assertEqual(set([hashable_path(p) for p in fb.indexes]), expected)
    #--------------------------------------------------------------------------
    # Test that subclass factbase works and we can specify indexes
    #--------------------------------------------------------------------------
    def test_symbolpredicateunifier_symbols(self):
        """FactBase construction paths: spu.unify, delayed init and manual add."""
        class Afact(Predicate):
            num1=IntegerField()
            num2=IntegerField()
            str1=StringField()
        class Bfact(Predicate):
            num1=IntegerField()
            str1=StringField()
        class Cfact(Predicate):
            num1=IntegerField()
        af1 = Afact(1,10,"bbb")
        af2 = Afact(2,20,"aaa")
        af3 = Afact(3,20,"aaa")
        bf1 = Bfact(1,"aaa")
        bf2 = Bfact(2,"bbb")
        cf1 = Cfact(1)
        raws = [
            Function("afact",[Number(1), Number(10), String("bbb")]),
            Function("afact",[Number(2), Number(20), String("aaa")]),
            Function("afact",[Number(3), Number(20), String("aaa")]),
            Function("bfact",[Number(1),String("aaa")]),
            Function("bfact",[Number(2),String("bbb")]),
            Function("cfact",[Number(1)])
        ]
        spu = SymbolPredicateUnifier(predicates=[Afact,Bfact,Cfact])
        # Test the different ways that facts can be added
        fb = spu.unify(symbols=raws)
        self.assertFalse(fb._delayed_init)
        self.assertEqual(set(fb.predicates), set([Afact,Bfact,Cfact]))
        s_af_all = fb.query(Afact)
        self.assertEqual(set(s_af_all.all()), set([af1,af2,af3]))
        fb = spu.unify(symbols=raws, delayed_init=True)
        self.assertTrue(fb._delayed_init)
        self.assertEqual(set(fb.predicates), set([Afact,Bfact,Cfact]))
        s_af_all = fb.query(Afact)
        self.assertEqual(set(s_af_all.all()), set([af1,af2,af3]))
        fb = FactBase()
        fb.add([af1,af2,af3])
        #### self.assertEqual(fb.add([af1,af2,af3]),3)
        s_af_all = fb.query(Afact)
        self.assertEqual(set(s_af_all.all()), set([af1,af2,af3]))
        fb = FactBase()
        fb.add(af1)
        fb.add(af2)
        fb.add(af3)
        #### self.assertEqual(fb.add(af1),1)
        #### self.assertEqual(fb.add(af2),1)
        #### self.assertEqual(fb.add(af3),1)
        s_af_all = fb.query(Afact)
        self.assertEqual(set(s_af_all.all()), set([af1,af2,af3]))
        # Test that adding symbols can handle symbols that don't unify
        fb = spu.unify(symbols=raws)
        s_af_all = fb.query(Afact)
        self.assertEqual(set(s_af_all.all()), set([af1,af2,af3]))
        # NOTE(review): everything below this return is unreachable --
        # presumably deliberately disabled; confirm before removing.
        return
        # Test the specification of indexes
        class MyFactBase3(FactBase):
            predicates = [Afact, Bfact]
        spu = SymbolPredicateUnifier(predicates=[Afact,Bfact,Cfact],
                                     indexes=[Afact.num1, Bfact.num1])
        fb = spu.unify(symbols=raws)
        s = fb.query(Afact).where(Afact.num1 == 1)
        self.assertEqual(s.get_unique(), af1)
        s = fb.query(Bfact).where(Bfact.num1 == 1)
        self.assertEqual(s.get_unique(), bf1)
#------------------------------------------------------------------------------
# Functions that facilitate interactions with clingo.Control. Note: uses
# multiprocessing library to make sure that we avoid the solver not being able
# to release symbols between runs.
# ------------------------------------------------------------------------------
import multiprocessing as mp
class XP(Predicate):
    # Unifies with xp/1 ground atoms carrying an integer argument.
    x=IntegerField
class XQ(Predicate):
    # Unifies with xq/1 ground atoms carrying an integer argument.
    x=IntegerField
class XQ2(Predicate):
    # Unifies with xq/1 ground atoms carrying a string argument (same
    # predicate name as XQ but a different field type).
    x=StringField
    class Meta: name="xq"
def symbolic_atoms_to_facts_test1(q,facts_only):
    """Ground a small program and push the unified FactBase onto queue *q*.

    Runs inside a child process (see the module comment) so the solver's
    symbols are released when the process exits.  With facts_only=True the
    atoms in the choice rule are excluded.
    """
    prgstr="""xq(1). xq("a"). 1 { xp(1);xp(2) }2."""
    ctrl=Control()
    add_program_string(ctrl,prgstr)
    ctrl.ground([("base",[])])
    fb=symbolic_atoms_to_facts(ctrl.symbolic_atoms,[XP,XQ,XQ2],
                               facts_only=facts_only)
    q.put(fb)
class ClingoControlConvTestCase(unittest.TestCase):
    """Tests for moving facts between clorm and a clingo.Control object."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    #--------------------------------------------------------------------------
    # Basic test of adding facts into a control object
    #--------------------------------------------------------------------------
    def test_control_add_facts(self):
        """control_add_facts() should make the facts appear in the solved model."""
        class F(Predicate):
            anum = IntegerField
        f1 = F(1) ; f2 = F(2)
        ctrl = Control()
        control_add_facts(ctrl,[f1,f2])
        ctrl.ground([("base",[])])
        model = None
        with ctrl.solve(yield_=True) as sh:
            for m in sh:
                model=str(m)
        self.assertEqual(model, "{} {}".format(f1,f2))
    #--------------------------------------------------------------------------
    # Test converting Control.symbolic_atoms to a factbase
    #--------------------------------------------------------------------------
    def test_symbolic_atoms_to_facts(self):
        """Run the conversion in a child process and compare the FactBases."""
        fb1_expected=FactBase([XP(1),XP(2),XQ(1),XQ2("a")])
        fb2_expected=FactBase([XQ(1),XQ2("a")])
        # Return all ground atoms
        q=mp.Queue()
        p=mp.Process(target=symbolic_atoms_to_facts_test1,args=(q,False))
        p.start()
        fb1_result=q.get()
        p.join()
        self.assertEqual(fb1_result,fb1_expected)
        # Return only fact atoms
        q=mp.Queue()
        p=mp.Process(target=symbolic_atoms_to_facts_test1,args=(q,True))
        p.start()
        fb2_result=q.get()
        p.join()
        self.assertEqual(fb2_result,fb2_expected)
#------------------------------------------------------------------------------
# Test of functions involve with parsing asp ground facts to clorm facts
#------------------------------------------------------------------------------
class ParseTestCase(unittest.TestCase):
    """Tests for parsing ASP ground-fact strings/files into clorm facts."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    #--------------------------------------------------------------------------
    #
    #--------------------------------------------------------------------------
    def test_parse_facts(self):
        """Round-trip facts through parse_fact_string/parse_fact_files."""
        class P(Predicate):
            '''A P predicate'''
            x=IntegerField
            y=StringField
        class Q(Predicate):
            '''A Q predicate'''
            x=ConstantField
            y=P.Field
        asp1="""p(1,"home\\""). -p(-2,"blah").\n"""
        asp2=asp1 + """q(X,Y) :- p(X,Y)."""
        fb_p=FactBase([P(1,"home\""),P(-2,"blah",sign=False)])
        fb_in=FactBase([P(1,"home\""),
                        Q("abc",P(3,"H ome")),
                        Q("z",P(-1,"One more string")),
                        P(-2,"blah",sign=False)])
        # Match a basic string with a rule
        fb_out = parse_fact_string(asp2,unifier=[P,Q])
        self.assertEqual(fb_p,fb_out)
        # All inputs and outputs match
        fb_out = parse_fact_string(fb_in.asp_str(),unifier=[P,Q])
        self.assertEqual(fb_in,fb_out)
        # Match only the p/2 facts
        fb_out = parse_fact_string(fb_in.asp_str(),unifier=[P])
        self.assertEqual(fb_p,fb_out)
        # Match with comments
        fb_out = parse_fact_string(fb_in.asp_str(commented=True),unifier=[P,Q])
        self.assertEqual(fb_in,fb_out)
        # Error on ununified facts
        with self.assertRaises(UnifierNoMatchError) as ctx:
            fb_out = parse_fact_string(fb_in.asp_str(),unifier=[P],
                                       raise_nomatch=True)
        check_errmsg("Cannot unify symbol 'q(abc",ctx)
        # Error on nonfact
        with self.assertRaises(NonFactError) as ctx:
            fb_out = parse_fact_string(asp2,unifier=[P],
                                       raise_nonfact=True)
        check_errmsg("Rule 'q(X,Y)",ctx)
        # Try the fact files parser
        with tempfile.TemporaryDirectory() as tmpdirname:
            fname=os.path.join(tmpdirname,"asp.lp")
            with open(fname, "w+") as f:
                f.write(fb_in.asp_str(commented=True))
            fb_out=parse_fact_files([fname],unifier=[P,Q])
            self.assertEqual(fb_in,fb_out)
    #--------------------------------------------------------------------------
    # Test parsing some nested facts
    #--------------------------------------------------------------------------
    def test_parse_nested_facts(self):
        """Nested-list fields should survive an asp_str round-trip."""
        class P(Predicate):
            x=IntegerField
            y=define_nested_list_field(ConstantField)
        fb_in = FactBase([P(x=1,y=tuple(["a","b","c"]))])
        aspstr = fb_in.asp_str()
        fb_out = parse_fact_string(aspstr,unifier=[P],raise_nomatch=True)
        self.assertEqual(fb_in,fb_out)
    #--------------------------------------------------------------------------
    # Parsing non simple facts to raise NonFactError. Non simple facts include:
    # - a term with @-function call (this needs a Control object for grounding)
    # - a disjunctive fact
    # - a choice rule
    # --------------------------------------------------------------------------
    def test_parse_non_simple_facts(self):
        """Each non-simple construct should raise NonFactError with a clear message."""
        class P(Predicate):
            '''A P predicate'''
            x=IntegerField
        # Using an external function
        asp="""p(@func(1))."""
        with self.assertRaises(NonFactError) as ctx:
            fb_out = parse_fact_string(asp,unifier=[P],raise_nonfact=True)
        check_errmsg("'@func(1)' is an external function in 'p(@func(1)).'",ctx)
        # A choice rule
        asp="""{ p(2); p(3) }."""
        with self.assertRaises(NonFactError) as ctx:
            fb_out = parse_fact_string(asp,unifier=[P],raise_nonfact=True)
        check_errmsg("Aggregate '{ p(2); p(3) }'",ctx)
        # A disjunctive fact
        asp="""p(2); p(3)."""
        with self.assertRaises(NonFactError) as ctx:
            fb_out = parse_fact_string(asp,unifier=[P],raise_nonfact=True)
        check_errmsg("Disjunction 'p(2); p(3)'",ctx)
        # A theory atom - let the general non-fact literal catch this
        asp="""&diff{p(2)}."""
        with self.assertRaises(NonFactError) as ctx:
            fb_out = parse_fact_string(asp,unifier=[P],raise_nonfact=True)
        check_errmsg("'&diff { p(2) }'",ctx)
#------------------------------------------------------------------------------
# main
#------------------------------------------------------------------------------
if __name__ == "__main__":
    # This module is a test library; refuse direct execution (run via a
    # unittest/pytest test runner instead).
    raise RuntimeError('Cannot run modules')
|
rl_helper.py | import os
import time
import copy
import numpy as np
from setproctitle import setproctitle
import visdom
from pycrayon import CrayonClient
import torch
import torch.multiprocessing as mp
from rl import EvalResult
from utils import set_random_seed
def build_env(game_type, args, **params):
    """Instantiate the game environment named by *game_type* (case-insensitive).

    Only 'ALE' is currently supported; any other value raises RuntimeError.
    Extra keyword arguments are forwarded to the environment constructor.
    """
    kind = game_type.upper()
    if kind != 'ALE':
        raise RuntimeError('Unknown game_type [{}], should be one of [ALE]'.format(kind))
    from ale_env import ALE
    return ALE(args.rom_path, **params)
def async_train(args, create_agent, model, model_eval):
    """Launch A3C-style training: N trainer processes, one evaluator and
    (optionally) one renderer, all sharing *model* and a global step counter.

    Blocks until every child process has joined.
    """
    setproctitle('{}:train[MASTER]'.format(args.name))
    # Shared global step counter ('l' = signed long), incremented by trainers.
    counter = mp.Value('l', 0)
    def run_trainer(process_idx):
        # One trainer process: build its own agent and run the training loop.
        setproctitle('{}:train[{}]'.format(args.name, process_idx))
        set_random_seed(np.random.randint(0, 2 ** 32))
        agent = create_agent(process_idx)
        train_loop(counter, args, agent)
    def run_evalator():
        # Periodic evaluation and checkpointing process.
        setproctitle('{}:eval'.format(args.name))
        set_random_seed(np.random.randint(0, 2 ** 32))
        eval_loop(counter, args, model, model_eval)
    def run_player():
        # Rendering process (only started when args.no_render is False).
        setproctitle('{}:play'.format(args.name))
        set_random_seed(np.random.randint(0, 2 ** 32))
        play_loop(counter, args, model, model_eval)
    processes = []
    processes.append(mp.Process(target=run_evalator))
    if not args.no_render:
        processes.append(mp.Process(target=run_player))
    # Trainer indices start at 1; index 0 is implicitly the master.
    for process_idx in range(args.n_processes):
        processes.append(mp.Process(target=run_trainer, args=(process_idx+1,)))
    for p in processes:
        p.start()
    for p in processes:
        p.join()
def train_loop(counter, args, agent):
    """Drive one trainer until the shared step counter exceeds args.n_steps.

    The learning rate is annealed linearly towards zero as the global step
    approaches args.n_steps.  On KeyboardInterrupt the agent is finalised
    before the exception propagates; on normal exit it is finalised too.
    """
    global_t = 0
    try:
        while True:
            # Linear decay based on the most recently observed global step.
            agent.set_lr((args.n_steps - global_t - 1) / args.n_steps * args.lr)
            steps_done = agent.act()
            # Fold this worker's steps into the shared counter atomically.
            with counter.get_lock():
                counter.value += steps_done
                global_t = counter.value
            if global_t > args.n_steps:
                break
    except KeyboardInterrupt:
        agent.finish()
        raise
    agent.finish()
def eval_loop(counter, args, shared_model, model_eval):
    """Evaluation process: periodically copy the shared model, run args.n_eval
    episodes, log/plot rewards, and checkpoint the best and latest weights.

    Exits once the global step counter reaches args.n_steps.  On
    KeyboardInterrupt the latest shared weights are saved before re-raising.
    """
    try:
        SEC_PER_DAY = 24*60*60
        env = build_env(args.type, args, treat_life_lost_as_terminal=False, max_time=5*60)
        # Local copy so evaluation does not perturb the trainers' model.
        model = copy.deepcopy(shared_model)
        model.eval()
        # Create a new experiment
        vis = visdom.Visdom(env='A3C:'+args.name)
        cc = CrayonClient()
        names = cc.get_experiment_names()
        summaries = []
        # One Crayon experiment per evaluation episode slot; recreate if stale.
        for idx in range(args.n_eval):
            name = "{} [{}]".format(args.name, idx+1)
            if name in names:
                cc.remove_experiment(name)
            summaries.append(cc.create_experiment(name))
        max_reward = None
        save_condition = args.save_intervel   # next step threshold for a periodic checkpoint
        rewards = []
        start_time = time.time()
        while True:
            # Sync with the shared model
            model.load_state_dict(shared_model.state_dict())
            restart, eval_start_time, eval_start_step = False, time.time(), counter.value
            results = []
            for i in range(args.n_eval):
                model.reset_state()
                results.append(model_eval(model, env, vis=(vis, i+1, 60)))
                # An episode that hit the wall-clock limit invalidates this round.
                if env.exceed_max:
                    restart = True
                    env.reset()
                    break
                env.reset()
            if restart:
                continue
            eval_end_time, eval_end_step = time.time(), counter.value
            results = EvalResult(*zip(*results))
            rewards.append((counter.value, results.reward))
            local_max_reward = np.max(results.reward)
            if max_reward is None or max_reward < local_max_reward:
                max_reward = local_max_reward
            if local_max_reward >= max_reward:
                # Save model
                torch.save(model.state_dict(), os.path.join(args.model_path, 'best_model.pth'))
            # Progress report: elapsed time and ETA extrapolated from the
            # training throughput observed during this evaluation round.
            time_since_start = eval_end_time - start_time
            day = time_since_start // SEC_PER_DAY
            time_since_start %= SEC_PER_DAY
            seconds_to_finish = (args.n_steps - eval_end_step)/(eval_end_step-eval_start_step)*(eval_end_time-eval_start_time)
            days_to_finish = seconds_to_finish // SEC_PER_DAY
            seconds_to_finish %= SEC_PER_DAY
            print("STEP:[{}|{}], Time: {}d {}, Finish in {}d {}".format(
                counter.value, args.n_steps, '%02d' % day, time.strftime("%Hh %Mm %Ss", time.gmtime(time_since_start)),
                '%02d' % days_to_finish, time.strftime("%Hh %Mm %Ss", time.gmtime(seconds_to_finish))))
            print('\tMax reward: {}, avg_reward: {}, std_reward: {}, min_reward: {}, max_reward: {}'.format(
                max_reward, np.mean(results.reward), np.std(results.reward), np.min(results.reward), local_max_reward))
            # Plot
            for summary, reward in zip(summaries, results.reward):
                summary.add_scalar_value('reward', reward, step=eval_start_step)
            # Periodic checkpoint plus append accumulated rewards to disk.
            if counter.value > save_condition or counter.value >= args.n_steps:
                save_condition += args.save_intervel
                torch.save(model.state_dict(), os.path.join(args.model_path, 'model_iter_{}.pth'.format(counter.value)))
                torch.save(model.state_dict(), os.path.join(args.model_path, 'model_latest.pth'))
                with open(os.path.join(args.save_path, 'rewards'), 'a+') as f:
                    for record in rewards:
                        f.write('{}: {}\n'.format(record[0], record[1]))
                del rewards[:]
            if counter.value >= args.n_steps:
                print('Evaluator Finished !!!')
                break
    except KeyboardInterrupt:
        torch.save(shared_model.state_dict(), os.path.join(args.model_path, 'model_latest.pth'))
        raise
def play_loop(counter, args, shared_model, model_eval):
    """Render evaluation episodes with a private copy of the shared model
    until the global step counter reaches args.n_steps."""
    try:
        env = build_env(args.type, args, render=not args.no_render,
                        treat_life_lost_as_terminal=False, max_time=5*60)
        local_model = copy.deepcopy(shared_model)
        local_model.eval()
        finished = False
        while not finished:
            # Refresh weights from the trainers, then play one episode.
            local_model.load_state_dict(shared_model.state_dict())
            local_model.reset_state()
            model_eval(local_model, env)
            env.reset()
            if counter.value >= args.n_steps:
                print('Player Finished !!!')
                finished = True
    except KeyboardInterrupt:
        raise
p2000.py | #!/usr/bin/env python3
"""RTL-SDR P2000 Receiver for Home Assistant."""
# See README for installation instructions
import calendar
import configparser
import fnmatch
import json
import os
import re
import subprocess
import sys
import threading
import time
from datetime import datetime
import requests
VERSION = "0.0.1"
class MessageItem:
    """Value object holding one parsed P2000/FLEX message.

    All attributes start out empty/zero and are filled in by the parsing
    thread; ``is_posted`` flags whether the message has been sent to
    Home Assistant.
    """

    def __init__(self):
        self.message_raw = ""        # unparsed FLEX line as received
        # BUG FIX: the original first assigned a formatted datetime.now()
        # string to ``timestamp`` and then immediately overwrote it with ""
        # two lines later -- the first assignment was dead code and has been
        # removed.  The parser fills this field in later.
        self.timestamp = ""
        self.timereceived = time.monotonic()  # monotonic clock, for age checks
        self.groupid = ""
        self.receivers = ""
        self.capcodes = []           # capcode strings addressed by this message
        self.body = ""
        self.location = ""
        self.postcode = ""
        self.city = ""
        self.address = ""
        self.street = ""
        self.region = ""
        self.priority = 0            # 0 = unknown, 1..4 from p2000_get_prio()
        self.disciplines = ""
        self.remarks = ""
        self.is_posted = False       # True once posted to Home Assistant
def load_config():
    """Return settings parsed from 'config.ini'.

    When no config file exists yet, write a default template to the current
    directory and exit (code 0) so the user can edit it and restart.
    """
    config = configparser.ConfigParser()
    if config.read("config.ini"):
        print("Loading configuration from 'config.ini'")
        return config
    # First run: persist a template the user must edit before restarting.
    defaults = {
        "main": {"debug": False},
        "rtl-sdr": {
            "cmd": "rtl_fm -f 169.65M -M fm -s 22050 | multimon-ng -a FLEX -t raw -"
        },
        "home-assistant": {
            "baseurl": "http://192.168.2.123:8123",
            "token": "Place Your Long-Lived Access Token Here",
            "sensorname": "P2000",
        },
    }
    config.read_dict(defaults)
    with open("config.ini", "w") as configfile:
        config.write(configfile)
    print("Created config file 'config.ini', edit it and restart the program.")
    sys.exit(0)
def check_requirements():
    """Return True when both rtl_fm and multimon-ng are installed.

    Each tool is launched once through the shell and its stderr is inspected
    for the shell's 'not found' (or Windows 'not recognized') message.
    """
    print("Checking if required software is installed")
    probes = (
        ("rtl_fm", ("not found", "not recognized"),
         "rtl_fm command not found, please install RTL-SDR software",
         "rtl_fm is found"),
        ("multimon-ng -h", ("not found",),
         "multimon-ng not found, please install the multimon-ng package",
         "multimon-ng is found"),
    )
    for command, needles, missing_msg, found_msg in probes:
        proc = subprocess.Popen(
            command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        # Wait for the probe to finish and collect its stderr.
        _, err = proc.communicate()
        stderr_text = err.decode("utf8")
        if any(needle in stderr_text for needle in needles):
            print(missing_msg)
            return False
        print(found_msg)
    return True
def load_capcodes_dict(filename):
    """Load a capcodes CSV into a dict keyed by capcode.

    The first CSV column is the capcode; the remaining header names become
    the keys of each per-capcode record dict.  Returns an empty dict when
    the file is missing or malformed.
    """
    capcodes = {}
    try:
        print("Loading data from '{}'".format(filename))
        with open(filename, "r") as csv_file:
            csv_list = [
                [val.strip() for val in r.split(",")] for r in csv_file.readlines()
            ]
            (_, *header), *data = csv_list
            for row in data:
                # Renamed from `key` -- the original reused `key` both as the
                # row key and inside the comprehension, which was confusing.
                code, *values = row
                capcodes[code] = dict(zip(header, values))
        print("{} records loaded".format(len(capcodes)))
    except (KeyError, ValueError):
        # ValueError covers an empty file / short header row during unpacking,
        # which the original did not catch.
        # BUG FIX: the f-strings below printed the literal "(unknown)" --
        # the filename was never interpolated; the stray ", ignore filter"
        # text was also a copy-paste from the filter loader.
        print(f"Could not parse file contents of: {filename}")
    except OSError:
        print(f"Could not open/read file: {filename}")
    return capcodes
def load_capcodes_filter_dict(filename):
    """Load a capcode ignore/match file into {capcode: description}.

    Each non-comment line is ``capcode[,description]``; lines starting with
    '#' are skipped and a missing description is stored as 'NO DESCR'.
    Returns an empty dict when the file cannot be read.
    """
    capcodes = {}
    try:
        print("Loading data from '{}'".format(filename))
        with open(filename, "r") as text_file:
            lines = text_file.readlines()
        for item in lines:
            stripped = item.strip()
            # BUG FIX: the original indexed item[0] directly, so a blank line
            # ("\n") was not skipped and produced a bogus '' -> 'NO DESCR'
            # entry; blank and comment lines are now both skipped.
            if not stripped or stripped[0] == "#":
                continue
            fields = stripped.split(",")
            if len(fields) == 2:
                capcodes[fields[0].strip()] = fields[1].strip()
            elif len(fields) == 1:
                capcodes[fields[0].strip()] = 'NO DESCR'
        print("{} records loaded".format(len(capcodes)))
        return capcodes
    except KeyError:
        # BUG FIX: the f-strings below printed the literal "(unknown)" --
        # the filename was never interpolated.
        print(f"Could not parse file contents of: {filename}")
    except OSError:
        print(f"Could not open/read file: {filename}, ignore filter")
    return capcodes
def load_list(filename):
    """Load non-empty, non-comment lines ('#' or ';' prefixed) into a list.

    Lines are stripped of surrounding whitespace.  Returns an empty list
    when the file cannot be read.
    """
    tmplist = []
    try:
        print("Loading data from '{}'".format(filename))
        with open(filename, "r") as text_file:
            lines = text_file.readlines()
        stripped = (line.strip() for line in lines)
        tmplist = [
            line
            for line in stripped
            if line and line[0:1] not in ("#", ";")
        ]
        print("{} records loaded".format(len(tmplist)))
        return tmplist
    except KeyError:
        # BUG FIX: the f-strings below printed the literal "(unknown)" --
        # the filename was never interpolated.
        print(f"Could not parse file contents of: {filename}")
    except OSError:
        print(f"Could not open/read file: {filename}")
    return tmplist
def check_filter(mylist, text):
    """Return True when *text* matches any glob pattern in *mylist*.

    An empty pattern list means 'no filtering configured' and allows
    everything through.
    """
    if not mylist:
        return True
    return any(fnmatch.fnmatch(text, pattern) for pattern in mylist)
def to_local_datetime(utc_dt):
"""Convert utc to local time."""
time_tuple = time.strptime(utc_dt, "%Y-%m-%d %H:%M:%S")
return time.ctime(calendar.timegm(time_tuple))
def p2000_get_prio(message):
    """Return the priority level (1-4) encoded in a P2000 message body.

    Recognises Dutch dispatch prefixes such as A1/A2, B1-B3, PRIO n and Pn
    (case-insensitive).  Returns 0 when no priority marker is found.
    """
    rules = (
        (r"^A\s?1|\s?A\s?1|PRIO\s?1|^P\s?1", 1),
        (r"^A\s?2|\s?A\s?2|PRIO\s?2|^P\s?2", 2),
        (r"^B\s?1|^B\s?2|^B\s?3|PRIO\s?3|^P\s?3", 3),
        (r"^PRIO\s?4|^P\s?4", 4),
    )
    # First matching rule wins, mirroring the original if/elif cascade.
    for pattern, level in rules:
        if re.search(pattern, message, re.IGNORECASE):
            return level
    return 0
class Main:
"""Main class, start of application."""
    def __init__(self):
        """Bootstrap the receiver: verify tooling, load config and data files,
        start the RTL-SDR reader and Home Assistant poster threads, then
        block in a wait loop until Ctrl-C."""
        self.running = True     # cleared on Ctrl-C; worker threads poll this
        self.messages = []      # parsed MessageItem queue shared with both threads
        print(f"RTL-SDR P2000 Receiver for Home Assistant Version {VERSION}\n")
        # Set current folder so we can find the config files
        os.chdir(os.path.dirname(os.path.abspath(__file__)))
        # Check if required software is installed
        if not check_requirements():
            print("Application stopped, required software was not found!")
            sys.exit(0)
        # Load configuration
        self.config = load_config()
        self.debug = self.config.getboolean("main", "debug")
        self.rtlfm_cmd = self.config.get("rtl-sdr", "cmd")
        self.baseurl = self.config.get("home-assistant", "baseurl")
        self.token = self.config.get("home-assistant", "token")
        self.sensorname = self.config.get("home-assistant", "sensorname")
        # Load capcodes data
        self.capcodes = load_capcodes_dict("db_capcodes.txt")
        # Load plaatsnamen data
        self.plaatsnamen = load_list("db_plaatsnamen.txt")
        # Load plaatsnamen afkortingen data
        self.pltsnmn = load_capcodes_dict("db_pltsnmn.txt")
        # Load capcodes ignore data
        self.ignorecapcodes = load_capcodes_filter_dict("ignore_capcodes.txt")
        # Load text ignore data
        self.ignoretext = load_list("ignore_text.txt")
        # Load match text filter data
        self.matchtext = load_list("match_text.txt")
        # Load match capcodes filter data
        self.matchcapcodes = load_capcodes_filter_dict("match_capcodes.txt")
        # Start thread to get data from RTL-SDR stick
        data_thread = threading.Thread(target=self.data_thread_call)
        data_thread.start()
        # Start thread to post messages to Home Assistant
        post_thread = threading.Thread(target=self.post_thread_call)
        post_thread.start()
        # Run the wait loop
        while True:
            try:
                time.sleep(1)
            except KeyboardInterrupt:
                break
        # Application is interrupted and is stopping
        self.running = False
        print("Application stopped")
def post_to_homeassistant(self, msg):
"""Post data to Home Assistant via Rest API."""
data = {
"state": msg.body,
"attributes": {
"time received": msg.timestamp,
"group id": msg.groupid,
"receivers": msg.receivers,
"capcodes": msg.capcodes,
"priority": msg.priority,
"disciplines": msg.disciplines,
"raw message": msg.message_raw,
"region": msg.region,
"location": msg.location,
"postcode": msg.postcode,
"city": msg.city,
"address": msg.address,
"street": msg.street,
"remarks": msg.remarks,
},
}
try:
headers = {
"Authorization": "Bearer " + self.token,
"content-type": "application/json",
}
response = requests.post(
self.baseurl + "/api/states/sensor." + self.sensorname,
headers=headers,
data=json.dumps(
data, default=lambda o: o.__dict__, sort_keys=True, indent=4
),
)
response.raise_for_status()
if self.debug:
print(f"POST status: {response.status_code} {response.reason}")
print(f"POST text: {response.text}")
except requests.HTTPError:
print(
f"HTTP Error while trying to post data, check your baseurl and token in config.ini: {response.status_code} {response.reason}"
)
except requests.exceptions.SSLError as err:
print(
f"SSL Error occurred while trying to post data, check baseurl in config.ini:\n{err}"
)
except requests.exceptions.ConnectionError as err:
print(
f"Connection Error occurred while trying to post data, check your baseurl in config.ini:\n{err}"
)
finally:
# Mark as posted to prevent race conditions
msg.is_posted = True
def data_thread_call(self):
"""Thread for parsing data from RTL-SDR."""
print(f"RTL-SDR process started with: {self.rtlfm_cmd}")
multimon_ng = subprocess.Popen(
self.rtlfm_cmd, stdout=subprocess.PIPE, shell=True
)
try:
while self.running:
# Read line from process
line = multimon_ng.stdout.readline()
try:
line = line.decode("utf8", "backslashreplace")
except UnicodeDecodeError:
if self.debug:
print(f"Error while decoding utf8 string: {line}")
line = ""
multimon_ng.poll()
if line.startswith("FLEX") and line.__contains__("ALN"):
line_data = line.split("|")
timestamp = line_data[1]
groupid = line_data[3].strip()
capcodes = line_data[4].strip()
message = line_data[6].strip()
priority = p2000_get_prio(message)
location = ""
postcode = ""
city = ""
address = ""
street = ""
if self.debug:
print(line.strip())
# Get address info if any, look for valid postcode and get the two words around them
# A2 (DIA: ja) AMBU 17106 Schiedamseweg 3134BA Vlaardingen VLAARD bon 8576
regex_address = r"(\w*.) ([1-9][0-9]{3}[a-zA-Z]{2}) (.\w*)"
addr = re.search(regex_address, message)
if addr:
street = addr.group(1)
postcode = addr.group(2)
city = addr.group(3)
address = f"{street} {postcode} {city}"
# Try to get city only when there is one after a prio
# A1 Breda
else:
regex_prio_loc = r"(^A\s?1|\s?A\s?2|B\s?1|^B\s?2|^B\s?3|PRIO\s?1|^P\s?1|PRIO\s?2|^P\s?2) (.\w*)"
loc = re.search(regex_prio_loc, message)
if loc and loc.group(2) in self.plaatsnamen:
city = loc.group(2)
else:
# Find all uppercase words and check if there is a valid city name amoung them
# A2 Ambulancepost Moordrecht Middelweg MOORDR V
regex_afkortingen = "[A-Z]{2,}"
afkortingen = re.findall(regex_afkortingen, message)
for afkorting in afkortingen:
if afkorting in self.pltsnmn:
city = self.pltsnmn[afkorting]["plaatsnaam"]
if not check_filter(self.matchtext, message):
if self.debug:
print(
f"Message '{message}' ignored (didn't match match_text)")
else:
if check_filter(self.ignoretext, message):
if self.debug:
print(
f"Message '{message}' ignored (matched ignore_text)")
else:
# There can be several capcodes in one message
ignore = False
for capcode in capcodes.split(" "):
# Apply filter
if not capcode in self.matchcapcodes and self.matchcapcodes:
if self.debug:
print(
f"Message '{message}' ignored (didn't match match_capcodes)"
)
ignore = True
break
if capcode in self.ignorecapcodes and self.ignorecapcodes:
if self.debug:
print(
f"Message '{message}' to '{capcode}' ignored (capcode in ignore_capcodes)"
)
ignore = True
break
if not ignore:
for capcode in capcodes.split(" "):
# Get data from capcode, if exist
if capcode in self.capcodes:
receiver = "{} ({})".format(
self.capcodes[capcode]["description"], capcode
)
discipline = "{} ({})".format(
self.capcodes[capcode]["discipline"], capcode
)
region = self.capcodes[capcode]["region"]
location = self.capcodes[capcode]["location"]
remark = self.capcodes[capcode]["remark"]
else:
receiver = capcode
discipline = ""
region = ""
remark = ""
# If this message was already received, only add extra info
if len(self.messages) > 0 and self.messages[0].body == message:
if self.messages[0].receivers == "":
self.messages[0].receivers = receiver
elif receiver:
self.messages[0].receivers += ", " + receiver
if self.messages[0].disciplines == "":
self.messages[0].disciplines = discipline
elif discipline:
self.messages[0].disciplines += ", " + discipline
if self.messages[0].remarks == "":
self.messages[0].remarks = remark
elif remark:
self.messages[0].remarks += ", " + remark
self.messages[0].capcodes.append(capcode)
self.messages[0].location = location
self.messages[0].postcode = postcode
self.messages[0].city = city
self.messages[0].street = street
self.messages[0].address = address
else:
msg = MessageItem()
msg.groupid = groupid
msg.receivers = receiver
msg.capcodes = [capcode]
msg.body = message
msg.message_raw = line.strip()
msg.disciplines = discipline
msg.priority = priority
msg.region = region
msg.location = location
msg.postcode = postcode
msg.city = city
msg.street = street
msg.address = address
msg.remarks = remark
msg.timestamp = to_local_datetime(timestamp)
msg.is_posted = False
self.messages.insert(0, msg)
# Limit the message list size
if len(self.messages) > 100:
self.messages = self.messages[:100]
except KeyboardInterrupt:
os.kill(multimon_ng.pid, 9)
if self.debug:
print("Data thread stopped")
# Thread for posting data to Home Assistant
def post_thread_call(self):
"""Thread for posting data."""
if self.debug:
print("Post thread started")
while True:
if self.running is False:
break
now = time.monotonic()
for msg in self.messages:
if msg.is_posted is False and now - msg.timereceived >= 1.0:
self.post_to_homeassistant(msg)
time.sleep(1.0)
if self.debug:
print("Post thread stopped")
# Start application (the constructor runs the whole receive/post loop until Ctrl-C)
Main()
|
train.py | import sys, time, threading
from time import sleep
import torch
from torchvision import datasets, transforms, models
from torch import nn
from torch import optim
from get_input_args_train import get_input_args_train
from workspace_utils import active_session
from get_transforms import get_test_transforms, get_train_transforms
from get_model_by_name import get_model_by_name
def animated_loading(id, stop):
    """Render a console spinner until *stop*() returns True.

    Args:
        id: epoch number printed above the spinner (name shadows builtin id()).
        stop: zero-argument callable polled before each frame.
    """
    chars = r"/—\|"  # raw string avoids the invalid "\|" escape warning
    print("Epoch", id)
    while True:
        for char in chars:
            # BUG FIX: poll before every frame instead of once per 4-frame
            # cycle, so the spinner exits promptly when asked to stop.
            if stop():
                return
            sys.stdout.write('\r' + '|' + char + '| ')
            sys.stdout.flush()
            time.sleep(.1)
def get_device(use_gpu):
    """Return the CUDA device when requested and available, otherwise the CPU."""
    cuda_wanted_and_present = torch.cuda.is_available() and use_gpu
    if cuda_wanted_and_present:
        return torch.device("cuda")
    return torch.device("cpu")
def train(model, data_dir, device, learning_rate=0.001, hidden_units=1000, epochs=6):
    """Train a fresh classifier head on data_dir/{train,test} and print progress.

    Replaces model.classifier with a 25088 -> hidden_units -> 500 -> 102
    LogSoftmax head and trains it with Adam / NLLLoss, validating after each
    epoch. Spinner threads animate the console during each phase.

    Args:
        model: torchvision model whose .classifier is replaced.
        data_dir: directory containing 'train' and 'test' image folders.
        device: torch.device to run on.
        learning_rate: Adam learning rate.
        hidden_units: width of the first hidden layer.
        epochs: number of passes over the training set.

    Returns:
        (optimizer, train_datasets) so the caller can build a checkpoint.
    """
    batch_size = 64
    start_time = time.time()
    train_transforms = get_train_transforms()
    test_transforms = get_test_transforms()
    train_datasets = datasets.ImageFolder(data_dir + '/train', transform=train_transforms)
    trainloader = torch.utils.data.DataLoader(train_datasets, batch_size=batch_size, shuffle=True)
    test_datasets = datasets.ImageFolder(data_dir + '/test', transform=test_transforms)
    testloader = torch.utils.data.DataLoader(test_datasets, batch_size=batch_size, shuffle=True)
    # NOTE(review): 25088 matches VGG-style feature output — other architectures
    # would need a different input width; confirm against get_model_by_name.
    model.classifier = nn.Sequential(nn.Linear(25088, hidden_units),
                                     nn.ReLU(),
                                     nn.Dropout(0.2),
                                     nn.Linear(hidden_units, 500),
                                     nn.ReLU(),
                                     nn.Dropout(0.2),
                                     nn.Linear(500, 102),
                                     nn.LogSoftmax(dim=1)
                                     )
    criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
    model.to(device)
    running_loss = 0
    testing_loss = 0
    test_accuracy = 0
    training_step = 0
    testing_step = 0
    total_iterations = (len(trainloader) + len(testloader)) * epochs
    stop_thread_training_animation = False
    stop_thread_testing_animation = False
    with active_session():
        for epoch in range(1, epochs + 1):
            # Fresh spinner threads every epoch (a Thread can only be started once);
            # the lambdas see rebinds of the stop_* locals via closure.
            training_animation_thread = threading.Thread(target=animated_loading, args=(epoch, lambda: stop_thread_training_animation))
            testing_animation_thread = threading.Thread(target=animated_loading, args=(epoch, lambda: stop_thread_testing_animation))
            stop_thread_training_animation = False
            training_animation_thread.start()
            model.train()
            for inputs, labels in trainloader:
                total_steps = training_step + testing_step
                print("{:.3f}% |training| steps: {}/{}".format(total_steps * 100/total_iterations, total_steps, total_iterations))
                training_step += 1
                inputs, labels = inputs.to(device), labels.to(device)
                optimizer.zero_grad()
                logps = model.forward(inputs)
                loss = criterion(logps, labels)
                loss.backward()
                optimizer.step()
                running_loss += loss.item()
                ps = torch.exp(logps)
                top_p, top_class = ps.topk(1, dim=1)
                equals = top_class == labels.view(*top_class.shape)
                test_accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
            stop_thread_training_animation = True
            accuracy = 0
            model.eval()
            with torch.no_grad():
                stop_thread_testing_animation = False
                testing_animation_thread.start()
                for inputs, labels in testloader:
                    total_steps = training_step + testing_step
                    print("{:.3f}% |testing| steps: {}/{}".format(total_steps * 100/total_iterations, total_steps, total_iterations))
                    testing_step += 1
                    inputs, labels = inputs.to(device), labels.to(device)
                    logps = model.forward(inputs)
                    loss = criterion(logps, labels)
                    testing_loss += loss.item()
                    ps = torch.exp(logps)
                    top_p, top_class = ps.topk(1, dim=1)
                    equals = top_class == labels.view(*top_class.shape)
                    accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
            stop_thread_testing_animation = True
            print(f"Training Loss {running_loss/training_step}")
            # BUG FIX: validation loss was divided by training_step; the
            # correct denominator is the number of validation batches seen.
            print(f"Validation Loss {testing_loss/testing_step}")
            print(f"Accuracy {accuracy/len(testloader)}")
    end_time = time.time()
    # BUG FIX: elapsed time is end - start (was start - end, always negative)
    tot_time = end_time - start_time
    print("\n** Total Elapsed Runtime:",
          str(int((tot_time/3600)))+":"+str(int((tot_time%3600)/60))+":"
          +str(int((tot_time%3600)%60)) )
    return optimizer, train_datasets
def save_checkpoint(model_name, model, optimizer, train_datasets, epochs, save_dir):
    """Serialize training state to '<save_dir>/checkpoint.pth'.

    Stores the class-to-index mapping, epoch count, model/optimizer state
    dicts, the architecture name and the classifier module so the model can
    be rebuilt and resumed later.
    """
    import os  # local import: the module header does not import os
    # BUG FIX: plain concatenation dropped the path separator when save_dir
    # had no trailing '/'; os.path.join handles both forms identically.
    PATH = os.path.join(save_dir, 'checkpoint.pth')
    checkpoint = {
        'train_images_class_idx': train_datasets.class_to_idx,
        'epochs': epochs,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'model_name': model_name,
        'classifier': model.classifier
    }
    torch.save(checkpoint, PATH)
    print('checkpoint saved')
if __name__ == "__main__":
    # Parse CLI arguments (architecture, data dir, hyperparameters, GPU flag, save dir)
    in_arg = get_input_args_train()
    # Resolve the torchvision model constructor by name, then instantiate pretrained
    model = get_model_by_name(in_arg.arch)
    model = model(pretrained=True)
    # NOTE(review): in_arg.gpu is compared against the string 'False', so the
    # flag appears to arrive as a string rather than a bool — confirm in
    # get_input_args_train.
    run_on_gpu = True
    if in_arg.gpu == 'False':
        run_on_gpu = False
    device = get_device(run_on_gpu)
    optimizer, train_dataset = train(
        model,
        in_arg.data_dir,
        device,
        in_arg.learning_rate,
        in_arg.hidden_units,
        in_arg.epochs,
    )
    save_checkpoint(
        in_arg.arch,
        model,
        optimizer,
        train_dataset,
        in_arg.epochs,
        in_arg.save_dir
    )
|
node_client.py | import os
import subprocess
import threading
import time
import json
import sublime
import sublime_plugin
import time
from .logger import log
from . import json_helpers
from . import global_vars
# queue module name changed from Python 2 to 3
if int(sublime.version()) < 3000:
import Queue as queue
else:
import queue
class CommClient(object):
    """Abstract communication client.

    Defines the interface concrete node clients implement; every method here
    is a no-op placeholder that returns None.
    """

    def started(self):
        pass

    def postCmd(self, cmd):
        pass

    def sendCmd(self, cmd, cb):
        pass

    def sendCmdSync(self, cmd):
        pass

    def sendCmdAsync(self, cmd, cb):
        pass
class NodeCommClient(CommClient):
    """Talks to a node-hosted server process over its stdin/stdout pipes.

    Outgoing commands are queued on postq and written by a daemon post thread;
    incoming framed JSON messages are parsed by read_msg and dispatched either
    to registered async callbacks, event handlers, or the msgq response queue.
    """

    # Framing header prefix for length-prefixed JSON messages from the server
    __CONTENT_LENGTH_HEADER = b"Content-Length: "

    def __init__(self, script_path):
        """Set up queues and handler maps, and start the background post thread."""
        self.server_proc = None
        self.script_path = script_path
        # create event handler maps
        self.event_handlers = dict()
        # create response and event queues
        self.msgq = queue.Queue()
        self.postq = queue.Queue()
        # request_seq -> callback for commands sent via sendCmdAsync
        self.asyncReq = {}
        self.debug_proc = None
        self.breakpoints = []
        post_thread = threading.Thread(target=NodeCommClient.monitorPostQueue, args=(self,))
        post_thread.daemon = True
        post_thread.start()

    def makeTimeoutMsg(self, cmd, seq):
        """Build a synthetic failure response for *cmd*/*seq* after a timeout."""
        jsonDict = json_helpers.decode(cmd)
        timeoutMsg = {
            "seq": 0,
            "type": "response",
            "success": False,
            "request_seq": seq,
            "command": jsonDict["command"],
            "message": "timeout"
        }
        return timeoutMsg

    def add_event_handler(self, event_name, cb):
        """Register callback *cb* for server events named *event_name* (deduplicated)."""
        event_handlers = self.event_handlers
        if event_name not in event_handlers:
            event_handlers[event_name] = []
        if cb not in event_handlers[event_name]:
            event_handlers[event_name].append(cb)

    def started(self):
        """Return True once a server process has been spawned."""
        return self.server_proc is not None

    # work in progress
    def addBreakpoint(self, file, line):
        self.breakpoints.append((file, line))

    # work in progress
    def debug(self, file):
        # TODO: msg if already debugging
        self.debug_proc = subprocess.Popen(["node", "--debug", file],
                                           stdin=subprocess.PIPE, stdout=subprocess.PIPE)

    def sendCmd(self, cmd, cb, seq):
        """
        send single-line command string; no sequence number; wait for response
        this assumes stdin/stdout; for TCP, need to add correlation with sequence numbers
        """
        if self.postCmd(cmd):
            reqSeq = -1
            try:
                # Drain queued responses (invoking cb on each) until *seq* is reached
                while reqSeq < seq:
                    data = self.msgq.get(True, 1)
                    dict = json_helpers.decode(data)  # NOTE(review): local shadows builtin dict
                    reqSeq = dict['request_seq']
                    if cb:
                        cb(dict)
            except queue.Empty:
                print("queue timeout")
                if (cb):
                    cb(self.makeTimeoutMsg(cmd, seq))
        else:
            # Command could not even be posted; report as a timeout
            if (cb):
                cb(self.makeTimeoutMsg(cmd, seq))

    def sendCmdAsync(self, cmd, cb, seq):
        """
        Sends the command and registers a callback
        """
        if self.postCmd(cmd):
            self.asyncReq[seq] = cb

    def sendCmdSync(self, cmd, seq):
        """
        Sends the command and wait for the result and returns it
        """
        if self.postCmd(cmd):
            reqSeq = -1
            try:
                # Skip stale responses until the one matching *seq* arrives
                while reqSeq < seq:
                    data = self.msgq.get(True, 2)
                    dict = json_helpers.decode(data)
                    reqSeq = dict['request_seq']
                return dict
            except queue.Empty:
                print("queue timeout")
                return self.makeTimeoutMsg(cmd, seq)
        else:
            return self.makeTimeoutMsg(cmd, seq)

    def monitorPostQueue(self):
        """
        Monitor queue and post commands asynchronously
        """
        while True:
            # Blocks until a command is queued; newline terminates the frame
            cmd = self.postq.get(True) + "\n"
            if not self.server_proc:
                log.error("can not send request; node process not running")
            else:
                st = time.time()
                self.server_proc.stdin.write(cmd.encode())
                self.server_proc.stdin.flush()
                log.debug("command posted, elapsed %.3f sec" % (time.time() - st))

    def postCmd(self, cmd):
        """
        Post command to server; no response needed
        """
        log.debug('Posting command: {0}'.format(cmd))
        if not self.server_proc:
            log.error("can not send request; node process not running")
            return False
        self.postq.put_nowait(cmd)
        return True

    @staticmethod
    def read_msg(stream, msgq, asyncReq, proc, asyncEventHandlers):
        """
        Reader thread helper.
        Return True to indicate the wish to stop reading the next message.
        """
        state = "init"
        body_length = 0
        # Parse headers until the blank line that terminates the header section
        while state != "body":
            header = stream.readline().strip()
            if len(header) == 0:
                if state == 'init':
                    # log.info('0 byte line in stream when expecting header')
                    # Blank line before any header: stop reading iff process exited
                    return proc.poll() is not None
                else:
                    # Done reading header
                    state = "body"
            else:
                state = 'header'
                if header.startswith(NodeCommClient.__CONTENT_LENGTH_HEADER):
                    body_length = int(header[len(NodeCommClient.__CONTENT_LENGTH_HEADER):])
        if body_length > 0:
            data = stream.read(body_length)
            log.debug('Read body of length: {0}'.format(body_length))
            data_json = data.decode("utf-8")
            data_dict = json_helpers.decode(data_json)
            if data_dict['type'] == "response":
                request_seq = data_dict['request_seq']
                log.debug('Body sequence#: {0}'.format(request_seq))
                if request_seq in asyncReq:
                    callback = asyncReq.pop(request_seq, None)
                    if callback:
                        callback(data_dict)
                else:
                    # Only put in the queue if wasn't an async request
                    msgq.put(data_json)
            elif data_dict["type"] == "event":
                event_name = data_dict["event"]
                if event_name in asyncEventHandlers:
                    for cb in asyncEventHandlers[event_name]:
                        # Run <cb> asynchronously to keep read_msg as small as possible
                        sublime.set_timeout(lambda: cb(data_dict), 0)
        else:
            log.info('Body length of 0 in server stream')
        return False

    @staticmethod
    def is_executable(fpath):
        """Return True when *fpath* is a file executable by the current user."""
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

    @staticmethod
    def which(program):
        """Resolve *program* to an executable path, searching PATH plus common node dirs."""
        fpath, fname = os.path.split(program)
        if fpath:
            # Explicit path given: accept it only if it is executable
            if NodeCommClient.is_executable(program):
                return program
        else:
            # /usr/local/bin is not on mac default path
            # but is where node is typically installed on mac
            path_list = os.path.expandvars(os.environ["PATH"]) + os.pathsep + "/usr/local/bin" + os.pathsep + os.path.expandvars("$NVM_BIN")
            for path in path_list.split(os.pathsep):
                path = path.strip('"')
                programPath = os.path.join(path, program)
                if NodeCommClient.is_executable(programPath):
                    return programPath
        return None
class ServerClient(NodeCommClient):
    """NodeCommClient that spawns and reads from the main server process."""

    def __init__(self, script_path):
        """
        Starts a node client (if not already started) and communicate with it.
        The script file to run is passed to the constructor.
        """
        super(ServerClient, self).__init__(script_path)
        # start node process
        pref_settings = sublime.load_settings('Preferences.sublime-settings')
        node_path = pref_settings.get('node_path')
        if node_path:
            print("Path of node executable is configured as: " + node_path)
            configured_node_path = os.path.expandvars(node_path)
            if NodeCommClient.is_executable(configured_node_path):
                node_path = configured_node_path
            else:
                node_path = None
                print("Configured node path is not a valid executable.")
        if not node_path:
            if os.name == "nt":
                # On Windows rely on PATH resolution by CreateProcess
                node_path = "node"
            else:
                node_path = NodeCommClient.which("node")
        if not node_path:
            path_list = os.environ["PATH"] + os.pathsep + "/usr/local/bin" + os.pathsep + "$NVM_BIN"
            print("Unable to find executable file for node on path list: " + path_list)
            print("To specify the node executable file name, use the 'node_path' setting")
            self.server_proc = None
        else:
            global_vars._node_path = node_path
            print("Trying to spawn node executable from: " + node_path)
            try:
                if os.name == "nt":
                    # linux subprocess module does not have STARTUPINFO
                    # so only use it if on Windows
                    si = subprocess.STARTUPINFO()
                    si.dwFlags |= subprocess.SW_HIDE | subprocess.STARTF_USESHOWWINDOW
                    self.server_proc = subprocess.Popen([node_path, script_path, "--disableAutomaticTypingAcquisition"],
                                                        stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, startupinfo=si, bufsize=-1)
                else:
                    log.debug("opening " + node_path + " " + script_path)
                    self.server_proc = subprocess.Popen([node_path, script_path, "--disableAutomaticTypingAcquisition"],
                                                        stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, bufsize=-1)
            except:
                # Spawn failed for any reason; leave the client in "not started" state
                self.server_proc = None
        # start reader thread
        if self.server_proc and (not self.server_proc.poll()):
            log.debug("server proc " + str(self.server_proc))
            log.debug("starting reader thread")
            readerThread = threading.Thread(target=ServerClient.__reader, args=(
                self.server_proc.stdout, self.msgq, self.asyncReq, self.server_proc, self.event_handlers))
            readerThread.daemon = True
            readerThread.start()

    @staticmethod
    def __reader(stream, msgq, asyncReq, proc, eventHandlers):
        """ Main function for reader thread """
        while True:
            # read_msg returns True once the server process has exited
            if NodeCommClient.read_msg(stream, msgq, asyncReq, proc, eventHandlers):
                log.debug("server exited")
                return
class WorkerClient(NodeCommClient):
    """NodeCommClient for the worker process; can be stopped and restarted."""

    # Class-level flag checked by the reader thread to end its loop
    stop_worker = False

    def __init__(self, script_path):
        super(WorkerClient, self).__init__(script_path)

    def start(self):
        """Spawn the worker node process and start its reader thread."""
        WorkerClient.stop_worker = False
        node_path = global_vars.get_node_path()
        if os.name == "nt":
            # Hide the console window on Windows
            si = subprocess.STARTUPINFO()
            si.dwFlags |= subprocess.SW_HIDE | subprocess.STARTF_USESHOWWINDOW
            self.server_proc = subprocess.Popen(
                [node_path, self.script_path, "--disableAutomaticTypingAcquisition"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, startupinfo=si, bufsize=-1
            )
        else:
            self.server_proc = subprocess.Popen(
                [node_path, self.script_path, "--disableAutomaticTypingAcquisition"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, bufsize=-1)
        # start reader thread
        if self.server_proc and (not self.server_proc.poll()):
            log.debug("worker proc " + str(self.server_proc))
            log.debug("starting worker thread")
            workerThread = threading.Thread(target=WorkerClient.__reader, args=(
                self.server_proc.stdout, self.msgq, self.asyncReq, self.server_proc, self.event_handlers))
            workerThread.daemon = True
            workerThread.start()

    def stop(self):
        """Signal the reader thread to stop and kill the worker process."""
        WorkerClient.stop_worker = True
        self.server_proc.kill()
        self.server_proc = None

    @staticmethod
    def __reader(stream, msgq, asyncReq, proc, eventHandlers):
        """ Main function for worker thread """
        while True:
            # Exit when the process died or stop() was requested
            if NodeCommClient.read_msg(stream, msgq, asyncReq, proc, eventHandlers) or WorkerClient.stop_worker:
                log.debug("worker exited")
                return
|
log.py | #!/usr/bin/env python
"""
Copyright (c) 2014-2019 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
import os
import signal
import socket
import SocketServer
import sys
import threading
import time
import traceback
from core.common import check_whitelisted
from core.common import check_sudo
from core.enums import TRAIL
from core.settings import CEF_FORMAT
from core.settings import config
from core.settings import CONDENSE_ON_INFO_KEYWORDS
from core.settings import CONDENSED_EVENTS_FLUSH_PERIOD
from core.settings import DEFAULT_ERROR_LOG_PERMISSIONS
from core.settings import DEFAULT_EVENT_LOG_PERMISSIONS
from core.settings import HOSTNAME
from core.settings import NAME
from core.settings import TIME_FORMAT
from core.settings import TRAILS_FILE
from core.settings import VERSION
from core.ignore import ignore_event
# Events buffered for condensation, keyed by (src_ip, trail)
_condensed_events = {}
# Lazily-started daemon thread that periodically flushes _condensed_events
_condensing_thread = None
_condensing_lock = threading.Lock()
# Per-thread storage for cached log handles and throttling state
_thread_data = threading.local()
def create_log_directory():
    """Create config.LOG_DIR (mode 0755) if missing; may require admin rights."""
    if not os.path.isdir(config.LOG_DIR):
        if not config.DISABLE_CHECK_SUDO and check_sudo() is False:
            exit("[!] please rerun with sudo/Administrator privileges")
        os.makedirs(config.LOG_DIR, 0755)  # NOTE: Python 2 octal literal
    print("[i] using '%s' for log storage" % config.LOG_DIR)
def get_event_log_handle(sec, flags=os.O_APPEND | os.O_CREAT | os.O_WRONLY, reuse=True):
    """Return an OS-level handle for the per-day event log covering timestamp *sec*.

    With reuse=True (default) the handle is cached per-thread and rotated when
    the date changes; with reuse=False a fresh handle is opened and the caller
    is responsible for closing it.
    """
    retval = None
    localtime = time.localtime(sec)
    # Daily log file named YYYY-MM-DD.log inside LOG_DIR
    _ = os.path.join(config.LOG_DIR, "%d-%02d-%02d.log" % (localtime.tm_year, localtime.tm_mon, localtime.tm_mday))
    if not reuse:
        if not os.path.exists(_):
            open(_, "w+").close()
            os.chmod(_, DEFAULT_EVENT_LOG_PERMISSIONS)
        retval = os.open(_, flags)
    else:
        if _ != getattr(_thread_data, "event_log_path", None):
            # Day rolled over (or first use): close the stale handle and reopen
            if getattr(_thread_data, "event_log_handle", None):
                try:
                    os.close(_thread_data.event_log_handle)
                except OSError:
                    pass
            if not os.path.exists(_):
                open(_, "w+").close()
                os.chmod(_, DEFAULT_EVENT_LOG_PERMISSIONS)
            _thread_data.event_log_path = _
            _thread_data.event_log_handle = os.open(_thread_data.event_log_path, flags)
        retval = _thread_data.event_log_handle
    return retval
def get_error_log_handle(flags=os.O_APPEND | os.O_CREAT | os.O_WRONLY):
    """Return a per-thread cached OS handle to error.log, creating the file on first use."""
    if hasattr(_thread_data, "error_log_handle"):
        return _thread_data.error_log_handle
    path = os.path.join(config.LOG_DIR, "error.log")
    if not os.path.exists(path):
        open(path, "w+").close()
        os.chmod(path, DEFAULT_ERROR_LOG_PERMISSIONS)
    _thread_data.error_log_path = path
    _thread_data.error_log_handle = os.open(path, flags)
    return _thread_data.error_log_handle
def safe_value(value):
    """Format a log field: falsy values become '-', and any value containing
    a space or double quote is wrapped in CSV-style quotes (inner quotes doubled)."""
    text = str(value or '-')
    needs_quoting = (' ' in text) or ('"' in text)
    if needs_quoting:
        text = '"%s"' % text.replace('"', '""')
    return text
def flush_condensed_events():
    """Daemon loop: every CONDENSED_EVENTS_FLUSH_PERIOD seconds, merge buffered
    duplicate events per (src_ip, trail) key and emit one condensed log line each."""
    while True:
        time.sleep(CONDENSED_EVENTS_FLUSH_PERIOD)
        with _condensing_lock:
            for key in _condensed_events:
                condensed = False
                events = _condensed_events[key]
                first_event = events[0]
                # Start from a mutable copy of the first event tuple
                condensed_event = [_ for _ in first_event]
                for i in xrange(1, len(events)):
                    current_event = events[i]
                    for j in xrange(3, 7):  # src_port, dst_ip, dst_port, proto
                        if current_event[j] != condensed_event[j]:
                            condensed = True
                            # Collect differing field values into a set
                            if not isinstance(condensed_event[j], set):
                                condensed_event[j] = set((condensed_event[j],))
                            condensed_event[j].add(current_event[j])
                if condensed:
                    # Render collected sets as sorted comma-separated strings
                    for i in xrange(len(condensed_event)):
                        if isinstance(condensed_event[i], set):
                            condensed_event[i] = ','.join(str(_) for _ in sorted(condensed_event[i]))
                log_event(condensed_event, skip_condensing=True)
            _condensed_events.clear()
def log_event(event_tuple, packet=None, skip_write=False, skip_condensing=False):
    """Record one detection event to local/remote logs and plugins.

    event_tuple layout: (sec, usec, src_ip, src_port, dst_ip, dst_port, proto,
    trail_type, trail, info, reference). Lazily starts the condensing thread,
    honors whitelisting/ignore rules, condenses noisy events, throttles
    duplicate trails per time bucket, then writes to the daily log, optional
    UDP log server, optional CEF syslog, stderr and plugin hooks.
    """
    global _condensing_thread

    # Start the flush thread on first use
    if _condensing_thread is None:
        _condensing_thread = threading.Thread(target=flush_condensed_events)
        _condensing_thread.daemon = True
        _condensing_thread.start()

    try:
        sec, usec, src_ip, src_port, dst_ip, dst_port, proto, trail_type, trail, info, reference = event_tuple
        if ignore_event(event_tuple):
            return

        if not (any(check_whitelisted(_) for _ in (src_ip, dst_ip)) and trail_type != TRAIL.DNS):  # DNS requests/responses can't be whitelisted based on src_ip/dst_ip
            if not skip_write:
                localtime = "%s.%06d" % (time.strftime(TIME_FORMAT, time.localtime(int(sec))), usec)

                if not skip_condensing:
                    # Buffer noisy events for later condensation instead of logging now
                    if any(_ in info for _ in CONDENSE_ON_INFO_KEYWORDS):
                        with _condensing_lock:
                            key = (src_ip, trail)
                            if key not in _condensed_events:
                                _condensed_events[key] = []
                            _condensed_events[key].append(event_tuple)
                        return

                # Throttle: log each (ip, trail) at most once per bucket
                current_bucket = sec / config.PROCESS_COUNT
                if getattr(_thread_data, "log_bucket", None) != current_bucket:  # log throttling
                    _thread_data.log_bucket = current_bucket
                    _thread_data.log_trails = set()
                else:
                    if any(_ in _thread_data.log_trails for _ in ((src_ip, trail), (dst_ip, trail))):
                        return
                    else:
                        _thread_data.log_trails.add((src_ip, trail))
                        _thread_data.log_trails.add((dst_ip, trail))

                event = "%s %s %s\n" % (safe_value(localtime), safe_value(config.SENSOR_NAME), " ".join(safe_value(_) for _ in event_tuple[2:]))
                if not config.DISABLE_LOCAL_LOG_STORAGE:
                    handle = get_event_log_handle(sec)
                    os.write(handle, event)

                if config.LOG_SERVER:
                    remote_host, remote_port = config.LOG_SERVER.split(':')
                    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                    s.sendto("%s %s" % (sec, event), (remote_host, int(remote_port)))

                if config.SYSLOG_SERVER:
                    extension = "src=%s spt=%s dst=%s dpt=%s trail=%s ref=%s" % (src_ip, src_port, dst_ip, dst_port, trail, reference)
                    _ = CEF_FORMAT.format(syslog_time=time.strftime("%b %d %H:%M:%S", time.localtime(int(sec))), host=HOSTNAME, device_vendor=NAME, device_product="sensor", device_version=VERSION, signature_id=time.strftime("%Y-%m-%d", time.localtime(os.path.getctime(TRAILS_FILE))), name=info, severity=0, extension=extension)
                    remote_host, remote_port = config.SYSLOG_SERVER.split(':')
                    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                    s.sendto(_, (remote_host, int(remote_port)))

                # BUG FIX: any() takes a single iterable; the original passed
                # two positional arguments, which raises TypeError when reached.
                if config.DISABLE_LOCAL_LOG_STORAGE and not any((config.LOG_SERVER, config.SYSLOG_SERVER)) or config.console:
                    sys.stderr.write(event)
                    sys.stderr.flush()

            if config.plugin_functions:
                for _ in config.plugin_functions:
                    _(event_tuple, packet)
    except (OSError, IOError):
        if config.SHOW_DEBUG:
            traceback.print_exc()
def log_error(msg):
    """Append a timestamped line to error.log; swallow I/O failures
    (printing a traceback only when config.SHOW_DEBUG is set)."""
    try:
        stamp = time.strftime(TIME_FORMAT, time.localtime())
        os.write(get_error_log_handle(), "%s %s\n" % (stamp, msg))
    except (OSError, IOError):
        if config.SHOW_DEBUG:
            traceback.print_exc()
def start_logd(address=None, port=None, join=False):
    """Run a UDP log-collector server; each datagram is '<sec> <event line>'.

    With join=True the call blocks in serve_forever(); otherwise the server
    runs on a daemon thread. (Python 2 module: uses SocketServer and the
    print statement.)
    """
    class ThreadingUDPServer(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
        pass

    class UDPHandler(SocketServer.BaseRequestHandler):
        def handle(self):
            # Datagram payload: "<epoch seconds> <formatted event>"
            try:
                data, _ = self.request
                sec, event = data.split(" ", 1)
                # Fresh handle per datagram (reuse=False) since threads vary
                handle = get_event_log_handle(int(sec), reuse=False)
                os.write(handle, event)
                os.close(handle)
            except:
                if config.SHOW_DEBUG:
                    traceback.print_exc()

    server = ThreadingUDPServer((address, port), UDPHandler)

    print "[i] running UDP server at '%s:%d'" % (server.server_address[0], server.server_address[1])

    if join:
        server.serve_forever()
    else:
        thread = threading.Thread(target=server.serve_forever)
        thread.daemon = True
        thread.start()
def set_sigterm_handler():
    """Install a SIGTERM handler (on platforms that have SIGTERM) that logs
    the signal and exits cleanly via SystemExit."""
    def _on_sigterm(signum, frame):
        log_error("SIGTERM")
        raise SystemExit

    if hasattr(signal, "SIGTERM"):
        signal.signal(signal.SIGTERM, _on_sigterm)
# Arm the SIGTERM handler whenever this file is imported as a module
# (i.e. not executed directly as a script)
if __name__ != "__main__":
    set_sigterm_handler()
|
custom.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import re
import ssl
import stat
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from ipaddress import ip_network
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
import requests
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException
import yaml # pylint: disable=import-error
import dateutil.parser # pylint: disable=import-error
from dateutil.relativedelta import relativedelta # pylint: disable=import-error
from msrestazure.azure_exceptions import CloudError
from azure.cli.core.api import get_config_dir
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.graphrbac.models import (ApplicationCreateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters)
from .vendored_sdks.azure_mgmt_preview_aks.v2019_04_01.models import ContainerServiceLinuxProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2019_04_01.models import ManagedClusterWindowsProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2019_04_01.models import ContainerServiceNetworkProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2019_04_01.models import ManagedClusterServicePrincipalProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2019_04_01.models import ContainerServiceSshConfiguration
from .vendored_sdks.azure_mgmt_preview_aks.v2019_04_01.models import ContainerServiceSshPublicKey
from .vendored_sdks.azure_mgmt_preview_aks.v2019_04_01.models import ManagedCluster
from .vendored_sdks.azure_mgmt_preview_aks.v2019_04_01.models import ManagedClusterAADProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2019_04_01.models import ManagedClusterAddonProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2019_04_01.models import ManagedClusterAgentPoolProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2019_04_01.models import AgentPool
from .vendored_sdks.azure_mgmt_preview_aks.v2019_04_01.models import ContainerServiceStorageProfileTypes
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
    """Locate *binary* on PATH (Windows gets an implicit '.exe' suffix).

    Returns the full path of the first matching executable, or None.
    """
    path_var = os.getenv('PATH')
    if platform.system() == 'Windows':
        binary += '.exe'
        separator = ';'
    else:
        separator = ':'
    for directory in path_var.split(separator):
        candidate = os.path.join(directory, binary)
        is_executable = os.path.exists(candidate) and os.path.isfile(candidate) and os.access(candidate, os.X_OK)
        if is_executable:
            return candidate
    return None
def wait_then_open(url):
    """
    Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
    """
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
        except URLError:
            time.sleep(1)
        # NOTE(review): this break sits at loop level, so only ONE probe is
        # ever made (sleeping once on failure) before the tab is opened —
        # confirm whether it was meant to retry until the URL responds.
        break
    webbrowser.open_new_tab(url)
def wait_then_open_async(url):
    """
    Spawns a daemon thread that waits for a bit then opens *url* in the browser.
    """
    # BUG FIX: Thread(args=...) expects a tuple; the original passed the set
    # literal {url}, which is unordered and incorrect for multiple arguments.
    t = threading.Thread(target=wait_then_open, args=(url,))
    t.daemon = True
    t.start()
def _ssl_context():
    """Build an SSL context; legacy interpreters (or Cloud Shell on Windows)
    get an explicitly-constructed TLS context instead of the default one."""
    needs_legacy_context = sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows')
    if needs_legacy_context:
        try:
            return ssl.SSLContext(ssl.PROTOCOL_TLS)  # added in python 2.7.13 and 3.6
        except AttributeError:
            return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    return ssl.create_default_context()
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
    """Create an AAD application plus service principal, retrying while AAD propagates.

    Returns the new application's app id on success, or False if the service
    principal could not be created after 10 attempts.
    """
    # use get_progress_controller
    hook = cli_ctx.get_progress_controller(True)
    # NOTE: the original code misspelled this keyword as 'messsage', which
    # would raise TypeError on the first progress update.
    hook.add(message='Creating service principal', value=0, total_val=1.0)
    logger.info('Creating service principal')
    # always create application with 5 years expiration
    start_date = datetime.datetime.utcnow()
    end_date = start_date + relativedelta(years=5)
    result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
                                start_date=start_date, end_date=end_date)
    service_principal = result.app_id  # pylint: disable=no-member
    for x in range(0, 10):
        hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
        try:
            create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
            break
        # TODO figure out what exception AAD throws here sometimes.
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            # back off progressively while AAD replicates the new application
            time.sleep(2 + 2 * x)
    else:
        # all 10 attempts failed
        return False
    hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
    logger.info('Finished service principal creation')
    return service_principal
def _add_role_assignment(cli_ctx, role, service_principal, delay=2, scope=None):
    """Assign *role* to *service_principal* at *scope*, retrying while AAD propagates.

    Returns True on success (a pre-existing assignment counts as success),
    False when all 10 attempts fail.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to propagate')
    for x in range(0, 10):
        hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
        try:
            # TODO: break this out into a shared utility library
            create_role_assignment(cli_ctx, role, service_principal, scope=scope)
            break
        except CloudError as ex:
            # an already-existing assignment is fine — treat as success
            if ex.message == 'The role assignment already exists.':
                break
            logger.info(ex.message)
        except:  # pylint: disable=bare-except
            # best-effort: swallow anything else and retry with backoff
            pass
        time.sleep(delay + delay * x)
    else:
        # loop exhausted without a successful assignment
        return False
    hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
    logger.info('AAD role propagation done')
    return True
def _get_subscription_id(cli_ctx):
    """Return the subscription id of the currently logged-in profile."""
    profile = Profile(cli_ctx=cli_ctx)
    _, subscription_id, _ = profile.get_login_credentials(subscription_id=None)
    return subscription_id
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
# pylint: disable=too-many-locals
def store_acs_service_principal(subscription_id, client_secret, service_principal,
                                file_name='acsServicePrincipal.json'):
    """Persist the service principal/secret for *subscription_id* in the CLI config dir.

    The file is (re)written with mode 0o600 since it holds a secret; entries
    for other subscriptions are preserved.
    """
    entry = {}
    if client_secret:
        entry['client_secret'] = client_secret
    if service_principal:
        entry['service_principal'] = service_principal
    config_path = os.path.join(get_config_dir(), file_name)
    all_principals = load_service_principals(config_path=config_path) or {}
    all_principals[subscription_id] = entry
    # open with restrictive permissions because the file contains credentials
    fd = os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, 'w+') as sp_file:
        json.dump(all_principals, sp_file)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
    """Return the stored service-principal entry for *subscription_id*, or None."""
    config_path = os.path.join(get_config_dir(), file_name)
    all_principals = load_service_principals(config_path)
    return all_principals.get(subscription_id) if all_principals else None
def load_service_principals(config_path):
    """Parse the service-principal JSON file at *config_path*; None on any failure."""
    if not os.path.exists(config_path):
        return None
    try:
        with os.fdopen(os.open(config_path, os.O_RDONLY)) as handle:
            return shell_safe_json_parse(handle.read())
    except:  # pylint: disable=bare-except
        # deliberately best-effort: any parse/IO error means "no stored principals"
        return None
def _invoke_deployment(cli_ctx, resource_group_name, deployment_name, template, parameters, validate, no_wait,
                       subscription_id=None):
    """Validate or start an ARM template deployment in *resource_group_name*.

    When *validate* is True the template is logged and only validated;
    otherwise the deployment is created/updated, honouring *no_wait*.
    """
    from azure.mgmt.resource.resources import ResourceManagementClient
    from azure.mgmt.resource.resources.models import DeploymentProperties
    properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
    smc = get_mgmt_service_client(cli_ctx, ResourceManagementClient, subscription_id=subscription_id).deployments
    if validate:
        logger.info('==== BEGIN TEMPLATE ====')
        logger.info(json.dumps(template, indent=2))
        logger.info('==== END TEMPLATE ====')
        return smc.validate(resource_group_name, deployment_name, properties)
    return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def create_application(client, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None):
    """Create an AAD application via the Graph RBAC *client*.

    Builds password/key credentials from the keyword arguments and raises a
    friendlier CLIError when the caller lacks directory permissions.
    """
    from azure.graphrbac.models import GraphErrorException
    password_creds, key_creds = _build_application_creds(password=password, key_value=key_value, key_type=key_type,
                                                         key_usage=key_usage, start_date=start_date, end_date=end_date)
    app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
                                                   display_name=display_name,
                                                   identifier_uris=identifier_uris,
                                                   homepage=homepage,
                                                   reply_urls=reply_urls,
                                                   key_credentials=key_creds,
                                                   password_credentials=password_creds)
    try:
        return client.create(app_create_param)
    except GraphErrorException as ex:
        # translate the common "insufficient privileges" failure into guidance
        if 'insufficient privileges' in str(ex).lower():
            link = 'https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-create-service-principal-portal'  # pylint: disable=line-too-long
            raise CLIError("Directory permission is needed for the current user to register the application. "
                           "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
        raise
def _build_application_creds(password=None, key_value=None, key_type=None,
                             key_usage=None, start_date=None, end_date=None):
    """Build (password_credentials, key_credentials) for an AAD application.

    At most one of *password*/*key_value* may be given; the unused slot of the
    returned pair is None. Dates default to now .. now+1 year and may be
    passed as strings.
    """
    if password and key_value:
        raise CLIError('specify either --password or --key-value, but not both.')
    start_date = start_date or datetime.datetime.utcnow()
    if isinstance(start_date, str):
        start_date = dateutil.parser.parse(start_date)
    end_date = end_date or (start_date + relativedelta(years=1))
    if isinstance(end_date, str):
        end_date = dateutil.parser.parse(end_date)
    key_type = key_type or 'AsymmetricX509Cert'
    key_usage = key_usage or 'Verify'
    password_creds = None
    key_creds = None
    if password:
        password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
                                             key_id=str(uuid.uuid4()), value=password)]
    elif key_value:
        key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
                                   key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
    return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
    """Create a service principal for the AAD application named by *identifier*.

    *identifier* may be an appId GUID, an identifier URI, or an object id;
    when *resolve_app* is False it is taken as the appId verbatim.
    """
    if rbac_client is None:
        rbac_client = get_graph_rbac_management_client(cli_ctx)
    if resolve_app:
        try:
            # a parseable UUID means we were given an appId
            uuid.UUID(identifier)
            result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
        except ValueError:
            # otherwise try to match it as an identifier URI
            result = list(rbac_client.applications.list(
                filter="identifierUris/any(s:s eq '{}')".format(identifier)))
        if not result:  # assume we get an object id
            result = [rbac_client.applications.get(identifier)]
        app_id = result[0].app_id
    else:
        app_id = identifier
    return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None):
    """Thin public wrapper around _create_role_assignment (assignee resolution on)."""
    return _create_role_assignment(cli_ctx, role, assignee, resource_group_name, scope)
def _create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None, resolve_assignee=True):
    """Create an RBAC role assignment for *assignee* at the resolved scope.

    *role* may be a definition GUID or a role name; *assignee* a UPN, SPN or
    object id (resolved via Graph unless *resolve_assignee* is False).
    """
    from azure.cli.core.profiles import ResourceType, get_sdk
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
    role_id = _resolve_role_id(role, scope, definitions_client)
    object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
    # the parameter model is version-dependent, so fetch it through the profile SDK
    RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
                                             'RoleAssignmentCreateParameters', mod='models',
                                             operation_group='role_assignments')
    parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
    # assignment names are arbitrary GUIDs
    assignment_name = uuid.uuid4()
    custom_headers = None
    return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
elif len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
    """Resolve *assignee* (UPN, SPN, or object id) to an AAD object id via Graph."""
    client = get_graph_rbac_management_client(cli_ctx)
    result = None
    if assignee.find('@') >= 0:  # looks like a user principal name
        result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
    if not result:
        # not a user — try service principal names
        result = list(client.service_principals.list(
            filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
    if not result:  # assume an object id, let us verify it
        result = _get_object_stubs(client, [assignee])
    # 2+ matches should never happen, so we only check 'no match' here
    if not result:
        raise CLIError("No matches in graph database for '{}'".format(assignee))
    return result[0].object_id
def _get_object_stubs(graph_client, assignees):
    """Fetch directory-object stubs for the given object ids from the Graph API."""
    query = GetObjectsParameters(
        include_directory_object_references=True,
        object_ids=assignees,
    )
    return list(graph_client.objects.get_objects_by_object_ids(query))
def subnet_role_assignment_exists(cli_ctx, scope):
    """Return True if a Network Contributor role assignment already exists at *scope*."""
    network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
    assignments_client = get_auth_management_client(cli_ctx, scope).role_assignments
    return any(
        assignment.scope == scope and assignment.role_definition_id.endswith(network_contributor_role_id)
        for assignment in assignments_client.list_for_scope(scope=scope, filter='atScope()')
    )
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
               listen_address='127.0.0.1', listen_port='8001'):
    """Open the Kubernetes dashboard of an AKS cluster.

    Writes user credentials to a temp kubeconfig, locates the dashboard pod,
    then runs `kubectl port-forward` locally (or tunnels through Cloud Shell)
    and opens a browser unless *disable_browser* is set.
    """
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    # verify the kube-dashboard addon was not disabled
    instance = client.get(resource_group_name, name)
    addon_profiles = instance.addon_profiles or {}
    addon_profile = addon_profiles.get("kubeDashboard", ManagedClusterAddonProfile(enabled=True))
    if not addon_profile.enabled:
        raise CLIError('The kube-dashboard addon was disabled for this managed cluster.\n'
                       'To use "az aks browse" first enable the add-on\n'
                       'by running "az aks enable-addons --addons kube-dashboard".')
    proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
    _, browse_path = tempfile.mkstemp()
    # TODO: need to add an --admin option?
    aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
    # find the dashboard pod's name
    try:
        dashboard_pod = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system", "--output", "name",
             "--selector", "k8s-app=kubernetes-dashboard"],
            universal_newlines=True)
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard pod: {}'.format(err))
    if dashboard_pod:
        # remove any "pods/" or "pod/" prefix from the name
        dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
    else:
        raise CLIError("Couldn't find the Kubernetes dashboard pod.")
    # launch kubectl port-forward locally to access the remote dashboard
    if in_cloud_console():
        # TODO: better error handling here.
        response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
        result = json.loads(response.text)
        term_id = os.environ.get('ACC_TERM_ID')
        if term_id:
            response = requests.post('http://localhost:8888/openLink/{}'.format(term_id),
                                     json={"url": result['url']})
        logger.warning('To view the console, please open %s in a new tab', result['url'])
    else:
        logger.warning('Proxy running on %s', proxy_url)
        logger.warning('Press CTRL+C to close the tunnel...')
        if not disable_browser:
            wait_then_open_async(proxy_url)
    try:
        try:
            subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
                                     "port-forward", "--address", listen_address, dashboard_pod,
                                     "{0}:9090".format(listen_port)], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            # bug fix: bytes.find() returns -1 (truthy) when absent, so the
            # original retried on ANY failure except when the flag text was at
            # offset 0 — use membership instead.
            if b'unknown flag: --address' in err.output:
                if listen_address != '127.0.0.1':
                    logger.warning('"--address" is only supported in kubectl v1.13 and later.')
                    logger.warning('The "--listen-address" argument will be ignored.')
                subprocess.call(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
                                 "port-forward", dashboard_pod, "{0}:9090".format(listen_port)])
    except KeyboardInterrupt:
        # Let command processing finish gracefully after the user presses [Ctrl+C]
        pass
    finally:
        if in_cloud_console():
            # TODO: Better error handling here.
            # bug fix: the tunnel is only opened in Cloud Shell (see above), so
            # only close it there — and close the port actually opened, not a
            # hard-coded 8001.
            requests.post('http://localhost:8888/closeport/{0}'.format(listen_port))
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
# pylint: disable=too-many-statements
# pylint: disable=too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
               dns_name_prefix=None,
               location=None,
               admin_username="azureuser",
               windows_admin_username=None,
               windows_admin_password=None,
               kubernetes_version='',
               node_vm_size="Standard_DS2_v2",
               node_osdisk_size=0,
               node_count=3,
               nodepool_name="nodepool1",
               service_principal=None, client_secret=None,
               no_ssh_key=False,
               disable_rbac=None,
               enable_rbac=None,
               enable_vmss=None,
               skip_subnet_role_assignment=False,
               enable_cluster_autoscaler=False,
               network_plugin=None,
               network_policy=None,
               pod_cidr=None,
               service_cidr=None,
               dns_service_ip=None,
               docker_bridge_address=None,
               load_balancer_sku=None,
               enable_addons=None,
               workspace_resource_id=None,
               min_count=None,
               max_count=None,
               vnet_subnet_id=None,
               max_pods=0,
               aad_client_app_id=None,
               aad_server_app_id=None,
               aad_server_app_secret=None,
               aad_tenant_id=None,
               tags=None,
               node_zones=None,
               generate_ssh_keys=False, # pylint: disable=unused-argument
               enable_pod_security_policy=False,
               node_resource_group=None,
               no_wait=False):
    """Create a managed Kubernetes (AKS) cluster.

    Assembles the agent pool, SSH/Windows profiles, service principal,
    network, addon and AAD profiles into a ManagedCluster and submits the
    create_or_update request, retrying while the new service principal
    propagates through AAD. Returns the SDK poller (or result, per *no_wait*).
    """
    # Validate the SSH key up front unless --no-ssh-key was requested.
    if not no_ssh_key:
        try:
            if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
                raise ValueError()
        except (TypeError, ValueError):
            shortened_key = truncate_text(ssh_key_value)
            raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
    subscription_id = _get_subscription_id(cmd.cli_ctx)
    if not dns_name_prefix:
        dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
    # Default the cluster location to the resource group's location.
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location
    agent_pool_profile = ManagedClusterAgentPoolProfile(
        name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
        count=int(node_count),
        vm_size=node_vm_size,
        os_type="Linux",
        vnet_subnet_id=vnet_subnet_id,
        availability_zones=node_zones,
        max_pods=int(max_pods) if max_pods else None
    )
    if enable_vmss:
        agent_pool_profile.type = "VirtualMachineScaleSets"
    if node_osdisk_size:
        agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
    # Validates/sets min_count, max_count and the autoscaler flag on the pool.
    _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
    linux_profile = None
    # LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
    if not no_ssh_key:
        ssh_config = ContainerServiceSshConfiguration(
            public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
        linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
    windows_profile = None
    if windows_admin_username:
        # Prompt interactively for the Windows password when it was not supplied.
        if windows_admin_password is None:
            try:
                windows_admin_password = prompt_pass(msg='windows-admin-password: ', confirm=True)
            except NoTTYException:
                raise CLIError('Please specify both username and password in non-interactive mode.')
        windows_profile = ManagedClusterWindowsProfile(
            admin_username=windows_admin_username,
            admin_password=windows_admin_password)
    # Reuse a stored service principal for this subscription or create a new one.
    principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
                                                  service_principal=service_principal, client_secret=client_secret,
                                                  subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
                                                  location=location, name=name)
    service_principal_profile = ManagedClusterServicePrincipalProfile(
        client_id=principal_obj.get("service_principal"),
        secret=principal_obj.get("client_secret"))
    # Grant the SP Network Contributor on a custom subnet, unless skipped or present.
    if (vnet_subnet_id and not skip_subnet_role_assignment and
            not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
        scope = vnet_subnet_id
        if not _add_role_assignment(
                cmd.cli_ctx,
                'Network Contributor',
                service_principal_profile.client_id,
                scope=scope):
            logger.warning('Could not create a role assignment for subnet. '
                           'Are you an Owner on this subscription?')
    network_profile = None
    # Any networking option implies an explicit network profile (plugin required).
    if any([network_plugin,
            pod_cidr,
            service_cidr,
            dns_service_ip,
            docker_bridge_address,
            network_policy,
            load_balancer_sku]):
        if not network_plugin:
            raise CLIError('Please explicitly specify the network plugin type')
        if pod_cidr and network_plugin == "azure":
            raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
        network_profile = ContainerServiceNetworkProfile(
            network_plugin=network_plugin,
            pod_cidr=pod_cidr,
            service_cidr=service_cidr,
            dns_service_ip=dns_service_ip,
            docker_bridge_cidr=docker_bridge_address,
            network_policy=network_policy,
            load_balancer_sku=load_balancer_sku
        )
    addon_profiles = _handle_addons_args(
        cmd,
        enable_addons,
        subscription_id,
        resource_group_name,
        {},
        workspace_resource_id
    )
    # The monitoring addon needs the ContainerInsights solution deployed too.
    if 'omsagent' in addon_profiles:
        _ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent'])
    aad_profile = None
    if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
        aad_profile = ManagedClusterAADProfile(
            client_app_id=aad_client_app_id,
            server_app_id=aad_server_app_id,
            server_app_secret=aad_server_app_secret,
            tenant_id=aad_tenant_id
        )
    # Check that both --disable-rbac and --enable-rbac weren't provided
    if all([disable_rbac, enable_rbac]):
        raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
    mc = ManagedCluster(
        location=location, tags=tags,
        dns_prefix=dns_name_prefix,
        kubernetes_version=kubernetes_version,
        enable_rbac=False if disable_rbac else True,
        agent_pool_profiles=[agent_pool_profile],
        linux_profile=linux_profile,
        windows_profile=windows_profile,
        service_principal_profile=service_principal_profile,
        network_profile=network_profile,
        addon_profiles=addon_profiles,
        aad_profile=aad_profile,
        enable_pod_security_policy=bool(enable_pod_security_policy))
    if node_resource_group:
        mc.node_resource_group = node_resource_group
    # Due to SPN replication latency, we do a few retries here
    max_retry = 30
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            return sdk_no_wait(no_wait, client.create_or_update,
                               resource_group_name=resource_group_name, resource_name=name, parameters=mc)
        except CloudError as ex:
            retry_exception = ex
            # retry only while the SP has not propagated yet; rethrow anything else
            if 'not found in Active Directory tenant' in ex.message:
                time.sleep(3)
            else:
                raise ex
    raise retry_exception
def aks_update(cmd, client, resource_group_name, name, enable_cluster_autoscaler=False,
               disable_cluster_autoscaler=False,
               update_cluster_autoscaler=False,
               min_count=None, max_count=None, no_wait=False,
               api_server_authorized_ip_ranges=None,
               enable_pod_security_policy=False,
               disable_pod_security_policy=False):
    """Update autoscaler, pod-security-policy or authorized-IP settings of a cluster.

    Mutates the fetched ManagedCluster in place and resubmits it via
    create_or_update. Exactly one of the autoscaler flags may be set.
    """
    # boolean flags sum to the number of autoscaler options the caller passed
    update_flags = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
    if update_flags != 1 and api_server_authorized_ip_ranges is None and \
       (enable_pod_security_policy is False and disable_pod_security_policy is False):
        raise CLIError('Please specify "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--enable-pod-security-policy" or '
                       '"--disable-pod-security-policy" or '
                       '"--api-server-authorized-ip-ranges"')
    # TODO: change this approach when we support multiple agent pools.
    instance = client.get(resource_group_name, name)
    if update_flags > 0 and instance.max_agent_pools > 1:
        raise CLIError('Please use "az aks nodepool command to update per node pool auto scaler settings"')
    node_count = instance.agent_pool_profiles[0].count
    # autoscaler enable/update requires both bounds, and the current count must fit them
    if min_count is None or max_count is None:
        if enable_cluster_autoscaler or update_cluster_autoscaler:
            raise CLIError('Please specifying both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler set.')
    if min_count is not None and max_count is not None:
        if int(min_count) > int(max_count):
            raise CLIError('value of min-count should be less than or equal to value of max-count.')
        if int(node_count) < int(min_count) or int(node_count) > int(max_count):
            raise CLIError("current node count '{}' is not in the range of min-count and max-count.".format(node_count))
    if enable_cluster_autoscaler:
        # enabling twice is a no-op with a hint, not an error
        if instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning('Cluster autoscaler is already enabled for this managed cluster.\n'
                           'Please run "az aks update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)
        instance.agent_pool_profiles[0].enable_auto_scaling = True
    if update_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            raise CLIError('Cluster autoscaler is not enabled for this managed cluster.\n'
                           'Run "az aks update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)
    if disable_cluster_autoscaler:
        # disabling twice is also a no-op
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning('Cluster autoscaler is already disabled for this managed cluster.')
            return None
        instance.agent_pool_profiles[0].enable_auto_scaling = False
        instance.agent_pool_profiles[0].min_count = None
        instance.agent_pool_profiles[0].max_count = None
    if enable_pod_security_policy and disable_pod_security_policy:
        raise CLIError('Cannot specify --enable-pod-security-policy and --disable-pod-security-policy '
                       'at the same time.')
    if enable_pod_security_policy:
        instance.enable_pod_security_policy = True
    if disable_pod_security_policy:
        instance.enable_pod_security_policy = False
    if api_server_authorized_ip_ranges is not None:
        # an empty string clears the list; otherwise parse a comma-separated CIDR list
        instance.api_server_authorized_ip_ranges = []
        if api_server_authorized_ip_ranges != "":
            for ip in api_server_authorized_ip_ranges.split(','):
                try:
                    ip_net = ip_network(ip)
                    instance.api_server_authorized_ip_ranges.append(ip_net.with_prefixlen)
                except ValueError:
                    raise CLIError('IP addresses or CIDRs should be provided for authorized IP ranges.')
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_show(cmd, client, resource_group_name, name):
    """Fetch a managed cluster and strip often-empty fields for cleaner output."""
    managed_cluster = client.get(resource_group_name, name)
    return _remove_nulls([managed_cluster])[0]
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
                        path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
                        overwrite_existing=False):
    """Fetch cluster credentials and print or merge the kubeconfig at *path*."""
    if admin:
        credential_results = client.list_cluster_admin_credentials(resource_group_name, name)
    else:
        credential_results = client.list_cluster_user_credentials(resource_group_name, name)
    if not credential_results:
        raise CLIError("No Kubernetes credentials found.")
    try:
        kubeconfig = credential_results.kubeconfigs[0].value.decode(encoding='UTF-8')
        _print_or_merge_credentials(path, kubeconfig, overwrite_existing)
    except (IndexError, ValueError):
        raise CLIError("Fail to find kubeconfig file.")
# Maps the CLI addon names accepted by --enable-addons/--disable-addons to the
# addon-profile keys used by the AKS resource provider.
ADDONS = {
    'http_application_routing': 'httpApplicationRouting',
    'monitoring': 'omsagent',
    'virtual-node': 'aciConnector',
    'azure-policy': 'azurepolicy',
    'kube-dashboard': 'kubeDashboard'
}
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
    """Set the node count of an agent pool (the only pool when *nodepool_name* is empty)."""
    instance = client.get(resource_group_name, name)
    # TODO: change this approach when we support multiple agent pools.
    for agent_profile in instance.agent_pool_profiles:
        is_match = agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1)
        if not is_match:
            continue
        agent_profile.count = int(node_count)  # pylint: disable=no-member
        # null out the SP and AAD profile because otherwise validation complains
        instance.service_principal_profile = None
        instance.aad_profile = None
        return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
    raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
def aks_upgrade(cmd, client, resource_group_name, name, kubernetes_version, no_wait=False, **kwargs):  # pylint: disable=unused-argument
    """Upgrade a managed cluster to *kubernetes_version* (warns on same-version upgrades)."""
    instance = client.get(resource_group_name, name)
    same_version = instance.kubernetes_version == kubernetes_version
    if same_version and instance.provisioning_state == "Succeeded":
        logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
                       "will occur when upgrading to the same version if the cluster is not in a failed state.",
                       instance.kubernetes_version)
    elif same_version and instance.provisioning_state == "Failed":
        logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
                       "attempt resolution of failed cluster state.", instance.kubernetes_version)
    instance.kubernetes_version = kubernetes_version
    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
                        workspace_resource_id=None):
    """Translate the comma-separated --enable-addons string into addon profiles.

    Raises CLIError for unrecognized addon names, or when
    --workspace-resource-id is supplied without the monitoring addon.
    """
    profiles = addon_profiles or {}
    remaining = addons_str.split(',') if addons_str else []
    if 'http_application_routing' in remaining:
        profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True)
        remaining.remove('http_application_routing')
    if 'kube-dashboard' in remaining:
        profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
        remaining.remove('kube-dashboard')
    # TODO: can we help the user find a workspace resource ID?
    if 'monitoring' in remaining:
        if not workspace_resource_id:
            # use default workspace if exists else create default workspace
            workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                cmd, subscription_id, resource_group_name)
        # normalize to '/…' with no trailing slash
        workspace_resource_id = workspace_resource_id.strip()
        if not workspace_resource_id.startswith('/'):
            workspace_resource_id = '/' + workspace_resource_id
        if workspace_resource_id.endswith('/'):
            workspace_resource_id = workspace_resource_id.rstrip('/')
        profiles['omsagent'] = ManagedClusterAddonProfile(
            enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id})
        remaining.remove('monitoring')
    # error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
    elif workspace_resource_id:
        raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
    if 'azure-policy' in remaining:
        profiles['azurepolicy'] = ManagedClusterAddonProfile(enabled=True)
        remaining.remove('azure-policy')
    # error out if any (unrecognized) addons remain
    if remaining:
        raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
            ",".join(remaining), "are" if len(remaining) > 1 else "is"))
    return profiles
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
    """Return the resource id of the default Log Analytics workspace for monitoring,
    creating the default resource group and workspace when they do not exist.
    """
    # log analytics workspaces cannot be created in WCUS region due to capacity limits
    # so mapped to EUS per discussion with log analytics team
    AzureLocationToOmsRegionCodeMap = {
        "eastus": "EUS",
        "westeurope": "WEU",
        "southeastasia": "SEA",
        "australiasoutheast": "ASE",
        "usgovvirginia": "USGV",
        "westcentralus": "EUS",
        "japaneast": "EJP",
        "uksouth": "SUK",
        "canadacentral": "CCA",
        "centralindia": "CIN",
        "eastus2euap": "EAP"
    }
    AzureRegionToOmsRegionMap = {
        "australiaeast": "australiasoutheast",
        "australiasoutheast": "australiasoutheast",
        "brazilsouth": "eastus",
        "canadacentral": "canadacentral",
        "canadaeast": "canadacentral",
        "centralus": "eastus",
        "eastasia": "southeastasia",
        "eastus": "eastus",
        "eastus2": "eastus",
        "japaneast": "japaneast",
        "japanwest": "japaneast",
        "northcentralus": "eastus",
        "northeurope": "westeurope",
        "southcentralus": "eastus",
        "southeastasia": "southeastasia",
        "uksouth": "uksouth",
        "ukwest": "uksouth",
        "westcentralus": "eastus",
        "westeurope": "westeurope",
        "westus": "eastus",
        "westus2": "eastus",
        "centralindia": "centralindia",
        "southindia": "centralindia",
        "westindia": "centralindia",
        "koreacentral": "southeastasia",
        "koreasouth": "southeastasia",
        "francecentral": "westeurope",
        "francesouth": "westeurope"
    }
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    default_region_name = "eastus"
    default_region_code = "EUS"
    # bug fix: the original used Map[key] inside the conditional, which raised
    # KeyError for any region missing from the maps; .get() falls back to the
    # defaults instead.
    workspace_region = AzureRegionToOmsRegionMap.get(rg_location, default_region_name)
    workspace_region_code = AzureLocationToOmsRegionCodeMap.get(workspace_region, default_region_code)
    default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
    default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
    default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
        '/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
    resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
    resources = cf_resources(cmd.cli_ctx, subscription_id)
    # check if default RG exists
    if resource_groups.check_existence(default_workspace_resource_group):
        try:
            resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
            return resource.id
        except CloudError as ex:
            # 404 just means the workspace must be created below
            if ex.status_code != 404:
                raise ex
    else:
        resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
    default_workspace_params = {
        'location': workspace_region,
        'properties': {
            'sku': {
                'name': 'standalone'
            }
        }
    }
    async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
                                                    default_workspace_params)
    ws_resource_id = ''
    # poll until the workspace creation completes
    while True:
        result = async_poller.result(15)
        if async_poller.done():
            ws_resource_id = result.id
            break
    return ws_resource_id
def _ensure_container_insights_for_monitoring(cmd, addon):
    """Deploy the ContainerInsights solution into the Log Analytics workspace
    that backs the monitoring addon.

    :param cmd: CLI command context (supplies ``cli_ctx``).
    :param addon: managed-cluster addon profile whose ``config`` carries
        ``logAnalyticsWorkspaceResourceID``.
    :return: the result of the ARM deployment that publishes the solution.
    :raises CLIError: when the workspace resource id cannot be parsed.
    """
    workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID'].strip()
    # normalize to the canonical '/subscriptions/.../workspaces/<name>' form
    if not workspace_resource_id.startswith('/'):
        workspace_resource_id = '/' + workspace_resource_id
    if workspace_resource_id.endswith('/'):
        workspace_resource_id = workspace_resource_id.rstrip('/')
    # extract subscription ID and resource group from workspace_resource_id URL
    try:
        id_parts = workspace_resource_id.split('/')
        subscription_id = id_parts[2]
        resource_group = id_parts[4]
    except IndexError:
        raise CLIError('Could not locate resource group in workspace-resource-id URL.')
    # region of workspace can be different from region of RG so find the location of the workspace_resource_id
    resources = cf_resources(cmd.cli_ctx, subscription_id)
    # NOTE: the original wrapped these two lines in
    # `try: ... except CloudError as ex: raise ex`, which changed nothing
    # except truncating the traceback; let errors propagate unchanged.
    resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
    location = resource.location
    unix_time_in_millis = int(
        (datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
    solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
    # pylint: disable=line-too-long
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "parameters": {
            "workspaceResourceId": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics Resource ID"
                }
            },
            "workspaceRegion": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics workspace region"
                }
            },
            "solutionDeploymentName": {
                "type": "string",
                "metadata": {
                    "description": "Name of the solution deployment"
                }
            }
        },
        "resources": [
            {
                "type": "Microsoft.Resources/deployments",
                "name": "[parameters('solutionDeploymentName')]",
                "apiVersion": "2017-05-10",
                "subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
                "resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
                "properties": {
                    "mode": "Incremental",
                    "template": {
                        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
                        "contentVersion": "1.0.0.0",
                        "parameters": {},
                        "variables": {},
                        "resources": [
                            {
                                "apiVersion": "2015-11-01-preview",
                                "type": "Microsoft.OperationsManagement/solutions",
                                "location": "[parameters('workspaceRegion')]",
                                "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                "properties": {
                                    "workspaceResourceId": "[parameters('workspaceResourceId')]"
                                },
                                "plan": {
                                    "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                    "product": "[Concat('OMSGallery/', 'ContainerInsights')]",
                                    "promotionCode": "",
                                    "publisher": "Microsoft"
                                }
                            }
                        ]
                    },
                    "parameters": {}
                }
            }
        ]
    }
    params = {
        "workspaceResourceId": {
            "value": workspace_resource_id
        },
        "workspaceRegion": {
            "value": location
        },
        "solutionDeploymentName": {
            "value": solution_deployment_name
        }
    }
    deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
    # publish the Container Insights solution to the Log Analytics workspace
    return _invoke_deployment(cmd.cli_ctx, resource_group, deployment_name, template, params,
                              validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_service_principal(cli_ctx,
                                  service_principal=None,
                                  client_secret=None,
                                  subscription_id=None,
                                  dns_name_prefix=None,
                                  location=None,
                                  name=None):
    """Resolve (load, create or validate) the service principal for an AKS cluster.

    Resolution order:
      1. no --service-principal: try the cached entry in aksServicePrincipal.json;
      2. no cache entry: create a new SP, generating a client secret if needed;
      3. --service-principal given: require --client-secret alongside it.

    The resolved credentials are persisted back to aksServicePrincipal.json and
    the freshly re-loaded cache entry is returned.

    :raises CLIError: when SP creation fails or --client-secret is missing.
    """
    file_name_aks = 'aksServicePrincipal.json'
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if not service_principal:
        # --service-principal not specified, try to load it from local disk
        principal_obj = load_acs_service_principal(subscription_id, file_name=file_name_aks)
        if principal_obj:
            service_principal = principal_obj.get('service_principal')
            client_secret = principal_obj.get('client_secret')
        else:
            # Nothing to load, make one.
            if not client_secret:
                client_secret = _create_client_secret()
            # random salt keeps the placeholder homepage URL unique per attempt
            salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
            url = 'http://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
            service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
            if not service_principal:
                raise CLIError('Could not create a service principal with the right permissions. '
                               'Are you an Owner on this project?')
            logger.info('Created a service principal: %s', service_principal)
            # We don't need to add role assignment for this created SPN
    else:
        # --service-principal specfied, validate --client-secret was too
        if not client_secret:
            raise CLIError('--client-secret is required if --service-principal is specified')
    store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks)
    return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
    """Return the Azure location of *resource_group_name*.

    The GET doubles as an existence check: it raises when the resource
    group does not exist.
    """
    group_client = cf_resource_groups(ctx, subscription_id=subscription_id)
    return group_client.get(resource_group_name).location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise CLIError('Please specifying both min-count and max-count when --enable-cluster-autoscaler enabled')
if int(min_count) > int(max_count):
raise CLIError('value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError('node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
raise CLIError('min-count and max-count are required for --enable-cluster-autoscaler, please use the flag')
def _create_client_secret():
# Add a special character to satsify AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char
return client_secret
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
    """Return the agent pool resource identified by the given names."""
    return client.get(resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
    """List every agent pool belonging to the managed cluster."""
    pools = client.list(resource_group_name, cluster_name)
    return pools
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
                      kubernetes_version=None,
                      node_zones=None,
                      node_vm_size=None,
                      node_osdisk_size=0,
                      node_count=3,
                      vnet_subnet_id=None,
                      max_pods=0,
                      os_type="Linux",
                      min_count=None,
                      max_count=None,
                      enable_cluster_autoscaler=False,
                      no_wait=False):
    """Add a new agent (node) pool to an existing managed cluster.

    Rejects duplicate pool names, picks an OS-appropriate default VM size,
    applies autoscaler flags and optional OS-disk size, then submits the
    create_or_update request (optionally without waiting).
    """
    # a pool name must be unique within the cluster — fail before the SDK call
    for existing_pool in client.list(resource_group_name, cluster_name):
        if existing_pool.name == nodepool_name:
            raise CLIError("Node pool {} already exists, please try a different name, "
                           "use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
    # choose a sensible default VM size per OS when none was requested
    if node_vm_size is None:
        node_vm_size = "Standard_D2s_v3" if os_type == "Windows" else "Standard_DS2_v2"
    new_pool = AgentPool(
        name=nodepool_name,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type=os_type,
        storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
        vnet_subnet_id=vnet_subnet_id,
        agent_pool_type="VirtualMachineScaleSets",
        max_pods=int(max_pods) if max_pods else None,
        orchestrator_version=kubernetes_version,
        availability_zones=node_zones
    )
    # validates and applies min/max/enable_auto_scaling onto new_pool
    _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, new_pool)
    if node_osdisk_size:
        new_pool.os_disk_size_gb = int(node_osdisk_size)
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, new_pool)
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
                        nodepool_name,
                        node_count=3,
                        no_wait=False):
    """Scale an agent pool to *node_count* nodes.

    Rejects scaling to zero and no-op scales before issuing the update.
    """
    pool = client.get(resource_group_name, cluster_name, nodepool_name)
    requested_count = int(node_count)
    if requested_count == 0:
        raise CLIError("Can't scale down to 0 node.")
    if requested_count == pool.count:
        raise CLIError("The new node count is the same as the current node count.")
    pool.count = requested_count  # pylint: disable=no-member
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, pool)
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
                          kubernetes_version,
                          nodepool_name,
                          no_wait=False):
    """Upgrade one agent pool to the requested Kubernetes version."""
    pool = client.get(resource_group_name, cluster_name, nodepool_name)
    pool.orchestrator_version = kubernetes_version
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, pool)
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
                         enable_cluster_autoscaler=False,
                         disable_cluster_autoscaler=False,
                         update_cluster_autoscaler=False,
                         min_count=None, max_count=None,
                         no_wait=False):
    """Enable, disable or re-configure the cluster autoscaler for one node pool.

    Exactly one of the three autoscaler flags must be supplied. min_count and
    max_count are required (and validated against the current node count) for
    enable/update and are cleared on disable. Returns None when the request is
    a no-op (already enabled/disabled), otherwise the create_or_update result.

    :raises CLIError: on invalid flag combinations or out-of-range counts.
    """
    # booleans sum to the number of flags given; exactly one is allowed
    update_flags = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
    if update_flags != 1:
        raise CLIError('Please specify "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler"')
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    node_count = instance.count
    if min_count is None or max_count is None:
        if enable_cluster_autoscaler or update_cluster_autoscaler:
            raise CLIError('Please specifying both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler set.')
    if min_count is not None and max_count is not None:
        # NOTE(review): this range check also runs for --disable-cluster-autoscaler
        # when counts are (pointlessly) supplied — confirm that is intended
        if int(min_count) > int(max_count):
            raise CLIError('value of min-count should be less than or equal to value of max-count.')
        if int(node_count) < int(min_count) or int(node_count) > int(max_count):
            raise CLIError("current node count '{}' is not in the range of min-count and max-count.".format(node_count))
    if enable_cluster_autoscaler:
        if instance.enable_auto_scaling:
            # already on: warn instead of erroring so the command stays idempotent
            logger.warning('Autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks nodepool update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
        instance.enable_auto_scaling = True
    if update_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            raise CLIError('Autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
    if disable_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            logger.warning('Autoscaler is already disabled for this node pool.')
            return None
        instance.enable_auto_scaling = False
        # clear the range so the service drops the autoscaler configuration
        instance.min_count = None
        instance.max_count = None
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
                         nodepool_name,
                         no_wait=False):
    """Delete an agent pool, failing fast when no pool of that name exists."""
    # case-insensitive existence check so a helpful error surfaces before the SDK call
    wanted = nodepool_name.lower()
    pool_found = any(pool.name.lower() == wanted
                     for pool in client.list(resource_group_name, cluster_name))
    if not pool_found:
        raise CLIError("Node pool {} doesnt exist, "
                       "use 'aks nodepool list' to get current node pool list".format(nodepool_name))
    return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
    """Disable the given comma-separated addons on a managed cluster."""
    managed_cluster = client.get(resource_group_name, name)
    subscription_id = _get_subscription_id(cmd.cli_ctx)
    managed_cluster = _update_addons(cmd, managed_cluster, subscription_id, resource_group_name,
                                     addons, enable=False, no_wait=no_wait)
    # send the managed cluster representation to update the addon profiles
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, managed_cluster)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
                      subnet_name=None, no_wait=False):
    """Enable the given comma-separated addons on a managed cluster.

    For the monitoring addon ('omsagent') this also deploys the Container
    Insights solution into the backing Log Analytics workspace and, on the
    Azure public cloud only, grants the cluster service principal the
    'Monitoring Metrics Publisher' role scoped to the cluster resource.
    """
    instance = client.get(resource_group_name, name)
    subscription_id = _get_subscription_id(cmd.cli_ctx)
    service_principal_client_id = instance.service_principal_profile.client_id
    instance = _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable=True,
                              workspace_resource_id=workspace_resource_id, subnet_name=subnet_name, no_wait=no_wait)
    if 'omsagent' in instance.addon_profiles:
        _ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent'])
        cloud_name = cmd.cli_ctx.cloud.name
        # mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
        if cloud_name.lower() == 'azurecloud':
            from msrestazure.tools import resource_id
            cluster_resource_id = resource_id(
                subscription=subscription_id,
                resource_group=resource_group_name,
                namespace='Microsoft.ContainerService', type='managedClusters',
                name=name
            )
            # best-effort: a failed role assignment only warns, it does not abort
            if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
                                        service_principal_client_id, scope=cluster_resource_id):
                logger.warning('Could not create a role assignment for Monitoring addon. '
                               'Are you an Owner on this subscription?')
    # send the managed cluster representation to update the addon profiles
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable, workspace_resource_id=None,
                   subnet_name=None, no_wait=False):
    """Enable or disable addon profiles on a managed-cluster model, in memory.

    Mutates and returns *instance*: its addon_profiles are updated for each
    addon named in the comma-separated *addons* string, and the SP/AAD
    profiles are nulled out so the subsequent PUT passes validation.

    :raises CLIError: when enabling an already-enabled special addon, when
        required addon configuration is missing, or when disabling an addon
        that is not installed.
    """
    # parse the comma-separated addons argument
    addon_args = addons.split(',')
    addon_profiles = instance.addon_profiles or {}
    if 'kube-dashboard' in addon_args and 'kubeDashboard' not in addon_profiles:
        addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
    os_type = 'Linux'
    # for each addons argument
    for addon_arg in addon_args:
        # map CLI addon name to its service-side profile key
        addon = ADDONS[addon_arg]
        if addon == 'aciConnector':
            # only linux is supported for now, in the future this will be a user flag
            addon += os_type
        # addon name is case insensitive
        addon = next((x for x in addon_profiles.keys() if x.lower() == addon.lower()), addon)
        if enable:
            # add new addons or update existing ones and enable them
            addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
            # special config handling for certain addons
            if addon == 'omsagent':
                if addon_profile.enabled:
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring"'
                                   'before enabling it again.')
                if not workspace_resource_id:
                    # fall back to the subscription's default workspace
                    workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                        cmd,
                        subscription_id,
                        resource_group_name)
                # normalize the workspace id to '/subscriptions/...' form
                workspace_resource_id = workspace_resource_id.strip()
                if not workspace_resource_id.startswith('/'):
                    workspace_resource_id = '/' + workspace_resource_id
                if workspace_resource_id.endswith('/'):
                    workspace_resource_id = workspace_resource_id.rstrip('/')
                addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id}
            elif addon.lower() == ('aciConnector' + os_type).lower():
                if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   '"az aks disable-addons -a virtual-node -g {resource_group_name}" '
                                   'before enabling it again.')
                if not subnet_name:
                    raise CLIError('The aci-connector addon requires setting a subnet name.')
                addon_profile.config = {'SubnetName': subnet_name}
            addon_profiles[addon] = addon_profile
        else:
            if addon not in addon_profiles:
                raise CLIError("The addon {} is not installed.".format(addon))
            # drop stale configuration when disabling
            addon_profiles[addon].config = None
        addon_profiles[addon].enabled = enable
    instance.addon_profiles = addon_profiles
    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None
    return instance
def aks_get_versions(cmd, client, location):
    """List orchestrator (Kubernetes) versions available for managed clusters in *location*."""
    orchestrators = client.list_orchestrators(location, resource_type='managedClusters')
    return orchestrators
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing):
    """Merge an unencrypted kubeconfig into the file at the specified path, or print it to
    stdout if the path is "-".

    :param path: target kubeconfig path, or "-" for stdout.
    :param kubeconfig: kubeconfig content as a string.
    :param overwrite_existing: replace colliding entries without prompting.
    """
    # Special case for printing to stdout
    if path == "-":
        print(kubeconfig)
        return
    # ensure that at least an empty ~/.kube/config exists
    directory = os.path.dirname(path)
    if directory and not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except OSError as ex:
            # another process may have created it between the check and makedirs
            if ex.errno != errno.EEXIST:
                raise
    if not os.path.exists(path):
        # create the file with owner-only permissions (0600)
        with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
            pass
    # merge the new kubeconfig into the existing one
    fd, temp_path = tempfile.mkstemp()
    additional_file = os.fdopen(fd, 'w+t')
    try:
        additional_file.write(kubeconfig)
        additional_file.flush()
        merge_kubernetes_configurations(path, temp_path, overwrite_existing)
    except yaml.YAMLError as ex:
        # best-effort: a malformed config only warns, it does not abort
        logger.warning('Failed to merge credentials to kube config file: %s', ex)
    finally:
        # always clean up the temp file, merged or not
        additional_file.close()
        os.remove(temp_path)
def _handle_merge(existing, addition, key, replace):
if not addition[key]:
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
from knack.prompting import prompt_y_n
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
    """Read *filename* and parse it as YAML.

    :raises CLIError: when the file is missing or cannot be parsed.
    """
    try:
        with open(filename) as config_file:
            return yaml.safe_load(config_file)
    except (IOError, OSError) as ex:
        if getattr(ex, 'errno', 0) != errno.ENOENT:
            raise
        raise CLIError('{} does not exist'.format(filename))
    except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
        raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace):
    """Merge the kubeconfig in *addition_file* into *existing_file* on disk.

    Admin contexts are renamed with an '-admin' suffix so they do not clobber
    the user context; the merged config is written back to *existing_file*
    and the resulting current context is printed.

    :raises CLIError: when *addition_file* yields no configuration or a
        colliding entry is not overwritten.
    """
    existing = load_kubernetes_configuration(existing_file)
    addition = load_kubernetes_configuration(addition_file)
    # BUG FIX: validate before use — the original only checked `addition is
    # None` after iterating addition.get('contexts', []), so an empty/None
    # parse raised AttributeError instead of the intended CLIError.
    if addition is None:
        raise CLIError('failed to load additional configuration from {}'.format(addition_file))
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
    if existing is None:
        existing = addition
    else:
        _handle_merge(existing, addition, 'clusters', replace)
        _handle_merge(existing, addition, 'users', replace)
        _handle_merge(existing, addition, 'contexts', replace)
        existing['current-context'] = addition['current-context']
    # check that ~/.kube/config is only read- and writable by its owner
    if platform.system() != 'Windows':
        existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
        if not existing_file_perms.endswith('600'):
            logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
                           existing_file, existing_file_perms)
    with open(existing_file, 'w+') as stream:
        yaml.safe_dump(existing, stream, default_flow_style=False)
    current_context = addition.get('current-context', 'UNKNOWN')
    msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
    print(msg)
|
ctfd.py | #!/usr/bin/env python2
from halo import Halo
from cloudscraper import create_scraper
from threading import Thread, Lock
from requests import session
from argparse import Namespace, ArgumentParser
from bs4 import BeautifulSoup
from shutil import make_archive
import logging as log
import requests, json
import sys, os
import re, time
try:
import queue
except ImportError:
import Queue as queue
class CTFdScrape(object):
    """Scraper for CTFd-based platforms.

    Logs in, enumerates challenges via the v1 JSON API (falling back to the
    legacy '/chals' endpoints on 404), downloads attached files — optionally
    including Google Drive / Dropbox links found in descriptions — and writes
    a per-challenge README.md plus a challs.json index.
    """
    __userAgent = 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'
    __urlParse = re.compile('(?P<scheme>http.*://)?(?P<host>[^:/ ]+):?(?P<port>[0-9]*)')

    def __init__(self, args):
        self.auth = dict(name=args.user, password=args.passwd)
        self.url = self.__parseUrl(args.url)
        self.worker = args.worker
        self.scheme = args.scheme
        self.override = args.override
        self.nofile = args.no_download
        self.basepath = args.path
        self.proxies = args.proxy
        self.cloud = args.enable_cloud
        self.config = args.data
        self.archived = args.export
        self.starTime = time.time()
        self.__setEnVar()

    def __bypassCloudflareProtection(self):
        """Swap the plain session for a cloudscraper session when the site
        answers 503 (Cloudflare protection page)."""
        with Halo(text='Checking for DDOS Protection') as sp:
            if self.ses.get(self.url, timeout=10, verify=False).status_code == 503:
                self.ses = create_scraper()
                sp.succeed('DDOS Protection Found')

    def __setEnVar(self):
        """Initialise per-run state: counters, session, endpoints, regexes, logging."""
        # CTFd params
        self.keys = 'data'
        self.version = 'v.1.0'
        self.dlSize = 0.0
        self.chcount = 0
        self.files = []
        self.warns = []
        self.chals = {}
        # Persistent session
        self.ses = session()
        self.helper = Helper(self.ses)
        self.ses.headers.update({'User-Agent' : self.__userAgent})
        if self.proxies:
            proxy = {'http' : 'http://%s'%(self.proxies),
                     'https' : 'https://%s'%(self.proxies)}
            self.ses.proxies.update(proxy)
        # CTFd Endpoint
        self.ch_url = self.url + '/api/v1/challenges'
        self.hi_url = self.url + '/api/v1/hints'
        self.lg_url = self.url + '/login'
        # Other
        self.charlst = re.escape('\/:*?"<>|.')
        self.regex = re.compile(r'(\/files\/)?([a-f0-9]*\/.*\.*\w*)')
        self.escape = re.compile(r'[%s]|(\s )'%(self.charlst))
        # BUG FIX: this flag was assigned as 'self.travers', but the only
        # readers (__getChall) use 'self.traverseable', so the first access
        # always raised AttributeError and forced the fallback parse path.
        self.traverseable = True
        #Logging
        if not os.path.exists('logs'):
            os.makedirs('logs')
        m = '\n[%(asctime)s]\n[%(lineno)d-th line] %(message)s\n'
        log.basicConfig(level=log.INFO, filename='logs/debug.log', format=m)
        self.dlError = '{} couldn\'t be downloaded'

    def __login(self):
        """POST credentials (with the CSRF nonce) to /login.

        Returns True on success, False on bad credentials, None on error.
        """
        try:
            resp = self.ses.get(self.lg_url,verify=False)
            soup = BeautifulSoup(resp.text,'lxml')
            nonce = soup.find('input', {'name':'nonce'}).get('value')
            self.auth['nonce'] = nonce
            self.title = soup.title.string
            resp = self.ses.post(self.lg_url, data=self.auth, verify=False)
            return 'incorrect' not in resp.text
        except Exception as e:
            log.error('%s'%(e))

    def __manageVersion(self):
        """Probe the v1 API; fall back to legacy ('v.1.2.0') endpoints on 404."""
        resp = self.ses.get(self.ch_url,verify=False)
        if '404' in resp.text:
            self.keys = 'game'
            self.version = 'v.1.2.0'
            self.ch_url = self.url + '/chals'
            self.hi_url = self.url + '/hints'
            self.sol_url = self.ch_url + '/solves'

    def __parseUrl(self, url):
        """Normalise *url*, prepending the configured scheme when missing."""
        matches = self.__urlParse.search(url)
        if matches:
            url = matches.group() if matches.group('scheme') else\
                  '%s://%s' % (self.scheme, matches.group())
        return url

    def __getHintById(self, id):
        resp = self.ses.get('%s/%s' % (self.hi_url,id),verify=False).json()
        return resp['data']['content']

    def __getHints(self, data):
        """Collect the free (cost 0) hints of a challenge; ['-'] when none."""
        res = []
        for hint in data:
            if hint['cost'] == 0:
                if self.version != 'v.1.2.0':
                    res.append(self.__getHintById(hint['id']))
                else:
                    res.append(hint['hint'])
        if not res: res.append('-')
        return res

    def __getSolves(self, data):
        """Solve count from the payload (API) or the cached /solves map (legacy)."""
        if self.version != 'v.1.2.0':
            return data['solves']
        else:
            try:
                return self.solves[str(data['id'])]
            except:
                # first access: fetch and cache the whole solves map
                self.solves = self.ses.get(self.sol_url,verify=False).json()
                return self.solves[str(data['id'])]

    def __getChallById(self, id):
        try:
            resp = self.ses.get('%s/%s' % (self.ch_url,id),verify=False).json()
            return self.__parseData(resp['data'])
        except Exception as e:
            log.error('%s'%(e))

    def __getChall(self, q):
        """Worker: resolve queued challenge ids into parsed entries."""
        while not q.empty():
            id = q.get()
            ch = self.chals[id]
            if self.auth['name'] and self.auth['password']:
                if self.version != 'v.1.2.0':
                    self.chals[id] = self.__getChallById(id)
                else:
                    try:
                        if self.traverseable:
                            self.chals[id] = self.__getChallById(id)
                        else:
                            self.chals[id] = self.__parseData(ch)
                    except:
                        # per-id endpoint unusable: stop traversing, parse raw data
                        self.traverseable = False
                        self.chals[id] = self.__parseData(ch)
            # drop entries that failed to resolve
            self.chals.pop(id) if not self.chals[id] else None
            q.task_done()
        return True

    def __parseData(self, data):
        """Normalise one challenge payload into the scraper's entry schema."""
        if data:
            entry = {
                'id' : data['id'],
                'name' : self.escape.sub('', data['name']),
                'points' : data['value'],
                'description' : data['description'],
                'files' : data['files'],
                'category' : self.escape.sub('', data['category']),
                'solves' : self.__getSolves(data),
                'hints' : self.__getHints(data['hints'])
            }
            # print(json.dumps(entry, sort_keys=True, indent=4))
            return entry

    def __identifyCloudDrive(self, text, path):
        """Extract Google Drive / Dropbox links from a description (only when
        --enable-cloud); returns (path, url) download pairs."""
        text = text.decode('utf-8')
        if self.cloud:
            drives = re.compile(r'(drive.google.com)|(dropbox.com)')
            matches = drives.search(text)
            if matches:
                baseurl = matches.group()
                if 'drive.google.com' in baseurl:
                    rule = re.compile(r'(https://)?drive.google.com.*id=([\?/\w=_-]*)')
                    match = [id[-1] for id in rule.findall(text)]
                    url = 'https://drive.google.com/uc?id={}'
                    return [(path, url.format(id)) for id in match]
                else:
                    rule = re.compile(r'(https://)?(www.dropbox.com/.*dl=)')
                    matches = rule.findall(text)
                    return [(path,''.join(i)+'1') for i in matches]
        return []

    def __downloadHandler(self, data):
        """Download one (path, url) pair, via gdown for Google Drive links."""
        path, url = data
        if 'google.com' in url:
            self.dlSize += self.helper.gdown(url, path, self.override)
        else:
            rule = re.compile(r'\?.*')
            name = rule.sub('', url.split('/')[-1])
            if 'dropbox' not in url:
                # relative CTFd attachment: prefix with the platform files URL
                url = '%s/files/%s' % (self.url, url)
            path = os.path.join(path, name)
            if not os.path.exists(path) or self.override:
                response = self.ses.get(url, verify=False ,stream=True)
                filesize = self.helper.get_content_len(response)
                self.helper.download(response, path)
                if os.path.exists(path):
                    self.dlSize += filesize
                else:
                    self.warns.append(self.dlError.format(url))

    def __download(self, q):
        """Worker: drain the download queue (no-op when --no-download)."""
        while not q.empty():
            data = q.get()
            if not self.nofile or self.override:
                try:
                    self.__downloadHandler(data)
                except Exception as e:
                    if not self.nofile:
                        log.exception('%s'%(e))
            q.task_done()
        return True

    def __populate(self, q):
        """Worker: write README.md per challenge and register its files."""
        while not q.empty():
            vals = self.chals[q.get()]
            ns = Namespace(**vals)
            path = os.path.join(self.path, ns.category, ns.name)
            if not os.path.exists(path):
                os.makedirs(path)
            with open(os.path.join(path, 'README.md'),'wb') as f:
                desc = ns.description.encode('utf-8').strip()
                name = ns.name.encode('utf-8').strip()
                cat = ns.category.encode('utf-8').strip()
                solve = str(ns.solves).encode('utf-8').strip()
                hint = '\n* '.join(ns.hints).encode('utf-8')
                cont = '# %s [%s pts]\n\n' % (name, ns.points)
                cont += '**Category:** %s\n' % (cat)
                cont += '**Solves:** %s\n\n' % (solve)
                cont += '## Description\n>%s\n\n' % (desc)
                cont += '**Hint**\n* %s\n\n' % (hint)
                cont += '## Solution\n\n'
                cont += '### Flag\n\n'
                if sys.version_info.major == 2:
                    f.write(cont)
                else:
                    # py3: strip the b'..' repr artifacts introduced above
                    cont = re.sub(r"(b\')|\'",'',cont)
                    f.write(bytes(cont.encode()))
            self.files += [(path, self.regex.search(i).group(2)) for i in ns.files]
            self.files += self.__identifyCloudDrive(desc, path)
            data = self.entry['data'].get(ns.category, list())
            if not data:
                self.entry['data'][ns.category] = data
            data.append(vals)
            q.task_done()
        return True

    def __listChall(self, sp):
        """Print a '<category> (<count>)' line per category via the spinner."""
        for key,val in self.entry['data'].items():
            sp.start('{0:<30}({1:<0})'.format(key, len(val)))
            sp.succeed()

    def __listWarn(self, sp):
        for val in self.warns:
            sp.warn(val)

    def __Threader(self, elements, action=None):
        """Fan *elements* out to self.worker daemon threads running *action*."""
        que = queue.Queue()
        [que.put(_) for _ in elements]
        for i in range(self.worker):
            worker = Thread(target=action, args=(que, ))
            worker.setDaemon(True)
            worker.start()
        que.join()
        del que

    def authenticate(self):
        """Login, detect the CTFd version and load any cached challs.json."""
        self.__bypassCloudflareProtection()
        with Halo(text='\n Authenticating') as sp:
            if not self.__login():
                sp.fail(' Login Failed :(')
                sys.exit()
            sp.succeed(' Login Success')
        self.__manageVersion()
        path = os.path.join(self.basepath, self.title, 'challs.json')
        if os.path.exists(path):
            self.config = path
        if self.config:
            self.parseConfig(self.config)

    def getChallenges(self):
        """Fetch the challenge list and merge any ids not already known."""
        with Halo(text='\n Collecting challs') as sp:
            try:
                chals = self.ses.get(self.ch_url,verify=False).json()[self.keys]
                chals = sorted(chals, key=lambda _: _['category'])
                for chal in chals:
                    if not self.chals.get(chal['id']):
                        self.chcount += 1
                        self.chals[chal['id']] = chal
                if self.chcount > 0:
                    sp.succeed('Found %s new challenges'%(self.chcount))
                else:
                    sp.warn('There are no new challenges')
            except Exception as e:
                sp.fail('No challenges found :(')
                sys.exit()
        return True

    def createArchive(self):
        """Resolve, populate and download everything under basepath/title."""
        orig_path = os.getcwd()
        self.path = os.path.join(orig_path, self.basepath, self.title)
        self.entry = dict(url=self.url, title=self.title, data={})
        if not os.path.exists(self.path):
            os.makedirs(self.path)
        os.chdir(self.path)
        with Halo(text='\n Updating Assets') as sp:
            self.__Threader(self.chals, self.__getChall)
            self.__Threader(self.chals, self.__populate)
            self.__Threader(self.files, self.__download)
            # The size output may be wrong (false positive)
            # due to the usage of request.get(url, stream=True)
            # in order to get the 'Content-Length' header
            sp.succeed('Found {0:} files ({1:.1f} MB downloaded)'\
                .format(len(self.files),self.dlSize/10**6))
            if self.warns:
                self.__listWarn(sp)
        with open('challs.json','wb') as f:
            data = json.dumps(self.entry ,sort_keys=True, indent=4)
            if sys.version_info.major == 2:
                f.write(data)
            else:
                f.write(bytes(data.encode()))
        if self.archived:
            target = os.path.join(orig_path, self.title)
            with Halo('Saving archive as ZIP') as sp:
                make_archive(target, 'zip', '.')
                sp.succeed('The archive were saved successfully')

    def review(self):
        """Print a per-category summary and the total elapsed time."""
        print('\n[Summary]')
        self.__listChall(Halo())
        print('\n[Finished in {0:.2f} second]'.format(time.time() - self.starTime))

    def parseConfig(self, path):
        """Load a previously saved challs.json into self.chals."""
        with Halo(text='\n Loading an existing data') as sp:
            try:
                with open(path) as config:
                    data = json.loads(config.read())
                    for vals in data['data'].values():
                        for val in vals:
                            id = val['id']
                            self.chals[id] = val
                    if not self.url:
                        self.url = data.get('url','')
                    self.title = data.get('title','')
                sp.succeed('Loaded %s challs from challs.json'%(len(self.chals)))
            except Exception as e:
                log.error('%s'%(e))
                sp.fail('challs.json No such file or directory')
                sys.exit()
class Helper(object):
    """Download helpers shared by the scraper: HTML probing, Google Drive
    confirmation-token handling, content-length detection, chunked download."""
    def __init__(self, session):
        # requests-compatible session reused for helper downloads
        self.session = session
    def get_confirm_token(self, response):
        """Return Google Drive's 'download_warning' cookie value, if present."""
        for key, value in response.cookies.items():
            if key.startswith('download_warning'):
                return value
        return None
    def find(self, val, text, offset):
        """Return the text of the *offset*-th <val> tag in *text*, or ''."""
        soup = BeautifulSoup(text, 'lxml')
        match = soup.find_all(val)
        if match:
            return match[offset].text
        return ''
    def get_content_len(self, response, val=0):
        """Best-effort payload size in bytes for *response*.

        Prefers the Content-Length header; without one (e.g. a Google Drive
        confirmation page) it scrapes a human-readable size like "(1.2M)"
        from the last <span> of the HTML body.
        """
        headers = response.headers
        val = float(headers.get('Content-Length', 0))
        if not val:
            val = self.find('span', response.text, -1)
            if val:
                size = val.split()[-1][1:-1]
                if 'M' in size:
                    val = float(size[:-1])*10**6
                elif 'G' in size:
                    val = float(size[:-1])*10**9
        return val
    def get_gdrive_name(self, response):
        """Filename of a Google Drive download, from the Content-Disposition
        header or (fallback) from the confirmation page's anchor text."""
        head = response.headers
        rule = re.compile(r'filename="(.*)"')
        match = rule.search(head.get("Content-Disposition",''))
        if match:
            return match.group(1)
        return self.find('a', response.text, -4)
    def gdown(self, url, path, enable=False):
        """Download a Google Drive file into *path* (skipped when the file
        already exists unless *enable*). Returns the reported file size."""
        baseurl = 'https://docs.google.com/uc?export=download'
        fileid = url.split('id=')[1]
        params = {'id' : fileid}
        session = requests.session()
        response = session.get(baseurl, params=params, stream=True)
        tokens = self.get_confirm_token(response)
        filename = self.get_gdrive_name(response)
        filesize = self.get_content_len(response)
        if tokens:
            # large files require confirming the virus-scan warning page
            params.update(dict(confirm=tokens))
        path = os.path.join(path, filename)
        if not os.path.exists(path) or enable:
            respons = session.get(baseurl, params=params, stream=True)
            self.download(respons, path)
        # if os.path.exists(path):
        #     print('success')
        return filesize
    def download(self, response, path):
        """Stream *response* to *path* in 512 KiB chunks (only on HTTP 200)."""
        if response.status_code == 200:
            with open(path, 'wb') as f:
                for chunk in response.iter_content(512*1024):
                    if chunk:
                        f.write(chunk)
def main():
    """CLI entry point: scrape a CTFd instance (or replay a saved challs.json)
    and mirror its challenges into a local directory tree."""
    parser = ArgumentParser(description='Simple CTFd-based scraper for challenges gathering')
    parser.add_argument('user', nargs='?', metavar='user', type=str, help='Username/email')
    parser.add_argument('passwd', nargs='?', metavar='passwd', type=str, help='Password')
    parser.add_argument('url', nargs='?', metavar='url', type=str, default='', help='CTFd platform url')
    parser.add_argument('--data', metavar='data', type=str, help='Populate from challs.json')
    parser.add_argument('--proxy', metavar='proxy', type=str, help='Request behind proxy server')
    parser.add_argument('--path', metavar='path', type=str, help='Target directory, default: CTF', default='CTF')
    parser.add_argument('--worker', metavar='worker', type=int, help='Number of threads, default: 10', default=10)
    parser.add_argument('--scheme', metavar='scheme', type=str, help='URL scheme, default: https', default='https')
    parser.add_argument('--enable-cloud', help='Permit file download from a cloud drive, default=False', action='store_true')
    parser.add_argument('--override', help='Override existed chall file', action='store_true')
    parser.add_argument('--no-download', help='Don\'t download chall file', action='store_true')
    parser.add_argument('--export', help='Export challenges directory as zip, default=False', action='store_true')
    args = parser.parse_args()
    ctf = CTFdScrape(args)
    if args.data or args.url:
        if args.user and args.passwd:
            # live mode: log in and pull the challenge list from the API
            ctf.authenticate()
            ctf.getChallenges()
        else:
            # offline mode: rebuild state from a previously saved challs.json
            ctf.parseConfig(args.data)
            ctf.nofile = True
        ctf.createArchive()
        ctf.review()
    else:
        parser.error('too few arguments')
# script entry point
if __name__ == '__main__':
    main()
|
roadblock.py | #!/usr/bin/python3
'''Roadblock is a synchronization and message passing utility which relies on redis for communication'''
import argparse
import datetime
import time
import calendar
import socket
import signal
import hashlib
import json
import uuid
import threading
import logging
import sys
from dataclasses import dataclass
import redis
import jsonschema
# process exit / return codes used throughout roadblock
RC_SUCCESS=0          # synchronization completed successfully
RC_INVALID_INPUT=2    # bad or missing CLI input
RC_TIMEOUT=3          # the roadblock timed out
RC_ABORT=4            # a member requested an abort
# define some global variables (shared via the single module-level t_global instance)
@dataclass
class global_vars:
    '''Global variables'''
    # NOTE(review): none of these attributes are annotated, so @dataclass
    # treats them as plain class attributes rather than dataclass fields;
    # the mutable dicts/lists below are therefore class-level shared state.
    # This works here only because exactly one instance is ever created.
    alarm_active = False        # True while a SIGALRM timeout is armed
    args = None                 # parsed CLI arguments (argparse.Namespace)
    con_pool = None             # redis connection pool
    con_pool_state = False      # True while the redis connection is believed healthy
    con_watchdog_exit = None    # threading.Event used to stop the watchdog thread
    con_watchdog = None         # the watchdog threading.Thread
    redcon = None               # redis connection object
    pubsubcon = None            # redis pubsub connection (busB)
    initiator = False           # True if this member created the roadblock
    mirror_busB = False         # initiator-only: mirror busB messages onto busA
    schema = None               # JSON schema for roadblock protocol messages
    user_schema = None          # JSON schema for user-supplied messages
    my_id = None                # this member's leader/follower ID
    watch_busA = True           # poll the busA list for messages
    watch_busB = False          # poll the busB pubsub channel for messages
    leader_abort = False        # leader: an abort was requested
    follower_abort = False      # follower: the leader told us to abort
    initiator_type = None       # role of the member that created the roadblock
    initiator_id = None         # ID of the member that created the roadblock
    followers = { "online": {},
                  "ready": {},
                  "gone": {} }
    processed_messages = {}     # message uuid -> True, for de-duplication
    messages = { "sent": [],
                 "received": [] }
    message_log = None          # open file handle when --message-log is used
    user_messages = []          # parsed --user-messages payload
def message_to_str(message):
    '''Serialize a message dict into its compact JSON wire form.'''
    compact_separators = (",", ":")
    return json.dumps(message, separators=compact_separators)
def message_from_str(message):
    '''Deserialize a JSON wire string back into a message dict.'''
    decoded = json.loads(message)
    return decoded
def message_build(recipient_type, recipient_id, command, value=None):
    '''Create a generic message using the ID and role of the sender'''
    # delegate to the custom builder with this node's own role and ID
    my_role = t_global.args.roadblock_role
    my_ident = t_global.my_id
    return message_build_custom(my_role, my_ident, recipient_type, recipient_id, command, value)
def message_build_custom(sender_type, sender_id, recipient_type, recipient_id, command, value=None):
    '''Create a custom message with any user specified values'''
    # assemble the payload first; the checksum is computed over it below
    payload = {
        "uuid": str(uuid.uuid4()),
        "roadblock": t_global.args.roadblock_uuid,
        "sender": {
            "timestamp": calendar.timegm(time.gmtime()),
            "type": sender_type,
            "id": sender_id,
        },
        "recipient": {
            "type": recipient_type
        },
        "message": {
            "command": command
        }
    }
    # only directed messages carry an explicit recipient id
    if recipient_type != "all":
        payload["recipient"]["id"] = recipient_id
    if value is not None:
        if command == "user-string":
            payload["message"]["user-string"] = value
        elif command == "user-object":
            payload["message"]["user-object"] = value
        else:
            payload["message"]["value"] = str(value)
    # the checksum covers the payload's compact JSON form so receivers
    # can detect corruption
    digest = hashlib.sha256(str(message_to_str(payload)).encode("utf-8")).hexdigest()
    return { "payload": payload, "checksum": digest }
def message_validate(message):
    '''Validate that a received message matches the message schema and that it is not corrupted.

    Returns True only when the message both conforms to the roadblock
    JSON schema and carries a checksum matching its payload.
    '''
    try:
        jsonschema.validate(instance=message, schema=t_global.schema)
        checksum = hashlib.sha256(str(message_to_str(message["payload"])).encode("utf-8")).hexdigest()
        return bool(message["checksum"] == checksum)
    except jsonschema.exceptions.ValidationError:
        # bug fix: a non-conforming *message* raises ValidationError (not
        # SchemaError); previously this propagated and crashed the bus
        # loops instead of rejecting the message
        return False
    except jsonschema.exceptions.SchemaError:
        # our own schema is malformed; treat the message as invalid
        return False
def message_for_me(message):
    '''Determine if a received message was intended for me'''
    # side effect: stamp the receive time onto the message
    message["payload"]["recipient"]["timestamp"] = calendar.timegm(time.gmtime())
    sender = message["payload"]["sender"]
    recipient = message["payload"]["recipient"]
    if sender["id"] == t_global.my_id and sender["type"] == t_global.args.roadblock_role:
        # I'm the sender so ignore it
        return False
    if recipient["type"] == "all":
        # broadcast messages are for everyone
        return True
    # directed message: both the role and the id must match mine
    return recipient["type"] == t_global.args.roadblock_role and recipient["id"] == t_global.my_id
def message_get_command(message):
    '''Return the protocol command carried by a message.'''
    body = message["payload"]["message"]
    return body["command"]
def message_get_value(message):
    '''Return the value field carried by a message.'''
    body = message["payload"]["message"]
    return body["value"]
def message_get_sender(message):
    '''Return the ID of the member that sent a message.'''
    sender = message["payload"]["sender"]
    return sender["id"]
def message_get_sender_type(message):
    '''Return the role (leader/follower) of the member that sent a message.'''
    sender = message["payload"]["sender"]
    return sender["type"]
def message_get_uuid(message):
    '''Return a message's unique identifier.'''
    payload = message["payload"]
    return payload["uuid"]
def define_usr_msg_schema():
    '''Define the schema used to validate user messages.

    User messages are an array of objects, each pairing a recipient with
    either a "user-string" or a "user-object" payload.
    '''
    t_global.user_schema = {
        "type": "array",
        "minItems": 1,
        "uniqueItems": True,
        "items": {
            # each entry is one of two shapes: recipient+user-string or
            # recipient+user-object
            "oneOf": [
                {
                    "type": "object",
                    "properties": {
                        "recipient": {
                            "$ref": "#/definitions/recipient"
                        },
                        "user-string": {
                            "type": "string",
                            "minLength": 1
                        }
                    },
                    "required": [
                        "recipient",
                        "user-string"
                    ],
                    "additionalProperties": False
                },
                {
                    "type": "object",
                    "properties": {
                        "recipient": {
                            "$ref": "#/definitions/recipient"
                        },
                        "user-object": {
                            "type": "object"
                        }
                    },
                    "required": [
                        "recipient",
                        "user-object"
                    ],
                    "additionalProperties": False
                }
            ]
        },
        "definitions": {
            # a recipient names a role and an id within that role
            "recipient": {
                "type": "object",
                "properties": {
                    "type": {
                        "type": "string",
                        "enum": [
                            "leader",
                            "follower",
                            "all"
                        ]
                    },
                    "id": {
                        "type": "string",
                        "minLength": 1
                    }
                },
                "required": [
                    "type",
                    "id"
                ],
                "additionalProperties": False
            }
        }
    }
def define_msg_schema():
    '''Define the schema used to validate roadblock protocol messages.

    A protocol message is {"payload": {...}, "checksum": "<sha256 hex>"}.
    The payload's "roadblock" field is pinned to this run's UUID, so
    messages from other roadblocks never validate.
    '''
    t_global.schema = {
        "type": "object",
        "properties": {
            "payload": {
                "type": "object",
                "properties": {
                    "uuid": {
                        "type": "string",
                        "minLength": 36,
                        "maxLength": 36
                    },
                    "roadblock": {
                        "type": "string",
                        # only accept messages for this exact roadblock
                        "enum": [
                            t_global.args.roadblock_uuid
                        ]
                    },
                    "sender": {
                        "type": "object",
                        "properties": {
                            "timestamp": {
                                "type": "integer"
                            },
                            "type": {
                                "type": "string",
                                "enum": [
                                    "leader",
                                    "follower"
                                ]
                            },
                            "id": {
                                "type": "string",
                                "minLength": 1
                            }
                        },
                        "required": [
                            "timestamp",
                            "type",
                            "id"
                        ],
                        "additionalProperties": False
                    },
                    "recipient": {
                        "type": "object",
                        "properties": {
                            "type": {
                                "type": "string",
                                "enum": [
                                    "leader",
                                    "follower",
                                    "all"
                                ]
                            },
                            "id": {
                                "type": "string",
                                "minLength": 1
                            }
                        },
                        "required": [
                            "type"
                        ],
                        "additionalProperties": False,
                        # an "id" is required only for directed (non-"all")
                        # recipients
                        "if": {
                            "properties": {
                                "type": {
                                    "enum": [
                                        "leader",
                                        "follower"
                                    ]
                                }
                            }
                        },
                        "then": {
                            "required": [
                                "id"
                            ]
                        }
                    },
                    "message": {
                        "type": "object",
                        "properties": {
                            "command": {
                                "type": "string",
                                "enum": [
                                    "timeout-ts",
                                    "initialized",
                                    "switch-buses",
                                    "leader-online",
                                    "follower-online",
                                    "all-online",
                                    "initiator-info",
                                    "follower-ready",
                                    "follower-ready-abort",
                                    "all-ready",
                                    "all-go",
                                    "all-abort",
                                    "follower-gone",
                                    "all-gone",
                                    "user-string",
                                    "user-object"
                                ]
                            },
                            "value": {
                                "type": "string",
                                "minLength": 1
                            },
                            "user-string": {
                                "type": "string",
                                "minLength": 1
                            },
                            "user-object": {}
                        },
                        "required": [
                            "command"
                        ],
                        "additionalProperties": False,
                        # certain commands require their matching payload
                        # field to be present
                        "allOf": [
                            {
                                "if": {
                                    "properties": {
                                        "command": {
                                            "enum": [
                                                "timeout-ts"
                                            ]
                                        }
                                    }
                                },
                                "then": {
                                    "required": [
                                        "value"
                                    ]
                                }
                            },
                            {
                                "if": {
                                    "properties": {
                                        "command": {
                                            "enum": [
                                                "user-string"
                                            ]
                                        }
                                    }
                                },
                                "then": {
                                    "required": [
                                        "user-string"
                                    ]
                                }
                            },
                            {
                                "if": {
                                    "properties": {
                                        "command": {
                                            "enum": [
                                                "user-object"
                                            ]
                                        }
                                    }
                                },
                                "then": {
                                    "required": [
                                        "user-object"
                                    ]
                                }
                            }
                        ]
                    }
                },
                "required": [
                    "uuid",
                    "roadblock",
                    "sender",
                    "recipient",
                    "message"
                ],
                "additionalProperties": False
            },
            "checksum": {
                # sha256 hex digest of the payload's compact JSON form
                "type": "string",
                "minLength": 64,
                "maxLength": 64
            }
        },
        "required": [
            "payload",
            "checksum"
        ],
        "additionalProperties": False
    }
def send_user_messages():
    '''Publish every user-defined message loaded from --user-messages.'''
    if t_global.user_messages is not None:
        t_global.log.info("Sending user requested messages")
        # number the messages from 1 purely for log readability
        for counter, user_msg in enumerate(t_global.user_messages, start=1):
            if "user-string" in user_msg:
                t_global.log.info("Sending user message %d: 'user-string'", counter)
                message_publish(message_build(user_msg["recipient"]["type"], user_msg["recipient"]["id"], "user-string", user_msg["user-string"]))
            elif "user-object" in user_msg:
                t_global.log.info("Sending user message %d: 'user-object'", counter)
                message_publish(message_build(user_msg["recipient"]["type"], user_msg["recipient"]["id"], "user-object", user_msg["user-object"]))
def message_handle (message):
    '''Roadblock protocol message handler.

    Dispatches on the message command and drives the roadblock state
    machine (timeout arming, bus switching, online/ready/gone tracking).
    Returns RC_SUCCESS, or RC_TIMEOUT when the cluster timeout has
    already passed.
    '''
    msg_uuid = message_get_uuid(message)
    if msg_uuid in t_global.processed_messages:
        # messages can be seen more than once (e.g. mirrored from busB
        # onto busA), so de-duplicate on the message uuid
        t_global.log.debug("I have already processed this message! [%s]", msg_uuid)
        return RC_SUCCESS
    else:
        t_global.log.debug("adding uuid='%s' to the processed messages list", msg_uuid)
        t_global.processed_messages[msg_uuid] = True
        if t_global.message_log is not None:
            # if the message log is open then append messages to the queue
            # for later dumping
            t_global.messages["received"].append(message)
    msg_command = message_get_command(message)
    if msg_command == "timeout-ts":
        # the initiator published the absolute cluster timeout; re-arm the
        # local SIGALRM so every member times out at the same moment
        t_global.log.info("Received 'timeout-ts' message")
        cluster_timeout = int(message_get_value(message))
        mytime = calendar.timegm(time.gmtime())
        timeout = mytime - cluster_timeout
        if timeout < 0:
            signal.alarm(abs(timeout))
            t_global.alarm_active = True
            t_global.log.info("The new timeout value is in %d seconds", abs(timeout))
            t_global.log.info("Timeout: %s", datetime.datetime.utcfromtimestamp(cluster_timeout).strftime("%Y-%m-%d at %H:%M:%S UTC"))
        else:
            signal.alarm(0)
            t_global.alarm_active = False
            t_global.log.critical("The timeout has already occurred")
            return RC_TIMEOUT
    elif msg_command == "switch-buses":
        # self-addressed marker: every backlogged busA message has been
        # consumed, so move to live monitoring of busB
        t_global.log.debug("switching busses")
        t_global.watch_busA = False
        t_global.watch_busB = True
    elif msg_command == "leader-online":
        if t_global.args.roadblock_role == "follower":
            t_global.log.debug("I see that the leader is online")
    elif msg_command == "follower-online":
        if t_global.args.roadblock_role == "leader":
            msg_sender = message_get_sender(message)
            if msg_sender in t_global.followers["online"]:
                t_global.log.info("Received 'follower-online' message from '%s'", msg_sender)
                del t_global.followers["online"][msg_sender]
            elif msg_sender in t_global.args.roadblock_followers:
                t_global.log.warning("Did I already process this 'follower-online' message from follower '%s'?", msg_sender)
            else:
                t_global.log.info("Received 'follower-online' message from unknown follower '%s'", msg_sender)
            if len(t_global.followers["online"]) == 0:
                # every expected follower has checked in
                t_global.log.info("Sending 'all-online' message")
                message_publish(message_build("all", "all", "all-online"))
                if t_global.initiator:
                    # everyone is present, so busB mirroring can stop
                    t_global.mirror_busB = False
                send_user_messages()
    elif msg_command == "all-online":
        if t_global.initiator:
            t_global.log.info("Initiator received 'all-online' message")
            t_global.mirror_busB = False
        else:
            t_global.log.info("Received 'all-online' message")
        send_user_messages()
        if t_global.args.roadblock_role == "follower":
            if t_global.args.abort:
                t_global.log.info("Sending 'follower-ready-abort' message")
                message_publish(message_build("leader", t_global.args.roadblock_leader_id, "follower-ready-abort"))
            else:
                t_global.log.info("Sending 'follower-ready' message")
                message_publish(message_build("leader", t_global.args.roadblock_leader_id, "follower-ready"))
    elif msg_command in ("follower-ready", "follower-ready-abort"):
        if t_global.args.roadblock_role == "leader":
            t_global.log.debug("leader got a 'follower-ready'")
            if msg_command == "follower-ready-abort":
                # a single follower requesting an abort aborts everyone
                t_global.leader_abort = True
            msg_sender = message_get_sender(message)
            if msg_sender in t_global.followers["ready"]:
                t_global.log.info("Received '%s' message from '%s'", msg_command, msg_sender)
                del t_global.followers["ready"][msg_sender]
            elif msg_sender in t_global.args.roadblock_followers:
                t_global.log.warning("Received a redundant '%s' message from follower '%s'?", msg_command, msg_sender)
            else:
                t_global.log.info("Received '%s' message from unknown follower '%s'", msg_command, msg_sender)
            if len(t_global.followers["ready"]) == 0:
                # all followers are ready: broadcast the final verdict
                t_global.log.info("Sending 'all-ready' message")
                message_publish(message_build("all", "all", "all-ready"))
                if t_global.leader_abort:
                    t_global.log.info("Sending 'all-abort' command")
                    message_publish(message_build("all", "all", "all-abort"))
                else:
                    t_global.log.info("Sending 'all-go' command")
                    message_publish(message_build("all", "all", "all-go"))
    elif msg_command == "all-ready":
        t_global.log.info("Received 'all-ready' message")
    elif msg_command in ("all-go", "all-abort"):
        if t_global.args.roadblock_role == "follower":
            if msg_command == "all-go":
                t_global.log.info("Received 'all-go' from leader")
            else:
                t_global.log.info("Received 'all-abort' from leader")
                t_global.follower_abort = True
            # tell the leader that I'm gone
            t_global.log.info("Sending 'follower-gone' message")
            message_publish(message_build("leader", t_global.args.roadblock_leader_id, "follower-gone"))
            # signal myself to exit
            t_global.watch_busB = False
    elif msg_command == "follower-gone":
        if t_global.args.roadblock_role == "leader":
            t_global.log.debug("leader got a 'follower-gone' message")
            msg_sender = message_get_sender(message)
            if msg_sender in t_global.followers["gone"]:
                t_global.log.info("Received 'follower-gone' message from '%s'", msg_sender)
                del t_global.followers["gone"][msg_sender]
            elif msg_sender in t_global.args.roadblock_followers:
                t_global.log.warning("Received a redundant 'follower-gone' message from follower '%s'?", msg_sender)
            else:
                t_global.log.info("Received 'follower-gone' message from unknown follower '%s'", msg_sender)
            if len(t_global.followers["gone"]) == 0:
                # send a message that will probably not be observed by
                # anyone...but just in case...
                t_global.log.info("Sending 'all-gone' message")
                message_publish(message_build("all", "all", "all-gone"))
                # signal myself to exit
                t_global.watch_busB = False
    elif msg_command == "initiator-info":
        t_global.initiator_type = message_get_sender_type(message)
        t_global.initiator_id = message_get_sender(message)
        t_global.log.debug("Received an 'initiator-info' message with type='%s' and id='%s'", t_global.initiator_type, t_global.initiator_id)
    return RC_SUCCESS
def message_publish(message):
    '''Publish a message on busB, retrying until at least one client receives it.'''
    message_str = message_to_str(message)
    attempt = 0
    receivers = 0
    while receivers == 0:
        attempt += 1
        # publish() returns the number of clients that received the
        # message; zero means nobody was subscribed yet, so retry
        receivers = t_global.redcon.publish(t_global.args.roadblock_uuid + "__busB", message_str)
        if receivers == 0:
            t_global.log.warning("Failed attempt %d to publish message '%s'", attempt, message)
            backoff(attempt)
    if t_global.message_log is not None:
        # queue sent messages for the end-of-run dump
        t_global.messages["sent"].append(message)
    return RC_SUCCESS
def key_delete(key):
    '''Delete a key from redis, retrying until exactly one key is removed.'''
    attempt = 0
    deleted = 0
    while deleted == 0:
        attempt += 1
        # delete() returns how many keys were removed; we expect one,
        # so a zero return triggers a retry
        deleted = t_global.redcon.delete(key)
        if deleted == 0:
            t_global.log.warning("Failed attempt %d to delete key '%s'", attempt, key)
            backoff(attempt)
    return RC_SUCCESS
def key_set_once(key, value):
    '''Set a key in redis, retrying until the set succeeds.'''
    attempt = 0
    created = 0
    while created == 0:
        attempt += 1
        # msetnx() returns 1 when the key was newly created; retry on 0
        created = t_global.redcon.msetnx( { key: value } )
        if created == 0:
            t_global.log.warning("Failed attempt %d to set key '%s' with value '%s' once", attempt, key, value)
            backoff(attempt)
    return RC_SUCCESS
def key_set(key, value):
    '''Set a key in redis if it does not already exist.

    Returns the raw msetnx() result so the caller can distinguish
    "I created it" (truthy) from "it already existed" (falsy).
    '''
    mapping = { key: value }
    return t_global.redcon.msetnx(mapping)
def key_check(key):
    '''Report whether a key already exists in redis (no modification).'''
    present = t_global.redcon.exists(key)
    return present
def list_append(key, value):
    '''Append a value to a list in redis.

    Retries until rpush() reports a non-zero list length, and returns
    that length.
    '''
    ret_val = 0
    counter = 0
    while ret_val == 0:
        # bug fix: the attempt counter was never incremented, so every
        # failure logged "attempt 0" and backoff(0) never slowed the
        # retry spin (compare key_delete()/key_set_once())
        counter += 1
        # if this call returns 0 then it failed somehow since it
        # should be the size of the list after we have added to it, so
        # we retry
        ret_val = t_global.redcon.rpush(key, value)
        if ret_val == 0:
            t_global.log.warning("Failed attempt %d to append value '%s' to key '%s'", counter, value, key)
            backoff(counter)
    return ret_val
def list_fetch(key, offset):
    '''Fetch list elements from redis, from *offset* through the end.

    An empty result is legitimate (nothing new yet), so there is no
    retry logic here.
    '''
    end_of_list = -1
    return t_global.redcon.lrange(key, offset, end_of_list)
def backoff(attempts):
    '''Throttle retries: spin freely at first, then sleep progressively longer.'''
    if attempts > 50:
        # many failures: back off more, spin even slower
        time.sleep(0.5)
    elif attempts > 10:
        # some failures: back off a bit, don't spin as quickly
        time.sleep(0.1)
    # 10 or fewer attempts: no back off, try really hard (spin)
    return RC_SUCCESS
def process_options ():
    '''Define the CLI argument parsing options, parse them into
    t_global.args, and configure logging per --log-level.'''
    parser = argparse.ArgumentParser(description="Roadblock provides multi entity (system, vm, container, etc.) synchronization.")
    parser.add_argument("--uuid",
                        dest = "roadblock_uuid",
                        help = "UUID that maps to the specific roadblock being processed.",
                        required = True)
    parser.add_argument("--role",
                        dest = "roadblock_role",
                        help = "What is the roadblock role of this node.",
                        default = "follower",
                        choices = ["leader", "follower"])
    parser.add_argument("--timeout",
                        dest = "roadblock_timeout",
                        help = "How long should the roadblock wait before timing out.",
                        default = 30,
                        type = int)
    parser.add_argument("--follower-id",
                        dest = "roadblock_follower_id",
                        help = "What is follower ID for this node.",
                        default = socket.getfqdn(),
                        type = str)
    # NOTE(review): --leader-id has no default, so roadblock_leader_id is
    # None when omitted; main() then calls len(None) -- confirm intent.
    parser.add_argument("--leader-id",
                        dest = "roadblock_leader_id",
                        help = "What is leader ID for this specific roadblock.",
                        type = str)
    parser.add_argument("--redis-server",
                        dest = "roadblock_redis_server",
                        help = "What is network name for the redis server (hostname or IP address).",
                        default = "localhost",
                        type = str)
    parser.add_argument("--redis-password",
                        dest = "roadblock_redis_password",
                        help = "What is password used to connect to the redis server.",
                        default = "foobar",
                        type = str)
    parser.add_argument("--followers",
                        dest = "roadblock_followers",
                        help = "Use one or more times on the leader to specify the followers by name.",
                        action = "append",
                        type = str)
    parser.add_argument("--abort",
                        dest = "abort",
                        help = "Use this option as a follower or leader to send an abort message as part of this synchronization",
                        action = "store_true")
    parser.add_argument("--message-log",
                        dest = "message_log",
                        help = "File to log all received messages to.",
                        default = None,
                        type = str)
    parser.add_argument("--user-messages",
                        dest = "user_messages",
                        help = "File to load user specified messages from.",
                        default = None,
                        type = str)
    parser.add_argument("--log-level",
                        dest = "log_level",
                        help = "Control how much logging output should be generated",
                        default = "normal",
                        choices = [ "normal", "debug" ])
    t_global.args = parser.parse_args()
    # choose the log format/verbosity based on the requested level
    if t_global.args.log_level == 'debug':
        logging.basicConfig(level = logging.DEBUG, format = t_global.log_debug_format, stream = sys.stdout)
    elif t_global.args.log_level == 'normal':
        logging.basicConfig(level = logging.INFO, format = t_global.log_normal_format, stream = sys.stdout)
    t_global.log = logging.getLogger(__file__)
def cleanup():
    '''Cleanup the roadblock before exiting.

    Disables any pending alarm, removes the leader's redis keys, stops
    the connection watchdog, closes the connection pool, dumps the
    message log, and logs the set of processed message uuids.
    '''
    if t_global.alarm_active:
        t_global.log.info("Disabling timeout alarm")
        signal.alarm(0)
    if t_global.con_pool_state:
        if t_global.args.roadblock_role == "leader":
            # only the leader owns the shared roadblock keys
            t_global.log.info("Removing db objects specific to this roadblock")
            key_delete(t_global.args.roadblock_uuid)
            key_delete(t_global.args.roadblock_uuid + "__initialized")
            key_delete(t_global.args.roadblock_uuid + "__busA")
        # stop the watchdog thread before tearing down the pool it pings
        t_global.log.info("Closing connection pool watchdog")
        t_global.con_watchdog_exit.set()
        t_global.con_watchdog.join()
        t_global.log.info("Closing connection pool")
        t_global.con_pool.disconnect()
        t_global.con_pool_state = False
    if t_global.message_log is not None:
        # if the message log is open then dump the message queue and
        # close the file handle
        print("%s\n" % (json.dumps(t_global.messages, indent = 4, separators=(',', ': '), sort_keys = False)), file=t_global.message_log)
        t_global.message_log.close()
    t_global.log.debug("Processed Messages:")
    for msg in t_global.processed_messages:
        t_global.log.debug("\t%s", msg)
    return RC_SUCCESS
def get_followers_list(followers):
    '''Render a followers collection as a single string.

    Each follower name is followed by a space (including the last one),
    matching the original concatenation behavior.
    '''
    return "".join("%s " % (follower) for follower in followers)
def do_timeout():
    '''Handle a roadblock timeout event.

    Marks the roadblock as timed out (initiator only), cleans up, logs
    which followers never progressed, and exits with RC_TIMEOUT.
    '''
    t_global.log.critical("Roadblock failed with timeout")
    if t_global.con_pool_state and t_global.initiator:
        # set a persistent flag that the roadblock timed out so that
        # any late arriving members know that the roadblock has
        # already failed. done by the first member since that is the
        # only member that is guaranteed to have actually reached the
        # roadblock and be capable of setting this.
        key_set_once(t_global.args.roadblock_uuid + "__timedout", int(True))
    cleanup()
    if t_global.args.roadblock_role == "leader":
        # report the earliest phase where followers got stuck
        if len(t_global.followers["online"]) != 0:
            t_global.log.critical("These followers never reached 'online': %s", get_followers_list(t_global.followers["online"]))
        elif len(t_global.followers["ready"]) != 0:
            t_global.log.critical("These followers never reached 'ready': %s", get_followers_list(t_global.followers["ready"]))
        elif len(t_global.followers["gone"]) != 0:
            t_global.log.critical("These followers never reach 'gone': %s", get_followers_list(t_global.followers["gone"]))
    sys.exit(RC_TIMEOUT)
def sighandler(signum, frame):
    '''Handle signals delivered to the process.

    SIGALRM means the roadblock timeout fired; anything else is merely
    logged.
    '''
    if signum == signal.SIGALRM:
        # improvement: compare against signal.SIGALRM instead of the
        # magic number 14 (portable and self-documenting)
        t_global.alarm_active = False
        do_timeout()
    else:
        t_global.log.info("Signal handler called with signal %d", signum)
    return RC_SUCCESS
def connection_watchdog():
    '''Background thread: ping redis once per second until told to exit.

    On a connection error, flips con_pool_state to False so the rest of
    the program knows the connection is down.
    '''
    while not t_global.con_watchdog_exit.is_set():
        time.sleep(1)
        try:
            if t_global.con_pool_state:
                t_global.redcon.ping()
            else:
                t_global.log.error("con_pool_state=False")
        except redis.exceptions.ConnectionError as con_error:
            t_global.con_pool_state = False
            t_global.log.error("%s", con_error)
            t_global.log.error("Redis connection failed")
    return RC_SUCCESS
def main():
    '''Main control block.

    Parses options, connects to redis, creates or joins the roadblock,
    then runs the busA backlog loop and/or the busB live loop until the
    roadblock completes, aborts, or times out.
    '''
    process_options()
    # NOTE(review): --leader-id has no default, so omitting it makes
    # roadblock_leader_id None and len(None) raises TypeError before the
    # friendly error below -- consider 'if not ...' instead.
    if len(t_global.args.roadblock_leader_id) == 0:
        t_global.log.critical("You must specify the leader's ID using --leader-id")
        return RC_INVALID_INPUT
    if t_global.args.roadblock_role == "leader":
        # NOTE(review): same issue -- a leader started without any
        # --followers has roadblock_followers == None and len(None) raises.
        if len(t_global.args.roadblock_followers) == 0:
            t_global.log.critical("There must be at least one follower")
            return RC_INVALID_INPUT
        if t_global.args.abort:
            t_global.leader_abort = True
        # build some hashes for easy tracking of follower status
        for follower in t_global.args.roadblock_followers:
            t_global.followers["online"][follower] = True
            t_global.followers["ready"][follower] = True
            t_global.followers["gone"][follower] = True
    # my_id depends on the role this node is playing
    if t_global.args.roadblock_role == "follower":
        t_global.my_id = t_global.args.roadblock_follower_id
    elif t_global.args.roadblock_role == "leader":
        t_global.my_id = t_global.args.roadblock_leader_id
    if t_global.args.message_log is not None:
        # open the message log, if specified
        try:
            t_global.message_log = open(t_global.args.message_log, "w", encoding="ascii")
        except IOError:
            t_global.log.critical("Could not open message log '%s' for writing!", t_global.args.message_log)
            return RC_INVALID_INPUT
    define_msg_schema()
    define_usr_msg_schema()
    if t_global.args.user_messages is not None:
        # load the user messages, if specified
        try:
            with open(t_global.args.user_messages, "r", encoding="ascii") as user_messages:
                t_global.user_messages = json.load(user_messages)
        except IOError:
            t_global.log.critical("Could not load the user messages '%s'!", t_global.args.user_messages)
            return RC_INVALID_INPUT
        try:
            jsonschema.validate(instance=t_global.user_messages, schema=t_global.user_schema)
        except jsonschema.exceptions.SchemaError as exception:
            t_global.log.critical(exception)
            t_global.log.critical("Could not JSON validate the user messages!")
            return RC_INVALID_INPUT
    # define a signal handler that will respond to SIGALRM when a
    # timeout even occurs
    signal.signal(signal.SIGALRM, sighandler)
    # set the default timeout alarm
    signal.alarm(t_global.args.roadblock_timeout)
    t_global.alarm_active = True
    mytime = calendar.timegm(time.gmtime())
    t_global.log.info("Current Time: %s", datetime.datetime.utcfromtimestamp(mytime).strftime("%Y-%m-%d at %H:%M:%S UTC"))
    cluster_timeout = mytime + t_global.args.roadblock_timeout
    t_global.log.info("Timeout: %s", datetime.datetime.utcfromtimestamp(cluster_timeout).strftime("%Y-%m-%d at %H:%M:%S UTC"))
    # create the redis connections (retry until the server is reachable)
    while not t_global.con_pool_state:
        try:
            t_global.con_pool = redis.ConnectionPool(host = t_global.args.roadblock_redis_server,
                                                     password = t_global.args.roadblock_redis_password,
                                                     port = 6379,
                                                     db = 0,
                                                     health_check_interval = 0)
            t_global.redcon = redis.Redis(connection_pool = t_global.con_pool)
            t_global.redcon.ping()
            t_global.con_pool_state = True
        except redis.exceptions.ConnectionError as con_error:
            t_global.log.error("%s", con_error)
            t_global.log.error("Redis connection could not be opened!")
            time.sleep(3)
    t_global.pubsubcon = t_global.redcon.pubsub(ignore_subscribe_messages = True)
    # start the background thread that pings redis every second
    t_global.con_watchdog_exit = threading.Event()
    t_global.con_watchdog = threading.Thread(target = connection_watchdog, args = ())
    t_global.con_watchdog.start()
    t_global.log.info("Roadblock UUID: %s", t_global.args.roadblock_uuid)
    t_global.log.info("Role: %s", t_global.args.roadblock_role)
    if t_global.args.roadblock_role == "follower":
        t_global.log.info("Follower ID: %s", t_global.args.roadblock_follower_id)
        t_global.log.info("Leader ID: %s", t_global.args.roadblock_leader_id)
    elif t_global.args.roadblock_role == "leader":
        t_global.log.info("Leader ID: %s", t_global.args.roadblock_leader_id)
        t_global.log.info("Total followers: %d", len(t_global.args.roadblock_followers))
        t_global.log.info("Followers: %s", t_global.args.roadblock_followers)
    if t_global.args.abort:
        t_global.log.info("Abort: True")
    else:
        t_global.log.info("Abort: False")
    # check if the roadblock was previously created and already timed
    # out -- ie. I am very late
    if key_check(t_global.args.roadblock_uuid + "__timedout"):
        t_global.log.critical("Detected previous timeout for this roadblock")
        do_timeout()
    # check if the roadblock has been initialized yet
    if key_set(t_global.args.roadblock_uuid, mytime):
        # i am creating the roadblock
        t_global.initiator = True
        t_global.log.info("Initiator: True")
        # set bus monitoring options
        t_global.watch_busA = False
        t_global.watch_busB = True
        t_global.mirror_busB = True
        # create busA
        list_append(t_global.args.roadblock_uuid + "__busA", message_to_str(message_build("all", "all", "initialized")))
        # create/subscribe to busB
        t_global.pubsubcon.subscribe(t_global.args.roadblock_uuid + "__busB")
        # publish the cluster timeout to busB
        t_global.log.info("Sending 'timeout-ts' message")
        message_publish(message_build("all", "all", "timeout-ts", cluster_timeout))
        # publish the initiator information to busB
        t_global.log.info("Sending 'initiator-info' message")
        message_publish(message_build("all", "all", "initiator-info"))
        t_global.initiator_type = t_global.args.roadblock_role
        t_global.initiator_id = t_global.my_id
        list_append(t_global.args.roadblock_uuid + "__initialized", int(True))
    else:
        t_global.log.info("Initiator: False")
        # the roadblock already exists, make sure it is initialized
        # completely before proceeding
        t_global.log.info("Waiting for roadblock initialization to complete")
        # wait until the initialized flag has been set for the roadblock
        while not key_check(t_global.args.roadblock_uuid + "__initialized"):
            time.sleep(1)
            t_global.log.info(".")
        t_global.log.info("Roadblock is initialized")
        # subscribe to busB
        t_global.pubsubcon.subscribe(t_global.args.roadblock_uuid + "__busB")
        # message myself on busB, once I receive this message on busA I will know I have processed all outstanding busA message and can move to monitoring busB
        t_global.log.debug("Sending 'switch-buses' message")
        message_publish(message_build_custom(t_global.args.roadblock_role, "switch-buses", t_global.args.roadblock_role, t_global.my_id, "switch-buses"))
    if t_global.args.roadblock_role == "follower":
        # tell the leader that I am online
        t_global.log.info("Sending 'follower-online' message")
        message_publish(message_build("leader", t_global.args.roadblock_leader_id, "follower-online"))
    elif t_global.args.roadblock_role == "leader":
        # tell everyone that the leader is online
        t_global.log.info("Sending 'leader-online' message")
        message_publish(message_build("all", "all", "leader-online"))
    if t_global.initiator:
        # the initiator (first member to get to the roadblock) is
        # responsible for consuming messages from busB and copying
        # them onto busA so that they are preserved for other members
        # to receive once they arrive at the roadblock
        while t_global.mirror_busB:
            msg = t_global.pubsubcon.get_message()
            if not msg:
                time.sleep(0.001)
            else:
                msg_str = msg["data"].decode()
                t_global.log.debug("initiator received msg=[%s] on busB", msg_str)
                msg = message_from_str(msg_str)
                if not message_validate(msg):
                    t_global.log.error("initiator received a message which did not validate! [%s]", msg_str)
                else:
                    # copy the message over to busA
                    t_global.log.debug("initiator mirroring msg=[%s] to busA", msg_str)
                    list_append(t_global.args.roadblock_uuid + "__busA", msg_str)
                    if not message_for_me(msg):
                        t_global.log.debug("initiator received a message which is not for me! [%s]", msg_str)
                    else:
                        t_global.log.debug("initiator received a message for me! [%s]", msg_str)
                        ret_val = message_handle(msg)
                        if ret_val:
                            return ret_val
        if not t_global.mirror_busB:
            t_global.log.debug("initiator stopping busB mirroring to busA")
    else:
        # non-initiators first drain the busA backlog before moving on
        msg_list_index = -1
        while t_global.watch_busA:
            # retrieve unprocessed messages from busA
            msg_list = list_fetch(t_global.args.roadblock_uuid + "__busA", msg_list_index+1)
            # process any retrieved messages
            if len(msg_list):
                for msg_str in msg_list:
                    msg_list_index += 1
                    t_global.log.debug("received msg=[%s] on busA with status_index=[%d]", msg_str, msg_list_index)
                    msg = message_from_str(msg_str)
                    if not message_validate(msg):
                        t_global.log.error("received a message which did not validate! [%s]", msg_str)
                    else:
                        if not message_for_me(msg):
                            t_global.log.debug("received a message which is not for me!")
                        else:
                            t_global.log.debug("received a message which is for me!")
                            ret_val = message_handle(msg)
                            if ret_val:
                                return ret_val
            if t_global.watch_busA:
                time.sleep(1)
    t_global.log.debug("moving to common busB watch loop")
    # live loop: everyone watches busB until told to stop
    while t_global.watch_busB:
        msg = t_global.pubsubcon.get_message()
        if not msg:
            time.sleep(0.001)
        else:
            msg_str = msg["data"].decode()
            t_global.log.debug("received msg=[%s] on busB", msg_str)
            msg = message_from_str(msg_str)
            if not message_validate(msg):
                t_global.log.error("received a message which did not validate! [%s]", msg_str)
            else:
                if not message_for_me(msg):
                    t_global.log.debug("received a message which is not for me!")
                else:
                    t_global.log.debug("received a message for me!")
                    ret_val = message_handle(msg)
                    if ret_val:
                        return ret_val
    t_global.log.info("Cleaning up")
    cleanup()
    t_global.log.info("Exiting")
    if t_global.leader_abort is True or t_global.follower_abort is True:
        t_global.log.info("Roadblock Completed with an Abort")
        return RC_ABORT
    else:
        t_global.log.info("Roadblock Completed Successfully")
        return RC_SUCCESS
# script entry point: create the shared global state, run, and exit
# with main()'s RC_* return code
if __name__ == "__main__":
    t_global = global_vars()
    sys.exit(main())
|
websocket.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: leeyoshinari
import os
import json
import logging
import traceback
from threading import Thread
from shell.models import Servers
from channels.generic.websocket import WebsocketConsumer
from .ssh import SSH
logger = logging.getLogger('django')
class WebSSH(WebsocketConsumer):
    """Channels websocket consumer that bridges a browser terminal to SSH.

    Protocol: the browser first sends a JSON message with type "web" naming a
    host; the host is looked up in the Servers table and an SSH session is
    opened. Subsequent messages carry terminal input (code 0), a close
    request (code 2), or a resize event (code 1).
    """

    def connect(self):
        """Accept the incoming websocket connection."""
        self.accept()
        logger.info('socket connect success! ')

    def sshConnect(self, ssh_args):
        """Open the backing SSH session described by *ssh_args*."""
        auth = ssh_args.get('auth')
        # ssh_key_name = ssh_args.get('ssh_key')
        ssh_key_name = ""
        self.ssh = SSH(websocket=self)
        ssh_connect_dict = {
            'host': ssh_args.get('host'),
            'user': ssh_args.get('user'),
            'password': ssh_args.get('password'),
            'port': int(ssh_args.get('port')),
            'timeout': 30,
            'pty_width': ssh_args.get('width'),
            'pty_height': ssh_args.get('height'),
            'current_time': str(ssh_args.get('time'))
        }
        if auth == 'key':
            # NOTE(review): ssh_key_name is hard-coded to "" above, so this
            # branch would open os.path.join('tmp', '') - key-based auth
            # looks dead/broken; confirm before re-enabling it.
            ssh_key_file = os.path.join('tmp', ssh_key_name)
            with open(ssh_key_file, 'r') as f:
                ssh_key = f.read()
            from six import StringIO
            string_io = StringIO()
            string_io.write(ssh_key)
            string_io.flush()
            string_io.seek(0)
            ssh_connect_dict['ssh_key'] = string_io
            os.remove(ssh_key_file)
        self.ssh.connect(**ssh_connect_dict)

    def disconnect(self, close_code):
        """Tear down the SSH session when the websocket closes."""
        try:
            self.ssh.close()
        except Exception:
            # Narrowed from a bare except. disconnect() can fire before
            # sshConnect() ever ran, in which case self.ssh doesn't exist.
            pass

    def receive(self, text_data=None, bytes_data=None):
        """Dispatch one JSON message from the browser."""
        data = json.loads(text_data)
        if data.get('type') == "web":
            try:
                info = Servers.objects.get(host=data.get('host'))
                ssh_args = {"width": int(data['cols']), "height": int(data['rows']),
                            "auth": "pwd", "host": info.host, "user": info.user,
                            "password": info.pwd, "port": info.port, 'time': info.id}
                self.sshConnect(ssh_args)
                # NOTE(review): this logs ssh_args including the password -
                # consider redacting before it lands in the django log.
                logger.info(f'ssh connect info: {ssh_args}')
            except Exception as err:
                logger.error(err)
                logger.error(traceback.format_exc())
        else:
            logger.debug(f'input linux command is: {data}')
            if data['code'] == 0:  # send data
                Thread(target=self.ssh.django_to_ssh, args=(data['data'],)).start()
            elif data['code'] == 2:  # close session
                self.ssh.close()
            elif data['code'] == 1:  # setting terminal size
                self.ssh.resize_pty(cols=data['cols'], rows=data['rows'])
|
main.py | #!/usr/bin/env python3
"""Implement a remote shell which talks to a MicroPython board.
This program uses the raw-repl feature of the pyboard to send small
programs to the pyboard to carry out the required tasks.
"""
# Take a look at https://repolinux.wordpress.com/2012/10/09/non-blocking-read-from-stdin-in-python/
# to see if we can use those ideas here.
# from __future__ import print_function
# To run rshell from the git repository, cd into the top level rshell directory
# and run:
# python3 -m rshell.main
#
# that sets things up so that the "from rshell.xxx" will import from the git
# tree and not from some installed version.
import sys
try:
from rshell.getch import getch
from rshell.pyboard import Pyboard, PyboardError
from rshell.version import __version__
except ImportError as err:
print('sys.path =', sys.path)
raise err
if sys.platform == 'win32':
# This is a workaround for Windows 10/Python 3.7, that allows the colorized output to
# work. See: https://stackoverflow.com/questions/12492810/python-how-can-i-make-the-ansi-escape-codes-to-work-also-in-windows
import subprocess
subprocess.call('', shell=True)
import argparse
import binascii
import calendar
import cmd
import inspect
import os
import fnmatch
import select
import serial
import shutil
import socket
import tempfile
import time
import threading
import shlex
import itertools
from serial.tools import list_ports
import traceback
# Platform-specific exit hint shown in the shell banner/help.
if sys.platform == 'win32':
    EXIT_STR = 'Use the exit command to exit rshell.'
else:
    EXIT_STR = 'Use Control-D (or the exit command) to exit rshell.'
# I got the following from: http://www.farmckon.net/2009/08/rlcompleter-how-do-i-get-it-to-work/
# Under OSX, if you call input with a prompt which contains ANSI escape
# sequences for colors, and readline is installed, then the escape sequences
# do not get rendered properly as colors.
#
# One solution would be to not use readline, but then you'd lose TAB completion.
# So I opted to print the colored prompt before calling input, which makes
# things work most of the time. If you try to backspace when at the first
# column of the input it wipes out the prompt, but everything returns to normal
# if you hit return.
BROKEN_READLINE = True
FAKE_INPUT_PROMPT = False
import readline
import rlcompleter
if readline.__doc__ and 'libedit' in readline.__doc__:
    # macOS ships libedit masquerading as readline; it needs a different
    # binding syntax to enable TAB completion.
    readline.parse_and_bind ("bind ^I rl_complete")
    BROKEN_READLINE = True
else:
    readline.parse_and_bind("tab: complete")
MONTH = ('', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
# Attributes
# 0 Reset all attributes
# 1 Bright
# 2 Dim
# 4 Underscore
# 5 Blink
# 7 Reverse
# 8 Hidden
LT_BLACK = "\x1b[1;30m"
LT_RED = "\x1b[1;31m"
LT_GREEN = "\x1b[1;32m"
LT_YELLOW = "\x1b[1;33m"
LT_BLUE = "\x1b[1;34m"
LT_MAGENTA = "\x1b[1;35m"
LT_CYAN = "\x1b[1;36m"
LT_WHITE = "\x1b[1;37m"
DK_BLACK = "\x1b[2;30m"
DK_RED = "\x1b[2;31m"
DK_GREEN = "\x1b[2;32m"
DK_YELLOW = "\x1b[2;33m"
DK_BLUE = "\x1b[2;34m"
DK_MAGENTA = "\x1b[2;35m"
DK_CYAN = "\x1b[2;36m"
DK_WHITE = "\x1b[2;37m"
NO_COLOR = "\x1b[0m"
BG_LT_BLACK = "\x1b[1;40m"
BG_LT_RED = "\x1b[1;41m"
BG_LT_GREEN = "\x1b[1;42m"
BG_LT_YELLOW = "\x1b[1;43m"
BG_LT_BLUE = "\x1b[1;44m"
BG_LT_MAGENTA = "\x1b[1;45m"
BG_LT_CYAN = "\x1b[1;46m"
BG_LT_WHITE = "\x1b[1;47m"
BG_DK_BLACK = "\x1b[2;40m"
BG_DK_RED = "\x1b[2;41m"
BG_DK_GREEN = "\x1b[2;42m"
BG_DK_YELLOW = "\x1b[2;43m"
BG_DK_BLUE = "\x1b[2;44m"
BG_DK_MAGENTA = "\x1b[2;45m"
BG_DK_CYAN = "\x1b[2;46m"
BG_DK_WHITE = "\x1b[2;47m"
DIR_COLOR = LT_CYAN
PROMPT_COLOR = LT_GREEN
PY_COLOR = DK_GREEN
END_COLOR = NO_COLOR
# --- Mutable global state shared across the shell ---
cur_dir = ''         # Current working directory used to resolve relative paths
HAS_BUFFER = False   # Board's sys.stdin has .buffer (raw transfers possible)
IS_UPY = False       # Talking to MicroPython (vs CPython)
DEBUG = False
BUFFER_SIZE = 512    # Chunk size for file transfers
QUIET = False
# It turns out that just because pyudev is installed doesn't mean that
# it can actually be used. So we only bother to try if we're running
# under linux.
USE_AUTOCONNECT = sys.platform == 'linux'
SIX_MONTHS = 183 * 24 * 60 * 60  # ls-style cutoff for showing year vs time
QUIT_REPL_CHAR = 'X'
QUIT_REPL_BYTE = bytes((ord(QUIT_REPL_CHAR) - ord('@'),))  # Control-X
# DELIMS is used by readline for determining word boundaries.
DELIMS = ' \t\n>;'
TIME_OFFSET = 0      # Added to board file timestamps in stat() (epoch skew)
DEVS = []            # All connected device objects
DEFAULT_DEV = None   # Device used when a path names no explicit device
DEV_IDX = 1          # Monotonic suffix used to uniquify device names
DEV_LOCK = threading.RLock()  # Guards DEVS / DEFAULT_DEV / DEV_IDX
def add_device(dev):
    """Adds a device to the list of devices we know about.

       Replaces any existing entry with the same short device name,
       uniquifies the board name if it collides, and makes the new device
       the default when no default exists.
    """
    global DEV_IDX, DEFAULT_DEV
    with DEV_LOCK:
        for idx in range(len(DEVS)):
            test_dev = DEVS[idx]
            if test_dev.dev_name_short == dev.dev_name_short:
                # This device is already in our list. Delete the old one
                if test_dev is DEFAULT_DEV:
                    DEFAULT_DEV = None
                del DEVS[idx]
                break
        if find_device_by_name(dev.name):
            # This name is taken - make it unique
            dev.name += '-%d' % DEV_IDX
        # name_path is the '/devname/' prefix used to address this device's
        # filesystem from the shell.
        dev.name_path = '/' + dev.name + '/'
        DEVS.append(dev)
        DEV_IDX += 1
        if DEFAULT_DEV is None:
            DEFAULT_DEV = dev
def find_device_by_name(name):
    """Tries to find a board by board name.

       An empty/None name returns the default device; an unknown name
       returns None.
    """
    if not name:
        return DEFAULT_DEV
    with DEV_LOCK:
        for dev in DEVS:
            if dev.name == name:
                return dev
    return None
def find_serial_device_by_port(port):
    """Tries to find a board by port name; returns None if unknown."""
    with DEV_LOCK:
        for dev in DEVS:
            if dev.is_serial_port(port):
                return dev
    return None
def num_devices():
    """Returns the number of devices currently known to the shell."""
    with DEV_LOCK:
        return len(DEVS)
def is_micropython_usb_device(port):
    """Checks a USB device to see if it looks like a MicroPython device.

       Accepts either a pyudev Device or an entry from
       serial.tools.list_ports.comports().
    """
    if type(port).__name__ == 'Device':
        # Assume it's a pyudev.device.Device
        if ('ID_BUS' not in port or port['ID_BUS'] != 'usb' or
                'SUBSYSTEM' not in port or port['SUBSYSTEM'] != 'tty'):
            return False
        usb_id = 'usb vid:pid={}:{}'.format(port['ID_VENDOR_ID'], port['ID_MODEL_ID'])
    else:
        # Assume it's a port from serial.tools.list_ports.comports()
        usb_id = port[2].lower()
    # MicroPython boards use VID f055 with PID 980x (last digit varies);
    # also accept the Teensy VID:PID.
    return usb_id.startswith(('usb vid:pid=f055:980', 'usb vid:pid=16c0:0483'))
def autoconnect():
    """Sets up a thread to detect when USB devices are plugged and unplugged.
       If the device looks like a MicroPython board, then it will automatically
       connect to it.

       Silently does nothing when not on Linux or when pyudev is unusable.
    """
    if not USE_AUTOCONNECT:
        return
    try:
        import pyudev
    except ImportError:
        return
    context = pyudev.Context()
    monitor = pyudev.Monitor.from_netlink(context)
    # Daemon thread so it never blocks interpreter shutdown.
    connect_thread = threading.Thread(target=autoconnect_thread, args=(monitor,), name='AutoConnect')
    connect_thread.daemon = True
    connect_thread.start()
def autoconnect_thread(monitor):
    """Thread which detects USB Serial devices connecting and disconnecting.

       Blocks on an epoll over the pyudev monitor fd and reacts to
       'add'/'remove' events for tty devices.
    """
    monitor.start()
    monitor.filter_by('tty')
    epoll = select.epoll()
    epoll.register(monitor.fileno(), select.POLLIN)
    while True:
        try:
            events = epoll.poll()
        except InterruptedError:
            # EINTR - just retry the poll.
            continue
        for fileno, _ in events:
            if fileno == monitor.fileno():
                usb_dev = monitor.poll()
                print('autoconnect: {} action: {}'.format(usb_dev.device_node, usb_dev.action))
                dev = find_serial_device_by_port(usb_dev.device_node)
                if usb_dev.action == 'add':
                    # Try connecting a few times. Sometimes the serial port
                    # reports itself as busy, which causes the connection to fail.
                    for i in range(8):
                        if dev:
                            # Previously-known device: reuse its settings.
                            connected = connect_serial(dev.port, dev.baud, dev.wait)
                        elif is_micropython_usb_device(usb_dev):
                            connected = connect_serial(usb_dev.device_node)
                        else:
                            connected = False
                        if connected:
                            break
                        time.sleep(0.25)
                elif usb_dev.action == 'remove':
                    print('')
                    print("USB Serial device '%s' disconnected" % usb_dev.device_node)
                    if dev:
                        dev.close()
                    break
def autoscan():
    """autoscan will check all of the serial ports to see if they have
       a matching VID:PID for a MicroPython board, and connect to each match.
    """
    for port in serial.tools.list_ports.comports():
        if is_micropython_usb_device(port):
            connect_serial(port[0])
def escape(str):
    """Precede all special characters with a backslash.

       (Parameter keeps its historical name `str` even though it shadows the
       builtin, so keyword callers stay working.)
    """
    return ''.join('\\' + char if char in '\\ ' else char for char in str)
def unescape(str):
    """Undoes the effects of the escape() function."""
    chars = []
    pending_backslash = False
    for char in str:
        if not pending_backslash and char == '\\':
            # Swallow the backslash; emit whatever follows it verbatim.
            pending_backslash = True
        else:
            chars.append(char)
            pending_backslash = False
    return ''.join(chars)
def align_cell(fmt, elem, width):
"""Returns an aligned element."""
if fmt == "<":
return elem + ' ' * (width - len(elem))
if fmt == ">":
return ' ' * (width - len(elem)) + elem
return elem
def column_print(fmt, rows, print_func):
    """Prints a formatted list, adjusting the width so everything fits.
       fmt contains a single character for each column. < indicates that the
       column should be left justified, > indicates that the column should
       be right justified. The last column may be a space which implies left
       justification and no padding. A row that is a plain string is printed
       as a separator line made of that character.
    """
    num_cols = len(fmt)
    # Widest cell per column; separator rows contribute nothing.
    widths = [max(0 if isinstance(row, str) else len(row[col]) for row in rows)
              for col in range(num_cols)]
    for row in rows:
        if isinstance(row, str):
            cells = [row * widths[col] for col in range(num_cols)]
        else:
            cells = [align_cell(fmt[col], row[col], widths[col])
                     for col in range(num_cols)]
        print_func(' '.join(cells))
def find_macthing_files(match):
    """Finds all of the files which match (used for completion).

       Returns the matches with the same directory prefix the caller typed.
       (The misspelled name is preserved for compatibility with callers.)
    """
    last_slash = match.rfind('/')
    if last_slash == -1:
        dirname = '.'
        match_prefix = match
        result_prefix = ''
    elif last_slash == 0:
        # BUGFIX: for a root-level match like '/fo' the old code computed
        # dirname == '' and os.listdir('') raised. Mirror listdir_matches.
        dirname = '/'
        match_prefix = match[1:]
        result_prefix = '/'
    else:
        dirname = match[0:last_slash]
        match_prefix = match[last_slash + 1:]
        result_prefix = dirname + '/'
    return [result_prefix + filename for filename in os.listdir(dirname)
            if filename.startswith(match_prefix)]
def print_err(*args, end='\n'):
    """Similar to print, but prints to stderr and flushes immediately so
       error text is never held back by buffering.
    """
    sys.stderr.write(' '.join(str(arg) for arg in args) + end)
    sys.stderr.flush()
def is_pattern(s):
    """Return True if a string contains Unix wildcard pattern characters."""
    return any(char in '*?[{' for char in s)
# Disallow patterns like path/t*/bar* because handling them on remote
# system is difficult without the glob library.
def parse_pattern(s):
    """Parse a string such as 'foo/bar/*.py'
       Assumes is_pattern(s) has been called and returned True
       Returns a (directory, pattern) pair, or (None, None) when the
       pattern is unsupported (brace sets, wildcards in the directory part).
    """
    if '{' in s:
        return None, None  # Unsupported by fnmatch
    if s and s[0] == '~':
        s = os.path.expanduser(s)
    parts = s.split('/')
    absolute = len(parts) > 1 and not parts[0]
    if parts[-1] == '':
        # Outcome of a trailing '/': discard the empty component.
        parts = parts[:-1]
    if not parts:
        directory = ''
        pattern = ''
    else:
        directory = '/'.join(parts[:-1])
        pattern = parts[-1]
    # Only the final component may contain wildcards (e.g. /abc/*/def is out).
    if not is_pattern(directory) and is_pattern(pattern):
        if not directory:
            directory = '/' if absolute else '.'
        return directory, pattern
    return None, None  # Invalid or nonexistent pattern
def validate_pattern(fn):
    """On success return an absolute path and a pattern.
       Otherwise print a message and return None, None
    """
    directory, pattern = parse_pattern(fn)
    if directory is None:
        print_err("Invalid pattern {}.".format(fn))
        return None, None
    # The directory part must exist and actually be a directory (checked
    # locally or on the board, whichever the path addresses).
    target = resolve_path(directory)
    mode = auto(get_mode, target)
    if not mode_exists(mode):
        print_err("cannot access '{}': No such file or directory".format(fn))
        return None, None
    if not mode_isdir(mode):
        print_err("cannot access '{}': Not a directory".format(fn))
        return None, None
    return directory, pattern
def process_pattern(fn):
    """Return a list of paths matching a pattern (or None on error).

       Errors are reported to stderr; an empty match also reports and
       implicitly returns None.
    """
    directory, pattern = validate_pattern(fn)
    if directory is not None:
        filenames = fnmatch.filter(auto(listdir, directory), pattern)
        if filenames:
            return [directory + '/' + sfn for sfn in filenames]
        else:
            print_err("cannot access '{}': No such file or directory".format(fn))
def resolve_path(path):
    """Resolves path and converts it into an absolute path.

       Relative paths are resolved against the global cur_dir; '.', '..'
       and redundant slashes are collapsed.
    """
    if path[0] == '~':
        # ~ or ~user
        path = os.path.expanduser(path)
    if path[0] != '/':
        # Relative path - anchor it at the shell's current directory.
        separator = '' if cur_dir[-1] == '/' else '/'
        path = cur_dir + separator + path
    resolved = []
    for component in path.split('/'):
        # Drop '.' components and empty components (from '//' or a trailing
        # '/'), but keep the single leading '' that marks an absolute path.
        if component == '.' or (component == '' and resolved):
            continue
        if component == '..':
            # Never pop the leading root marker.
            if len(resolved) > 1:
                resolved.pop()
            continue
        resolved.append(component)
    if resolved == ['']:
        return '/'
    return '/'.join(resolved)
def get_dev_and_path(filename):
    """Determines if a given file is located locally or remotely. We assume
       that any directories from the pyboard take precedence over local
       directories of the same name. /flash and /sdcard are associated with
       the default device. /dev_name/path where dev_name is the name of a
       given device is also considered to be associated with the named device.
       If the file is associated with a remote device, then this function
       returns a tuple (dev, dev_filename) where dev is the device and
       dev_filename is the portion of the filename relative to the device.
       If the file is not associated with the remote device, then the dev
       portion of the returned tuple will be None.
    """
    if DEFAULT_DEV:
        if DEFAULT_DEV.is_root_path(filename):
            return (DEFAULT_DEV, filename)
    # Append '/' so '/devname' itself matches the '/devname/' prefix.
    test_filename = filename + '/'
    with DEV_LOCK:
        for dev in DEVS:
            if test_filename.startswith(dev.name_path):
                # Strip the '/devname' prefix, keeping the leading slash.
                dev_filename = filename[len(dev.name_path)-1:]
                if dev_filename == '':
                    dev_filename = '/'
                return (dev, dev_filename)
    return (None, filename)
def remote_repr(i):
    """Helper function to deal with types which we can't send to the pyboard.

       Objects whose repr starts with '<' (default object reprs) are not
       valid Python source, so they are replaced with 'None'.
    """
    repr_str = repr(i)
    return 'None' if repr_str.startswith('<') else repr_str
def print_bytes(byte_str):
    """Prints a string or converts bytes to a string and then prints."""
    if isinstance(byte_str, str):
        print(byte_str)
    else:
        print(byte_str.decode('utf8'))
def extra_funcs(*funcs):
    """Decorator which adds extra functions to be downloaded to the pyboard."""
    def decorator(func):
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        # Stash the metadata the remote-execution machinery needs in order to
        # ship this function (plus its helper functions) to the board.
        wrapper.extra_funcs = list(funcs)
        wrapper.source = inspect.getsource(func)
        wrapper.name = func.__name__
        return wrapper
    return decorator
def auto(func, filename, *args, **kwargs):
    """If `filename` is a remote file, then this function calls func on the
       micropython board, otherwise it calls it locally.
    """
    dev, dev_filename = get_dev_and_path(filename)
    if dev is None:
        # Local call: expand ~ here, since only the host knows $HOME.
        if dev_filename[0] == '~':
            dev_filename = os.path.expanduser(dev_filename)
        return func(dev_filename, *args, **kwargs)
    return dev.remote_eval(func, dev_filename, *args, **kwargs)
def board_name(default):
    """Returns the boards name (if available).

       Runs on the board: a board.py module providing `name` overrides the
       supplied default. The repr makes the result safe to eval on the host.
    """
    try:
        import board
    except ImportError:
        return repr(default)
    return repr(board.name)
def cat(src_filename, dst_file):
    """Copies the contents of the indicated file to an already opened file."""
    (dev, dev_filename) = get_dev_and_path(src_filename)
    if dev is None:
        # Local file: stream it out line by line.
        with open(dev_filename, 'rb') as txtfile:
            for line in txtfile:
                dst_file.write(line)
    else:
        # Remote file: pull it over the serial link.
        filesize = dev.remote_eval(get_filesize, dev_filename)
        return dev.remote(send_file_to_host, dev_filename, dst_file, filesize,
                          xfer_func=recv_file_from_remote)
def chdir(dirname):
    """Changes the current working directory."""
    # The local import keeps this function self-contained so it can be
    # shipped to and executed on the MicroPython board.
    import os
    os.chdir(dirname)
def copy_file(src_filename, dst_filename):
    """Copies a file from one place to another. Both the source and destination
       files must exist on the same machine.

       Returns True on success, False on any I/O failure. May also be shipped
       to the board, so it sticks to plain open/read/write.
    """
    try:
        with open(src_filename, 'rb') as src_file:
            with open(dst_filename, 'wb') as dst_file:
                while True:
                    buf = src_file.read(BUFFER_SIZE)
                    if len(buf) > 0:
                        dst_file.write(buf)
                    if len(buf) < BUFFER_SIZE:
                        # Short read means end-of-file.
                        break
                return True
    except OSError:
        # Narrowed from a bare `except:` so programming errors are no longer
        # silently reported as a failed copy; I/O errors still return False.
        return False
def cp(src_filename, dst_filename):
    """Copies one file to another. The source file may be local or remote and
       the destination file may be local or remote.

       Returns the truthiness of the underlying transfer helper.
    """
    src_dev, src_dev_filename = get_dev_and_path(src_filename)
    dst_dev, dst_dev_filename = get_dev_and_path(dst_filename)
    if src_dev is dst_dev:
        # src and dst are either on the same remote, or both are on the host
        return auto(copy_file, src_filename, dst_dev_filename)
    filesize = auto(get_filesize, src_filename)
    if dst_dev is None:
        # Copying from remote to host
        with open(dst_dev_filename, 'wb') as dst_file:
            return src_dev.remote(send_file_to_host, src_dev_filename, dst_file,
                                  filesize, xfer_func=recv_file_from_remote)
    if src_dev is None:
        # Copying from host to remote
        with open(src_dev_filename, 'rb') as src_file:
            return dst_dev.remote(recv_file_from_host, src_file, dst_dev_filename,
                                  filesize, xfer_func=send_file_to_remote)
    # Copying from remote A to remote B. We first copy the file
    # from remote A to the host and then from the host to remote B
    host_temp_file = tempfile.TemporaryFile()
    if src_dev.remote(send_file_to_host, src_dev_filename, host_temp_file,
                      filesize, xfer_func=recv_file_from_remote):
        host_temp_file.seek(0)
        return dst_dev.remote(recv_file_from_host, host_temp_file, dst_dev_filename,
                              filesize, xfer_func=send_file_to_remote)
    return False
def eval_str(string):
    """Executes a string containing python code.

       SECURITY: this evaluates arbitrary input with eval(). It exists to
       power the shell's explicit evaluate feature and must never be fed
       untrusted data.
    """
    output = eval(string)
    return output
def get_filesize(filename):
    """Returns the size of a file, in bytes, or -1 if it doesn't exist."""
    import os
    try:
        # Since this function runs remotely, it can't depend on other functions,
        # so we can't call stat_mode. Index 6 is st_size.
        return os.stat(filename)[6]
    except OSError:
        return -1
def get_mode(filename):
    """Returns the mode of a file, which can be used to determine if a file
       exists, if a file is a file or a directory. Returns 0 for a
       nonexistent file.
    """
    import os
    try:
        # Since this function runs remotely, it can't depend on other functions,
        # so we can't call stat_mode. Index 0 is st_mode.
        return os.stat(filename)[0]
    except OSError:
        return 0
def stat(filename):
    """Returns os.stat for a given file, adjusting the timestamps as appropriate."""
    import os
    rstat = os.stat(filename)
    # Fields 7+ are the time fields; TIME_OFFSET compensates for the board
    # using a different epoch than the host (see get_time_epoch).
    return rstat[:7] + tuple(tim + TIME_OFFSET for tim in rstat[7:])
def is_visible(filename):
    """Determines if the file should be considered to be a non-hidden file:
       not a dotfile and not an editor backup ending in '~'.
    """
    return filename[0] != '.' and not filename.endswith('~')
@extra_funcs(stat)
def get_stat(filename):
    """Returns the stat array for a given file. Returns all 0's if the file
       doesn't exist.
    """
    # The decorator ships `stat` along so the timestamp adjustment also
    # happens when this runs on the board.
    try:
        return stat(filename)
    except OSError:
        return (0,) * 10
def listdir(dirname):
    """Returns a list of filenames contained in the named directory."""
    # Local import keeps the function self-contained for remote execution.
    import os
    return os.listdir(dirname)
def listdir_matches(match):
    """Returns a list of filenames contained in the named directory.
       Only filenames which start with `match` will be returned.
       Directories will have a trailing slash.
    """
    import os
    last_slash = match.rfind('/')
    if last_slash == -1:
        # Bare prefix: complete against the current directory.
        dirname = '.'
        match_prefix = match
        result_prefix = ''
    else:
        match_prefix = match[last_slash + 1:]
        if last_slash == 0:
            # Root-level match like '/fl'.
            dirname = '/'
            result_prefix = '/'
        else:
            dirname = match[0:last_slash]
            result_prefix = dirname + '/'
    def add_suffix_if_dir(filename):
        # 0x4000 is the S_IFDIR bit of st_mode.
        if (os.stat(filename)[0] & 0x4000) != 0:
            return filename + '/'
        return filename
    matches = [add_suffix_if_dir(result_prefix + filename)
               for filename in os.listdir(dirname) if filename.startswith(match_prefix)]
    return matches
@extra_funcs(is_visible, stat)
def listdir_stat(dirname, show_hidden=True):
    """Returns a list of tuples for each file contained in the named
       directory, or None if the directory does not exist. Each tuple
       contains the filename, followed by the tuple returned by
       calling os.stat on the filename. Hidden files are skipped unless
       show_hidden is True.
    """
    import os
    try:
        files = os.listdir(dirname)
    except OSError:
        return None
    if dirname == '/':
        # Special-cased to avoid producing '//name' paths at the root.
        return list((file, stat('/' + file)) for file in files if is_visible(file) or show_hidden)
    return list((file, stat(dirname + '/' + file)) for file in files if is_visible(file) or show_hidden)
def make_directory(dirname):
    """Creates a single directory.

       Returns True on success, False if it could not be created (already
       exists, missing parent, permissions). Runs remotely on the board,
       hence the local `os` import.
    """
    import os
    try:
        os.mkdir(dirname)
    except OSError:
        # Narrowed from a bare `except:`: only filesystem errors count as
        # "could not create".
        return False
    return True
def mkdir(filename):
    """Creates a directory (locally or on the board, based on the path)."""
    return auto(make_directory, filename)
def remove_file(filename, recursive=False, force=False):
    """Removes a file or directory.

       recursive: required to delete a directory (Unix `rm -r` semantics).
       force:     ignore errors and missing files (Unix `rm -f`).
       Returns True on success, False otherwise. Runs remotely on the
       board, hence the local `os` import and indexed st_mode access.
    """
    import os
    try:
        mode = os.stat(filename)[0]
        if mode & 0x4000 != 0:
            # It's a directory.
            if recursive:
                for file in os.listdir(filename):
                    success = remove_file(filename + '/' + file, recursive, force)
                    if not success and not force:
                        return False
                os.rmdir(filename)  # PGH Work like Unix: require recursive
            else:
                if not force:
                    return False
        else:
            os.remove(filename)
    except OSError:
        # Narrowed from a bare `except:` so only filesystem errors are
        # treated as "could not remove".
        if not force:
            return False
    return True
def rm(filename, recursive=False, force=False):
    """Removes a file or directory tree (locally or on the board)."""
    return auto(remove_file, filename, recursive, force)
def make_dir(dst_dir, dry_run, print_func, recursed):
    """Creates a directory. Produces information in case of dry run.
       Issues error where necessary. Returns True on (assumed) success.
    """
    parent = os.path.split(dst_dir.rstrip('/'))[0]  # Check for nonexistent parent
    parent_files = auto(listdir_stat, parent) if parent else True  # Relative dir
    if dry_run:
        if recursed:  # Assume success: parent not actually created yet
            print_func("Creating directory {}".format(dst_dir))
        elif parent_files is None:
            print_func("Unable to create {}".format(dst_dir))
        return True
    if not mkdir(dst_dir):
        print_err("Unable to create {}".format(dst_dir))
        return False
    return True
def rsync(src_dir, dst_dir, mirror, dry_run, print_func, recursed, sync_hidden):
    """Synchronizes 2 directory trees.

       mirror:   also delete destination entries absent from the source.
       dry_run:  report what would be done without doing it.
       recursed: True on recursive self-calls (affects error reporting).
       Copies a file only when the source mtime is newer than the dest.
    """
    # This test is a hack to avoid errors when accessing /flash. When the
    # cache synchronisation issue is solved it should be removed
    if not isinstance(src_dir, str) or not len(src_dir):
        return
    sstat = auto(get_stat, src_dir)
    smode = stat_mode(sstat)
    if mode_isfile(smode):
        print_err('Source is a file not a directory.')
        return
    d_src = {}  # Look up stat tuple from name in current directory
    src_files = auto(listdir_stat, src_dir, show_hidden=sync_hidden)
    if src_files is None:
        print_err('Source directory {} does not exist.'.format(src_dir))
        return
    # NOTE(review): the loop variable `stat` shadows the module-level stat()
    # helper within this function body.
    for name, stat in src_files:
        d_src[name] = stat
    d_dst = {}
    dst_files = auto(listdir_stat, dst_dir, show_hidden=sync_hidden)
    if dst_files is None:  # Directory does not exist
        if not recursed:
            print_err('Destination directory {} does not exist.'.format(dst_dir))
            return
        if not make_dir(dst_dir, dry_run, print_func, recursed):
            return
    else:  # dest exists
        for name, stat in dst_files:
            d_dst[name] = stat
    set_dst = set(d_dst.keys())
    set_src = set(d_src.keys())
    to_add = set_src - set_dst  # Files to copy to dest
    to_del = set_dst - set_src  # To delete from dest
    to_upd = set_dst.intersection(set_src)  # In both: may need updating
    for src_basename in to_add:  # Name in source but absent from destination
        src_filename = src_dir + '/' + src_basename
        dst_filename = dst_dir + '/' + src_basename
        print_func("Adding %s" % dst_filename)
        src_stat = d_src[src_basename]
        src_mode = stat_mode(src_stat)
        if not dry_run:
            if not mode_isdir(src_mode):
                cp(src_filename, dst_filename)
        if mode_isdir(src_mode):
            # New directory: recurse (even in dry_run, to report contents).
            rsync(src_filename, dst_filename, mirror=mirror, dry_run=dry_run,
                  print_func=print_func, recursed=True, sync_hidden=sync_hidden)
    if mirror:  # May delete
        for dst_basename in to_del:  # In dest but not in source
            dst_filename = dst_dir + '/' + dst_basename
            print_func("Removing %s" % dst_filename)
            if not dry_run:
                rm(dst_filename, recursive=True, force=True)
    for src_basename in to_upd:  # Names are identical
        src_stat = d_src[src_basename]
        dst_stat = d_dst[src_basename]
        src_filename = src_dir + '/' + src_basename
        dst_filename = dst_dir + '/' + src_basename
        src_mode = stat_mode(src_stat)
        dst_mode = stat_mode(dst_stat)
        if mode_isdir(src_mode):
            if mode_isdir(dst_mode):
                # src and dst are both directories - recurse
                rsync(src_filename, dst_filename, mirror=mirror, dry_run=dry_run,
                      print_func=print_func, recursed=True, sync_hidden=sync_hidden)
            else:
                msg = "Source '{}' is a directory and destination " \
                      "'{}' is a file. Ignoring"
                print_err(msg.format(src_filename, dst_filename))
        else:
            if mode_isdir(dst_mode):
                msg = "Source '{}' is a file and destination " \
                      "'{}' is a directory. Ignoring"
                print_err(msg.format(src_filename, dst_filename))
            else:
                if stat_mtime(src_stat) > stat_mtime(dst_stat):
                    msg = "{} is newer than {} - copying"
                    print_func(msg.format(src_filename, dst_filename))
                    if not dry_run:
                        cp(src_filename, dst_filename)
def set_time(rtc_time):
    """Sets the board's RTC to rtc_time (runs remotely on the board).

       Tries the pyb RTC API first, then machine; silently does nothing
       when neither is available.
    """
    rtc = None
    try:
        import pyb
        rtc = pyb.RTC()
        rtc.datetime(rtc_time)
    except:
        try:
            import machine
            rtc = machine.RTC()
            rtc.datetime(rtc_time)
        except:
            pass
# 0x0D's sent from the host get transformed into 0x0A's, and 0x0A sent to the
# host get converted into 0x0D0A when using sys.stdin. sys.tsin.buffer does
# no transformations, so if that's available, we use it, otherwise we need
# to use hexlify in order to get unaltered data.
def recv_file_from_host(src_file, dst_filename, filesize, dst_mode='wb'):
    """Function which runs on the pyboard. Matches up with send_file_to_remote.

       Receives `filesize` bytes over stdin (raw when the firmware supports
       sys.stdin.buffer, hex-encoded otherwise) and writes them to
       dst_filename. Returns True on success, False on any error.
    """
    import sys
    import ubinascii
    if HAS_BUFFER:
        try:
            import pyb
            usb = pyb.USB_VCP()
        except:
            try:
                import machine
                usb = machine.USB_VCP()
            except:
                usb = None
        if usb and usb.isconnected():
            # We don't want 0x03 bytes in the data to be interpreted as a Control-C
            # This gets reset each time the REPL runs a line, so we don't need to
            # worry about resetting it ourselves
            usb.setinterrupt(-1)
    try:
        with open(dst_filename, dst_mode) as dst_file:
            bytes_remaining = filesize
            if not HAS_BUFFER:
                bytes_remaining *= 2  # hexlify makes each byte into 2
            buf_size = BUFFER_SIZE
            write_buf = bytearray(buf_size)
            read_buf = bytearray(buf_size)
            while bytes_remaining > 0:
                # Send back an ack as a form of flow control
                sys.stdout.write('\x06')
                read_size = min(bytes_remaining, buf_size)
                buf_remaining = read_size
                buf_index = 0
                while buf_remaining > 0:
                    if HAS_BUFFER:
                        bytes_read = sys.stdin.buffer.readinto(read_buf, read_size)
                    else:
                        bytes_read = sys.stdin.readinto(read_buf, read_size)
                    if bytes_read > 0:
                        # BUGFIX: the destination slice end must be relative to
                        # buf_index. The old `write_buf[buf_index:bytes_read]`
                        # produced a length-mismatched slice assignment on any
                        # partial read with buf_index > 0, resizing and
                        # corrupting the buffer.
                        write_buf[buf_index:buf_index + bytes_read] = read_buf[0:bytes_read]
                        buf_index += bytes_read
                        buf_remaining -= bytes_read
                if HAS_BUFFER:
                    dst_file.write(write_buf[0:read_size])
                else:
                    dst_file.write(ubinascii.unhexlify(write_buf[0:read_size]))
                bytes_remaining -= read_size
        return True
    except:
        return False
def send_file_to_remote(dev, src_file, dst_filename, filesize, dst_mode='wb'):
    """Intended to be passed to the `remote` function as the xfer_func argument.
       Matches up with recv_file_from_host. Runs on the host side.
    """
    bytes_remaining = filesize
    save_timeout = dev.timeout
    dev.timeout = 1
    while bytes_remaining > 0:
        # Wait for ack so we don't get too far ahead of the remote
        ack = dev.read(1)
        if ack is None or ack != b'\x06':
            # NOTE(review): this exits the entire shell process on a
            # transfer error rather than raising.
            sys.stderr.write("timed out or error in transfer to remote\n")
            sys.exit(2)
        if HAS_BUFFER:
            buf_size = BUFFER_SIZE
        else:
            # Hex encoding doubles the byte count, so halve the chunk size.
            buf_size = BUFFER_SIZE // 2
        read_size = min(bytes_remaining, buf_size)
        buf = src_file.read(read_size)
        #sys.stdout.write('\r%d/%d' % (filesize - bytes_remaining, filesize))
        #sys.stdout.flush()
        if HAS_BUFFER:
            dev.write(buf)
        else:
            dev.write(binascii.hexlify(buf))
        bytes_remaining -= read_size
    #sys.stdout.write('\r')
    dev.timeout = save_timeout
def recv_file_from_remote(dev, src_filename, dst_file, filesize):
    """Intended to be passed to the `remote` function as the xfer_func argument.
       Matches up with send_file_to_host. Runs on the host side, reading the
       file contents from the device's serial stream into dst_file.
    """
    bytes_remaining = filesize
    if not HAS_BUFFER:
        bytes_remaining *= 2  # hexlify makes each byte into 2
    buf_size = BUFFER_SIZE
    write_buf = bytearray(buf_size)
    while bytes_remaining > 0:
        read_size = min(bytes_remaining, buf_size)
        buf_remaining = read_size
        buf_index = 0
        while buf_remaining > 0:
            read_buf = dev.read(buf_remaining)
            bytes_read = len(read_buf)
            if bytes_read:
                # BUGFIX: the slice end must be buf_index + bytes_read. The
                # old `write_buf[buf_index:bytes_read]` was length-mismatched
                # whenever a serial read returned a partial chunk, which
                # resized the bytearray and corrupted the data.
                write_buf[buf_index:buf_index + bytes_read] = read_buf[0:bytes_read]
                buf_index += bytes_read
                buf_remaining -= bytes_read
        if HAS_BUFFER:
            dst_file.write(write_buf[0:read_size])
        else:
            dst_file.write(binascii.unhexlify(write_buf[0:read_size]))
        # Send an ack to the remote as a form of flow control
        dev.write(b'\x06')  # ASCII ACK is 0x06
        bytes_remaining -= read_size
def send_file_to_host(src_filename, dst_file, filesize):
    """Function which runs on the pyboard. Matches up with recv_file_from_remote.

       Streams src_filename to stdout (raw or hex-encoded depending on
       HAS_BUFFER), pausing for an ACK after each chunk. Returns True on
       success, False on any error.
    """
    import sys
    import ubinascii
    try:
        with open(src_filename, 'rb') as src_file:
            bytes_remaining = filesize
            if HAS_BUFFER:
                buf_size = BUFFER_SIZE
            else:
                # Hex encoding doubles the output, so halve the chunk size.
                buf_size = BUFFER_SIZE // 2
            while bytes_remaining > 0:
                read_size = min(bytes_remaining, buf_size)
                buf = src_file.read(read_size)
                if HAS_BUFFER:
                    sys.stdout.buffer.write(buf)
                else:
                    sys.stdout.write(ubinascii.hexlify(buf))
                bytes_remaining -= read_size
                # Wait for an ack so we don't get ahead of the remote
                while True:
                    char = sys.stdin.read(1)
                    if char:
                        if char == '\x06':
                            break
                        # This should only happen if an error occurs
                        sys.stdout.write(char)
        return True
    except:
        return False
def test_buffer():
    """Checks the micropython firmware to see if sys.stdin.buffer exists."""
    import sys
    return hasattr(sys.stdin, 'buffer')
def test_readinto():
    """Checks the micropython firmware to see if sys.stdin.readinto exists."""
    import sys
    return hasattr(sys.stdin, 'readinto')
def test_unhexlify():
    """Checks the micropython firmware to see if ubinascii.unhexlify exists."""
    # NOTE(review): the import sits outside the try, so a port with no
    # ubinascii module at all would raise instead of returning False.
    import ubinascii
    try:
        _ = ubinascii.unhexlify
        return True
    except:
        return False
def get_time_epoch():
    """Determines the epoch used by the MicroPython board.

       Returns time.gmtime(0) where available; older pyboard firmware has
       no usable gmtime, in which case the pyboard epoch of 2000-01-01 is
       returned as a plain tuple.
    """
    import time
    try:
        return time.gmtime(0)
    except Exception:
        # Narrowed from a bare `except:`; any failure still means "assume
        # it's a pyboard, with an epoch of 2000".
        return (2000, 1, 1, 0, 0, 0, 0, 0)
def mode_exists(mode):
    """True when the mode has the directory or regular-file bit set."""
    return bool(mode & 0xc000)
def mode_isdir(mode):
    """True when the mode's S_IFDIR bit (0x4000) is set."""
    return bool(mode & 0x4000)
def mode_isfile(mode):
    """True when the mode's S_IFREG bit (0x8000) is set."""
    return bool(mode & 0x8000)
def stat_mode(stat):
    """Returns the mode field from the results returned by os.stat()."""
    # Indexed access keeps this usable with the bare tuples the board returns.
    return stat[0]
def stat_size(stat):
    """Returns the filesize field from the results returned by os.stat()."""
    # Index 6 is st_size.
    return stat[6]
def stat_mtime(stat):
    """Returns the mtime field from the results returned by os.stat()."""
    # Index 8 is st_mtime.
    return stat[8]
def word_len(word):
    """Returns the word length, minus any color codes.

       Colorized words are wrapped in a 7-char color prefix and a 4-char
       reset suffix, neither of which occupies a screen column.
    """
    return len(word) - 11 if word[0] == '\x1b' else len(word)
def print_cols(words, print_func, termwidth=79):
    """Takes a single column of words, and prints it as multiple columns that
       will fit in termwidth columns. Output runs down each column, then
       across (like `ls`).
    """
    width = max([word_len(word) for word in words])
    nwords = len(words)
    ncols = max(1, (termwidth + 1) // (width + 1))
    nrows = (nwords + ncols - 1) // ncols
    for row in range(nrows):
        for i in range(row, nwords, nrows):
            word = words[i]
            if word[0] == '\x1b':
                # Colorized word: pad 11 extra chars for the invisible codes.
                print_func('%-*s' % (width + 11, words[i]),
                           end='\n' if i + nrows >= nwords else ' ')
            else:
                print_func('%-*s' % (width, words[i]),
                           end='\n' if i + nrows >= nwords else ' ')
def decorated_filename(filename, stat):
    """Return *filename* decorated for display.

    Directories are colorized and get a trailing '/'; python sources get
    their own color; everything else is returned unchanged.
    """
    mode = stat[0]
    if mode_isdir(mode):
        return ''.join((DIR_COLOR, filename, END_COLOR, '/'))
    if filename.endswith('.py'):
        return ''.join((PY_COLOR, filename, END_COLOR))
    return filename
def print_long(filename, stat, print_func):
    """Prints detailed information about the file passed in."""
    size = stat_size(stat)
    mtime = stat_mtime(stat)
    file_mtime = time.localtime(mtime)
    curr_time = time.time()
    # ls-style date column: show the year for timestamps more than six
    # months away from now, otherwise show hour:minute.
    # MONTH is presumably a 1-indexed month-name table defined elsewhere
    # in the file (indexed by tm_mon) — confirm against its definition.
    if mtime > (curr_time + SIX_MONTHS) or mtime < (curr_time - SIX_MONTHS):
        print_func('%6d %s %2d %04d %s' % (size, MONTH[file_mtime[1]],
                                           file_mtime[2], file_mtime[0],
                                           decorated_filename(filename, stat)))
    else:
        print_func('%6d %s %2d %02d:%02d %s' % (size, MONTH[file_mtime[1]],
                                                file_mtime[2], file_mtime[3], file_mtime[4],
                                                decorated_filename(filename, stat)))
def trim(docstring):
    """Strip the common leading indentation from a docstring.

    Algorithm from http://www.python.org/dev/peps/pep-0257/
    """
    if not docstring:
        return ''
    # Expand tabs the way Python does, then work line by line.
    lines = docstring.expandtabs().splitlines()
    # The first line contributes no indentation; find the minimum indent
    # among the remaining non-blank lines.
    indent = sys.maxsize
    for line in lines[1:]:
        stripped = line.lstrip()
        if stripped:
            indent = min(indent, len(line) - len(stripped))
    # The first line is special: strip it outright.
    trimmed = [lines[0].strip()]
    if indent < sys.maxsize:
        trimmed.extend(line[indent:].rstrip() for line in lines[1:])
    # Drop blank lines at both ends.
    while trimmed and not trimmed[-1]:
        trimmed.pop()
    while trimmed and not trimmed[0]:
        trimmed.pop(0)
    return '\n'.join(trimmed)
def add_arg(*args, **kwargs):
    """Return an (args, kwargs) tuple describing one argparse argument.

    The tuple is later unpacked into parser.add_argument(*args, **kwargs)
    by create_argparser. (The original docstring claimed a list was
    returned; the function has always returned a tuple.)
    """
    return (args, kwargs)
def connect(port, baud=115200, user='micro', password='python', wait=0):
    """Tries to connect automagically via network or serial."""
    try:
        # If the port resolves as a hostname/IP, treat it as a telnet target.
        ip_address = socket.gethostbyname(port)
        #print('Connecting to ip', ip_address)
        connect_telnet(port, ip_address, user=user, password=password)
    except socket.gaierror:
        # Doesn't look like a hostname or IP-address, assume its a serial port
        #print('connecting to serial', port)
        connect_serial(port, baud=baud, wait=wait)
def connect_telnet(name, ip_address=None, user='micro', password='python'):
    """Connect to a MicroPython board via telnet."""
    if ip_address is None:
        try:
            ip_address = socket.gethostbyname(name)
        except socket.gaierror:
            # Not resolvable; fall back to using the name as the address.
            ip_address = name
    if not QUIET:
        if name == ip_address:
            print('Connecting to (%s) ...' % ip_address)
        else:
            print('Connecting to %s (%s) ...' % (name, ip_address))
    dev = DeviceNet(name, ip_address, user, password)
    add_device(dev)
def connect_serial(port, baud=115200, wait=0):
    """Connect to a MicroPython board via a serial port.

    Returns True on success, False when the device could not be opened.
    """
    if not QUIET:
        print('Connecting to %s ...' % port)
    try:
        dev = DeviceSerial(port, baud, wait)
    except DeviceError as err:
        sys.stderr.write(str(err))
        sys.stderr.write('\n')
        return False
    add_device(dev)
    return True
class SmartFile(object):
    """Wrap a file object so that write() accepts either bytes or str.

    Text is written through the file's normal text layer; binary data is
    routed through the underlying ``buffer``, and reads always come from
    the buffer.
    """
    def __init__(self, file):
        self.file = file

    def close(self):
        self.file.close()

    def flush(self):
        self.file.flush()

    def read(self, num_bytes):
        return self.file.buffer.read(num_bytes)

    def seek(self, pos):
        self.file.seek(pos)

    def tell(self):
        return self.file.tell()

    def write(self, data):
        # str goes through the text layer; bytes bypass it via .buffer.
        target = self.file if isinstance(data, str) else self.file.buffer
        return target.write(data)
class DeviceError(Exception):
    """Errors that we want to report to the user and keep running."""
    # Raised for recoverable device problems (e.g. a serial port closing);
    # the shell catches this and keeps running instead of exiting.
    pass
class Device(object):
    # Base class for a connected MicroPython board. Concrete subclasses
    # (DeviceSerial, DeviceNet) supply the Pyboard transport; this class
    # implements remote code execution on the board via the raw REPL.
    def __init__(self, pyb):
        """Probes the board's capabilities and synchronizes its clock.

        pyb is an already-connected Pyboard instance.
        """
        self.pyb = pyb
        self.has_buffer = False  # needs to be set for remote_eval to work
        self.time_offset = 0
        self.adjust_for_timezone = False
        if not BINARY_XFER:
            # Probe for sys.stdin.buffer on the board (binary transfers).
            self.has_buffer = self.remote_eval(test_buffer)
        if self.has_buffer:
            if DEBUG:
                print("Setting has_buffer to True")
        elif not self.remote_eval(test_unhexlify):
            # Without a binary channel we need hex transfers, which require
            # ubinascii.unhexlify on the board.
            raise ShellError('rshell needs MicroPython firmware with ubinascii.unhexlify')
        else:
            if DEBUG:
                print("MicroPython has unhexlify")
        self.root_dirs = ['/{}/'.format(dir) for dir in self.remote_eval(listdir, '/')]
        self.sync_time()
        self.name = self.remote_eval(board_name, self.default_board_name())
        self.dev_name_short = self.name
        # time_offset converts between the board's epoch and the host's
        # (unix) epoch; e.g. a year-2000 epoch yields a non-zero offset.
        epoch_tuple = self.remote_eval(get_time_epoch)
        self.time_offset = calendar.timegm(epoch_tuple)
        # The pyboard maintains its time as localtime, whereas unix and
        # esp32 maintain their time as GMT
        self.adjust_for_timezone = (epoch_tuple[0] != 1970)

    def check_pyb(self):
        """Raises an error if the pyb object was closed."""
        if self.pyb is None:
            raise DeviceError('serial port %s closed' % self.dev_name_short)

    def close(self):
        """Closes the serial port."""
        if self.pyb and self.pyb.serial:
            self.pyb.serial.close()
        self.pyb = None

    def default_board_name(self):
        # Fallback board name; overridden by subclasses.
        return 'unknown'

    def is_root_path(self, filename):
        """Determines if 'filename' corresponds to a directory on this device."""
        test_filename = filename + '/'
        for root_dir in self.root_dirs:
            if test_filename.startswith(root_dir):
                return True
        return False

    def is_serial_port(self, port):
        # Base devices are not serial; DeviceSerial overrides this.
        return False

    def read(self, num_bytes):
        """Reads data from the pyboard over the serial port."""
        self.check_pyb()
        try:
            return self.pyb.serial.read(num_bytes)
        except (serial.serialutil.SerialException, TypeError):
            # Write failed - assume that we got disconnected
            self.close()
            raise DeviceError('serial port %s closed' % self.dev_name_short)

    def remote(self, func, *args, xfer_func=None, **kwargs):
        """Calls func with the indicated args on the micropython board.

        The function's *source code* is extracted with inspect.getsource,
        shipped to the board's raw REPL, executed there, and the printed
        result is returned (as bytes/str from Pyboard.follow).
        """
        global HAS_BUFFER
        HAS_BUFFER = self.has_buffer
        if hasattr(func, 'extra_funcs'):
            # Composite function object: concatenate the source of its
            # helper functions, and drop decorator lines from its own.
            func_name = func.name
            func_lines = []
            for extra_func in func.extra_funcs:
                func_lines += inspect.getsource(extra_func).split('\n')
                func_lines += ['']
            func_lines += filter(lambda line: line[:1] != '@', func.source.split('\n'))
            func_src = '\n'.join(func_lines)
        else:
            func_name = func.__name__
            func_src = inspect.getsource(func)
        # Append a call to the function and print its result, so the host
        # can capture the value from the REPL output.
        args_arr = [remote_repr(i) for i in args]
        kwargs_arr = ["{}={}".format(k, remote_repr(v)) for k, v in kwargs.items()]
        func_src += 'output = ' + func_name + '('
        func_src += ', '.join(args_arr + kwargs_arr)
        func_src += ')\n'
        func_src += 'if output is None:\n'
        func_src += ' print("None")\n'
        func_src += 'else:\n'
        func_src += ' print(output)\n'
        time_offset = self.time_offset
        if self.adjust_for_timezone:
            time_offset -= time.localtime().tm_gmtoff
        # Substitute host-side configuration placeholders into the shipped
        # source before sending it to the board.
        func_src = func_src.replace('TIME_OFFSET', '{}'.format(time_offset))
        func_src = func_src.replace('HAS_BUFFER', '{}'.format(HAS_BUFFER))
        func_src = func_src.replace('BUFFER_SIZE', '{}'.format(BUFFER_SIZE))
        func_src = func_src.replace('IS_UPY', 'True')
        if DEBUG:
            print('----- About to send %d bytes of code to the pyboard -----' % len(func_src))
            print(func_src)
            print('-----')
        self.check_pyb()
        try:
            self.pyb.enter_raw_repl()
            self.check_pyb()
            output = self.pyb.exec_raw_no_follow(func_src)
            if xfer_func:
                # Optional host-side transfer callback (e.g. streaming file
                # contents) that runs while the board executes func_src.
                xfer_func(self, *args, **kwargs)
            self.check_pyb()
            output, _ = self.pyb.follow(timeout=20)
            self.check_pyb()
            self.pyb.exit_raw_repl()
        except (serial.serialutil.SerialException, TypeError):
            self.close()
            raise DeviceError('serial port %s closed' % self.dev_name_short)
        if DEBUG:
            print('-----Response-----')
            print(output)
            print('-----')
        return output

    def remote_eval(self, func, *args, **kwargs):
        """Calls func with the indicated args on the micropython board, and
        converts the response back into python by using eval.
        """
        return eval(self.remote(func, *args, **kwargs))

    def status(self):
        """Returns a status string to indicate whether we're connected to
        the pyboard or not.
        """
        if self.pyb is None:
            return 'closed'
        return 'connected'

    def sync_time(self):
        """Sets the time on the pyboard to match the time on the host."""
        now = time.localtime(time.time())
        self.remote(set_time, (now.tm_year, now.tm_mon, now.tm_mday, now.tm_wday + 1,
                               now.tm_hour, now.tm_min, now.tm_sec, 0))

    def write(self, buf):
        """Writes data to the pyboard over the serial port."""
        self.check_pyb()
        try:
            return self.pyb.serial.write(buf)
        except (serial.serialutil.SerialException, BrokenPipeError, TypeError):
            # Write failed - assume that we got disconnected
            self.close()
            raise DeviceError('{} closed'.format(self.dev_name_short))
class DeviceSerial(Device):
    # A Device connected through a serial port.
    def __init__(self, port, baud, wait):
        """Opens the serial port, optionally waiting for it to appear.

        wait is the number of seconds to wait for the port to show up
        (useful while a board is still enumerating over USB).
        """
        self.port = port
        self.baud = baud
        self.wait = wait
        if wait and not os.path.exists(port):
            toggle = False
            try:
                sys.stdout.write("Waiting %d seconds for serial port '%s' to exist" % (wait, port))
                sys.stdout.flush()
                while wait and not os.path.exists(port):
                    sys.stdout.write('.')
                    sys.stdout.flush()
                    time.sleep(0.5)
                    # wait counts seconds, but the loop ticks every 0.5s,
                    # so only decrement on alternate iterations.
                    toggle = not toggle
                    wait = wait if not toggle else wait -1
                sys.stdout.write("\n")
            except KeyboardInterrupt:
                raise DeviceError('Interrupted')
        self.dev_name_short = port
        self.dev_name_long = '%s at %d baud' % (port, baud)
        try:
            pyb = Pyboard(port, baudrate=baud, wait=wait)
        except PyboardError as err:
            print(err)
            sys.exit(1)
        # Bluetooth devices take some time to connect at startup, and writes
        # issued while the remote isn't connected will fail. So we send newlines
        # with pauses until one of our writes succeeds.
        try:
            # we send a Control-C which should kill the current line
            # assuming we're talking to the micropython repl. If we send
            # a newline, then the junk might get interpreted as a command
            # which will do who knows what.
            pyb.serial.write(b'\x03')
        except serial.serialutil.SerialException:
            # Write failed. Now report that we're waiting and keep trying until
            # a write succeeds
            sys.stdout.write("Waiting for transport to be connected.")
            while True:
                time.sleep(0.5)
                try:
                    pyb.serial.write(b'\x03')
                    break
                except serial.serialutil.SerialException:
                    pass
                sys.stdout.write('.')
                sys.stdout.flush()
            sys.stdout.write('\n')
        # In theory the serial port is now ready to use
        Device.__init__(self, pyb)

    def default_board_name(self):
        # Serial boards default to being pyboards.
        return 'pyboard'

    def is_serial_port(self, port):
        return self.dev_name_short == port

    @property
    def timeout(self):
        """Gets the timeout associated with the serial port."""
        self.check_pyb()
        return self.pyb.serial.timeout

    @timeout.setter
    def timeout(self, value):
        """Sets the timeout associated with the serial port."""
        self.check_pyb()
        try:
            self.pyb.serial.timeout = value
        except:
            # timeout is a property so it calls code, and that can fail
            # if the serial port is closed.
            pass
class DeviceNet(Device):
    # A Device reached over the network via the board's telnet REPL.
    def __init__(self, name, ip_address, user, password):
        """Connects to the board over telnet and logs in."""
        self.dev_name_short = '{} ({})'.format(name, ip_address)
        self.dev_name_long = self.dev_name_short
        try:
            pyb = Pyboard(ip_address, user=user, password=password)
        except (socket.timeout, OSError):
            raise DeviceError('No response from {}'.format(ip_address))
        except KeyboardInterrupt:
            raise DeviceError('Interrupted')
        Device.__init__(self, pyb)

    def default_board_name(self):
        # Telnet-reachable boards default to being WiPys.
        return 'wipy'

    @property
    def timeout(self):
        """There is no equivalent to timeout for the telnet connection."""
        return None

    @timeout.setter
    def timeout(self, value):
        """There is no equivalent to timeout for the telnet connection."""
        pass
class AutoBool(object):
    """A boolean flag that is True only for the duration of a ``with`` block.

    Calling the instance returns the current flag value.
    """
    def __init__(self):
        self.value = False

    def __enter__(self):
        self.value = True

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.value = False

    def __call__(self):
        return self.value
class ShellError(Exception):
    """Errors that we want to report to the user and keep running."""
    # Raised by shell commands (e.g. bad redirection); caught in
    # onecmd_exec so the shell keeps running.
    pass
class Shell(cmd.Cmd):
"""Implements the shell as a command line interpreter."""
    def __init__(self, filename=None, timing=False, **kwargs):
        """Initializes the shell.

        filename, when given, names a script of shell commands being
        executed; timing enables per-command elapsed-time reporting.
        """
        cmd.Cmd.__init__(self, **kwargs)
        if 'stdin' in kwargs:
            # Commands come from a file/pipe rather than a tty.
            cmd.Cmd.use_rawinput = 0
        self.real_stdout = self.stdout
        self.smart_stdout = SmartFile(self.stdout)
        self.stderr = SmartFile(sys.stderr)
        self.filename = filename
        self.line_num = 0
        self.timing = timing
        global cur_dir
        cur_dir = os.getcwd()
        self.prev_dir = cur_dir  # used by 'cd -'
        self.columns = shutil.get_terminal_size().columns
        # Output-redirection state, managed by line_to_args()/postcmd().
        self.redirect_dev = None
        self.redirect_filename = ''
        self.redirect_mode = ''
        # Flags used by do_repl/repl_serial_to_stdout coordination.
        self.quit_when_no_output = False
        self.quit_serial_reader = False
        readline.set_completer_delims(DELIMS)
        self.set_prompt()

    def set_prompt(self):
        """Sets the prompt to the colorized current directory, or to nothing
        when commands are being read from a file.
        """
        if self.stdin == sys.stdin:
            prompt = PROMPT_COLOR + cur_dir + END_COLOR + '> '
            if FAKE_INPUT_PROMPT:
                # Print the prompt ourselves instead of letting cmd do it.
                print(prompt, end='')
                self.prompt = ''
            else:
                self.prompt = prompt
        else:
            # Executing commands from a file
            self.prompt = ''
    def cmdloop(self, line=None):
        """Runs the interactive command loop, or a single command when
        line is given.
        """
        if line:
            line = self.precmd(line)
            stop = self.onecmd(line)
            stop = self.postcmd(stop, line)
        else:
            cmd.Cmd.cmdloop(self)

    def onecmd(self, line):
        """Override onecmd.
        1 - So we don't have to have a do_EOF method.
        2 - So we can strip comments
        3 - So we can track line numbers
        """
        if DEBUG:
            print('Executing "%s"' % line)
        self.line_num += 1
        if line == "EOF" or line == 'exit':
            if cmd.Cmd.use_rawinput:
                # This means that we printed a prompt, and we'll want to
                # print a newline to pretty things up for the caller.
                self.print('')
            return True
        # Strip comments
        comment_idx = line.find("#")
        if comment_idx >= 0:
            line = line[0:comment_idx]
            line = line.strip()
        # search multiple commands on the same line
        lexer = shlex.shlex(line)
        lexer.whitespace = ''
        for issemicolon, group in itertools.groupby(lexer, lambda x: x == ";"):
            if not issemicolon:
                self.onecmd_exec("".join(group))

    def onecmd_exec(self, line):
        """Executes a single command, optionally timing it, and converts
        device/shell errors into printed messages rather than crashes.
        """
        try:
            if self.timing:
                start_time = time.time()
                result = cmd.Cmd.onecmd(self, line)
                end_time = time.time()
                print('took %.3f seconds' % (end_time - start_time))
                return result
            else:
                return cmd.Cmd.onecmd(self, line)
        except DeviceError as err:
            print_err(err)
        except ShellError as err:
            print_err(err)
        except SystemExit:
            # When you use -h with argparse it winds up call sys.exit, which
            # raises a SystemExit. We intercept it because we don't want to
            # exit the shell, just the command.
            return False
    def default(self, line):
        """Called by cmd for lines that match no do_xxx command."""
        print_err("Unrecognized command:", line)

    def emptyline(self):
        """We want empty lines to do nothing. By default they would repeat the
        previous command.
        """
        pass

    def precmd(self, line):
        """Routes command output through the str/bytes-aware writer before
        each command runs; postcmd() restores the real stdout.
        """
        self.stdout = self.smart_stdout
        return line

    def postcmd(self, stop, line):
        """Runs after each command: flushes any pending output redirection
        and restores stdout and the prompt.
        """
        if self.stdout != self.smart_stdout:
            if self.redirect_dev is not None:
                # Redirecting to a remote device, now that we're finished the
                # command, we can copy the collected output to the remote.
                if DEBUG:
                    print('Copy redirected output to "%s"' % self.redirect_filename)
                # This belongs on the remote. Copy/append now
                filesize = self.stdout.tell()
                self.stdout.seek(0)
                self.redirect_dev.remote(recv_file_from_host, self.stdout,
                                         self.redirect_filename, filesize,
                                         dst_mode=self.redirect_mode,
                                         xfer_func=send_file_to_remote)
            self.stdout.close()
        self.stdout = self.real_stdout
        if not stop:
            self.set_prompt()
        return stop
    def print(self, *args, end='\n', file=None):
        """Convenience function so you don't need to remember to put the \n
        at the end of the line.
        """
        if file is None:
            file = self.stdout
        s = ' '.join(str(arg) for arg in args) + end
        file.write(s)

    def create_argparser(self, command):
        """Builds an ArgumentParser for *command* from its argparse_<command>
        spec, deriving usage/description from the do_<command> docstring.

        Returns None when the command has no argparse_<command> attribute.
        """
        try:
            argparse_args = getattr(self, "argparse_" + command)
        except AttributeError:
            return None
        doc_lines = getattr(self, "do_" + command).__doc__.expandtabs().splitlines()
        if '' in doc_lines:
            # A blank line separates the usage block from the description.
            blank_idx = doc_lines.index('')
            usage = doc_lines[:blank_idx]
            description = doc_lines[blank_idx+1:]
        else:
            usage = doc_lines
            description = []
        parser = argparse.ArgumentParser(
            prog=command,
            usage='\n'.join(usage),
            description='\n'.join(description)
        )
        for args, kwargs in argparse_args:
            parser.add_argument(*args, **kwargs)
        return parser
    def filename_complete(self, text, line, begidx, endidx):
        """Wrapper for catching exceptions since cmd seems to silently
        absorb them.
        """
        try:
            return self.real_filename_complete(text, line, begidx, endidx)
        except:
            traceback.print_exc()

    def real_filename_complete(self, text, line, begidx, endidx):
        """Figure out what filenames match the completion."""
        # line contains the full command line that's been entered so far.
        # text contains the portion of the line that readline is trying to complete
        # text should correspond to line[begidx:endidx]
        #
        # The way the completer works text will start after one of the characters
        # in DELIMS. So if the filename entered so far was "embedded\ sp" then
        # text will point to the s in sp.
        #
        # The following bit of logic backs up to find the real beginning of the
        # filename.
        for before_match in range(begidx, 0, -1):
            if line[before_match] in DELIMS and before_match >= 1 and line[before_match - 1] != '\\':
                break
        # We set fixed to be the portion of the filename which is before text
        # and match is the full portion of the filename that's been entered so
        # far (that's the part we use for matching files).
        #
        # When we return a list of completions, the bit that we return should
        # just be the portion that we replace 'text' with.
        fixed = unescape(line[before_match+1:begidx]) # fixed portion of the match
        match = unescape(line[before_match+1:endidx]) # portion to match filenames against
        # We do the following to cover the case that the current directory
        # is / and the path being entered is relative.
        if match[0] == '/':
            abs_match = match
        elif cur_dir == '/':
            abs_match = cur_dir + match
        else:
            abs_match = cur_dir + '/' + match
        completions = []
        prepend = ''
        if abs_match.rfind('/') == 0: # match is in the root directory
            # This means that we're looking for matches in the root directory
            # (i.e. abs_match is /foo and the user hit TAB).
            # So we'll supply the matching board names as possible completions.
            # Since they're all treated as directories we leave the trailing slash.
            with DEV_LOCK:
                if match[0] == '/':
                    completions += [dev.name_path for dev in DEVS if dev.name_path.startswith(abs_match)]
                else:
                    completions += [dev.name_path[1:] for dev in DEVS if dev.name_path.startswith(abs_match)]
            if DEFAULT_DEV:
                # Add root directories of the default device (i.e. /flash/ and /sd/)
                if match[0] == '/':
                    completions += [root_dir for root_dir in DEFAULT_DEV.root_dirs if root_dir.startswith(match)]
                else:
                    completions += [root_dir[1:] for root_dir in DEFAULT_DEV.root_dirs if root_dir[1:].startswith(match)]
        else:
            # This means that there are at least 2 slashes in abs_match. If one
            # of them matches a board name then we need to remove the board
            # name from fixed. Since the results from listdir_matches won't
            # contain the board name, we need to prepend each of the completions.
            with DEV_LOCK:
                for dev in DEVS:
                    if abs_match.startswith(dev.name_path):
                        prepend = dev.name_path[:-1]
            paths = sorted(auto(listdir_matches, match))
            for path in paths:
                path = prepend + path
                completions.append(escape(path.replace(fixed, '', 1)))
        return completions

    def directory_complete(self, text, line, begidx, endidx):
        """Figure out what directories match the completion."""
        return [filename for filename in self.filename_complete(text, line, begidx, endidx) if filename[-1] == '/']
    def line_to_args(self, line):
        """This will convert the line passed into the do_xxx functions into
        an array of arguments and handle the Output Redirection Operator.

        Returns either a plain list of tokens, or an argparse Namespace
        when the command defines an argparse_<command> spec.
        """
        # NOTE: > and >> are only recognized as standalone (whitespace
        # separated) tokens.
        args = line.split()
        self.redirect_filename = ''
        self.redirect_dev = None
        redirect_index = -1
        if '>' in args:
            redirect_index = args.index('>')
        elif '>>' in args:
            redirect_index = args.index('>>')
        if redirect_index >= 0:
            if redirect_index + 1 >= len(args):
                raise ShellError("> requires a filename")
            self.redirect_filename = resolve_path(args[redirect_index + 1])
            # The target's parent directory must already exist.
            rmode = auto(get_mode, os.path.dirname(self.redirect_filename))
            if not mode_isdir(rmode):
                raise ShellError("Unable to redirect to '%s', directory doesn't exist" %
                                 self.redirect_filename)
            if args[redirect_index] == '>':
                self.redirect_mode = 'w'
                if DEBUG:
                    print('Redirecting (write) to', self.redirect_filename)
            else:
                self.redirect_mode = 'a'
                if DEBUG:
                    print('Redirecting (append) to', self.redirect_filename)
            self.redirect_dev, self.redirect_filename = get_dev_and_path(self.redirect_filename)
            try:
                if self.redirect_dev is None:
                    self.stdout = SmartFile(open(self.redirect_filename, self.redirect_mode))
                else:
                    # Redirecting to a remote device. We collect the results locally
                    # and copy them to the remote device at the end of the command.
                    self.stdout = SmartFile(tempfile.TemporaryFile(mode='w+'))
            except OSError as err:
                raise ShellError(err)
            # Remove the redirection operator and filename from the args.
            del args[redirect_index + 1]
            del args[redirect_index]
        curr_cmd, _, _ = self.parseline(self.lastcmd)
        parser = self.create_argparser(curr_cmd)
        if parser:
            args = parser.parse_args(args)
        return args

    def do_args(self, line):
        """args [arguments...]
        Debug function for verifying argument parsing. This function just
        prints out each argument that it receives.
        """
        args = self.line_to_args(line)
        for idx in range(len(args)):
            self.print("arg[%d] = '%s'" % (idx, args[idx]))
    def do_boards(self, _):
        """boards
        Lists the boards that rshell is currently connected to.
        """
        rows = []
        with DEV_LOCK:
            for dev in DEVS:
                if dev is DEFAULT_DEV:
                    # The default device's root dirs are also reachable
                    # without the /board-name prefix.
                    dirs = [dir[:-1] for dir in dev.root_dirs]
                else:
                    dirs = []
                dirs += ['/{}{}'.format(dev.name, dir)[:-1] for dir in dev.root_dirs]
                dirs = 'Dirs: ' + ' '.join(dirs)
                epoch = 'Epoch: {}'.format(time.gmtime(dev.time_offset)[0])
                rows.append((dev.name, '@ %s' % dev.dev_name_short, dev.status(), epoch, dirs))
        if rows:
            column_print('<<<< ', rows, self.print)
        else:
            print('No boards connected')
    def complete_cat(self, text, line, begidx, endidx):
        # Tab-completion for cat: complete on filenames.
        return self.filename_complete(text, line, begidx, endidx)

    def do_cat(self, line):
        """cat FILENAME...
        Concatenates files and sends to stdout.
        """
        # note: when we get around to supporting cat from stdin, we'll need
        # to write stdin to a temp file, and then copy the file
        # since we need to know the filesize when copying to the pyboard.
        args = self.line_to_args(line)
        for filename in args:
            filename = resolve_path(filename)
            mode = auto(get_mode, filename)
            if not mode_exists(mode):
                print_err("Cannot access '%s': No such file" % filename)
                continue
            if not mode_isfile(mode):
                print_err("'%s': is not a file" % filename)
                continue
            cat(filename, self.stdout)
    def complete_cd(self, text, line, begidx, endidx):
        # Tab-completion for cd: complete on directories only.
        return self.directory_complete(text, line, begidx, endidx)

    def do_cd(self, line):
        """cd DIRECTORY
        Changes the current directory. ~ expansion is supported, and cd -
        goes to the previous directory.
        """
        args = self.line_to_args(line)
        if len(args) == 0:
            # No argument: go to the home directory (~ is expanded by
            # resolve_path).
            dirname = '~'
        else:
            if args[0] == '-':
                dirname = self.prev_dir
            else:
                dirname = args[0]
        dirname = resolve_path(dirname)
        mode = auto(get_mode, dirname)
        if mode_isdir(mode):
            global cur_dir
            self.prev_dir = cur_dir
            cur_dir = dirname
            auto(chdir, dirname)
        else:
            print_err("Directory '%s' does not exist" % dirname)
def do_connect(self, line):
"""connect TYPE TYPE_PARAMS
connect serial port [baud]
connect telnet ip-address-or-name
Connects a pyboard to rshell.
"""
args = self.line_to_args(line)
num_args = len(args)
if num_args < 1:
print_err('Missing connection TYPE')
return
connect_type = args[0]
if connect_type == 'serial':
if num_args < 2:
print_err('Missing serial port')
return
port = args[1]
if num_args < 3:
baud = 115200
else:
try:
baud = int(args[2])
except ValueError:
print_err("Expecting baud to be numeric. Found '{}'".format(args[2]))
return
connect_serial(port, baud)
elif connect_type == 'telnet':
if num_args < 2:
print_err('Missing hostname or ip-address')
return
name = args[1]
connect_telnet(name)
else:
print_err('Unrecognized connection TYPE: {}'.format(connect_type))
    def complete_cp(self, text, line, begidx, endidx):
        # Tab-completion for cp: complete on filenames.
        return self.filename_complete(text, line, begidx, endidx)

    def do_cp(self, line):
        """cp SOURCE DEST Copy a single SOURCE file to DEST file.
        cp SOURCE... DIRECTORY Copy multiple SOURCE files to a directory.
        cp [-r|--recursive] [SOURCE|SOURCE_DIR]... DIRECTORY
        cp [-r] PATTERN DIRECTORY Copy matching files to DIRECTORY.
        The destination must be a directory except in the case of
        copying a single file. To copy directories -r must be specified.
        This will cause directories and their contents to be recursively
        copied.
        """
        args = self.line_to_args(line)
        if len(args.filenames) < 2:
            print_err('Missing destination file')
            return
        dst_dirname = resolve_path(args.filenames[-1])
        dst_mode = auto(get_mode, dst_dirname)
        d_dst = {}  # Destination directory: lookup stat by basename
        if args.recursive:
            dst_files = auto(listdir_stat, dst_dirname)
            if dst_files is None:
                err = "cp: target {} is not a directory"
                print_err(err.format(dst_dirname))
                return
            for name, stat in dst_files:
                d_dst[name] = stat
        src_filenames = args.filenames[:-1]
        # Process PATTERN
        sfn = src_filenames[0]
        if is_pattern(sfn):
            # A glob pattern must be the only source argument.
            if len(src_filenames) > 1:
                print_err("Usage: cp [-r] PATTERN DIRECTORY")
                return
            src_filenames = process_pattern(sfn)
            if src_filenames is None:
                return
        for src_filename in src_filenames:
            if is_pattern(src_filename):
                print_err("Only one pattern permitted.")
                return
            src_filename = resolve_path(src_filename)
            src_mode = auto(get_mode, src_filename)
            if not mode_exists(src_mode):
                print_err("File '{}' doesn't exist".format(src_filename))
                return
            if mode_isdir(src_mode):
                if args.recursive: # Copying a directory
                    src_basename = os.path.basename(src_filename)
                    dst_filename = dst_dirname + '/' + src_basename
                    if src_basename in d_dst:
                        # Destination entry already exists; it must be a dir.
                        dst_stat = d_dst[src_basename]
                        dst_mode = stat_mode(dst_stat)
                        if not mode_isdir(dst_mode):
                            err = "Destination {} is not a directory"
                            print_err(err.format(dst_filename))
                            return
                    else:
                        if not mkdir(dst_filename):
                            err = "Unable to create directory {}"
                            print_err(err.format(dst_filename))
                            return
                    # Recursive copies are delegated to rsync (quietly).
                    rsync(src_filename, dst_filename, mirror=False, dry_run=False,
                          print_func=lambda *args: None, recursed=False, sync_hidden=args.all)
                else:
                    print_err("Omitting directory {}".format(src_filename))
                continue
            if mode_isdir(dst_mode):
                dst_filename = dst_dirname + '/' + os.path.basename(src_filename)
            else:
                dst_filename = dst_dirname
            if not cp(src_filename, dst_filename):
                err = "Unable to copy '{}' to '{}'"
                print_err(err.format(src_filename, dst_filename))
                break
    def do_echo(self, line):
        """echo TEXT...
        Display a line of text.
        """
        args = self.line_to_args(line)
        self.print(*args)

    def complete_edit(self, text, line, begidx, endidx):
        # Tab-completion for edit: complete on filenames.
        return self.filename_complete(text, line, begidx, endidx)

    def do_edit(self, line):
        """edit FILE
        Copies the file locally, launches an editor to edit the file.
        When the editor exits, if the file was modified then its copied
        back.
        You can specify the editor used with the --editor command line
        option when you start rshell, or by using the VISUAL or EDITOR
        environment variable. if none of those are set, then vi will be used.
        """
        if len(line) == 0:
            print_err("Must provide a filename")
            return
        filename = resolve_path(line)
        dev, dev_filename = get_dev_and_path(filename)
        mode = auto(get_mode, filename)
        if mode_exists(mode) and mode_isdir(mode):
            print_err("Unable to edit directory '{}'".format(filename))
            return
        if dev is None:
            # File is local
            os.system("{} '{}'".format(EDITOR, filename))
        else:
            # File is remote
            with tempfile.TemporaryDirectory() as temp_dir:
                local_filename = os.path.join(temp_dir, os.path.basename(filename))
                if mode_exists(mode):
                    print('Retrieving {} ...'.format(filename))
                    cp(filename, local_filename)
                # Compare stat before/after the editor runs to decide
                # whether the file changed and needs copying back.
                old_stat = get_stat(local_filename)
                os.system("{} '{}'".format(EDITOR, local_filename))
                new_stat = get_stat(local_filename)
                if old_stat != new_stat:
                    self.print('Updating {} ...'.format(filename))
                    cp(local_filename, filename)
    def complete_filesize(self, text, line, begidx, endidx):
        # Tab-completion for filesize: complete on filenames.
        return self.filename_complete(text, line, begidx, endidx)

    def do_exit(self, _):
        """exit
        Exits from rshell.
        """
        # Returning True tells cmd.Cmd to stop the command loop.
        return True

    def do_filesize(self, line):
        """filesize FILE
        Prints the size of the file, in bytes. This function is primarily
        for testing.
        """
        if len(line) == 0:
            print_err("Must provide a filename")
            return
        filename = resolve_path(line)
        self.print(auto(get_filesize, filename))
def complete_filetype(self, text, line, begidx, endidx):
return self.filename_complete(text, line, begidx, endidx)
def do_filetype(self, line):
"""filetype FILE
Prints the type of file (dir or file). This function is primarily
for testing.
"""
if len(line) == 0:
print_err("Must provide a filename")
return
filename = resolve_path(line)
mode = auto(get_mode, filename)
if mode_exists(mode):
if mode_isdir(mode):
self.print('dir')
elif mode_isfile(mode):
self.print('file')
else:
self.print('unknown')
else:
self.print('missing')
    def do_help(self, line):
        """help [COMMAND]
        List available commands with no arguments, or detailed help when
        a command is provided.
        """
        # We provide a help function so that we can trim the leading spaces
        # from the docstrings. The builtin help function doesn't do that.
        if not line:
            cmd.Cmd.do_help(self, line)
            self.print(EXIT_STR)
            return
        # Commands with an argparse spec get argparse's generated help.
        parser = self.create_argparser(line)
        if parser:
            parser.print_help()
            return
        try:
            doc = getattr(self, 'do_' + line).__doc__
            if doc:
                self.print("%s" % trim(doc))
                return
        except AttributeError:
            pass
        self.print(str(self.nohelp % (line,)))
    # Argument spec for do_ls, consumed by create_argparser(); each entry
    # is an (args, kwargs) tuple destined for parser.add_argument().
    argparse_ls = (
        add_arg(
            '-a', '--all',
            dest='all',
            action='store_true',
            help='do not ignore hidden files',
            default=False
        ),
        add_arg(
            '-l', '--long',
            dest='long',
            action='store_true',
            help='use a long listing format',
            default=False
        ),
        add_arg(
            'filenames',
            metavar='FILE',
            nargs='*',
            help='Files directories or patterns to list'
        ),
    )

    def complete_ls(self, text, line, begidx, endidx):
        # Tab-completion for ls: complete on filenames.
        return self.filename_complete(text, line, begidx, endidx)
    def do_ls(self, line):
        """ls [-a] [-l] [FILE|DIRECTORY|PATTERN]...
        PATTERN supports * ? [seq] [!seq] Unix filename matching
        List directory contents.
        """
        args = self.line_to_args(line)
        if len(args.filenames) == 0:
            args.filenames = ['.']
        for idx, fn in enumerate(args.filenames):
            if not is_pattern(fn):
                filename = resolve_path(fn)
                stat = auto(get_stat, filename)
                mode = stat_mode(stat)
                if not mode_exists(mode):
                    err = "Cannot access '{}': No such file or directory"
                    print_err(err.format(filename))
                    continue
                if not mode_isdir(mode):
                    # A plain file: print it directly instead of listing.
                    if args.long:
                        print_long(filename, stat, self.print)
                    else:
                        self.print(filename)
                    continue
                if len(args.filenames) > 1:
                    # Multiple arguments: print a "dir:" header per entry.
                    if idx > 0:
                        self.print('')
                    self.print("%s:" % filename)
                pattern = '*'
            else: # A pattern was specified
                filename, pattern = validate_pattern(fn)
                if filename is None: # An error was printed
                    continue
            files = []
            ldir_stat = auto(listdir_stat, filename)
            if ldir_stat is None:
                err = "Cannot access '{}': No such file or directory"
                print_err(err.format(filename))
            else:
                for filename, stat in sorted(ldir_stat,
                                             key=lambda entry: entry[0]):
                    if is_visible(filename) or args.all:
                        if fnmatch.fnmatch(filename, pattern):
                            if args.long:
                                # Long format prints one line per entry.
                                print_long(filename, stat, self.print)
                            else:
                                files.append(decorated_filename(filename, stat))
            if len(files) > 0:
                print_cols(sorted(files), self.print, self.columns)
def complete_mkdir(self, text, line, begidx, endidx):
return self.filename_complete(text, line, begidx, endidx)
def do_mkdir(self, line):
"""mkdir DIRECTORY...
Creates one or more directories.
"""
args = self.line_to_args(line)
for filename in args:
filename = resolve_path(filename)
if not mkdir(filename):
print_err('Unable to create %s' % filename)
    def repl_serial_to_stdout(self, dev):
        """Runs as a thread which has a sole purpose of reading bytes from
        the serial port and writing them to stdout. Used by do_repl.
        """
        print('repl_serial_to_stdout dev =', dev)
        # serial_reader_running is an AutoBool: True while this thread is
        # inside the with-block, False once it leaves (do_repl polls it).
        with self.serial_reader_running:
            try:
                save_timeout = dev.timeout
                # Set a timeout so that the read returns periodically with no data
                # and allows us to check whether the main thread wants us to quit.
                dev.timeout = 1
                while not self.quit_serial_reader:
                    try:
                        char = dev.read(1)
                    except serial.serialutil.SerialException:
                        # This happens if the pyboard reboots, or a USB port
                        # goes away.
                        return
                    except TypeError:
                        # This is a bug in serialposix.py starting with python 3.3
                        # which causes a TypeError during the handling of the
                        # select.error. So we treat this the same as
                        # serial.serialutil.SerialException:
                        return
                    except ConnectionResetError:
                        # This happens over a telnet session, if it resets
                        return
                    if not char:
                        # This means that the read timed out. We'll check the quit
                        # flag and return if needed
                        if self.quit_when_no_output:
                            break
                        continue
                    self.stdout.write(char)
                    self.stdout.flush()
                dev.timeout = save_timeout
            except DeviceError:
                # The device is no longer present.
                return
def do_repl(self, line):
    """repl [board-name] [~ line [~]]
    Enters into the regular REPL with the MicroPython board.
    Use Control-X to exit REPL mode and return the shell. It may take
    a second or two before the REPL exits.
    If you provide a line to the REPL command, then that will be executed.
    If you want the REPL to exit, end the line with the ~ character.
    """
    args = self.line_to_args(line)
    # First token (unless the line starts with '~') selects the board.
    if len(args) > 0 and line[0] != '~':
        name = args[0]
        line = ' '.join(args[1:])
    else:
        name = ''
    dev = find_device_by_name(name)
    if not dev:
        print_err("Unable to find board '%s'" % name)
        return
    # Strip the '~ ' marker that separates the board name from the code.
    if line[0:2] == '~ ':
        line = line[2:]
    self.print('Entering REPL. Use Control-%c to exit.' % QUIT_REPL_CHAR)
    self.quit_serial_reader = False
    self.quit_when_no_output = False
    self.serial_reader_running = AutoBool()
    # Background thread copies serial output to stdout while we forward
    # keystrokes to the device below.
    repl_thread = threading.Thread(target=self.repl_serial_to_stdout, args=(dev,), name='REPL_serial_to_stdout')
    repl_thread.daemon = True
    repl_thread.start()
    # Wait for reader to start
    while not self.serial_reader_running():
        pass
    try:
        # Wake up the prompt
        dev.write(b'\r')
        if line:
            if line[-1] == '~':
                # Trailing '~' means: run the line, then leave the REPL
                # once the device stops producing output.
                line = line[:-1]
                self.quit_when_no_output = True
            # '~' inside the line separates multiple statements.
            line = ';'.join(line.split('~'))
            dev.write(bytes(line, encoding='utf-8'))
            dev.write(b'\r')
        if not self.quit_when_no_output:
            while self.serial_reader_running():
                char = getch()
                if not char:
                    continue
                if char == QUIT_REPL_BYTE:
                    self.quit_serial_reader = True
                    # When using telnet with the WiPy, it doesn't support
                    # an initial timeout. So for the meantime, we send a
                    # space which should cause the wipy to echo back a
                    # space which will wakeup our reader thread so it will
                    # notice the quit.
                    dev.write(b' ')
                    # Give the reader thread a chance to detect the quit
                    # then we don't have to call getch() above again which
                    # means we'd need to wait for another character.
                    time.sleep(0.5)
                    # Print a newline so that the rshell prompt looks good.
                    self.print('')
                    # We stay in the loop so that we can still enter
                    # characters until we detect the reader thread quitting
                    # (mostly to cover off weird states).
                    continue
                if char == b'\n':
                    dev.write(b'\r')
                else:
                    dev.write(char)
    except DeviceError as err:
        # The device is no longer present.
        self.print('')
        self.stdout.flush()
        print_err(err)
    repl_thread.join()
# Argument spec for the cp command. NOTE(review): presumably consumed by
# line_to_args() when parsing do_cp's command line (dest names become
# attributes on the parsed args) — confirm against line_to_args.
argparse_cp = (
    add_arg(
        '-a', '--all',
        dest='all',
        action='store_true',
        help='Don\'t ignore files starting with .',
        default=False
    ),
    add_arg(
        '-r', '--recursive',
        dest='recursive',
        action='store_true',
        help='Copy directories recursively',
        default=False
    ),
    add_arg(
        'filenames',
        metavar='FILE',
        nargs='+',
        help='Pattern or files and directories to copy'
    ),
)
# Argument spec for the rm command; do_rm reads args.filename,
# args.recursive and args.force from the parsed result.
argparse_rm = (
    add_arg(
        '-r', '--recursive',
        dest='recursive',
        action='store_true',
        help='remove directories and their contents recursively',
        default=False
    ),
    add_arg(
        '-f', '--force',
        dest='force',
        action='store_true',
        help='ignore nonexistent files and arguments',
        default=False
    ),
    add_arg(
        'filename',
        metavar='FILE',
        nargs='+',
        help='Pattern or files and directories to remove'
    ),
)
def complete_rm(self, text, line, begidx, endidx):
    """Tab-completion hook for the rm command: complete filenames."""
    matches = self.filename_complete(text, line, begidx, endidx)
    return matches
def do_rm(self, line):
    """rm [-f|--force] FILE... Remove one or more files
    rm [-f|--force] PATTERN Remove multiple files
    rm -r [-f|--force] [FILE|DIRECTORY]... Files and/or directories
    rm -r [-f|--force] PATTERN Multiple files and/or directories
    Removes files or directories. To remove directories (and
    any contents) -r must be specified.
    """
    args = self.line_to_args(line)
    filenames = args.filename
    # Process PATTERN: a glob pattern must be the only filename argument.
    sfn = filenames[0]
    if is_pattern(sfn):
        if len(filenames) > 1:
            print_err("Usage: rm [-r] [-f] PATTERN")
            return
        filenames = process_pattern(sfn)
        if filenames is None:
            # Expansion failed; presumably process_pattern already
            # reported the error — confirm.
            return
    for filename in filenames:
        filename = resolve_path(filename)
        if not rm(filename, recursive=args.recursive, force=args.force):
            if not args.force:
                print_err("Unable to remove '{}'".format(filename))
            # Stop at the first failure rather than continuing.
            break
def do_shell(self, line):
    """!some-shell-command args
    Launches a shell and executes whatever command you provide. If you
    don't provide any commands, then it will launch a bash sub-shell
    and when exit from bash (Control-D) then it will return to rshell.
    """
    # An empty command line drops the user into an interactive bash.
    command = line or '/bin/bash'
    os.system(command)
# Argument spec for the rsync command; do_rsync reads args.all,
# args.mirror, args.dry_run, args.verbose, args.src_dir and args.dst_dir.
argparse_rsync = (
    add_arg(
        '-a', '--all',
        dest='all',
        action='store_true',
        help='Don\'t ignore files starting with .',
        default=False
    ),
    add_arg(
        '-m', '--mirror',
        dest='mirror',
        action='store_true',
        help="causes files in the destination which don't exist in "
             "the source to be removed. Without --mirror only file "
             "copies occur. No deletions will take place.",
        default=False,
    ),
    add_arg(
        '-n', '--dry-run',
        dest='dry_run',
        action='store_true',
        help='shows what would be done without actually performing '
             'any file copies. Implies --verbose.',
        default=False
    ),
    add_arg(
        '-v', '--verbose',
        dest='verbose',
        action='store_true',
        help='shows what has been done.',
        default=False
    ),
    add_arg(
        'src_dir',
        metavar='SRC_DIR',
        help='Source directory'
    ),
    add_arg(
        'dst_dir',
        metavar='DEST_DIR',
        help='Destination directory'
    ),
)
def do_rsync(self, line):
    """rsync [-m|--mirror] [-n|--dry-run] [-v|--verbose] SRC_DIR DEST_DIR
    Synchronizes a destination directory tree with a source directory tree.
    """
    args = self.line_to_args(line)
    src_dir = resolve_path(args.src_dir)
    dst_dir = resolve_path(args.dst_dir)
    # --dry-run implies --verbose; otherwise swallow progress output.
    chatty = args.dry_run or args.verbose
    pf = print if chatty else (lambda *_unused: None)
    rsync(src_dir, dst_dir, mirror=args.mirror, dry_run=args.dry_run,
          print_func=pf, recursed=False, sync_hidden=args.all)
def real_main():
    """The main program.

    Reads defaults from RSHELL_* environment variables, parses the
    command line, connects to a board (explicit port or autoscan), and
    runs the interactive/scripted Shell.
    """
    try:
        default_baud = int(os.getenv('RSHELL_BAUD'))
    except (TypeError, ValueError):
        # TypeError: variable unset (int(None)); ValueError: non-numeric.
        # A bare except here would also swallow KeyboardInterrupt.
        default_baud = 115200
    default_port = os.getenv('RSHELL_PORT')
    #if not default_port:
    #    default_port = '/dev/ttyACM0'
    default_user = os.getenv('RSHELL_USER') or 'micro'
    default_password = os.getenv('RSHELL_PASSWORD') or 'python'
    default_editor = os.getenv('RSHELL_EDITOR') or os.getenv('VISUAL') or os.getenv('EDITOR') or 'vi'
    global BUFFER_SIZE
    try:
        default_buffer_size = int(os.getenv('RSHELL_BUFFER_SIZE'))
    except (TypeError, ValueError):
        # Same narrowing as for RSHELL_BAUD above.
        default_buffer_size = BUFFER_SIZE
    parser = argparse.ArgumentParser(
        prog="rshell",
        usage="%(prog)s [options] [command]",
        description="Remote Shell for a MicroPython board.",
        epilog=("You can specify the default serial port using the " +
                "RSHELL_PORT environment variable.")
    )
    parser.add_argument(
        "-b", "--baud",
        dest="baud",
        action="store",
        type=int,
        help="Set the baudrate used (default = %d)" % default_baud,
        default=default_baud
    )
    parser.add_argument(
        "--buffer-size",
        dest="buffer_size",
        action="store",
        type=int,
        help="Set the buffer size used for transfers (default = %d)" % default_buffer_size,
        default=default_buffer_size
    )
    parser.add_argument(
        "-p", "--port",
        dest="port",
        help="Set the serial port to use (default '%s')" % default_port,
        default=default_port
    )
    parser.add_argument(
        "-u", "--user",
        dest="user",
        help="Set username to use (default '%s')" % default_user,
        default=default_user
    )
    parser.add_argument(
        "-w", "--password",
        dest="password",
        help="Set password to use (default '%s')" % default_password,
        default=default_password
    )
    parser.add_argument(
        "-e", "--editor",
        dest="editor",
        help="Set the editor to use (default '%s')" % default_editor,
        default=default_editor
    )
    parser.add_argument(
        "-f", "--file",
        dest="filename",
        help="Specifies a file of commands to process."
    )
    parser.add_argument(
        "-d", "--debug",
        dest="debug",
        action="store_true",
        help="Enable debug features",
        default=False
    )
    parser.add_argument(
        "-n", "--nocolor",
        dest="nocolor",
        action="store_true",
        help="Turn off colorized output",
        default=False
    )
    parser.add_argument(
        "-a", "--ascii",
        dest="binary_xfer",
        action="store_true",
        help="ASCII encode binary files for transfer",
        default=False
    )
    parser.add_argument(
        "--wait",
        dest="wait",
        type=int,
        action="store",
        help="Seconds to wait for serial port",
        default=0
    )
    parser.add_argument(
        "--timing",
        dest="timing",
        action="store_true",
        help="Print timing information about each command",
        default=False
    )
    parser.add_argument(
        '-V', '--version',
        dest='version',
        action='store_true',
        help='Reports the version and exits.',
        default=False
    )
    parser.add_argument(
        "--quiet",
        dest="quiet",
        action="store_true",
        help="Turns off some output (useful for testing)",
        default=False
    )
    parser.add_argument(
        "cmd",
        nargs=argparse.REMAINDER,
        help="Optional command to execute"
    )
    args = parser.parse_args(sys.argv[1:])
    if args.debug:
        print("Debug = %s" % args.debug)
        print("Port = %s" % args.port)
        print("Baud = %d" % args.baud)
        print("User = %s" % args.user)
        print("Password = %s" % args.password)
        print("Wait = %d" % args.wait)
        print("nocolor = %d" % args.nocolor)
        print("binary = %d" % args.binary_xfer)
        print("Timing = %d" % args.timing)
        print("Quiet = %d" % args.quiet)
        print("Buffer_size = %d" % args.buffer_size)
        print("Cmd = [%s]" % ', '.join(args.cmd))
    if args.version:
        print(__version__)
        return
    # Publish the parsed options through the module-level globals the
    # rest of the shell reads.
    global DEBUG
    DEBUG = args.debug
    global QUIET
    QUIET = args.quiet
    global EDITOR
    EDITOR = args.editor
    BUFFER_SIZE = args.buffer_size
    if args.nocolor:
        global DIR_COLOR, PROMPT_COLOR, PY_COLOR, END_COLOR
        DIR_COLOR = ''
        PROMPT_COLOR = ''
        PY_COLOR = ''
        END_COLOR = ''
    else:
        if sys.platform == 'darwin':
            # The readline that comes with OSX screws up colors in the prompt
            global FAKE_INPUT_PROMPT
            FAKE_INPUT_PROMPT = True
    global BINARY_XFER
    BINARY_XFER = args.binary_xfer
    if args.port:
        try:
            connect(args.port, baud=args.baud, wait=args.wait, user=args.user, password=args.password)
        except DeviceError as err:
            print(err)
    else:
        autoscan()
    autoconnect()
    if args.filename:
        # Non-interactive mode: feed a command file to the shell.
        with open(args.filename) as cmd_file:
            shell = Shell(stdin=cmd_file, filename=args.filename, timing=args.timing)
            shell.cmdloop('')
    else:
        cmd_line = ' '.join(args.cmd)
        if cmd_line == '':
            print('Welcome to rshell.', EXIT_STR)
        if num_devices() == 0:
            print('')
            print('No MicroPython boards connected - use the connect command to add one')
            print('')
        shell = Shell(timing=args.timing)
        try:
            shell.cmdloop(cmd_line)
        except KeyboardInterrupt:
            print('')
def main():
    """This main function saves the stdin termios settings, calls real_main,
    and restores stdin termios settings when it returns.
    """
    save_settings = None
    stdin_fd = -1
    try:
        import termios
        stdin_fd = sys.stdin.fileno()
        save_settings = termios.tcgetattr(stdin_fd)
    except Exception:
        # termios is unavailable (e.g. Windows) or stdin is not a real
        # tty; run without save/restore. `except Exception` instead of a
        # bare except so KeyboardInterrupt/SystemExit still propagate.
        pass
    try:
        real_main()
    finally:
        # Restore the terminal even if real_main raised.
        if save_settings:
            termios.tcsetattr(stdin_fd, termios.TCSANOW, save_settings)
# Allow the module to be executed directly as a script.
if __name__ == "__main__":
    main()
|
test_events.py | """Tests for events.py."""
import collections.abc
import concurrent.futures
import functools
import io
import os
import platform
import re
import signal
import socket
try:
import ssl
except ImportError:
ssl = None
import subprocess
import sys
import threading
import time
import errno
import unittest
from unittest import mock
import weakref
if sys.platform != 'win32':
import tty
import asyncio
from asyncio import base_events
from asyncio import constants
from asyncio import coroutines
from asyncio import events
from asyncio import proactor_events
from asyncio import selector_events
from test.test_asyncio import utils as test_utils
from test import support
def osx_tiger():
    """Return True if the platform is Mac OS 10.4 or older."""
    if sys.platform != 'darwin':
        return False
    # mac_ver() yields e.g. '10.14.6'; compare numerically, not as text.
    release = platform.mac_ver()[0]
    parts = tuple(int(piece) for piece in release.split('.'))
    return parts < (10, 5)
def _test_get_event_loop_new_process__sub_proc():
async def doit():
return 'hello'
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop.run_until_complete(doit())
class CoroLike:
    """Object with a coroutine-shaped surface (send/throw/close/__await__)
    whose methods all do nothing. Presumably used to exercise
    coroutine-introspection code paths — confirm with callers.
    """
    def send(self, v):
        pass

    def throw(self, *exc):
        pass

    def close(self):
        pass

    def __await__(self):
        # Deliberately returns None rather than an iterator.
        pass
class MyBaseProto(asyncio.Protocol):
    """Test protocol that records its lifecycle in `state` and counts
    received bytes in `nbytes`. When a loop is supplied, `connected` and
    `done` are Futures resolved on connection_made/connection_lost.
    """
    # Class-level defaults so the attributes exist even without a loop.
    connected = None
    done = None

    def __init__(self, loop=None):
        self.transport = None
        self.state = 'INITIAL'
        self.nbytes = 0
        if loop is not None:
            self.connected = asyncio.Future(loop=loop)
            self.done = asyncio.Future(loop=loop)

    def connection_made(self, transport):
        self.transport = transport
        # Each callback asserts the expected prior state so ordering
        # violations fail loudly in tests.
        assert self.state == 'INITIAL', self.state
        self.state = 'CONNECTED'
        if self.connected:
            self.connected.set_result(None)

    def data_received(self, data):
        assert self.state == 'CONNECTED', self.state
        self.nbytes += len(data)

    def eof_received(self):
        assert self.state == 'CONNECTED', self.state
        self.state = 'EOF'

    def connection_lost(self, exc):
        assert self.state in ('CONNECTED', 'EOF'), self.state
        self.state = 'CLOSED'
        if self.done:
            self.done.set_result(None)
class MyProto(MyBaseProto):
    """MyBaseProto that immediately sends an HTTP GET on connection."""
    def connection_made(self, transport):
        super().connection_made(transport)
        transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
class MyDatagramProto(asyncio.DatagramProtocol):
    """Datagram test protocol tracking lifecycle state and byte count.

    When a loop is supplied, `done` resolves on connection_lost.
    """
    done = None

    def __init__(self, loop=None):
        self.state = 'INITIAL'
        self.nbytes = 0
        # Fix: initialize transport up front, matching MyBaseProto and
        # MyReadPipeProto, so reading it before connection_made cannot
        # raise AttributeError.
        self.transport = None
        if loop is not None:
            self.done = asyncio.Future(loop=loop)

    def connection_made(self, transport):
        self.transport = transport
        assert self.state == 'INITIAL', self.state
        self.state = 'INITIALIZED'

    def datagram_received(self, data, addr):
        assert self.state == 'INITIALIZED', self.state
        self.nbytes += len(data)

    def error_received(self, exc):
        assert self.state == 'INITIALIZED', self.state

    def connection_lost(self, exc):
        assert self.state == 'INITIALIZED', self.state
        self.state = 'CLOSED'
        if self.done:
            self.done.set_result(None)
class MyReadPipeProto(asyncio.Protocol):
    """Read-pipe test protocol; `state` is a list so the full lifecycle
    history is preserved for assertions.
    """
    done = None

    def __init__(self, loop=None):
        self.state = ['INITIAL']
        self.nbytes = 0
        self.transport = None
        if loop is not None:
            self.done = asyncio.Future(loop=loop)

    def connection_made(self, transport):
        self.transport = transport
        assert self.state == ['INITIAL'], self.state
        self.state.append('CONNECTED')

    def data_received(self, data):
        assert self.state == ['INITIAL', 'CONNECTED'], self.state
        self.nbytes += len(data)

    def eof_received(self):
        assert self.state == ['INITIAL', 'CONNECTED'], self.state
        self.state.append('EOF')

    def connection_lost(self, exc):
        if 'EOF' not in self.state:
            self.state.append('EOF')  # It is okay if EOF is missed.
        assert self.state == ['INITIAL', 'CONNECTED', 'EOF'], self.state
        self.state.append('CLOSED')
        if self.done:
            self.done.set_result(None)
class MyWritePipeProto(asyncio.BaseProtocol):
    """Write-pipe test protocol: only tracks connect/close transitions
    (no data_received — write pipes never receive).
    """
    done = None

    def __init__(self, loop=None):
        self.state = 'INITIAL'
        self.transport = None
        if loop is not None:
            self.done = asyncio.Future(loop=loop)

    def connection_made(self, transport):
        self.transport = transport
        assert self.state == 'INITIAL', self.state
        self.state = 'CONNECTED'

    def connection_lost(self, exc):
        assert self.state == 'CONNECTED', self.state
        self.state = 'CLOSED'
        if self.done:
            self.done.set_result(None)
class MySubprocessProtocol(asyncio.SubprocessProtocol):
    """Subprocess test protocol collecting per-fd output and lifecycle
    futures (connected/completed/disconnects) for the given loop.
    """
    def __init__(self, loop):
        self.state = 'INITIAL'
        self.transport = None
        self.connected = asyncio.Future(loop=loop)
        self.completed = asyncio.Future(loop=loop)
        # One future per standard fd (0=stdin, 1=stdout, 2=stderr).
        self.disconnects = {fd: asyncio.Future(loop=loop) for fd in range(3)}
        # Accumulated bytes from stdout (1) and stderr (2).
        self.data = {1: b'', 2: b''}
        self.returncode = None
        self.got_data = {1: asyncio.Event(loop=loop),
                         2: asyncio.Event(loop=loop)}

    def connection_made(self, transport):
        self.transport = transport
        assert self.state == 'INITIAL', self.state
        self.state = 'CONNECTED'
        self.connected.set_result(None)

    def connection_lost(self, exc):
        assert self.state == 'CONNECTED', self.state
        self.state = 'CLOSED'
        self.completed.set_result(None)

    def pipe_data_received(self, fd, data):
        assert self.state == 'CONNECTED', self.state
        self.data[fd] += data
        self.got_data[fd].set()

    def pipe_connection_lost(self, fd, exc):
        assert self.state == 'CONNECTED', self.state
        if exc:
            self.disconnects[fd].set_exception(exc)
        else:
            self.disconnects[fd].set_result(exc)

    def process_exited(self):
        assert self.state == 'CONNECTED', self.state
        self.returncode = self.transport.get_returncode()
class EventLoopTestsMixin:
def setUp(self):
    # Create the loop under test via the subclass hook and install it.
    super().setUp()
    self.loop = self.create_event_loop()
    self.set_event_loop(self.loop)
def tearDown(self):
    # just in case if we have transport close callbacks
    if not self.loop.is_closed():
        test_utils.run_briefly(self.loop)
    # Run cleanups and force GC before the base tearDown inspects state.
    self.doCleanups()
    support.gc_collect()
    super().tearDown()
def test_run_until_complete_nesting(self):
    # run_until_complete() must refuse to re-enter an already-running loop.
    @asyncio.coroutine
    def coro1():
        yield

    @asyncio.coroutine
    def coro2():
        self.assertTrue(self.loop.is_running())
        self.loop.run_until_complete(coro1())

    self.assertRaises(
        RuntimeError, self.loop.run_until_complete, coro2())
# Note: because of the default Windows timing granularity of
# 15.6 msec, we use fairly long sleep times here (~100 msec).
def test_run_until_complete(self):
    # sleep(0.1) should take at least ~0.1s; the wide upper bound
    # tolerates slow machines and coarse timer granularity.
    t0 = self.loop.time()
    self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
    t1 = self.loop.time()
    self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_run_until_complete_stopped(self):
    # Stopping the loop before the awaited task finishes must raise.
    async def cb():
        self.loop.stop()
        await asyncio.sleep(0.1, loop=self.loop)
    task = cb()
    self.assertRaises(RuntimeError,
                      self.loop.run_until_complete, task)
def test_call_later(self):
    results = []

    def callback(arg):
        results.append(arg)
        self.loop.stop()

    self.loop.call_later(0.1, callback, 'hello world')
    t0 = time.monotonic()
    self.loop.run_forever()
    t1 = time.monotonic()
    self.assertEqual(results, ['hello world'])
    # The callback must fire roughly 0.1s after scheduling.
    self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_call_soon(self):
    # call_soon() forwards positional arguments to the callback.
    results = []

    def callback(arg1, arg2):
        results.append((arg1, arg2))
        self.loop.stop()

    self.loop.call_soon(callback, 'hello', 'world')
    self.loop.run_forever()
    self.assertEqual(results, [('hello', 'world')])
def test_call_soon_threadsafe(self):
    # A callback scheduled from another thread and one from the loop's
    # own thread must both run; the lock sequences the two submissions
    # so 'hello' is scheduled before 'world'.
    results = []
    lock = threading.Lock()

    def callback(arg):
        results.append(arg)
        if len(results) >= 2:
            self.loop.stop()

    def run_in_thread():
        self.loop.call_soon_threadsafe(callback, 'hello')
        lock.release()

    lock.acquire()
    t = threading.Thread(target=run_in_thread)
    t.start()

    with lock:
        self.loop.call_soon(callback, 'world')
        self.loop.run_forever()
    t.join()
    self.assertEqual(results, ['hello', 'world'])
def test_call_soon_threadsafe_same_thread(self):
    # call_soon_threadsafe() from the loop's own thread must preserve
    # FIFO ordering with call_soon().
    results = []

    def callback(arg):
        results.append(arg)
        if len(results) >= 2:
            self.loop.stop()

    self.loop.call_soon_threadsafe(callback, 'hello')
    self.loop.call_soon(callback, 'world')
    self.loop.run_forever()
    self.assertEqual(results, ['hello', 'world'])
def test_run_in_executor(self):
    # The default executor must run the callable in a different thread.
    def run(arg):
        return (arg, threading.get_ident())
    f2 = self.loop.run_in_executor(None, run, 'yo')
    res, thread_id = self.loop.run_until_complete(f2)
    self.assertEqual(res, 'yo')
    self.assertNotEqual(thread_id, threading.get_ident())
def test_reader_callback(self):
    # add_reader() must invoke the callback whenever data is readable,
    # accumulating across multiple sends, until the reader is removed.
    r, w = socket.socketpair()
    r.setblocking(False)
    bytes_read = bytearray()

    def reader():
        try:
            data = r.recv(1024)
        except BlockingIOError:
            # Spurious readiness notifications are possible
            # at least on Linux -- see man select.
            return
        if data:
            bytes_read.extend(data)
        else:
            # EOF: peer closed; unregister and close our end.
            self.assertTrue(self.loop.remove_reader(r.fileno()))
            r.close()

    self.loop.add_reader(r.fileno(), reader)
    self.loop.call_soon(w.send, b'abc')
    test_utils.run_until(self.loop, lambda: len(bytes_read) >= 3)
    self.loop.call_soon(w.send, b'def')
    test_utils.run_until(self.loop, lambda: len(bytes_read) >= 6)
    self.loop.call_soon(w.close)
    self.loop.call_soon(self.loop.stop)
    self.loop.run_forever()
    self.assertEqual(bytes_read, b'abcdef')
def test_writer_callback(self):
    # add_writer() must call back when the socket is writable; a second
    # remove_writer() on the same fd must return False.
    r, w = socket.socketpair()
    w.setblocking(False)

    def writer(data):
        w.send(data)
        self.loop.stop()

    data = b'x' * 1024
    self.loop.add_writer(w.fileno(), writer, data)
    self.loop.run_forever()

    self.assertTrue(self.loop.remove_writer(w.fileno()))
    self.assertFalse(self.loop.remove_writer(w.fileno()))

    w.close()
    read = r.recv(len(data) * 2)
    r.close()
    self.assertEqual(read, data)
def _basetest_sock_client_ops(self, httpd, sock):
    # Shared driver: verify sock_* ops reject blocking sockets in debug
    # mode (selector loops only), then do a full HTTP round trip
    # non-blocking.
    if not isinstance(self.loop, proactor_events.BaseProactorEventLoop):
        # in debug mode, socket operations must fail
        # if the socket is not in blocking mode
        self.loop.set_debug(True)
        sock.setblocking(True)
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(
                self.loop.sock_connect(sock, httpd.address))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(
                self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(
                self.loop.sock_recv(sock, 1024))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(
                self.loop.sock_recv_into(sock, bytearray()))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(
                self.loop.sock_accept(sock))

    # test in non-blocking mode
    sock.setblocking(False)
    self.loop.run_until_complete(
        self.loop.sock_connect(sock, httpd.address))
    self.loop.run_until_complete(
        self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
    data = self.loop.run_until_complete(
        self.loop.sock_recv(sock, 1024))
    # consume data
    self.loop.run_until_complete(
        self.loop.sock_recv(sock, 1024))
    sock.close()
    self.assertTrue(data.startswith(b'HTTP/1.0 200 OK'))
def _basetest_sock_recv_into(self, httpd, sock):
    # same as _basetest_sock_client_ops, but using sock_recv_into
    sock.setblocking(False)
    self.loop.run_until_complete(
        self.loop.sock_connect(sock, httpd.address))
    self.loop.run_until_complete(
        self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
    data = bytearray(1024)
    # Read directly into slices of the preallocated buffer (zero-copy).
    with memoryview(data) as buf:
        nbytes = self.loop.run_until_complete(
            self.loop.sock_recv_into(sock, buf[:1024]))
        # consume data
        self.loop.run_until_complete(
            self.loop.sock_recv_into(sock, buf[nbytes:]))
    sock.close()
    self.assertTrue(data.startswith(b'HTTP/1.0 200 OK'))
def test_sock_client_ops(self):
    # Exercise both sock_recv and sock_recv_into against a TCP server.
    with test_utils.run_test_server() as httpd:
        sock = socket.socket()
        self._basetest_sock_client_ops(httpd, sock)
        sock = socket.socket()
        self._basetest_sock_recv_into(httpd, sock)
@support.skip_unless_bind_unix_socket
def test_unix_sock_client_ops(self):
    # Same as test_sock_client_ops but over an AF_UNIX socket.
    with test_utils.run_test_unix_server() as httpd:
        sock = socket.socket(socket.AF_UNIX)
        self._basetest_sock_client_ops(httpd, sock)
        sock = socket.socket(socket.AF_UNIX)
        self._basetest_sock_recv_into(httpd, sock)
def test_sock_client_fail(self):
    # Connecting to a just-released local port must be refused.
    # Make sure that we will get an unused port: bind to port 0 so the
    # OS picks a free one, remember it, then close the socket.
    address = None
    # Fix: construct the socket before the try block — if socket() were
    # inside and raised, `s` would be unbound in the finally clause.
    s = socket.socket()
    try:
        s.bind(('127.0.0.1', 0))
        address = s.getsockname()
    finally:
        s.close()

    sock = socket.socket()
    sock.setblocking(False)
    with self.assertRaises(ConnectionRefusedError):
        self.loop.run_until_complete(
            self.loop.sock_connect(sock, address))
    sock.close()
def test_sock_accept(self):
    # sock_accept() must return a non-blocking connection whose peer
    # matches the connecting client.
    listener = socket.socket()
    listener.setblocking(False)
    listener.bind(('127.0.0.1', 0))
    listener.listen(1)
    client = socket.socket()
    client.connect(listener.getsockname())

    f = self.loop.sock_accept(listener)
    conn, addr = self.loop.run_until_complete(f)
    # gettimeout() == 0 means the accepted socket is non-blocking.
    self.assertEqual(conn.gettimeout(), 0)
    self.assertEqual(addr, client.getsockname())
    self.assertEqual(client.getpeername(), listener.getsockname())
    client.close()
    conn.close()
    listener.close()
@unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
def test_add_signal_handler(self):
    caught = 0

    def my_handler():
        nonlocal caught
        caught += 1

    # Check error behavior first.
    self.assertRaises(
        TypeError, self.loop.add_signal_handler, 'boom', my_handler)
    self.assertRaises(
        TypeError, self.loop.remove_signal_handler, 'boom')
    self.assertRaises(
        ValueError, self.loop.add_signal_handler, signal.NSIG+1,
        my_handler)
    self.assertRaises(
        ValueError, self.loop.remove_signal_handler, signal.NSIG+1)
    self.assertRaises(
        ValueError, self.loop.add_signal_handler, 0, my_handler)
    self.assertRaises(
        ValueError, self.loop.remove_signal_handler, 0)
    self.assertRaises(
        ValueError, self.loop.add_signal_handler, -1, my_handler)
    self.assertRaises(
        ValueError, self.loop.remove_signal_handler, -1)
    # SIGKILL cannot be caught, so adding a handler must fail.
    self.assertRaises(
        RuntimeError, self.loop.add_signal_handler, signal.SIGKILL,
        my_handler)
    # Removing SIGKILL doesn't raise, since we don't call signal().
    self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL))
    # Now set a handler and handle it.
    self.loop.add_signal_handler(signal.SIGINT, my_handler)

    os.kill(os.getpid(), signal.SIGINT)
    test_utils.run_until(self.loop, lambda: caught)

    # Removing it should restore the default handler.
    self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
    self.assertEqual(signal.getsignal(signal.SIGINT),
                     signal.default_int_handler)
    # Removing again returns False.
    self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_while_selecting(self):
    # Test with a signal actually arriving during a select() call.
    caught = 0

    def my_handler():
        nonlocal caught
        caught += 1
        self.loop.stop()

    self.loop.add_signal_handler(signal.SIGALRM, my_handler)

    signal.setitimer(signal.ITIMER_REAL, 0.01, 0)  # Send SIGALRM once.
    self.loop.run_forever()
    self.assertEqual(caught, 1)
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_args(self):
    # Extra args given to add_signal_handler must reach the handler.
    some_args = (42,)
    caught = 0

    def my_handler(*args):
        nonlocal caught
        caught += 1
        self.assertEqual(args, some_args)

    self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)

    signal.setitimer(signal.ITIMER_REAL, 0.1, 0)  # Send SIGALRM once.
    self.loop.call_later(0.5, self.loop.stop)
    self.loop.run_forever()
    self.assertEqual(caught, 1)
def _basetest_create_connection(self, connection_fut, check_sockname=True):
    # Shared driver: the (transport, protocol) pair must be wired
    # together and the protocol must receive some data before EOF.
    tr, pr = self.loop.run_until_complete(connection_fut)
    self.assertIsInstance(tr, asyncio.Transport)
    self.assertIsInstance(pr, asyncio.Protocol)
    self.assertIs(pr.transport, tr)
    if check_sockname:
        self.assertIsNotNone(tr.get_extra_info('sockname'))
    self.loop.run_until_complete(pr.done)
    self.assertGreater(pr.nbytes, 0)
    tr.close()
def test_create_connection(self):
    # Basic TCP create_connection() against the local test HTTP server.
    with test_utils.run_test_server() as httpd:
        conn_fut = self.loop.create_connection(
            lambda: MyProto(loop=self.loop), *httpd.address)
        self._basetest_create_connection(conn_fut)
@support.skip_unless_bind_unix_socket
def test_create_unix_connection(self):
    # Issue #20682: On Mac OS X Tiger, getsockname() returns a
    # zero-length address for UNIX socket.
    check_sockname = not osx_tiger()

    with test_utils.run_test_unix_server() as httpd:
        conn_fut = self.loop.create_unix_connection(
            lambda: MyProto(loop=self.loop), httpd.address)
        self._basetest_create_connection(conn_fut, check_sockname)
def test_create_connection_sock(self):
    # create_connection() must accept a pre-connected socket via sock=.
    with test_utils.run_test_server() as httpd:
        sock = None
        infos = self.loop.run_until_complete(
            self.loop.getaddrinfo(
                *httpd.address, type=socket.SOCK_STREAM))
        # Try each resolved address until one connects.
        for family, type, proto, cname, address in infos:
            try:
                sock = socket.socket(family=family, type=type, proto=proto)
                sock.setblocking(False)
                self.loop.run_until_complete(
                    self.loop.sock_connect(sock, address))
            except OSError:
                # Fix: was a bare `except: pass`, which also swallowed
                # KeyboardInterrupt/SystemExit; connection failures
                # surface as OSError. Also close the failed socket so
                # it isn't leaked before the next attempt.
                sock.close()
                sock = None
            else:
                break
        else:
            assert False, 'Can not create socket.'

        f = self.loop.create_connection(
            lambda: MyProto(loop=self.loop), sock=sock)
        tr, pr = self.loop.run_until_complete(f)
        self.assertIsInstance(tr, asyncio.Transport)
        self.assertIsInstance(pr, asyncio.Protocol)
        self.loop.run_until_complete(pr.done)
        self.assertGreater(pr.nbytes, 0)
        tr.close()
def check_ssl_extra_info(self, client, check_sockname=True,
                         peername=None, peercert=None):
    # Verify the extra-info keys an SSL transport is expected to expose.
    # Fix: `peercert={}` was a mutable default argument; use a None
    # sentinel meaning "expect an empty peercert" (behavior unchanged).
    if peercert is None:
        peercert = {}
    if check_sockname:
        self.assertIsNotNone(client.get_extra_info('sockname'))
    if peername:
        self.assertEqual(peername,
                         client.get_extra_info('peername'))
    else:
        self.assertIsNotNone(client.get_extra_info('peername'))
    self.assertEqual(peercert,
                     client.get_extra_info('peercert'))

    # test SSL cipher
    cipher = client.get_extra_info('cipher')
    self.assertIsInstance(cipher, tuple)
    self.assertEqual(len(cipher), 3, cipher)
    self.assertIsInstance(cipher[0], str)
    self.assertIsInstance(cipher[1], str)
    self.assertIsInstance(cipher[2], int)

    # test SSL object: its introspection must agree with the
    # corresponding extra-info entries.
    sslobj = client.get_extra_info('ssl_object')
    self.assertIsNotNone(sslobj)
    self.assertEqual(sslobj.compression(),
                     client.get_extra_info('compression'))
    self.assertEqual(sslobj.cipher(),
                     client.get_extra_info('cipher'))
    self.assertEqual(sslobj.getpeercert(),
                     client.get_extra_info('peercert'))
    self.assertEqual(sslobj.compression(),
                     client.get_extra_info('compression'))
def _basetest_create_ssl_connection(self, connection_fut,
                                    check_sockname=True,
                                    peername=None):
    # Shared driver for SSL connections: the transport class name must
    # indicate SSL, and the extra-info checks must pass.
    tr, pr = self.loop.run_until_complete(connection_fut)
    self.assertIsInstance(tr, asyncio.Transport)
    self.assertIsInstance(pr, asyncio.Protocol)
    self.assertTrue('ssl' in tr.__class__.__name__.lower())
    self.check_ssl_extra_info(tr, check_sockname, peername)
    self.loop.run_until_complete(pr.done)
    self.assertGreater(pr.nbytes, 0)
    tr.close()
def _test_create_ssl_connection(self, httpd, create_connection,
                                check_sockname=True, peername=None):
    # 1) Explicit dummy context: must connect successfully.
    conn_fut = create_connection(ssl=test_utils.dummy_ssl_context())
    self._basetest_create_ssl_connection(conn_fut, check_sockname,
                                         peername)

    # ssl.Purpose was introduced in Python 3.4
    if hasattr(ssl, 'Purpose'):
        def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, *,
                                      cafile=None, capath=None,
                                      cadata=None):
            """
            A ssl.create_default_context() replacement that doesn't enable
            cert validation.
            """
            self.assertEqual(purpose, ssl.Purpose.SERVER_AUTH)
            return test_utils.dummy_ssl_context()

        # With ssl=True, ssl.create_default_context() should be called
        with mock.patch('ssl.create_default_context',
                        side_effect=_dummy_ssl_create_context) as m:
            conn_fut = create_connection(ssl=True)
            self._basetest_create_ssl_connection(conn_fut, check_sockname,
                                                 peername)
            self.assertEqual(m.call_count, 1)

    # With the real ssl.create_default_context(), certificate
    # validation will fail
    with self.assertRaises(ssl.SSLError) as cm:
        conn_fut = create_connection(ssl=True)
        # Ignore the "SSL handshake failed" log in debug mode
        with test_utils.disable_logger():
            self._basetest_create_ssl_connection(conn_fut, check_sockname,
                                                 peername)

    self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_connection(self):
    # SSL over TCP against the local test HTTPS server.
    with test_utils.run_test_server(use_ssl=True) as httpd:
        create_connection = functools.partial(
            self.loop.create_connection,
            lambda: MyProto(loop=self.loop),
            *httpd.address)
        self._test_create_ssl_connection(httpd, create_connection,
                                         peername=httpd.address)
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_unix_connection(self):
    # Issue #20682: On Mac OS X Tiger, getsockname() returns a
    # zero-length address for UNIX socket.
    check_sockname = not osx_tiger()

    # SSL over an AF_UNIX socket; server_hostname is required because a
    # UNIX address carries no hostname for certificate matching.
    with test_utils.run_test_unix_server(use_ssl=True) as httpd:
        create_connection = functools.partial(
            self.loop.create_unix_connection,
            lambda: MyProto(loop=self.loop), httpd.address,
            server_hostname='127.0.0.1')

        self._test_create_ssl_connection(httpd, create_connection,
                                         check_sockname,
                                         peername=httpd.address)
def test_create_connection_local_addr(self):
    # local_addr must bind the client side to the requested port.
    with test_utils.run_test_server() as httpd:
        port = support.find_unused_port()
        f = self.loop.create_connection(
            lambda: MyProto(loop=self.loop),
            *httpd.address, local_addr=(httpd.address[0], port))
        tr, pr = self.loop.run_until_complete(f)
        expected = pr.transport.get_extra_info('sockname')[1]
        self.assertEqual(port, expected)
        tr.close()
def test_create_connection_local_addr_in_use(self):
    # Binding the client to the server's own address must fail with
    # EADDRINUSE, and the error text must mention the address.
    with test_utils.run_test_server() as httpd:
        f = self.loop.create_connection(
            lambda: MyProto(loop=self.loop),
            *httpd.address, local_addr=httpd.address)
        with self.assertRaises(OSError) as cm:
            self.loop.run_until_complete(f)
        self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
        self.assertIn(str(httpd.address), cm.exception.strerror)
def test_connect_accepted_socket(self, server_ssl=None, client_ssl=None):
    # Accept a connection with a plain socket, then hand it to the loop
    # via connect_accepted_socket(); a client thread drives the echo.
    loop = self.loop

    class MyProto(MyBaseProto):

        def connection_lost(self, exc):
            super().connection_lost(exc)
            loop.call_soon(loop.stop)

        def data_received(self, data):
            super().data_received(data)
            self.transport.write(expected_response)

    lsock = socket.socket()
    lsock.bind(('127.0.0.1', 0))
    lsock.listen(1)
    addr = lsock.getsockname()

    message = b'test data'
    response = None
    expected_response = b'roger'

    def client():
        nonlocal response
        try:
            csock = socket.socket()
            if client_ssl is not None:
                csock = client_ssl.wrap_socket(csock)
            csock.connect(addr)
            csock.sendall(message)
            response = csock.recv(99)
            csock.close()
        except Exception as exc:
            # The thread must not die silently; report and let the
            # main-thread assertions fail.
            print(
                "Failure in client thread in test_connect_accepted_socket",
                exc)

    thread = threading.Thread(target=client, daemon=True)
    thread.start()

    conn, _ = lsock.accept()
    proto = MyProto(loop=loop)
    proto.loop = loop
    loop.run_until_complete(
        loop.connect_accepted_socket(
            (lambda: proto), conn, ssl=server_ssl))
    loop.run_forever()
    proto.transport.close()
    lsock.close()

    support.join_thread(thread, timeout=1)
    self.assertFalse(thread.is_alive())
    self.assertEqual(proto.state, 'CLOSED')
    self.assertEqual(proto.nbytes, len(message))
    self.assertEqual(response, expected_response)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_ssl_connect_accepted_socket(self):
    """Same as test_connect_accepted_socket, but with TLS on both ends."""
    if (sys.platform == 'win32' and
        sys.version_info < (3, 5) and
        isinstance(self.loop, proactor_events.BaseProactorEventLoop)
        ):
        raise unittest.SkipTest(
            'SSL not supported with proactor event loops before Python 3.5'
            )

    server_context = test_utils.simple_server_sslcontext()
    client_context = test_utils.simple_client_sslcontext()

    self.test_connect_accepted_socket(server_context, client_context)
def test_connect_accepted_socket_ssl_timeout_for_plain_socket(self):
    """ssl_handshake_timeout without an ssl context must be rejected."""
    plain_sock = socket.socket()
    self.addCleanup(plain_sock.close)
    connect_coro = self.loop.connect_accepted_socket(
        MyProto, plain_sock, ssl_handshake_timeout=1)
    expected_msg = 'ssl_handshake_timeout is only meaningful with ssl'
    with self.assertRaisesRegex(ValueError, expected_msg):
        self.loop.run_until_complete(connect_coro)
@mock.patch('asyncio.base_events.socket')
def create_server_multiple_hosts(self, family, hosts, mock_sock):
    """Helper: create a server bound to several (possibly duplicate) hosts.

    getaddrinfo and the socket module are mocked out, so no real
    sockets are bound; the check is only that duplicate hosts collapse
    into a unique set of listening sockets.
    """
    # Was an @asyncio.coroutine generator (deprecated); converted to
    # async def, which the rest of this file already uses.
    async def getaddrinfo(host, port, *args, **kw):
        if family == socket.AF_INET:
            return [(family, socket.SOCK_STREAM, 6, '', (host, port))]
        else:
            return [(family, socket.SOCK_STREAM, 6, '', (host, port, 0, 0))]

    def getaddrinfo_task(*args, **kwds):
        return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)

    unique_hosts = set(hosts)

    if family == socket.AF_INET:
        mock_sock.socket().getsockbyname.side_effect = [
            (host, 80) for host in unique_hosts]
    else:
        mock_sock.socket().getsockbyname.side_effect = [
            (host, 80, 0, 0) for host in unique_hosts]
    self.loop.getaddrinfo = getaddrinfo_task
    # prevent the loop from actually serving on the mocked sockets
    self.loop._start_serving = mock.Mock()
    self.loop._stop_serving = mock.Mock()
    f = self.loop.create_server(lambda: MyProto(self.loop), hosts, 80)
    server = self.loop.run_until_complete(f)
    self.addCleanup(server.close)
    server_hosts = {sock.getsockbyname()[0] for sock in server.sockets}
    self.assertEqual(server_hosts, unique_hosts)
def test_create_server_multiple_hosts_ipv4(self):
    """Duplicate IPv4 hosts must collapse to unique listening sockets."""
    hosts = ['1.2.3.4', '5.6.7.8', '1.2.3.4']
    self.create_server_multiple_hosts(socket.AF_INET, hosts)
def test_create_server_multiple_hosts_ipv6(self):
    """Duplicate IPv6 hosts must collapse to unique listening sockets."""
    hosts = ['::1', '::2', '::1']
    self.create_server_multiple_hosts(socket.AF_INET6, hosts)
def test_create_server(self):
    """End-to-end TCP server: connect, exchange data, observe close."""
    proto = MyProto(self.loop)
    f = self.loop.create_server(lambda: proto, '0.0.0.0', 0)
    server = self.loop.run_until_complete(f)
    self.assertEqual(len(server.sockets), 1)
    sock = server.sockets[0]
    host, port = sock.getsockname()
    self.assertEqual(host, '0.0.0.0')
    client = socket.socket()
    client.connect(('127.0.0.1', port))
    client.sendall(b'xxx')

    self.loop.run_until_complete(proto.connected)
    self.assertEqual('CONNECTED', proto.state)

    test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
    self.assertEqual(3, proto.nbytes)

    # extra info is available
    self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
    self.assertEqual('127.0.0.1',
                     proto.transport.get_extra_info('peername')[0])

    # close connection
    proto.transport.close()
    self.loop.run_until_complete(proto.done)

    self.assertEqual('CLOSED', proto.state)

    # the client socket must be closed after to avoid ECONNRESET upon
    # recv()/send() on the serving socket
    client.close()

    # close server
    server.close()
@unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT')
def test_create_server_reuse_port(self):
    """SO_REUSEPORT is off by default and on when reuse_port=True."""
    # Phase 1: default — option must NOT be set on the listening socket.
    proto = MyProto(self.loop)
    f = self.loop.create_server(
        lambda: proto, '0.0.0.0', 0)
    server = self.loop.run_until_complete(f)
    self.assertEqual(len(server.sockets), 1)
    sock = server.sockets[0]
    self.assertFalse(
        sock.getsockopt(
            socket.SOL_SOCKET, socket.SO_REUSEPORT))
    server.close()

    # let the loop process the close before rebinding
    test_utils.run_briefly(self.loop)

    # Phase 2: reuse_port=True — option must be set.
    proto = MyProto(self.loop)
    f = self.loop.create_server(
        lambda: proto, '0.0.0.0', 0, reuse_port=True)
    server = self.loop.run_until_complete(f)
    self.assertEqual(len(server.sockets), 1)
    sock = server.sockets[0]
    self.assertTrue(
        sock.getsockopt(
            socket.SOL_SOCKET, socket.SO_REUSEPORT))
    server.close()
def _make_unix_server(self, factory, **kwargs):
    """Create a UNIX-domain server on a fresh socket path.

    Returns (server, path); the path file is removed at test cleanup.
    """
    path = test_utils.gen_unix_socket_path()

    def _remove_socket_file():
        if os.path.exists(path):
            os.unlink(path)

    self.addCleanup(_remove_socket_file)
    server = self.loop.run_until_complete(
        self.loop.create_unix_server(factory, path, **kwargs))
    return server, path
@support.skip_unless_bind_unix_socket
def test_create_unix_server(self):
    """End-to-end UNIX-domain server: connect, send, observe close."""
    proto = MyProto(loop=self.loop)
    server, path = self._make_unix_server(lambda: proto)
    self.assertEqual(len(server.sockets), 1)
    client = socket.socket(socket.AF_UNIX)
    client.connect(path)
    client.sendall(b'xxx')

    self.loop.run_until_complete(proto.connected)
    self.assertEqual('CONNECTED', proto.state)
    test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
    self.assertEqual(3, proto.nbytes)

    # close connection
    proto.transport.close()
    self.loop.run_until_complete(proto.done)

    self.assertEqual('CLOSED', proto.state)

    # the client socket must be closed after to avoid ECONNRESET upon
    # recv()/send() on the serving socket
    client.close()

    # close server
    server.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_path_socket_error(self):
    """Passing both path and sock to create_unix_server() is an error."""
    proto = MyProto(loop=self.loop)
    listener = socket.socket()
    with listener:
        fut = self.loop.create_unix_server(
            lambda: proto, '/test', sock=listener)
        expected_msg = ('path and sock can not be specified '
                        'at the same time')
        with self.assertRaisesRegex(ValueError, expected_msg):
            self.loop.run_until_complete(fut)
def _create_ssl_context(self, certfile, keyfile=None):
sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.load_cert_chain(certfile, keyfile)
return sslcontext
def _make_ssl_server(self, factory, certfile, keyfile=None):
    """Start a TLS server on 127.0.0.1; return (server, host, port)."""
    sslcontext = self._create_ssl_context(certfile, keyfile)
    server = self.loop.run_until_complete(
        self.loop.create_server(factory, '127.0.0.1', 0, ssl=sslcontext))
    listening_sock = server.sockets[0]
    host, port = listening_sock.getsockname()
    self.assertEqual(host, '127.0.0.1')
    return server, host, port
def _make_ssl_unix_server(self, factory, certfile, keyfile=None):
    """Start a TLS UNIX-domain server; return (server, path)."""
    sslcontext = self._create_ssl_context(certfile, keyfile)
    return self._make_unix_server(factory, ssl=sslcontext)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl(self):
    """TLS server accepts a TLS client and exchanges data."""
    proto = MyProto(loop=self.loop)
    server, host, port = self._make_ssl_server(
        lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)

    f_c = self.loop.create_connection(MyBaseProto, host, port,
                                      ssl=test_utils.dummy_ssl_context())
    client, pr = self.loop.run_until_complete(f_c)

    client.write(b'xxx')
    self.loop.run_until_complete(proto.connected)
    self.assertEqual('CONNECTED', proto.state)

    test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
    self.assertEqual(3, proto.nbytes)

    # extra info is available
    self.check_ssl_extra_info(client, peername=(host, port))

    # close connection
    proto.transport.close()
    self.loop.run_until_complete(proto.done)
    self.assertEqual('CLOSED', proto.state)

    # the client socket must be closed after to avoid ECONNRESET upon
    # recv()/send() on the serving socket
    client.close()

    # stop serving
    server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl(self):
    """TLS over a UNIX-domain socket works end to end."""
    proto = MyProto(loop=self.loop)
    server, path = self._make_ssl_unix_server(
        lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)

    f_c = self.loop.create_unix_connection(
        MyBaseProto, path, ssl=test_utils.dummy_ssl_context(),
        server_hostname='')

    client, pr = self.loop.run_until_complete(f_c)

    client.write(b'xxx')
    self.loop.run_until_complete(proto.connected)
    self.assertEqual('CONNECTED', proto.state)
    test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
    self.assertEqual(3, proto.nbytes)

    # close connection
    proto.transport.close()
    self.loop.run_until_complete(proto.done)
    self.assertEqual('CLOSED', proto.state)

    # the client socket must be closed after to avoid ECONNRESET upon
    # recv()/send() on the serving socket
    client.close()

    # stop serving
    server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verify_failed(self):
    """Client with CERT_REQUIRED but no CA must fail the handshake."""
    proto = MyProto(loop=self.loop)
    server, host, port = self._make_ssl_server(
        lambda: proto, test_utils.SIGNED_CERTFILE)

    sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    sslcontext_client.options |= ssl.OP_NO_SSLv2
    sslcontext_client.verify_mode = ssl.CERT_REQUIRED
    if hasattr(sslcontext_client, 'check_hostname'):
        sslcontext_client.check_hostname = True

    # no CA loaded
    f_c = self.loop.create_connection(MyProto, host, port,
                                      ssl=sslcontext_client)
    with mock.patch.object(self.loop, 'call_exception_handler'):
        with test_utils.disable_logger():
            with self.assertRaisesRegex(ssl.SSLError,
                                        '(?i)certificate.verify.failed'):
                self.loop.run_until_complete(f_c)

        # execute the loop to log the connection error
        test_utils.run_briefly(self.loop)

    # close connection
    self.assertIsNone(proto.transport)
    server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl_verify_failed(self):
    """UNIX-socket variant: CERT_REQUIRED with no CA fails the handshake."""
    proto = MyProto(loop=self.loop)
    server, path = self._make_ssl_unix_server(
        lambda: proto, test_utils.SIGNED_CERTFILE)

    sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    sslcontext_client.options |= ssl.OP_NO_SSLv2
    sslcontext_client.verify_mode = ssl.CERT_REQUIRED
    if hasattr(sslcontext_client, 'check_hostname'):
        sslcontext_client.check_hostname = True

    # no CA loaded
    f_c = self.loop.create_unix_connection(MyProto, path,
                                           ssl=sslcontext_client,
                                           server_hostname='invalid')
    with mock.patch.object(self.loop, 'call_exception_handler'):
        with test_utils.disable_logger():
            with self.assertRaisesRegex(ssl.SSLError,
                                        '(?i)certificate.verify.failed'):
                self.loop.run_until_complete(f_c)

        # execute the loop to log the connection error
        test_utils.run_briefly(self.loop)

    # close connection
    self.assertIsNone(proto.transport)
    server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_match_failed(self):
    """Hostname check: cert not valid for 127.0.0.1 must abort handshake."""
    proto = MyProto(loop=self.loop)
    server, host, port = self._make_ssl_server(
        lambda: proto, test_utils.SIGNED_CERTFILE)

    sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    sslcontext_client.options |= ssl.OP_NO_SSLv2
    sslcontext_client.verify_mode = ssl.CERT_REQUIRED
    sslcontext_client.load_verify_locations(
        cafile=test_utils.SIGNING_CA)
    if hasattr(sslcontext_client, 'check_hostname'):
        sslcontext_client.check_hostname = True

    # incorrect server_hostname
    f_c = self.loop.create_connection(MyProto, host, port,
                                      ssl=sslcontext_client)
    with mock.patch.object(self.loop, 'call_exception_handler'):
        with test_utils.disable_logger():
            with self.assertRaisesRegex(
                    ssl.CertificateError,
                    "IP address mismatch, certificate is not valid for "
                    "'127.0.0.1'"):
                self.loop.run_until_complete(f_c)

    # close connection
    # transport is None because TLS ALERT aborted the handshake
    self.assertIsNone(proto.transport)
    server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl_verified(self):
    """UNIX-socket TLS succeeds with the right CA and server hostname."""
    proto = MyProto(loop=self.loop)
    server, path = self._make_ssl_unix_server(
        lambda: proto, test_utils.SIGNED_CERTFILE)

    sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    sslcontext_client.options |= ssl.OP_NO_SSLv2
    sslcontext_client.verify_mode = ssl.CERT_REQUIRED
    sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
    if hasattr(sslcontext_client, 'check_hostname'):
        sslcontext_client.check_hostname = True

    # Connection succeeds with correct CA and server hostname.
    f_c = self.loop.create_unix_connection(MyProto, path,
                                           ssl=sslcontext_client,
                                           server_hostname='localhost')
    client, pr = self.loop.run_until_complete(f_c)

    # close connection
    proto.transport.close()
    client.close()
    server.close()
    self.loop.run_until_complete(proto.done)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verified(self):
    """TCP TLS succeeds with the right CA and server hostname."""
    proto = MyProto(loop=self.loop)
    server, host, port = self._make_ssl_server(
        lambda: proto, test_utils.SIGNED_CERTFILE)

    sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    sslcontext_client.options |= ssl.OP_NO_SSLv2
    sslcontext_client.verify_mode = ssl.CERT_REQUIRED
    sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
    if hasattr(sslcontext_client, 'check_hostname'):
        sslcontext_client.check_hostname = True

    # Connection succeeds with correct CA and server hostname.
    f_c = self.loop.create_connection(MyProto, host, port,
                                      ssl=sslcontext_client,
                                      server_hostname='localhost')
    client, pr = self.loop.run_until_complete(f_c)

    # extra info is available
    self.check_ssl_extra_info(client, peername=(host, port),
                              peercert=test_utils.PEERCERT)

    # close connection
    proto.transport.close()
    client.close()
    server.close()
    self.loop.run_until_complete(proto.done)
def test_create_server_sock(self):
    """create_server() should use a pre-made listening socket as-is."""
    proto = asyncio.Future(loop=self.loop)

    class TestMyProto(MyProto):
        def connection_made(self, transport):
            super().connection_made(transport)
            proto.set_result(self)

    sock_ob = socket.socket(type=socket.SOCK_STREAM)
    sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock_ob.bind(('0.0.0.0', 0))

    f = self.loop.create_server(TestMyProto, sock=sock_ob)
    server = self.loop.run_until_complete(f)
    sock = server.sockets[0]
    # the server must serve on the exact socket object we handed in
    self.assertIs(sock, sock_ob)

    host, port = sock.getsockname()
    self.assertEqual(host, '0.0.0.0')
    client = socket.socket()
    client.connect(('127.0.0.1', port))
    client.send(b'xxx')
    client.close()
    server.close()
def test_create_server_addr_in_use(self):
    """A second server on an occupied address must fail with EADDRINUSE."""
    first_sock = socket.socket(type=socket.SOCK_STREAM)
    first_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    first_sock.bind(('0.0.0.0', 0))

    server = self.loop.run_until_complete(
        self.loop.create_server(MyProto, sock=first_sock))
    host, port = server.sockets[0].getsockname()

    retry = self.loop.create_server(MyProto, host=host, port=port)
    with self.assertRaises(OSError) as cm:
        self.loop.run_until_complete(retry)
    self.assertEqual(cm.exception.errno, errno.EADDRINUSE)

    server.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_server_dual_stack(self):
    """host=None must serve on both IPv4 and IPv6 for the same port."""
    f_proto = asyncio.Future(loop=self.loop)

    class TestMyProto(MyProto):
        def connection_made(self, transport):
            super().connection_made(transport)
            f_proto.set_result(self)

    try_count = 0
    # find_unused_port is racy; retry up to 5 times on EADDRINUSE
    while True:
        try:
            port = support.find_unused_port()
            f = self.loop.create_server(TestMyProto, host=None, port=port)
            server = self.loop.run_until_complete(f)
        except OSError as ex:
            if ex.errno == errno.EADDRINUSE:
                try_count += 1
                self.assertGreaterEqual(5, try_count)
                continue
            else:
                raise
        else:
            break

    # IPv4 client
    client = socket.socket()
    client.connect(('127.0.0.1', port))
    client.send(b'xxx')
    proto = self.loop.run_until_complete(f_proto)
    proto.transport.close()
    client.close()

    # IPv6 client, same port
    f_proto = asyncio.Future(loop=self.loop)
    client = socket.socket(socket.AF_INET6)
    client.connect(('::1', port))
    client.send(b'xxx')
    proto = self.loop.run_until_complete(f_proto)
    proto.transport.close()
    client.close()

    server.close()
def test_server_close(self):
    """After server.close(), new connections must be refused."""
    f = self.loop.create_server(MyProto, '0.0.0.0', 0)
    server = self.loop.run_until_complete(f)
    sock = server.sockets[0]
    host, port = sock.getsockname()

    # first connection succeeds while the server is alive
    client = socket.socket()
    client.connect(('127.0.0.1', port))
    client.send(b'xxx')
    client.close()

    server.close()

    client = socket.socket()
    self.assertRaises(
        ConnectionRefusedError, client.connect, ('127.0.0.1', port))
    client.close()
def test_create_datagram_endpoint(self):
    """UDP round-trip: server echoes back b'resp:' + payload."""

    class TestMyDatagramProto(MyDatagramProto):
        def __init__(inner_self):
            # 'inner_self' so the outer 'self' (the test case) stays visible
            super().__init__(loop=self.loop)

        def datagram_received(self, data, addr):
            super().datagram_received(data, addr)
            self.transport.sendto(b'resp:' + data, addr)

    coro = self.loop.create_datagram_endpoint(
        TestMyDatagramProto, local_addr=('127.0.0.1', 0))
    s_transport, server = self.loop.run_until_complete(coro)
    host, port = s_transport.get_extra_info('sockname')

    self.assertIsInstance(s_transport, asyncio.Transport)
    self.assertIsInstance(server, TestMyDatagramProto)
    self.assertEqual('INITIALIZED', server.state)
    self.assertIs(server.transport, s_transport)

    coro = self.loop.create_datagram_endpoint(
        lambda: MyDatagramProto(loop=self.loop),
        remote_addr=(host, port))
    transport, client = self.loop.run_until_complete(coro)

    self.assertIsInstance(transport, asyncio.Transport)
    self.assertIsInstance(client, MyDatagramProto)
    self.assertEqual('INITIALIZED', client.state)
    self.assertIs(client.transport, transport)

    transport.sendto(b'xxx')
    test_utils.run_until(self.loop, lambda: server.nbytes)
    self.assertEqual(3, server.nbytes)
    test_utils.run_until(self.loop, lambda: client.nbytes)

    # received: len(b'resp:xxx') == 8
    self.assertEqual(8, client.nbytes)

    # extra info is available
    self.assertIsNotNone(transport.get_extra_info('sockname'))

    # close connection
    transport.close()
    self.loop.run_until_complete(client.done)
    self.assertEqual('CLOSED', client.state)
    server.transport.close()
def test_create_datagram_endpoint_sock(self):
    """create_datagram_endpoint() should accept a pre-made, bound socket."""
    if (sys.platform == 'win32' and
            isinstance(self.loop, proactor_events.BaseProactorEventLoop)):
        raise unittest.SkipTest(
            'UDP is not supported with proactor event loops')

    sock = None
    local_address = ('127.0.0.1', 0)
    infos = self.loop.run_until_complete(
        self.loop.getaddrinfo(
            *local_address, type=socket.SOCK_DGRAM))
    for family, type, proto, cname, address in infos:
        try:
            sock = socket.socket(family=family, type=type, proto=proto)
            sock.setblocking(False)
            sock.bind(address)
        except OSError:
            # was a bare "except:": socket failures are OSError, and a
            # bare except would also swallow KeyboardInterrupt etc.
            pass
        else:
            break
    else:
        # self.fail instead of "assert False": assert is stripped under -O
        self.fail('Can not create socket.')

    f = self.loop.create_datagram_endpoint(
        lambda: MyDatagramProto(loop=self.loop), sock=sock)
    tr, pr = self.loop.run_until_complete(f)
    self.assertIsInstance(tr, asyncio.Transport)
    self.assertIsInstance(pr, MyDatagramProto)
    tr.close()
    self.loop.run_until_complete(pr.done)
def test_internal_fds(self):
    """The selector loop's self-pipe fds must be released on close()."""
    loop = self.create_event_loop()
    if not isinstance(loop, selector_events.BaseSelectorEventLoop):
        loop.close()
        self.skipTest('loop is not a BaseSelectorEventLoop')

    # one internal fd pair (self-pipe) while the loop is open
    self.assertEqual(1, loop._internal_fds)
    loop.close()
    self.assertEqual(0, loop._internal_fds)
    self.assertIsNone(loop._csock)
    self.assertIsNone(loop._ssock)
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
def test_read_pipe(self):
    """connect_read_pipe(): bytes written to the pipe reach the protocol."""
    proto = MyReadPipeProto(loop=self.loop)

    rpipe, wpipe = os.pipe()
    pipeobj = io.open(rpipe, 'rb', 1024)

    async def connect():
        t, p = await self.loop.connect_read_pipe(
            lambda: proto, pipeobj)
        self.assertIs(p, proto)
        self.assertIs(t, proto.transport)
        self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
        self.assertEqual(0, proto.nbytes)

    self.loop.run_until_complete(connect())

    os.write(wpipe, b'1')
    test_utils.run_until(self.loop, lambda: proto.nbytes >= 1)
    self.assertEqual(1, proto.nbytes)

    os.write(wpipe, b'2345')
    test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
    self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
    self.assertEqual(5, proto.nbytes)

    # closing the write end produces EOF and then CLOSED on the reader
    os.close(wpipe)
    self.loop.run_until_complete(proto.done)
    self.assertEqual(
        ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
    # extra info is available
    self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
def test_unclosed_pipe_transport(self):
    # This test reproduces the issue #314 on GitHub
    loop = self.create_event_loop()
    read_proto = MyReadPipeProto(loop=loop)
    write_proto = MyWritePipeProto(loop=loop)

    rpipe, wpipe = os.pipe()
    rpipeobj = io.open(rpipe, 'rb', 1024)
    wpipeobj = io.open(wpipe, 'w', 1024)

    async def connect():
        read_transport, _ = await loop.connect_read_pipe(
            lambda: read_proto, rpipeobj)
        write_transport, _ = await loop.connect_write_pipe(
            lambda: write_proto, wpipeobj)
        return read_transport, write_transport

    # Run and close the loop without closing the transports
    read_transport, write_transport = loop.run_until_complete(connect())
    loop.close()

    # These 'repr' calls used to raise an AttributeError
    # See Issue #314 on GitHub
    self.assertIn('open', repr(read_transport))
    self.assertIn('open', repr(write_transport))

    # Clean up (avoid ResourceWarning)
    rpipeobj.close()
    wpipeobj.close()
    read_transport._pipe = None
    write_transport._pipe = None
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
@unittest.skipIf(sys.platform == 'darwin', 'test hangs on MacOS')
def test_read_pty_output(self):
    """connect_read_pipe() over a PTY master: slave writes reach the protocol."""
    proto = MyReadPipeProto(loop=self.loop)

    master, slave = os.openpty()
    master_read_obj = io.open(master, 'rb', 0)

    async def connect():
        t, p = await self.loop.connect_read_pipe(lambda: proto,
                                                 master_read_obj)
        self.assertIs(p, proto)
        self.assertIs(t, proto.transport)
        self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
        self.assertEqual(0, proto.nbytes)

    self.loop.run_until_complete(connect())

    os.write(slave, b'1')
    test_utils.run_until(self.loop, lambda: proto.nbytes)
    self.assertEqual(1, proto.nbytes)

    os.write(slave, b'2345')
    test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
    self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
    self.assertEqual(5, proto.nbytes)

    # closing the slave side produces EOF on the master
    os.close(slave)
    self.loop.run_until_complete(proto.done)
    self.assertEqual(
        ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
    # extra info is available
    self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
def test_write_pipe(self):
    """connect_write_pipe(): transport writes appear on the read end."""
    rpipe, wpipe = os.pipe()
    pipeobj = io.open(wpipe, 'wb', 1024)

    proto = MyWritePipeProto(loop=self.loop)
    connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
    transport, p = self.loop.run_until_complete(connect)
    self.assertIs(p, proto)
    self.assertIs(transport, proto.transport)
    self.assertEqual('CONNECTED', proto.state)

    transport.write(b'1')

    data = bytearray()

    def reader(data):
        # drain the read end; returns total bytes accumulated so far
        chunk = os.read(rpipe, 1024)
        data += chunk
        return len(data)

    test_utils.run_until(self.loop, lambda: reader(data) >= 1)
    self.assertEqual(b'1', data)

    transport.write(b'2345')
    test_utils.run_until(self.loop, lambda: reader(data) >= 5)
    self.assertEqual(b'12345', data)
    self.assertEqual('CONNECTED', proto.state)

    os.close(rpipe)

    # extra info is available
    self.assertIsNotNone(proto.transport.get_extra_info('pipe'))

    # close connection
    proto.transport.close()
    self.loop.run_until_complete(proto.done)
    self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
def test_write_pipe_disconnect_on_close(self):
    """Closing the read end must disconnect the write-pipe protocol."""
    rsock, wsock = socket.socketpair()
    rsock.setblocking(False)
    pipeobj = io.open(wsock.detach(), 'wb', 1024)

    proto = MyWritePipeProto(loop=self.loop)
    connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
    transport, p = self.loop.run_until_complete(connect)
    self.assertIs(p, proto)
    self.assertIs(transport, proto.transport)
    self.assertEqual('CONNECTED', proto.state)

    transport.write(b'1')
    data = self.loop.run_until_complete(self.loop.sock_recv(rsock, 1024))
    self.assertEqual(b'1', data)

    # closing the reader triggers connection_lost on the writer side
    rsock.close()

    self.loop.run_until_complete(proto.done)
    self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_write_pty(self):
    """connect_write_pipe() over a PTY slave: writes appear on the master."""
    master, slave = os.openpty()
    slave_write_obj = io.open(slave, 'wb', 0)

    proto = MyWritePipeProto(loop=self.loop)
    connect = self.loop.connect_write_pipe(lambda: proto, slave_write_obj)
    transport, p = self.loop.run_until_complete(connect)
    self.assertIs(p, proto)
    self.assertIs(transport, proto.transport)
    self.assertEqual('CONNECTED', proto.state)

    transport.write(b'1')

    data = bytearray()

    def reader(data):
        # drain the PTY master; returns total bytes accumulated so far
        chunk = os.read(master, 1024)
        data += chunk
        return len(data)

    test_utils.run_until(self.loop, lambda: reader(data) >= 1,
                         timeout=10)
    self.assertEqual(b'1', data)

    transport.write(b'2345')
    test_utils.run_until(self.loop, lambda: reader(data) >= 5,
                         timeout=10)
    self.assertEqual(b'12345', data)
    self.assertEqual('CONNECTED', proto.state)

    os.close(master)

    # extra info is available
    self.assertIsNotNone(proto.transport.get_extra_info('pipe'))

    # close connection
    proto.transport.close()
    self.loop.run_until_complete(proto.done)
    self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
                     "Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_bidirectional_pty(self):
    """Read and write transports on the same PTY operate independently."""
    master, read_slave = os.openpty()
    write_slave = os.dup(read_slave)
    tty.setraw(read_slave)  # raw mode: no echo/line-editing on the slave

    slave_read_obj = io.open(read_slave, 'rb', 0)
    read_proto = MyReadPipeProto(loop=self.loop)
    read_connect = self.loop.connect_read_pipe(lambda: read_proto,
                                               slave_read_obj)
    read_transport, p = self.loop.run_until_complete(read_connect)
    self.assertIs(p, read_proto)
    self.assertIs(read_transport, read_proto.transport)
    self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
    self.assertEqual(0, read_proto.nbytes)

    slave_write_obj = io.open(write_slave, 'wb', 0)
    write_proto = MyWritePipeProto(loop=self.loop)
    write_connect = self.loop.connect_write_pipe(lambda: write_proto,
                                                 slave_write_obj)
    write_transport, p = self.loop.run_until_complete(write_connect)
    self.assertIs(p, write_proto)
    self.assertIs(write_transport, write_proto.transport)
    self.assertEqual('CONNECTED', write_proto.state)

    data = bytearray()

    def reader(data):
        # drain the master side; returns total bytes accumulated so far
        chunk = os.read(master, 1024)
        data += chunk
        return len(data)

    # slave -> master direction
    write_transport.write(b'1')
    test_utils.run_until(self.loop, lambda: reader(data) >= 1, timeout=10)
    self.assertEqual(b'1', data)
    self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
    self.assertEqual('CONNECTED', write_proto.state)

    # master -> slave direction
    os.write(master, b'a')
    test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 1,
                         timeout=10)
    self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
    self.assertEqual(1, read_proto.nbytes)
    self.assertEqual('CONNECTED', write_proto.state)

    write_transport.write(b'2345')
    test_utils.run_until(self.loop, lambda: reader(data) >= 5, timeout=10)
    self.assertEqual(b'12345', data)
    self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
    self.assertEqual('CONNECTED', write_proto.state)

    os.write(master, b'bcde')
    test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 5,
                         timeout=10)
    self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
    self.assertEqual(5, read_proto.nbytes)
    self.assertEqual('CONNECTED', write_proto.state)

    os.close(master)

    read_transport.close()
    self.loop.run_until_complete(read_proto.done)
    self.assertEqual(
        ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], read_proto.state)

    write_transport.close()
    self.loop.run_until_complete(write_proto.done)
    self.assertEqual('CLOSED', write_proto.state)
def test_prompt_cancellation(self):
    """Cancelling a pending sock_recv() must take effect promptly."""
    r, w = socket.socketpair()
    r.setblocking(False)
    f = self.loop.create_task(self.loop.sock_recv(r, 1))
    # 'ov' is a proactor-only overlapped object; None on selector loops
    ov = getattr(f, 'ov', None)
    if ov is not None:
        self.assertTrue(ov.pending)

    async def main():
        try:
            self.loop.call_soon(f.cancel)
            await f
        except asyncio.CancelledError:
            res = 'cancelled'
        else:
            res = None
        finally:
            self.loop.stop()
        return res

    start = time.monotonic()
    t = asyncio.Task(main(), loop=self.loop)
    self.loop.run_forever()
    elapsed = time.monotonic() - start

    # "promptly": cancellation must not wait for the recv to complete
    self.assertLess(elapsed, 0.1)
    self.assertEqual(t.result(), 'cancelled')
    self.assertRaises(asyncio.CancelledError, f.result)
    if ov is not None:
        self.assertFalse(ov.pending)
    self.loop._stop_serving(r)

    r.close()
    w.close()
def test_timeout_rounding(self):
    """Tiny sleep timeouts must not cause excessive _run_once iterations."""

    def _run_once():
        # count loop iterations by wrapping the real _run_once
        self.loop._run_once_counter += 1
        orig_run_once()

    orig_run_once = self.loop._run_once
    self.loop._run_once_counter = 0
    self.loop._run_once = _run_once

    async def wait():
        loop = self.loop
        await asyncio.sleep(1e-2, loop=loop)
        await asyncio.sleep(1e-4, loop=loop)
        await asyncio.sleep(1e-6, loop=loop)
        await asyncio.sleep(1e-8, loop=loop)
        await asyncio.sleep(1e-10, loop=loop)

    self.loop.run_until_complete(wait())
    # The ideal number of call is 12, but on some platforms, the selector
    # may sleep at little bit less than timeout depending on the resolution
    # of the clock used by the kernel. Tolerate a few useless calls on
    # these platforms.
    self.assertLessEqual(self.loop._run_once_counter, 20,
                         {'clock_resolution': self.loop._clock_resolution,
                          'selector': self.loop._selector.__class__.__name__})
def test_remove_fds_after_closing(self):
    """remove_reader/remove_writer on a closed loop must return False."""
    loop = self.create_event_loop()
    noop = lambda: None
    r, w = socket.socketpair()
    self.addCleanup(r.close)
    self.addCleanup(w.close)
    loop.add_reader(r, noop)
    loop.add_writer(w, noop)
    loop.close()
    self.assertFalse(loop.remove_reader(r))
    self.assertFalse(loop.remove_writer(w))
def test_add_fds_after_closing(self):
    """add_reader/add_writer on a closed loop must raise RuntimeError."""
    loop = self.create_event_loop()
    noop = lambda: None
    r, w = socket.socketpair()
    self.addCleanup(r.close)
    self.addCleanup(w.close)
    loop.close()
    with self.assertRaises(RuntimeError):
        loop.add_reader(r, noop)
    with self.assertRaises(RuntimeError):
        loop.add_writer(w, noop)
def test_close_running_event_loop(self):
    """Closing the loop from inside a running coroutine must be rejected."""
    # Was an @asyncio.coroutine generator (deprecated); converted to
    # async def, which the rest of this file already uses.
    async def close_loop(loop):
        self.loop.close()

    coro = close_loop(self.loop)
    with self.assertRaises(RuntimeError):
        self.loop.run_until_complete(coro)
def test_close(self):
    """After close(), every scheduling entry point must raise RuntimeError."""
    self.loop.close()

    # Was an @asyncio.coroutine generator (deprecated); converted to
    # async def, which the rest of this file already uses.
    async def test():
        pass

    func = lambda: False
    coro = test()
    self.addCleanup(coro.close)

    # operation blocked when the loop is closed
    with self.assertRaises(RuntimeError):
        self.loop.run_forever()
    with self.assertRaises(RuntimeError):
        fut = asyncio.Future(loop=self.loop)
        self.loop.run_until_complete(fut)
    with self.assertRaises(RuntimeError):
        self.loop.call_soon(func)
    with self.assertRaises(RuntimeError):
        self.loop.call_soon_threadsafe(func)
    with self.assertRaises(RuntimeError):
        self.loop.call_later(1.0, func)
    with self.assertRaises(RuntimeError):
        self.loop.call_at(self.loop.time() + .0, func)
    with self.assertRaises(RuntimeError):
        self.loop.create_task(coro)
    with self.assertRaises(RuntimeError):
        self.loop.add_signal_handler(signal.SIGTERM, func)

    # run_in_executor test is tricky: the method is a coroutine,
    # but run_until_complete cannot be called on closed loop.
    # Thus iterate once explicitly.
    with self.assertRaises(RuntimeError):
        it = self.loop.run_in_executor(None, func).__await__()
        next(it)
class SubprocessTestsMixin:
def check_terminated(self, returncode):
    """Assert that *returncode* reflects termination by SIGTERM."""
    if sys.platform != 'win32':
        self.assertEqual(-signal.SIGTERM, returncode)
    else:
        # expect 1 but sometimes get 0
        self.assertIsInstance(returncode, int)
def check_killed(self, returncode):
    """Assert that *returncode* reflects termination by SIGKILL."""
    if sys.platform != 'win32':
        self.assertEqual(-signal.SIGKILL, returncode)
    else:
        # expect 1 but sometimes get 0
        self.assertIsInstance(returncode, int)
def test_subprocess_exec(self):
    """subprocess_exec(): stdin data is echoed back by the child."""
    prog = os.path.join(os.path.dirname(__file__), 'echo.py')

    connect = self.loop.subprocess_exec(
        functools.partial(MySubprocessProtocol, self.loop),
        sys.executable, prog)

    transp, proto = self.loop.run_until_complete(connect)
    self.assertIsInstance(proto, MySubprocessProtocol)
    self.loop.run_until_complete(proto.connected)
    self.assertEqual('CONNECTED', proto.state)

    stdin = transp.get_pipe_transport(0)
    stdin.write(b'Python The Winner')
    self.loop.run_until_complete(proto.got_data[1].wait())
    with test_utils.disable_logger():
        transp.close()
    self.loop.run_until_complete(proto.completed)
    # close() kills the still-running child
    self.check_killed(proto.returncode)
    self.assertEqual(b'Python The Winner', proto.data[1])
def test_subprocess_interactive(self):
    """subprocess_exec(): multiple stdin writes accumulate on stdout."""
    prog = os.path.join(os.path.dirname(__file__), 'echo.py')

    connect = self.loop.subprocess_exec(
        functools.partial(MySubprocessProtocol, self.loop),
        sys.executable, prog)

    transp, proto = self.loop.run_until_complete(connect)
    self.assertIsInstance(proto, MySubprocessProtocol)
    self.loop.run_until_complete(proto.connected)
    self.assertEqual('CONNECTED', proto.state)

    stdin = transp.get_pipe_transport(0)
    stdin.write(b'Python ')
    self.loop.run_until_complete(proto.got_data[1].wait())
    proto.got_data[1].clear()
    self.assertEqual(b'Python ', proto.data[1])

    stdin.write(b'The Winner')
    self.loop.run_until_complete(proto.got_data[1].wait())
    self.assertEqual(b'Python The Winner', proto.data[1])

    with test_utils.disable_logger():
        transp.close()
    self.loop.run_until_complete(proto.completed)
    # close() kills the still-running child
    self.check_killed(proto.returncode)
def test_subprocess_shell(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'echo Python')
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.get_pipe_transport(0).close()
self.loop.run_until_complete(proto.completed)
self.assertEqual(0, proto.returncode)
self.assertTrue(all(f.done() for f in proto.disconnects.values()))
self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
self.assertEqual(proto.data[2], b'')
transp.close()
def test_subprocess_exitcode(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
transp.close()
def test_subprocess_close_after_finish(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.assertIsNone(transp.get_pipe_transport(0))
self.assertIsNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
self.assertIsNone(transp.close())
def test_subprocess_kill(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.kill()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
transp.close()
def test_subprocess_terminate(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.terminate()
self.loop.run_until_complete(proto.completed)
self.check_terminated(proto.returncode)
transp.close()
@unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
def test_subprocess_send_signal(self):
# bpo-31034: Make sure that we get the default signal handler (killing
# the process). The parent process may have decided to ignore SIGHUP,
# and signal handlers are inherited.
old_handler = signal.signal(signal.SIGHUP, signal.SIG_DFL)
try:
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.send_signal(signal.SIGHUP)
self.loop.run_until_complete(proto.completed)
self.assertEqual(-signal.SIGHUP, proto.returncode)
transp.close()
finally:
signal.signal(signal.SIGHUP, old_handler)
def test_subprocess_stderr(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
transp.close()
self.assertEqual(b'OUT:test', proto.data[1])
self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2])
self.assertEqual(0, proto.returncode)
def test_subprocess_stderr_redirect_to_stdout(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog, stderr=subprocess.STDOUT)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
self.assertIsNotNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'),
proto.data[1])
self.assertEqual(b'', proto.data[2])
transp.close()
self.assertEqual(0, proto.returncode)
def test_subprocess_close_client_stream(self):
prog = os.path.join(os.path.dirname(__file__), 'echo3.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdout = transp.get_pipe_transport(1)
stdin.write(b'test')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'OUT:test', proto.data[1])
stdout.close()
self.loop.run_until_complete(proto.disconnects[1])
stdin.write(b'xxx')
self.loop.run_until_complete(proto.got_data[2].wait())
if sys.platform != 'win32':
self.assertEqual(b'ERR:BrokenPipeError', proto.data[2])
else:
# After closing the read-end of a pipe, writing to the
# write-end using os.write() fails with errno==EINVAL and
# GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using
# WriteFile() we get ERROR_BROKEN_PIPE as expected.)
self.assertEqual(b'ERR:OSError', proto.data[2])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_wait_no_same_group(self):
# start the new process in a new session
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None,
start_new_session=True)
_, proto = yield self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
def test_subprocess_exec_invalid_args(self):
async def connect(**kwds):
await self.loop.subprocess_exec(
asyncio.SubprocessProtocol,
'pwd', **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=True))
def test_subprocess_shell_invalid_args(self):
async def connect(cmd=None, **kwds):
if not cmd:
cmd = 'pwd'
await self.loop.subprocess_shell(
asyncio.SubprocessProtocol,
cmd, **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(['ls', '-l']))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=False))
class MySendfileProto(MyBaseProto):
    """Protocol that accumulates received bytes and, once an optional
    byte threshold is reached, closes its own transport."""

    def __init__(self, loop=None, close_after=0):
        super().__init__(loop)
        self.data = bytearray()
        self.close_after = close_after

    def data_received(self, data):
        self.data += data
        super().data_received(data)
        threshold = self.close_after
        if threshold and self.nbytes >= threshold:
            self.transport.close()
class SendfileMixin:
    """Tests for loop.sendfile(), mixed into the per-event-loop TestCases.

    Each test builds a real localhost TCP connection (optionally wrapped
    in SSL) with deliberately small socket buffers, then sends a shared
    data file through it and checks what the server side received.
    """

    # Note: sendfile via SSL transport is equal to sendfile fallback

    DATA = b"12345abcde" * 160 * 1024  # 10 B * 160 * 1024 = 1600 KiB payload

    @classmethod
    def setUpClass(cls):
        # Write the shared payload to a temp file once for the whole class.
        with open(support.TESTFN, 'wb') as fp:
            fp.write(cls.DATA)
        super().setUpClass()

    @classmethod
    def tearDownClass(cls):
        support.unlink(support.TESTFN)
        super().tearDownClass()

    def setUp(self):
        # Each test gets its own read handle so file position starts at 0.
        self.file = open(support.TESTFN, 'rb')
        self.addCleanup(self.file.close)
        super().setUp()

    def run_loop(self, coro):
        """Run *coro* to completion on the test's event loop."""
        return self.loop.run_until_complete(coro)

    def prepare(self, *, is_ssl=False, close_after=0):
        """Set up a connected server/client pair and return their protocols.

        close_after > 0 makes the server close its transport after that
        many bytes (see MySendfileProto); is_ssl wraps both ends in TLS.
        Cleanup of transports and the server is registered automatically.
        """
        port = support.find_unused_port()
        srv_proto = MySendfileProto(loop=self.loop, close_after=close_after)
        if is_ssl:
            if not ssl:
                self.skipTest("No ssl module")
            srv_ctx = test_utils.simple_server_sslcontext()
            cli_ctx = test_utils.simple_client_sslcontext()
        else:
            srv_ctx = None
            cli_ctx = None

        srv_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # reduce recv socket buffer size to test on relative small data sets
        srv_sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024)
        srv_sock.bind((support.HOST, port))
        server = self.run_loop(self.loop.create_server(
            lambda: srv_proto, sock=srv_sock, ssl=srv_ctx))
        if is_ssl:
            server_hostname = support.HOST
        else:
            server_hostname = None

        cli_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # reduce send socket buffer size to test on relative small data sets
        cli_sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024)
        cli_sock.connect((support.HOST, port))

        cli_proto = MySendfileProto(loop=self.loop)
        # Client transport/protocol; cli_proto.transport is what tests use.
        tr, pr = self.run_loop(self.loop.create_connection(
            lambda: cli_proto, sock=cli_sock,
            ssl=cli_ctx, server_hostname=server_hostname))

        def cleanup():
            srv_proto.transport.close()
            cli_proto.transport.close()
            self.run_loop(srv_proto.done)
            self.run_loop(cli_proto.done)

            server.close()
            self.run_loop(server.wait_closed())

        self.addCleanup(cleanup)

        return srv_proto, cli_proto

    @unittest.skipIf(sys.platform == 'win32', "UDP sockets are not supported")
    def test_sendfile_not_supported(self):
        # sendfile() only works on stream transports, not datagram ones.
        tr, pr = self.run_loop(
            self.loop.create_datagram_endpoint(
                lambda: MyDatagramProto(loop=self.loop),
                family=socket.AF_INET))
        try:
            with self.assertRaisesRegex(RuntimeError, "not supported"):
                self.run_loop(
                    self.loop.sendfile(tr, self.file))
            # File position must be untouched on failure.
            self.assertEqual(0, self.file.tell())
        finally:
            # don't use self.addCleanup because it produces resource warning
            tr.close()

    def test_sendfile(self):
        # Happy path: whole file is transferred and received intact.
        srv_proto, cli_proto = self.prepare()
        ret = self.run_loop(
            self.loop.sendfile(cli_proto.transport, self.file))
        cli_proto.transport.close()
        self.run_loop(srv_proto.done)
        self.assertEqual(ret, len(self.DATA))
        self.assertEqual(srv_proto.nbytes, len(self.DATA))
        self.assertEqual(srv_proto.data, self.DATA)
        self.assertEqual(self.file.tell(), len(self.DATA))

    def test_sendfile_force_fallback(self):
        # Force the pure-Python fallback path by making the "native"
        # implementation raise SendfileNotAvailableError.
        srv_proto, cli_proto = self.prepare()

        def sendfile_native(transp, file, offset, count):
            # to raise SendfileNotAvailableError
            return base_events.BaseEventLoop._sendfile_native(
                self.loop, transp, file, offset, count)

        self.loop._sendfile_native = sendfile_native

        ret = self.run_loop(
            self.loop.sendfile(cli_proto.transport, self.file))
        cli_proto.transport.close()
        self.run_loop(srv_proto.done)
        self.assertEqual(ret, len(self.DATA))
        self.assertEqual(srv_proto.nbytes, len(self.DATA))
        self.assertEqual(srv_proto.data, self.DATA)
        self.assertEqual(self.file.tell(), len(self.DATA))

    def test_sendfile_force_unsupported_native(self):
        # With fallback=False a missing native sendfile must raise and
        # transfer nothing.
        if sys.platform == 'win32':
            if isinstance(self.loop, asyncio.ProactorEventLoop):
                self.skipTest("Fails on proactor event loop")
        srv_proto, cli_proto = self.prepare()

        def sendfile_native(transp, file, offset, count):
            # to raise SendfileNotAvailableError
            return base_events.BaseEventLoop._sendfile_native(
                self.loop, transp, file, offset, count)

        self.loop._sendfile_native = sendfile_native

        with self.assertRaisesRegex(events.SendfileNotAvailableError,
                                    "not supported"):
            self.run_loop(
                self.loop.sendfile(cli_proto.transport, self.file,
                                   fallback=False))

        cli_proto.transport.close()
        self.run_loop(srv_proto.done)
        self.assertEqual(srv_proto.nbytes, 0)
        self.assertEqual(self.file.tell(), 0)

    def test_sendfile_ssl(self):
        # Same as test_sendfile but over a TLS transport (fallback path).
        srv_proto, cli_proto = self.prepare(is_ssl=True)
        ret = self.run_loop(
            self.loop.sendfile(cli_proto.transport, self.file))
        cli_proto.transport.close()
        self.run_loop(srv_proto.done)
        self.assertEqual(ret, len(self.DATA))
        self.assertEqual(srv_proto.nbytes, len(self.DATA))
        self.assertEqual(srv_proto.data, self.DATA)
        self.assertEqual(self.file.tell(), len(self.DATA))

    def test_sendfile_for_closing_transp(self):
        # sendfile() on a transport that is already closing must refuse.
        srv_proto, cli_proto = self.prepare()
        cli_proto.transport.close()
        with self.assertRaisesRegex(RuntimeError, "is closing"):
            self.run_loop(self.loop.sendfile(cli_proto.transport, self.file))
        self.run_loop(srv_proto.done)
        self.assertEqual(srv_proto.nbytes, 0)
        self.assertEqual(self.file.tell(), 0)

    def test_sendfile_pre_and_post_data(self):
        # Regular writes before and after sendfile() must stay ordered.
        srv_proto, cli_proto = self.prepare()
        PREFIX = b'zxcvbnm' * 1024
        SUFFIX = b'0987654321' * 1024
        cli_proto.transport.write(PREFIX)
        ret = self.run_loop(
            self.loop.sendfile(cli_proto.transport, self.file))
        cli_proto.transport.write(SUFFIX)
        cli_proto.transport.close()
        self.run_loop(srv_proto.done)
        self.assertEqual(ret, len(self.DATA))
        self.assertEqual(srv_proto.data, PREFIX + self.DATA + SUFFIX)
        self.assertEqual(self.file.tell(), len(self.DATA))

    def test_sendfile_ssl_pre_and_post_data(self):
        # Ordering guarantee again, over TLS.
        srv_proto, cli_proto = self.prepare(is_ssl=True)
        PREFIX = b'zxcvbnm' * 1024
        SUFFIX = b'0987654321' * 1024
        cli_proto.transport.write(PREFIX)
        ret = self.run_loop(
            self.loop.sendfile(cli_proto.transport, self.file))
        cli_proto.transport.write(SUFFIX)
        cli_proto.transport.close()
        self.run_loop(srv_proto.done)
        self.assertEqual(ret, len(self.DATA))
        self.assertEqual(srv_proto.data, PREFIX + self.DATA + SUFFIX)
        self.assertEqual(self.file.tell(), len(self.DATA))

    def test_sendfile_partial(self):
        # offset/count select a slice of the file; tell() lands past it.
        srv_proto, cli_proto = self.prepare()
        ret = self.run_loop(
            self.loop.sendfile(cli_proto.transport, self.file, 1000, 100))
        cli_proto.transport.close()
        self.run_loop(srv_proto.done)
        self.assertEqual(ret, 100)
        self.assertEqual(srv_proto.nbytes, 100)
        self.assertEqual(srv_proto.data, self.DATA[1000:1100])
        self.assertEqual(self.file.tell(), 1100)

    def test_sendfile_ssl_partial(self):
        # Partial transfer over TLS.
        srv_proto, cli_proto = self.prepare(is_ssl=True)
        ret = self.run_loop(
            self.loop.sendfile(cli_proto.transport, self.file, 1000, 100))
        cli_proto.transport.close()
        self.run_loop(srv_proto.done)
        self.assertEqual(ret, 100)
        self.assertEqual(srv_proto.nbytes, 100)
        self.assertEqual(srv_proto.data, self.DATA[1000:1100])
        self.assertEqual(self.file.tell(), 1100)

    def test_sendfile_close_peer_after_receiving(self):
        # Server closes right after the last byte: transfer still succeeds.
        srv_proto, cli_proto = self.prepare(close_after=len(self.DATA))
        ret = self.run_loop(
            self.loop.sendfile(cli_proto.transport, self.file))
        cli_proto.transport.close()
        self.run_loop(srv_proto.done)
        self.assertEqual(ret, len(self.DATA))
        self.assertEqual(srv_proto.nbytes, len(self.DATA))
        self.assertEqual(srv_proto.data, self.DATA)
        self.assertEqual(self.file.tell(), len(self.DATA))

    def test_sendfile_ssl_close_peer_after_receiving(self):
        # Same scenario over TLS; note: no explicit client close needed.
        srv_proto, cli_proto = self.prepare(is_ssl=True,
                                            close_after=len(self.DATA))
        ret = self.run_loop(
            self.loop.sendfile(cli_proto.transport, self.file))
        self.run_loop(srv_proto.done)
        self.assertEqual(ret, len(self.DATA))
        self.assertEqual(srv_proto.nbytes, len(self.DATA))
        self.assertEqual(srv_proto.data, self.DATA)
        self.assertEqual(self.file.tell(), len(self.DATA))

    def test_sendfile_close_peer_in_middle_of_receiving(self):
        # Server closes mid-transfer: sendfile must raise ConnectionError,
        # and both counters show a partial (but non-zero) transfer.
        srv_proto, cli_proto = self.prepare(close_after=1024)
        with self.assertRaises(ConnectionError):
            self.run_loop(
                self.loop.sendfile(cli_proto.transport, self.file))
        self.run_loop(srv_proto.done)

        self.assertTrue(1024 <= srv_proto.nbytes < len(self.DATA),
                        srv_proto.nbytes)
        self.assertTrue(1024 <= self.file.tell() < len(self.DATA),
                        self.file.tell())

    def test_sendfile_fallback_close_peer_in_middle_of_receiving(self):
        # Same mid-transfer close, but forced onto the fallback path.

        def sendfile_native(transp, file, offset, count):
            # to raise SendfileNotAvailableError
            return base_events.BaseEventLoop._sendfile_native(
                self.loop, transp, file, offset, count)

        self.loop._sendfile_native = sendfile_native

        srv_proto, cli_proto = self.prepare(close_after=1024)
        with self.assertRaises(ConnectionError):
            self.run_loop(
                self.loop.sendfile(cli_proto.transport, self.file))
        self.run_loop(srv_proto.done)

        self.assertTrue(1024 <= srv_proto.nbytes < len(self.DATA),
                        srv_proto.nbytes)
        self.assertTrue(1024 <= self.file.tell() < len(self.DATA),
                        self.file.tell())

    @unittest.skipIf(not hasattr(os, 'sendfile'),
                     "Don't have native sendfile support")
    def test_sendfile_prevents_bare_write(self):
        # While a sendfile() is in flight, transport.write() must refuse.
        srv_proto, cli_proto = self.prepare()
        fut = self.loop.create_future()

        async def coro():
            fut.set_result(None)
            return await self.loop.sendfile(cli_proto.transport, self.file)

        t = self.loop.create_task(coro())
        self.run_loop(fut)
        with self.assertRaisesRegex(RuntimeError,
                                    "sendfile is in progress"):
            cli_proto.transport.write(b'data')
        ret = self.run_loop(t)
        self.assertEqual(ret, len(self.DATA))

    def test_sendfile_no_fallback_for_fallback_transport(self):
        # A transport that only supports the FALLBACK mode must be rejected
        # when fallback is explicitly disabled.
        transport = mock.Mock()
        transport.is_closing.side_effect = lambda: False
        transport._sendfile_compatible = constants._SendfileMode.FALLBACK
        with self.assertRaisesRegex(RuntimeError, 'fallback is disabled'):
            self.loop.run_until_complete(
                self.loop.sendfile(transport, None, fallback=False))
# Concrete TestCase classes: each combines the shared test mixins with a
# specific event-loop implementation available on the current platform.
if sys.platform == 'win32':

    class SelectEventLoopTests(EventLoopTestsMixin,
                               SendfileMixin,
                               test_utils.TestCase):

        def create_event_loop(self):
            return asyncio.SelectorEventLoop()

    class ProactorEventLoopTests(EventLoopTestsMixin,
                                 SendfileMixin,
                                 SubprocessTestsMixin,
                                 test_utils.TestCase):

        def create_event_loop(self):
            return asyncio.ProactorEventLoop()

        # IOCP has no readiness-based callbacks or datagram endpoints, so the
        # corresponding mixin tests are skipped explicitly.
        def test_reader_callback(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_reader()")

        def test_reader_callback_cancel(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_reader()")

        def test_writer_callback(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_writer()")

        def test_writer_callback_cancel(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_writer()")

        def test_create_datagram_endpoint(self):
            raise unittest.SkipTest(
                "IocpEventLoop does not have create_datagram_endpoint()")

        def test_remove_fds_after_closing(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
else:
    import selectors

    class UnixEventLoopTestsMixin(EventLoopTestsMixin, SendfileMixin):
        # On Unix a child watcher must be attached so subprocess exit codes
        # are collected.
        def setUp(self):
            super().setUp()
            watcher = asyncio.SafeChildWatcher()
            watcher.attach_loop(self.loop)
            asyncio.set_child_watcher(watcher)

        def tearDown(self):
            asyncio.set_child_watcher(None)
            super().tearDown()

    if hasattr(selectors, 'KqueueSelector'):
        class KqueueEventLoopTests(UnixEventLoopTestsMixin,
                                   SubprocessTestsMixin,
                                   test_utils.TestCase):

            def create_event_loop(self):
                return asyncio.SelectorEventLoop(
                    selectors.KqueueSelector())

            # kqueue doesn't support character devices (PTY) on Mac OS X older
            # than 10.9 (Maverick)
            @support.requires_mac_ver(10, 9)
            # Issue #20667: KqueueEventLoopTests.test_read_pty_output()
            # hangs on OpenBSD 5.5
            @unittest.skipIf(sys.platform.startswith('openbsd'),
                             'test hangs on OpenBSD')
            def test_read_pty_output(self):
                super().test_read_pty_output()

            # kqueue doesn't support character devices (PTY) on Mac OS X older
            # than 10.9 (Maverick)
            @support.requires_mac_ver(10, 9)
            def test_write_pty(self):
                super().test_write_pty()

    if hasattr(selectors, 'EpollSelector'):
        class EPollEventLoopTests(UnixEventLoopTestsMixin,
                                  SubprocessTestsMixin,
                                  test_utils.TestCase):

            def create_event_loop(self):
                return asyncio.SelectorEventLoop(selectors.EpollSelector())

    if hasattr(selectors, 'PollSelector'):
        class PollEventLoopTests(UnixEventLoopTestsMixin,
                                 SubprocessTestsMixin,
                                 test_utils.TestCase):

            def create_event_loop(self):
                return asyncio.SelectorEventLoop(selectors.PollSelector())

    # Should always exist.
    class SelectEventLoopTests(UnixEventLoopTestsMixin,
                               SubprocessTestsMixin,
                               test_utils.TestCase):

        def create_event_loop(self):
            return asyncio.SelectorEventLoop(selectors.SelectSelector())
def noop(*args, **kwargs):
    """Accept any arguments and do nothing (generic callback for tests)."""
    return None
class HandleTests(test_utils.TestCase):
    """Unit tests for asyncio.Handle (callback wrapper used by the loop)."""

    def setUp(self):
        super().setUp()
        # A mock loop suffices: Handle only stores the loop reference.
        self.loop = mock.Mock()
        self.loop.get_debug.return_value = True

    def test_handle(self):
        """Basic attribute storage and cancellation flag."""
        def callback(*args):
            return args

        args = ()
        h = asyncio.Handle(callback, args, self.loop)
        self.assertIs(h._callback, callback)
        self.assertIs(h._args, args)
        self.assertFalse(h.cancelled())

        h.cancel()
        self.assertTrue(h.cancelled())

    def test_callback_with_exception(self):
        """An exception in the callback is routed to the loop's handler."""
        def callback():
            raise ValueError()

        self.loop = mock.Mock()
        self.loop.call_exception_handler = mock.Mock()

        h = asyncio.Handle(callback, (), self.loop)
        h._run()

        self.loop.call_exception_handler.assert_called_with({
            'message': test_utils.MockPattern('Exception in callback.*'),
            'exception': mock.ANY,
            'handle': h,
            'source_traceback': h._source_traceback,
        })

    def test_handle_weakref(self):
        """Handle instances must be weak-referenceable."""
        wd = weakref.WeakValueDictionary()
        h = asyncio.Handle(lambda: None, (), self.loop)
        wd['h'] = h  # Would fail without __weakref__ slot.

    def test_handle_repr(self):
        """repr() formatting for plain, cancelled, and partial callbacks."""
        self.loop.get_debug.return_value = False

        # simple function
        h = asyncio.Handle(noop, (1, 2), self.loop)
        filename, lineno = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<Handle noop(1, 2) at %s:%s>'
                         % (filename, lineno))

        # cancelled handle
        h.cancel()
        self.assertEqual(repr(h),
                         '<Handle cancelled>')

        # decorated function
        cb = asyncio.coroutine(noop)
        h = asyncio.Handle(cb, (), self.loop)
        self.assertEqual(repr(h),
                         '<Handle noop() at %s:%s>'
                         % (filename, lineno))

        # partial function
        cb = functools.partial(noop, 1, 2)
        h = asyncio.Handle(cb, (3,), self.loop)
        regex = (r'^<Handle noop\(1, 2\)\(3\) at %s:%s>$'
                 % (re.escape(filename), lineno))
        self.assertRegex(repr(h), regex)

        # partial function with keyword args
        cb = functools.partial(noop, x=1)
        h = asyncio.Handle(cb, (2, 3), self.loop)
        regex = (r'^<Handle noop\(x=1\)\(2, 3\) at %s:%s>$'
                 % (re.escape(filename), lineno))
        self.assertRegex(repr(h), regex)

        # partial method
        # (the obsolete ``sys.version_info >= (3, 4)`` guard was removed:
        # functools.partialmethod exists on every supported Python 3.)
        method = HandleTests.test_handle_repr
        cb = functools.partialmethod(method)
        filename, lineno = test_utils.get_function_source(method)
        h = asyncio.Handle(cb, (), self.loop)

        cb_regex = r'<function HandleTests.test_handle_repr .*>'
        cb_regex = (r'functools.partialmethod\(%s, , \)\(\)' % cb_regex)
        regex = (r'^<Handle %s at %s:%s>$'
                 % (cb_regex, re.escape(filename), lineno))
        self.assertRegex(repr(h), regex)

    def test_handle_repr_debug(self):
        """In debug mode repr() also embeds the creation location."""
        self.loop.get_debug.return_value = True

        # simple function
        # NOTE: the Handle creation must stay on the line directly below
        # the f_lineno capture -- do not insert lines between them.
        create_filename = __file__
        create_lineno = sys._getframe().f_lineno + 1
        h = asyncio.Handle(noop, (1, 2), self.loop)
        filename, lineno = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<Handle noop(1, 2) at %s:%s created at %s:%s>'
                         % (filename, lineno, create_filename, create_lineno))

        # cancelled handle
        h.cancel()
        self.assertEqual(
            repr(h),
            '<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
            % (filename, lineno, create_filename, create_lineno))

        # double cancellation won't overwrite _repr
        h.cancel()
        self.assertEqual(
            repr(h),
            '<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
            % (filename, lineno, create_filename, create_lineno))

    def test_handle_source_traceback(self):
        """In debug mode each handle records where it was created."""
        loop = asyncio.get_event_loop_policy().new_event_loop()
        loop.set_debug(True)
        self.set_event_loop(loop)

        def check_source_traceback(h):
            # The creation call must sit on the line directly above the
            # check_source_traceback() call site.
            lineno = sys._getframe(1).f_lineno - 1
            self.assertIsInstance(h._source_traceback, list)
            self.assertEqual(h._source_traceback[-1][:3],
                             (__file__,
                              lineno,
                              'test_handle_source_traceback'))

        # call_soon
        h = loop.call_soon(noop)
        check_source_traceback(h)

        # call_soon_threadsafe
        h = loop.call_soon_threadsafe(noop)
        check_source_traceback(h)

        # call_later
        h = loop.call_later(0, noop)
        check_source_traceback(h)

        # call_at (bug fix: this case previously duplicated call_later
        # instead of exercising call_at)
        h = loop.call_at(loop.time() + 10, noop)
        check_source_traceback(h)

    @unittest.skipUnless(hasattr(collections.abc, 'Coroutine'),
                         'No collections.abc.Coroutine')
    def test_coroutine_like_object_debug_formatting(self):
        # Test that asyncio can format coroutines that are instances of
        # collections.abc.Coroutine, but lack cr_core or gi_code attributes
        # (such as ones compiled with Cython).
        coro = CoroLike()
        coro.__name__ = 'AAA'
        self.assertTrue(asyncio.iscoroutine(coro))
        self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')

        coro.__qualname__ = 'BBB'
        self.assertEqual(coroutines._format_coroutine(coro), 'BBB()')

        coro.cr_running = True
        self.assertEqual(coroutines._format_coroutine(coro), 'BBB() running')

        coro = CoroLike()
        # Some coroutines might not have '__name__', such as
        # built-in async_gen.asend().
        self.assertEqual(coroutines._format_coroutine(coro), 'CoroLike()')
class TimerTests(unittest.TestCase):
    """Unit tests for asyncio.TimerHandle (scheduled callback wrapper)."""

    def setUp(self):
        super().setUp()
        # A mock loop suffices: TimerHandle only stores the loop reference.
        self.loop = mock.Mock()

    def test_hash(self):
        # The hash is derived solely from the scheduled time.
        when = time.monotonic()
        h = asyncio.TimerHandle(when, lambda: False, (),
                                mock.Mock())
        self.assertEqual(hash(h), hash(when))

    def test_timer(self):
        # Attribute storage, cancellation, and the non-None 'when' contract.
        def callback(*args):
            return args

        args = (1, 2, 3)
        when = time.monotonic()
        h = asyncio.TimerHandle(when, callback, args, mock.Mock())
        self.assertIs(h._callback, callback)
        self.assertIs(h._args, args)
        self.assertFalse(h.cancelled())

        # cancel: clears the callback and args references
        h.cancel()
        self.assertTrue(h.cancelled())
        self.assertIsNone(h._callback)
        self.assertIsNone(h._args)

        # when cannot be None
        self.assertRaises(AssertionError,
                          asyncio.TimerHandle, None, callback, args,
                          self.loop)

    def test_timer_repr(self):
        self.loop.get_debug.return_value = False

        # simple function
        h = asyncio.TimerHandle(123, noop, (), self.loop)
        src = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<TimerHandle when=123 noop() at %s:%s>' % src)

        # cancelled handle
        h.cancel()
        self.assertEqual(repr(h),
                         '<TimerHandle cancelled when=123>')

    def test_timer_repr_debug(self):
        # Debug mode embeds the creation location in repr().
        self.loop.get_debug.return_value = True

        # simple function
        # NOTE: the TimerHandle creation must stay on the line directly
        # below the f_lineno capture -- do not insert lines between them.
        create_filename = __file__
        create_lineno = sys._getframe().f_lineno + 1
        h = asyncio.TimerHandle(123, noop, (), self.loop)
        filename, lineno = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<TimerHandle when=123 noop() '
                         'at %s:%s created at %s:%s>'
                         % (filename, lineno, create_filename, create_lineno))

        # cancelled handle
        h.cancel()
        self.assertEqual(repr(h),
                         '<TimerHandle cancelled when=123 noop() '
                         'at %s:%s created at %s:%s>'
                         % (filename, lineno, create_filename, create_lineno))

    def test_timer_comparison(self):
        # Full ordering/equality matrix for equal and distinct deadlines.
        def callback(*args):
            return args

        when = time.monotonic()

        h1 = asyncio.TimerHandle(when, callback, (), self.loop)
        h2 = asyncio.TimerHandle(when, callback, (), self.loop)
        # TODO: Use assertLess etc.
        self.assertFalse(h1 < h2)
        self.assertFalse(h2 < h1)
        self.assertTrue(h1 <= h2)
        self.assertTrue(h2 <= h1)
        self.assertFalse(h1 > h2)
        self.assertFalse(h2 > h1)
        self.assertTrue(h1 >= h2)
        self.assertTrue(h2 >= h1)
        self.assertTrue(h1 == h2)
        self.assertFalse(h1 != h2)

        # Cancellation breaks equality with an otherwise-identical handle.
        h2.cancel()
        self.assertFalse(h1 == h2)

        h1 = asyncio.TimerHandle(when, callback, (), self.loop)
        h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop)
        self.assertTrue(h1 < h2)
        self.assertFalse(h2 < h1)
        self.assertTrue(h1 <= h2)
        self.assertFalse(h2 <= h1)
        self.assertFalse(h1 > h2)
        self.assertTrue(h2 > h1)
        self.assertFalse(h1 >= h2)
        self.assertTrue(h2 >= h1)
        self.assertFalse(h1 == h2)
        self.assertTrue(h1 != h2)

        # Comparing against a plain Handle is not supported.
        h3 = asyncio.Handle(callback, (), self.loop)
        self.assertIs(NotImplemented, h1.__eq__(h3))
        self.assertIs(NotImplemented, h1.__ne__(h3))
class AbstractEventLoopTests(unittest.TestCase):
    """Every AbstractEventLoop method must raise NotImplementedError."""

    def test_not_implemented(self):
        """Synchronous API surface of the abstract loop."""
        f = mock.Mock()
        loop = asyncio.AbstractEventLoop()
        self.assertRaises(
            NotImplementedError, loop.run_forever)
        self.assertRaises(
            NotImplementedError, loop.run_until_complete, None)
        self.assertRaises(
            NotImplementedError, loop.stop)
        self.assertRaises(
            NotImplementedError, loop.is_running)
        self.assertRaises(
            NotImplementedError, loop.is_closed)
        self.assertRaises(
            NotImplementedError, loop.close)
        self.assertRaises(
            NotImplementedError, loop.create_task, None)
        self.assertRaises(
            NotImplementedError, loop.call_later, None, None)
        self.assertRaises(
            NotImplementedError, loop.call_at, f, f)
        self.assertRaises(
            NotImplementedError, loop.call_soon, None)
        self.assertRaises(
            NotImplementedError, loop.time)
        self.assertRaises(
            NotImplementedError, loop.call_soon_threadsafe, None)
        self.assertRaises(
            NotImplementedError, loop.set_default_executor, f)
        self.assertRaises(
            NotImplementedError, loop.add_reader, 1, f)
        self.assertRaises(
            NotImplementedError, loop.remove_reader, 1)
        self.assertRaises(
            NotImplementedError, loop.add_writer, 1, f)
        self.assertRaises(
            NotImplementedError, loop.remove_writer, 1)
        self.assertRaises(
            NotImplementedError, loop.add_signal_handler, 1, f)
        # Bug fix: remove_signal_handler was asserted twice in a row;
        # the redundant duplicate assertion has been dropped.
        self.assertRaises(
            NotImplementedError, loop.remove_signal_handler, 1)
        self.assertRaises(
            NotImplementedError, loop.set_exception_handler, f)
        self.assertRaises(
            NotImplementedError, loop.default_exception_handler, f)
        self.assertRaises(
            NotImplementedError, loop.call_exception_handler, f)
        self.assertRaises(
            NotImplementedError, loop.get_debug)
        self.assertRaises(
            NotImplementedError, loop.set_debug, f)

    def test_not_implemented_async(self):
        """Coroutine API surface of the abstract loop."""

        async def inner():
            f = mock.Mock()
            loop = asyncio.AbstractEventLoop()

            with self.assertRaises(NotImplementedError):
                await loop.run_in_executor(f, f)
            with self.assertRaises(NotImplementedError):
                await loop.getaddrinfo('localhost', 8080)
            with self.assertRaises(NotImplementedError):
                await loop.getnameinfo(('localhost', 8080))
            with self.assertRaises(NotImplementedError):
                await loop.create_connection(f)
            with self.assertRaises(NotImplementedError):
                await loop.create_server(f)
            with self.assertRaises(NotImplementedError):
                await loop.create_datagram_endpoint(f)
            with self.assertRaises(NotImplementedError):
                await loop.sock_recv(f, 10)
            with self.assertRaises(NotImplementedError):
                await loop.sock_recv_into(f, 10)
            with self.assertRaises(NotImplementedError):
                await loop.sock_sendall(f, 10)
            with self.assertRaises(NotImplementedError):
                await loop.sock_connect(f, f)
            with self.assertRaises(NotImplementedError):
                await loop.sock_accept(f)
            with self.assertRaises(NotImplementedError):
                await loop.sock_sendfile(f, f)
            with self.assertRaises(NotImplementedError):
                await loop.sendfile(f, f)
            with self.assertRaises(NotImplementedError):
                await loop.connect_read_pipe(f, mock.sentinel.pipe)
            with self.assertRaises(NotImplementedError):
                await loop.connect_write_pipe(f, mock.sentinel.pipe)
            with self.assertRaises(NotImplementedError):
                await loop.subprocess_shell(f, mock.sentinel)
            with self.assertRaises(NotImplementedError):
                await loop.subprocess_exec(f)

        # Run the checks on a real (concrete) event loop.
        loop = asyncio.new_event_loop()
        loop.run_until_complete(inner())
        loop.close()
class ProtocolsAbsTests(unittest.TestCase):
    """The abstract protocol base classes must provide no-op callbacks."""

    def test_empty(self):
        arg = mock.Mock()

        stream = asyncio.Protocol()
        self.assertIsNone(stream.connection_made(arg))
        self.assertIsNone(stream.connection_lost(arg))
        self.assertIsNone(stream.data_received(arg))
        self.assertIsNone(stream.eof_received())

        datagram = asyncio.DatagramProtocol()
        self.assertIsNone(datagram.connection_made(arg))
        self.assertIsNone(datagram.connection_lost(arg))
        self.assertIsNone(datagram.error_received(arg))
        self.assertIsNone(datagram.datagram_received(arg, arg))

        subproc = asyncio.SubprocessProtocol()
        self.assertIsNone(subproc.connection_made(arg))
        self.assertIsNone(subproc.connection_lost(arg))
        self.assertIsNone(subproc.pipe_data_received(1, arg))
        self.assertIsNone(subproc.pipe_connection_lost(1, arg))
        self.assertIsNone(subproc.process_exited())
class PolicyTests(unittest.TestCase):
    """Tests for the event-loop policy machinery.

    NOTE(review): these tests touch per-policy (and, in the last test,
    global) loop/policy state; each test closes the loops it creates.
    """

    def test_event_loop_policy(self):
        # The abstract policy implements none of its methods.
        policy = asyncio.AbstractEventLoopPolicy()
        self.assertRaises(NotImplementedError, policy.get_event_loop)
        self.assertRaises(NotImplementedError, policy.set_event_loop, object())
        self.assertRaises(NotImplementedError, policy.new_event_loop)
        self.assertRaises(NotImplementedError, policy.get_child_watcher)
        self.assertRaises(NotImplementedError, policy.set_child_watcher,
                          object())

    def test_get_event_loop(self):
        policy = asyncio.DefaultEventLoopPolicy()
        self.assertIsNone(policy._local._loop)

        loop = policy.get_event_loop()
        self.assertIsInstance(loop, asyncio.AbstractEventLoop)

        # The lazily-created loop is cached and returned on later calls.
        self.assertIs(policy._local._loop, loop)
        self.assertIs(loop, policy.get_event_loop())
        loop.close()

    def test_get_event_loop_calls_set_event_loop(self):
        policy = asyncio.DefaultEventLoopPolicy()

        with mock.patch.object(
                policy, "set_event_loop",
                wraps=policy.set_event_loop) as m_set_event_loop:

            loop = policy.get_event_loop()

            # policy._local._loop must be set through .set_event_loop()
            # (the unix DefaultEventLoopPolicy needs this call to attach
            # the child watcher correctly)
            m_set_event_loop.assert_called_with(loop)

        loop.close()

    def test_get_event_loop_after_set_none(self):
        # Explicitly clearing the loop makes get_event_loop() fail.
        policy = asyncio.DefaultEventLoopPolicy()
        policy.set_event_loop(None)
        self.assertRaises(RuntimeError, policy.get_event_loop)

    @mock.patch('asyncio.events.threading.current_thread')
    def test_get_event_loop_thread(self, m_current_thread):

        def f():
            # A non-main thread gets no implicit event loop.
            policy = asyncio.DefaultEventLoopPolicy()
            self.assertRaises(RuntimeError, policy.get_event_loop)

        th = threading.Thread(target=f)
        th.start()
        th.join()

    def test_new_event_loop(self):
        policy = asyncio.DefaultEventLoopPolicy()

        loop = policy.new_event_loop()
        self.assertIsInstance(loop, asyncio.AbstractEventLoop)
        loop.close()

    def test_set_event_loop(self):
        policy = asyncio.DefaultEventLoopPolicy()
        old_loop = policy.get_event_loop()

        # Only real loops (or None) may be installed.
        self.assertRaises(AssertionError, policy.set_event_loop, object())

        loop = policy.new_event_loop()
        policy.set_event_loop(loop)
        self.assertIs(loop, policy.get_event_loop())
        self.assertIsNot(old_loop, policy.get_event_loop())
        loop.close()
        old_loop.close()

    def test_get_event_loop_policy(self):
        # The global policy is created lazily and then cached.
        policy = asyncio.get_event_loop_policy()
        self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy)
        self.assertIs(policy, asyncio.get_event_loop_policy())

    def test_set_event_loop_policy(self):
        # Only real policy objects may be installed globally.
        self.assertRaises(
            AssertionError, asyncio.set_event_loop_policy, object())

        old_policy = asyncio.get_event_loop_policy()

        policy = asyncio.DefaultEventLoopPolicy()
        asyncio.set_event_loop_policy(policy)
        self.assertIs(policy, asyncio.get_event_loop_policy())
        self.assertIsNot(policy, old_policy)
class GetEventLoopTestsMixin:
    """Mixin running get_event_loop()/get_running_loop() tests against one
    particular implementation (pure-Python or C-accelerated).

    Concrete subclasses supply the four ``*_impl`` class attributes.
    setUp() monkeypatches them into both the ``events`` module and the
    top-level ``asyncio`` namespace; tearDown() restores the originals.
    The save/restore ordering here is load-bearing — do not reorder.
    """

    _get_running_loop_impl = None
    _set_running_loop_impl = None
    get_running_loop_impl = None
    get_event_loop_impl = None

    def setUp(self):
        # Save the currently-installed implementations for tearDown():
        self._get_running_loop_saved = events._get_running_loop
        self._set_running_loop_saved = events._set_running_loop
        self.get_running_loop_saved = events.get_running_loop
        self.get_event_loop_saved = events.get_event_loop

        # Install the implementations under test in the events module…
        events._get_running_loop = type(self)._get_running_loop_impl
        events._set_running_loop = type(self)._set_running_loop_impl
        events.get_running_loop = type(self).get_running_loop_impl
        events.get_event_loop = type(self).get_event_loop_impl

        # …and in the asyncio top-level namespace, which re-exports them.
        asyncio._get_running_loop = type(self)._get_running_loop_impl
        asyncio._set_running_loop = type(self)._set_running_loop_impl
        asyncio.get_running_loop = type(self).get_running_loop_impl
        asyncio.get_event_loop = type(self).get_event_loop_impl

        super().setUp()

        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)

        if sys.platform != 'win32':
            # Unix needs a child watcher attached to run subprocesses:
            watcher = asyncio.SafeChildWatcher()
            watcher.attach_loop(self.loop)
            asyncio.set_child_watcher(watcher)

    def tearDown(self):
        try:
            if sys.platform != 'win32':
                asyncio.set_child_watcher(None)

            super().tearDown()
        finally:
            # Restore global state even if the super() tearDown raised:
            self.loop.close()
            asyncio.set_event_loop(None)

            events._get_running_loop = self._get_running_loop_saved
            events._set_running_loop = self._set_running_loop_saved
            events.get_running_loop = self.get_running_loop_saved
            events.get_event_loop = self.get_event_loop_saved

            asyncio._get_running_loop = self._get_running_loop_saved
            asyncio._set_running_loop = self._set_running_loop_saved
            asyncio.get_running_loop = self.get_running_loop_saved
            asyncio.get_event_loop = self.get_event_loop_saved

    if sys.platform != 'win32':

        def test_get_event_loop_new_process(self):
            # Issue bpo-32126: The multiprocessing module used by
            # ProcessPoolExecutor is not functional when the
            # multiprocessing.synchronize module cannot be imported.
            support.import_module('multiprocessing.synchronize')

            async def main():
                pool = concurrent.futures.ProcessPoolExecutor()
                result = await self.loop.run_in_executor(
                    pool, _test_get_event_loop_new_process__sub_proc)
                pool.shutdown()
                return result

            self.assertEqual(
                self.loop.run_until_complete(main()),
                'hello')

    def test_get_event_loop_returns_running_loop(self):
        class TestError(Exception):
            pass

        # Policy whose get_event_loop() always raises, so we can detect
        # whether asyncio.get_event_loop() fell back to the policy or
        # returned the running loop directly.
        class Policy(asyncio.DefaultEventLoopPolicy):
            def get_event_loop(self):
                raise TestError

        old_policy = asyncio.get_event_loop_policy()
        try:
            asyncio.set_event_loop_policy(Policy())
            loop = asyncio.new_event_loop()

            # Outside a running loop, the policy is consulted (and raises):
            with self.assertRaises(TestError):
                asyncio.get_event_loop()
            asyncio.set_event_loop(None)
            with self.assertRaises(TestError):
                asyncio.get_event_loop()

            with self.assertRaisesRegex(RuntimeError, 'no running'):
                self.assertIs(asyncio.get_running_loop(), None)
            self.assertIs(asyncio._get_running_loop(), None)

            async def func():
                # Inside a running loop, get_event_loop() must return it
                # without consulting the policy:
                self.assertIs(asyncio.get_event_loop(), loop)
                self.assertIs(asyncio.get_running_loop(), loop)
                self.assertIs(asyncio._get_running_loop(), loop)

            loop.run_until_complete(func())

            asyncio.set_event_loop(loop)
            with self.assertRaises(TestError):
                asyncio.get_event_loop()

            asyncio.set_event_loop(None)
            with self.assertRaises(TestError):
                asyncio.get_event_loop()

        finally:
            asyncio.set_event_loop_policy(old_policy)
            if loop is not None:
                loop.close()

        with self.assertRaisesRegex(RuntimeError, 'no running'):
            self.assertIs(asyncio.get_running_loop(), None)

        self.assertIs(asyncio._get_running_loop(), None)
class TestPyGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
    """Run the mixin's tests against the pure-Python implementations."""

    _get_running_loop_impl = events._py__get_running_loop
    _set_running_loop_impl = events._py__set_running_loop
    get_running_loop_impl = events._py_get_running_loop
    get_event_loop_impl = events._py_get_event_loop
# The C-accelerated variants only exist when the _asyncio extension module
# is available; skip defining the test class entirely when it isn't.
try:
    import _asyncio  # NoQA
except ImportError:
    pass
else:

    class TestCGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
        """Run the mixin's tests against the C-accelerated implementations."""

        _get_running_loop_impl = events._c__get_running_loop
        _set_running_loop_impl = events._c__set_running_loop
        get_running_loop_impl = events._c_get_running_loop
        get_event_loop_impl = events._c_get_event_loop
class TestServer(unittest.TestCase):
    """Tests for the Server object returned by loop.create_server()."""

    def test_get_loop(self):
        # A freshly created server must report the loop it was created on.
        event_loop = asyncio.new_event_loop()
        self.addCleanup(event_loop.close)

        protocol = MyProto(event_loop)
        create = event_loop.create_server(lambda: protocol, '0.0.0.0', 0)
        server = event_loop.run_until_complete(create)

        self.assertEqual(server.get_loop(), event_loop)

        server.close()
        event_loop.run_until_complete(server.wait_closed())
class TestAbstractServer(unittest.TestCase):
    """The AbstractServer base class must raise NotImplementedError for
    every operation."""

    def test_close(self):
        server = events.AbstractServer()
        with self.assertRaises(NotImplementedError):
            server.close()

    def test_wait_closed(self):
        event_loop = asyncio.new_event_loop()
        self.addCleanup(event_loop.close)

        server = events.AbstractServer()
        with self.assertRaises(NotImplementedError):
            event_loop.run_until_complete(server.wait_closed())

    def test_get_loop(self):
        server = events.AbstractServer()
        with self.assertRaises(NotImplementedError):
            server.get_loop()
# Allow running this test module directly (``python test_events.py``).
if __name__ == '__main__':
    unittest.main()
|
__main__.py | #####################################################################
# #
# __main__.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the program runmanager, in the labscript #
# suite (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
"""Runmanager GUI and supporting code
"""
import queue
import os
import sys
import labscript_utils.excepthook
# Associate app windows with OS menu shortcuts:
import desktop_app
desktop_app.set_process_appid('runmanager')
# Splash screen
from labscript_utils.splash import Splash
splash = Splash(os.path.join(os.path.dirname(__file__), 'runmanager.svg'))
splash.show()
splash.update_text('importing standard library modules')
import time
import contextlib
import subprocess
import threading
import ast
import pprint
import traceback
import signal
from pathlib import Path
splash.update_text('importing matplotlib')
# Evaluation of globals happens in a thread with the pylab module imported.
# Although we don't care about plotting, importing pylab makes Qt calls. We
# can't have that from a non main thread, so we'll just disable matplotlib's
# GUI integration:
import matplotlib
matplotlib.use('Agg')
from qtutils.qt import QtCore, QtGui, QtWidgets
from qtutils.qt.QtCore import pyqtSignal as Signal
splash.update_text('importing labscript suite modules')
from labscript_utils.ls_zprocess import zmq_get, ProcessTree, ZMQServer
from labscript_utils.labconfig import LabConfig, save_appconfig, load_appconfig
from labscript_utils.setup_logging import setup_logging
import labscript_utils.shared_drive as shared_drive
from labscript_utils import dedent
from zprocess import raise_exception_in_thread
import runmanager
import runmanager.remote
from qtutils import (
inmain,
inmain_decorator,
UiLoader,
inthread,
DisconnectContextManager,
qtlock,
)
from labscript_utils.qtwidgets.outputbox import OutputBox
import qtutils.icons
# Monospace font for code/output widgets, chosen per host OS:
GLOBAL_MONOSPACE_FONT = "Consolas" if os.name == 'nt' else "Ubuntu Mono"

# Directory containing runmanager's package resources (.ui files, icons):
runmanager_dir = Path(__file__).absolute().parent

process_tree = ProcessTree.instance()

# Set a meaningful name for zprocess.locking's client id:
process_tree.zlock_client.set_process_name('runmanager')
def log_if_global(g, g_list, message):
    """logs a message if the global name "g" is in "g_list"

    useful if you want to print out a message inside a loop over globals,
    but only for a particular global (or set of globals).

    If g_list is empty, then it will use the hardcoded list below
    (useful if you want to change the behaviour globally)
    """
    watched = g_list if isinstance(g_list, list) else [g_list]
    if not watched:
        watched = []  # add global options here
    if g in watched:
        logger.info(message)
def composite_colors(r0, g0, b0, a0, r1, g1, b1, a1):
    """Composite a second colour over a first with given alpha values and return the
    result.

    All channels are 8-bit values (0-255); standard "over" alpha compositing
    is used. Returns ``[r, g, b, a]`` as ints in 0-255.
    """
    # Normalise the alphas to 0-1:
    a0 /= 255
    a1 /= 255
    # Resulting alpha of the composite:
    a = a0 + a1 - a0 * a1
    if a == 0:
        # Both inputs fully transparent: the division below would raise
        # ZeroDivisionError. The composite is fully transparent black.
        return [0, 0, 0, 0]
    r = (a1 * r1 + (1 - a1) * a0 * r0) / a
    g = (a1 * g1 + (1 - a1) * a0 * g0) / a
    b = (a1 * b1 + (1 - a1) * a0 * b0) / a
    return [int(round(x)) for x in (r, g, b, 255 * a)]
@inmain_decorator()
def error_dialog(message):
    """Show a modal warning dialog (run in the main thread) with *message*."""
    QtWidgets.QMessageBox.warning(app.ui, 'runmanager', message)
@inmain_decorator()
def question_dialog(message):
    """Show a modal yes/no question dialog (run in the main thread); return
    True if the user clicked Yes."""
    buttons = QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
    answer = QtWidgets.QMessageBox.question(app.ui, 'runmanager', message, buttons)
    return answer == QtWidgets.QMessageBox.Yes
@contextlib.contextmanager
def nested(*contextmanagers):
    """Enter the given context managers left-to-right, run the body, then
    exit them in reverse order (like a nested series of `with` blocks)."""
    with contextlib.ExitStack() as stack:
        for cm in contextmanagers:
            stack.enter_context(cm)
        yield
def scroll_view_to_row_if_current(view, item):
    """Checks to see if the item is in the row of the current item. If it is, scrolls
    the treeview/tableview vertically to ensure that row is visible. This is done by
    recording the horizontal scroll position, then using view.scrollTo(), and then
    restoring the horizontal position"""
    index = item.index()
    if index.row() != view.currentIndex().row():
        return
    scrollbar = view.horizontalScrollBar()
    saved_position = scrollbar.value()
    view.scrollTo(index)
    scrollbar.setValue(saved_position)
class FingerTabBarWidget(QtWidgets.QTabBar):
    """A TabBar with the tabs on the left and the text horizontal. Credit to
    @LegoStormtroopr, https://gist.github.com/LegoStormtroopr/5075267. We will
    promote the TabBar from the ui file to one of these."""

    def __init__(self, parent=None, minwidth=180, minheight=30, **kwargs):
        # Bugfix: 'iconPosition' is our own keyword, not QTabBar's — it must
        # be popped *before* forwarding **kwargs, otherwise QTabBar.__init__
        # raises TypeError whenever a caller actually passes iconPosition.
        self.iconPosition = kwargs.pop('iconPosition', QtWidgets.QTabWidget.West)
        QtWidgets.QTabBar.__init__(self, parent, **kwargs)
        self.minwidth = minwidth    # minimum tab width, pixels
        self.minheight = minheight  # minimum tab height, pixels
        self._movable = None        # bar-wide movable flag; None = not yet queried
        self.tab_movable = {}       # per-tab-index overrides of movability
        self.paint_clip = None      # (width, height) to clip painting to, or None

    def setMovable(self, movable, index=None):
        """Set tabs movable on an individual basis, or set for all tabs if no
        index specified"""
        if index is None:
            self._movable = movable
            self.tab_movable = {}
            QtWidgets.QTabBar.setMovable(self, movable)
        else:
            self.tab_movable[int(index)] = bool(movable)

    def isMovable(self, index=None):
        """Return whether the bar as a whole (or the tab at *index*) is movable."""
        if index is None:
            if self._movable is None:
                self._movable = QtWidgets.QTabBar.isMovable(self)
            return self._movable
        return self.tab_movable.get(index, self._movable)

    def indexAtPos(self, point):
        """Return the index of the tab containing *point*, or None."""
        for index in range(self.count()):
            if self.tabRect(index).contains(point):
                return index

    def mousePressEvent(self, event):
        index = self.indexAtPos(event.pos())
        if not self.tab_movable.get(index, self.isMovable()):
            # disable dragging until they release the mouse:
            QtWidgets.QTabBar.setMovable(self, False)
        return QtWidgets.QTabBar.mousePressEvent(self, event)

    def mouseReleaseEvent(self, event):
        if self.isMovable():
            # Restore this in case it was temporarily disabled by mousePressEvent
            QtWidgets.QTabBar.setMovable(self, True)
        return QtWidgets.QTabBar.mouseReleaseEvent(self, event)

    def tabLayoutChange(self):
        # If the tabs overflow the widget vertically, compute a clip region
        # so painting stops short of the scroll buttons.
        total_height = 0
        for index in range(self.count()):
            tabRect = self.tabRect(index)
            total_height += tabRect.height()
        if total_height > self.parent().height():
            # Don't paint over the top of the scroll buttons:
            scroll_buttons_area_height = 2 * max(
                self.style().pixelMetric(QtWidgets.QStyle.PM_TabBarScrollButtonWidth),
                qapplication.globalStrut().width(),
            )
            self.paint_clip = self.width(), self.parent().height() - scroll_buttons_area_height
        else:
            self.paint_clip = None

    def paintEvent(self, event):
        painter = QtWidgets.QStylePainter(self)
        if self.paint_clip is not None:
            painter.setClipRect(0, 0, *self.paint_clip)

        option = QtWidgets.QStyleOptionTab()
        for index in range(self.count()):
            tabRect = self.tabRect(index)
            self.initStyleOption(option, index)
            painter.drawControl(QtWidgets.QStyle.CE_TabBarTabShape, option)
            if not self.tabIcon(index).isNull():
                icon = self.tabIcon(index).pixmap(self.iconSize())
                alignment = QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter
                tabRect.moveLeft(10)
                painter.drawItemPixmap(tabRect, alignment, icon)
                tabRect.moveLeft(self.iconSize().width() + 15)
            else:
                tabRect.moveLeft(10)
            painter.drawText(tabRect, QtCore.Qt.AlignVCenter, self.tabText(index))

        if self.paint_clip is not None:
            # Draw a dividing line along the clip edge so the cut-off
            # looks deliberate:
            x_clip, y_clip = self.paint_clip
            painter.setClipping(False)
            palette = self.palette()
            mid_color = palette.color(QtGui.QPalette.Mid)
            painter.setPen(mid_color)
            painter.drawLine(0, y_clip, x_clip, y_clip)
        painter.end()

    def tabSizeHint(self, index):
        """Size for the tab at *index*, honouring the configured minimum
        width/height and leaving room for any right-side button."""
        fontmetrics = QtGui.QFontMetrics(self.font())
        text_width = fontmetrics.width(self.tabText(index))
        text_height = fontmetrics.height()
        height = text_height + 15
        height = max(self.minheight, height)
        width = text_width + 15

        button = self.tabButton(index, QtWidgets.QTabBar.RightSide)
        if button is not None:
            height = max(height, button.height() + 7)
            # Same amount of space around the button horizontally as it has vertically:
            width += button.width() + height - button.height()
        width = max(self.minwidth, width)
        return QtCore.QSize(width, height)

    def setTabButton(self, index, geometry, button):
        # Bugfix: accept button=None so callers can *remove* a tab button
        # (FingerTabWidget.setTabClosable passes None for that); previously
        # this raised TypeError, and would then have crashed on None.move().
        if button is not None and not isinstance(button, TabToolButton):
            raise TypeError('Not a TabToolButton, won\'t paint correctly. Use a TabToolButton')
        result = QtWidgets.QTabBar.setTabButton(self, index, geometry, button)
        if button is not None:
            button.move(*button.get_correct_position())
        return result
class TabToolButton(QtWidgets.QToolButton):
    """Tool button embedded in a FingerTabBarWidget tab (e.g. a close button).

    Cooperates with the tab bar's ``paint_clip`` so it is not painted over
    the scroll-button area, and pins itself to a fixed position within its
    tab, undoing any moves Qt applies to it.
    """

    def __init__(self, *args, **kwargs):
        QtWidgets.QToolButton.__init__(self, *args, **kwargs)
        # Don't steal keyboard focus from the view when clicked:
        self.setFocusPolicy(QtCore.Qt.NoFocus)

    def paintEvent(self, event):
        painter = QtWidgets.QStylePainter(self)
        # Honour the parent tab bar's clip region, translated into our own
        # coordinate system, so we don't paint over the scroll buttons:
        paint_clip = self.parent().paint_clip
        if paint_clip is not None:
            point = QtCore.QPoint(*paint_clip)
            global_point = self.parent().mapToGlobal(point)
            local_point = self.mapFromGlobal(global_point)
            painter.setClipRect(0, 0, local_point.x(), local_point.y())
        option = QtWidgets.QStyleOptionToolButton()
        self.initStyleOption(option)
        painter.drawComplexControl(QtWidgets.QStyle.CC_ToolButton, option)

    def get_correct_position(self):
        """Return the (x, y) position this button should occupy within its
        tab: vertically centred, right-aligned with matching padding.

        Raises LookupError if the button is not (yet) attached to any tab.
        """
        parent = self.parent()
        for index in range(parent.count()):
            if parent.tabButton(index, QtWidgets.QTabBar.RightSide) is self:
                break
        else:
            raise LookupError('Tab not found')
        tabRect = parent.tabRect(index)
        tab_x, tab_y, tab_width, tab_height = tabRect.x(), tabRect.y(), tabRect.width(), tabRect.height()
        size = self.sizeHint()
        width = size.width()
        height = size.height()
        # Same padding on the right as above/below:
        padding = int((tab_height - height) / 2)
        correct_x = tab_x + tab_width - width - padding
        correct_y = tab_y + padding
        return correct_x, correct_y

    def moveEvent(self, event):
        # Whenever something moves us, snap back to our pinned position:
        try:
            correct_x, correct_y = self.get_correct_position()
        except LookupError:
            return  # Things aren't initialised yet
        if self.x() != correct_x or self.y() != correct_y:
            # Move back! I shall not be moved!
            self.move(correct_x, correct_y)
        return QtWidgets.QToolButton.moveEvent(self, event)
class FingerTabWidget(QtWidgets.QTabWidget):
    """A QTabWidget equivalent which uses our FingerTabBarWidget"""

    def __init__(self, parent, *args):
        QtWidgets.QTabWidget.__init__(self, parent, *args)
        self.setTabBar(FingerTabBarWidget(self))

    def addTab(self, *args, **kwargs):
        # 'closable' is our extension; strip it before delegating to Qt.
        make_closable = kwargs.pop('closable', False)
        index = QtWidgets.QTabWidget.addTab(self, *args, **kwargs)
        self.setTabClosable(index, make_closable)
        return index

    def setTabClosable(self, index, closable):
        """Add or remove a close button on the tab at *index*."""
        tab_bar = self.tabBar()
        existing_button = tab_bar.tabButton(index, QtWidgets.QTabBar.RightSide)
        if not closable:
            if existing_button:
                # Get rid of it:
                tab_bar.setTabButton(index, QtWidgets.QTabBar.RightSide, None)
            return
        if existing_button:
            return
        # Make a close button for this tab:
        close_button = TabToolButton(self.parent())
        close_button.setIcon(QtGui.QIcon(':/qtutils/fugue/cross'))
        tab_bar.setTabButton(index, QtWidgets.QTabBar.RightSide, close_button)
        close_button.clicked.connect(
            lambda: self._on_close_button_clicked(close_button)
        )

    def _on_close_button_clicked(self, button):
        # Resolve which tab the clicked button belongs to, then request close:
        tab_bar = self.tabBar()
        for index in range(tab_bar.count()):
            if tab_bar.tabButton(index, QtWidgets.QTabBar.RightSide) is button:
                self.tabCloseRequested.emit(index)
                break
class ItemView(object):
    """Mixin for QTableView and QTreeView that emits a custom signal leftClicked(index)
    after a left click on a valid index, and doubleLeftClicked(index) (in addition) on
    double click. Also has modified tab and arrow key behaviour and custom selection
    highlighting."""

    leftClicked = Signal(QtCore.QModelIndex)
    doubleLeftClicked = Signal(QtCore.QModelIndex)

    COLOR_HIGHLIGHT = "#40308CC6" # Semitransparent blue

    def __init__(self, *args):
        super(ItemView, self).__init__(*args)
        # Index under the mouse at the last left-button press, if any:
        self._pressed_index = None
        # Whether the press in progress is the second click of a double click:
        self._double_click = False
        self.setAutoScroll(False)
        # Install our semitransparent highlight colour for selections, in
        # both the active and inactive palette groups:
        p = self.palette()
        for group in [QtGui.QPalette.Active, QtGui.QPalette.Inactive]:
            p.setColor(
                group,
                QtGui.QPalette.Highlight,
                QtGui.QColor(self.COLOR_HIGHLIGHT))
            p.setColor(
                group,
                QtGui.QPalette.HighlightedText,
                p.color(QtGui.QPalette.Active, QtGui.QPalette.Foreground)
            )
        self.setPalette(p)

    def mousePressEvent(self, event):
        # Remember which valid index (if any) the left button went down on,
        # so mouseReleaseEvent can tell whether it was a genuine click:
        result = super(ItemView, self).mousePressEvent(event)
        index = self.indexAt(event.pos())
        if event.button() == QtCore.Qt.LeftButton and index.isValid():
            self._pressed_index = self.indexAt(event.pos())
        return result

    def leaveEvent(self, event):
        # Mouse left the widget: abandon any click in progress.
        result = super(ItemView, self).leaveEvent(event)
        self._pressed_index = None
        self._double_click = False
        return result

    def mouseDoubleClickEvent(self, event):
        # Ensure our left click event occurs regardless of whether it is the
        # second click in a double click or not
        result = super(ItemView, self).mouseDoubleClickEvent(event)
        index = self.indexAt(event.pos())
        if event.button() == QtCore.Qt.LeftButton and index.isValid():
            self._pressed_index = self.indexAt(event.pos())
            self._double_click = True
        return result

    def mouseReleaseEvent(self, event):
        # Only emit leftClicked if the button was released over the same
        # index it was pressed on (i.e. a real click, not a drag):
        result = super(ItemView, self).mouseReleaseEvent(event)
        index = self.indexAt(event.pos())
        if event.button() == QtCore.Qt.LeftButton and index.isValid() and index == self._pressed_index:
            self.leftClicked.emit(index)
            if self._double_click:
                self.doubleLeftClicked.emit(index)
        self._pressed_index = None
        self._double_click = False
        return result

    def keyPressEvent(self, event):
        if event.key() in [QtCore.Qt.Key_Space, QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return]:
            item = self.model().itemFromIndex(self.currentIndex())
            if item.isEditable():
                # Space/enter edits editable items:
                self.edit(self.currentIndex())
            else:
                # Space/enter on non-editable items simulates a left click:
                self.leftClicked.emit(self.currentIndex())
        return super(ItemView, self).keyPressEvent(event)

    def moveCursor(self, cursor_action, keyboard_modifiers):
        # Arrow keys and tab/backtab move strictly to the sibling cell in the
        # given direction (no wrapping, no tree traversal); other cursor
        # actions fall back to the default behaviour.
        current_index = self.currentIndex()
        current_row, current_column = current_index.row(), current_index.column()
        if cursor_action == QtWidgets.QAbstractItemView.MoveUp:
            return current_index.sibling(current_row - 1, current_column)
        elif cursor_action == QtWidgets.QAbstractItemView.MoveDown:
            return current_index.sibling(current_row + 1, current_column)
        elif cursor_action == QtWidgets.QAbstractItemView.MoveLeft:
            return current_index.sibling(current_row, current_column - 1)
        elif cursor_action == QtWidgets.QAbstractItemView.MoveRight:
            return current_index.sibling(current_row, current_column + 1)
        elif cursor_action == QtWidgets.QAbstractItemView.MovePrevious:
            return current_index.sibling(current_row, current_column - 1)
        elif cursor_action == QtWidgets.QAbstractItemView.MoveNext:
            return current_index.sibling(current_row, current_column + 1)
        else:
            return super(ItemView, self).moveCursor(cursor_action, keyboard_modifiers)
class TreeView(ItemView, QtWidgets.QTreeView):
    """Treeview version of our customised ItemView"""

    def __init__(self, parent=None):
        super(TreeView, self).__init__(parent)
        self.setItemDelegate(ItemDelegate(self))
        # Size columns to their contents, disabling interactive resizing.
        # The caller may still configure a specific section to stretch:
        header = self.header()
        header.setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
class TableView(ItemView, QtWidgets.QTableView):
    """TableView version of our customised ItemView"""

    def __init__(self, parent=None):
        super(TableView, self).__init__(parent)
        # Size rows and columns to their contents, disabling interactive
        # resizing. The caller may still configure a specific column to
        # stretch:
        resize_to_contents = QtWidgets.QHeaderView.ResizeToContents
        self.verticalHeader().setSectionResizeMode(resize_to_contents)
        self.horizontalHeader().setSectionResizeMode(resize_to_contents)
        self.horizontalHeader().sectionResized.connect(self.on_column_resized)
        self.setItemDelegate(ItemDelegate(self))
        self.verticalHeader().hide()
        self.setShowGrid(False)
        self.horizontalHeader().setHighlightSections(False)

    def on_column_resized(self, col):
        # A column changing width can change row heights (word wrap), so
        # recompute the height of every row:
        row_count = self.model().rowCount()
        for row in range(row_count):
            self.resizeRowToContents(row)
class AlternatingColorModel(QtGui.QStandardItemModel):
    """QStandardItemModel that computes alternate-row shading (and, for
    QTableViews, selection highlighting) as part of the background-colour
    data, so they compose correctly with custom per-cell colours."""

    def __init__(self, view):
        QtGui.QStandardItemModel.__init__(self)
        # How much darker in each channel is the alternate base color compared
        # to the base color?
        self.view = view
        palette = view.palette()
        self.normal_color = palette.color(QtGui.QPalette.Base)
        self.alternate_color = palette.color(QtGui.QPalette.AlternateBase)
        r, g, b, a = self.normal_color.getRgb()
        alt_r, alt_g, alt_b, alt_a = self.alternate_color.getRgb()
        self.delta_r = alt_r - r
        self.delta_g = alt_g - g
        self.delta_b = alt_b - b
        self.delta_a = alt_a - a
        # A cache, store brushes so we don't have to recalculate them. Is faster.
        # Keyed on (normal_rgb, alternate, selected).
        self.bg_brushes = {}

    def get_bgbrush(self, normal_brush, alternate, selected):
        """Get cell colour as a function of its ordinary colour, whether it is on an odd
        row, and whether it is selected."""
        normal_rgb = normal_brush.color().getRgb() if normal_brush is not None else None
        try:
            return self.bg_brushes[normal_rgb, alternate, selected]
        except KeyError:
            pass
        # Cache miss - compute the brush.
        # Get the colour of the cell with alternate row shading:
        if normal_rgb is None:
            # No colour has been set. Use palette colours:
            if alternate:
                bg_color = self.alternate_color
            else:
                bg_color = self.normal_color
        else:
            bg_color = normal_brush.color()
            if alternate:
                # Modify alternate rows by the palette's base/alternate-base
                # per-channel deltas, clamped to 0-255:
                r, g, b, a = normal_rgb
                alt_r = min(max(r + self.delta_r, 0), 255)
                alt_g = min(max(g + self.delta_g, 0), 255)
                alt_b = min(max(b + self.delta_b, 0), 255)
                alt_a = min(max(a + self.delta_a, 0), 255)
                bg_color = QtGui.QColor(alt_r, alt_g, alt_b, alt_a)
        # If parent is a TableView, we handle selection highlighting as part of the
        # background colours:
        if selected and isinstance(self.view, QtWidgets.QTableView):
            # Overlay highlight colour:
            r_s, g_s, b_s, a_s = QtGui.QColor(ItemView.COLOR_HIGHLIGHT).getRgb()
            r_0, g_0, b_0, a_0 = bg_color.getRgb()
            rgb = composite_colors(r_0, g_0, b_0, a_0, r_s, g_s, b_s, a_s)
            bg_color = QtGui.QColor(*rgb)
        brush = QtGui.QBrush(bg_color)
        self.bg_brushes[normal_rgb, alternate, selected] = brush
        return brush

    def data(self, index, role):
        """When background color data is being requested, returns modified colours for
        every second row, according to the palette of the view. This has the effect of
        making the alternate colours visible even when custom colors have been set - the
        same shading will be applied to the custom colours. Only really looks sensible
        when the normal and alternate colors are similar. Also applies selection
        highlight colour (using ItemView.COLOR_HIGHLIGHT), similarly with alternate-row
        shading, for the case of a QTableView."""
        if role == QtCore.Qt.BackgroundRole:
            normal_brush = QtGui.QStandardItemModel.data(self, index, QtCore.Qt.BackgroundRole)
            selected = index in self.view.selectedIndexes()
            alternate = index.row() % 2
            return self.get_bgbrush(normal_brush, alternate, selected)
        return QtGui.QStandardItemModel.data(self, index, role)
class Editor(QtWidgets.QTextEdit):
    """Popup editor with word wrapping and automatic resizing."""

    def __init__(self, parent):
        QtWidgets.QTextEdit.__init__(self, parent)
        self.setWordWrapMode(QtGui.QTextOption.WordWrap)
        self.setAcceptRichText(False)
        self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.textChanged.connect(self.update_size)
        # Height at the first resize; used as the minimum for auto-resizing.
        self.initial_height = None

    def update_size(self):
        min_height = self.initial_height
        if min_height is not None:
            # Temporarily shrink back to the initial height so that the
            # document below reports its preferred size rather than its
            # current size. QTextDocument doesn't have a sizeHint or
            # minimumSizeHint method, so this is the best we can do to get
            # its minimum size.
            self.setFixedHeight(min_height)
        preferred_height = self.document().size().toSize().height()
        # Do not shrink smaller than the initial height:
        if min_height is not None and preferred_height >= min_height:
            self.setFixedHeight(preferred_height)

    def resizeEvent(self, event):
        result = QtWidgets.QTextEdit.resizeEvent(self, event)
        # Record the initial height the first time we are resized:
        if self.initial_height is None:
            self.initial_height = self.height()
        return result
class ItemDelegate(QtWidgets.QStyledItemDelegate):
    """An item delegate with a larger row height and column width, faint grey vertical
    lines between columns, and a custom editor for handling multi-line data"""

    MIN_ROW_HEIGHT = 22
    EXTRA_ROW_HEIGHT = 6
    EXTRA_COL_WIDTH = 20

    def __init__(self, *args, **kwargs):
        QtWidgets.QStyledItemDelegate.__init__(self, *args, **kwargs)
        # Faint grey pen for the vertical column separators:
        self._pen = QtGui.QPen()
        self._pen.setWidth(1)
        self._pen.setColor(QtGui.QColor.fromRgb(128, 128, 128, 64))

    def sizeHint(self, *args):
        size = QtWidgets.QStyledItemDelegate.sizeHint(self, *args)
        if size.height() <= self.MIN_ROW_HEIGHT:
            height = self.MIN_ROW_HEIGHT
        else:
            # Ensure cells with multiple lines of text still have some padding:
            height = size.height() + self.EXTRA_ROW_HEIGHT
        return QtCore.QSize(size.width() + self.EXTRA_COL_WIDTH, height)

    def paint(self, painter, option, index):
        if isinstance(self.parent(), QtWidgets.QTableView):
            # Disable rendering of selection highlight for TableViews, they handle
            # it themselves with the background colour data:
            option.state &= ~(QtWidgets.QStyle.State_Selected)
        QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)
        # Faint separator line down the left edge of all but the first column:
        if index.column() > 0:
            painter.setPen(self._pen)
            painter.drawLine(option.rect.topLeft(), option.rect.bottomLeft())

    def eventFilter(self, obj, event):
        """Filter events before they get to the editor, so that editing is ended when
        the user presses tab, shift-tab or enter (which otherwise would not end editing
        in a QTextEdit)."""
        if event.type() == QtCore.QEvent.KeyPress:
            if event.key() in [QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return]:
                # Allow shift-enter
                if not event.modifiers() & QtCore.Qt.ShiftModifier:
                    self.commitData.emit(obj)
                    self.closeEditor.emit(obj)
                    return True
            elif event.key() == QtCore.Qt.Key_Tab:
                # Tab commits and moves to the next item:
                self.commitData.emit(obj)
                self.closeEditor.emit(obj, QtWidgets.QStyledItemDelegate.EditNextItem)
                return True
            elif event.key() == QtCore.Qt.Key_Backtab:
                # Shift-tab commits and moves to the previous item:
                self.commitData.emit(obj)
                self.closeEditor.emit(obj, QtWidgets.QStyledItemDelegate.EditPreviousItem)
                return True
        return QtWidgets.QStyledItemDelegate.eventFilter(self, obj, event)

    def createEditor(self, parent, option, index):
        # Use our auto-resizing, word-wrapping editor for all cells:
        return Editor(parent)

    def setEditorData(self, editor, index):
        editor.setPlainText(index.data())
        # Use the item's font if set, at the view's default point size:
        font = index.data(QtCore.Qt.FontRole)
        default_font = qapplication.font(self.parent())
        if font is None:
            font = default_font
        font.setPointSize(default_font.pointSize())
        editor.setFont(font)
        # Pad the document so the editor text lines up with the cell text:
        font_height = QtGui.QFontMetrics(font).height()
        padding = (self.MIN_ROW_HEIGHT - font_height) / 2 - 1
        editor.document().setDocumentMargin(padding)
        editor.selectAll()

    def setModelData(self, editor, model, index):
        model.setData(index, editor.toPlainText())
class GroupTab(object):
GLOBALS_COL_DELETE = 0
GLOBALS_COL_NAME = 1
GLOBALS_COL_VALUE = 2
GLOBALS_COL_UNITS = 3
GLOBALS_COL_EXPANSION = 4
GLOBALS_ROLE_IS_DUMMY_ROW = QtCore.Qt.UserRole + 1
GLOBALS_ROLE_SORT_DATA = QtCore.Qt.UserRole + 2
GLOBALS_ROLE_PREVIOUS_TEXT = QtCore.Qt.UserRole + 3
GLOBALS_ROLE_IS_BOOL = QtCore.Qt.UserRole + 4
COLOR_ERROR = '#F79494' # light red
COLOR_OK = '#A5F7C6' # light green
COLOR_BOOL_ON = '#63F731' # bright green
COLOR_BOOL_OFF = '#608060' # dark green
GLOBALS_DUMMY_ROW_TEXT = '<Click to add global>'
def __init__(self, tabWidget, globals_file, group_name):
self.tabWidget = tabWidget
loader = UiLoader()
loader.registerCustomWidget(TableView)
self.ui = loader.load(os.path.join(runmanager_dir, 'group.ui'))
# Add the ui to the parent tabWidget:
self.tabWidget.addTab(self.ui, group_name, closable=True)
self.set_file_and_group_name(globals_file, group_name)
self.globals_model = AlternatingColorModel(view=self.ui.tableView_globals)
self.globals_model.setHorizontalHeaderLabels(['Delete', 'Name', 'Value', 'Units', 'Expansion'])
self.globals_model.setSortRole(self.GLOBALS_ROLE_SORT_DATA)
self.ui.tableView_globals.setModel(self.globals_model)
self.ui.tableView_globals.setSelectionBehavior(QtWidgets.QTableView.SelectRows)
self.ui.tableView_globals.setSelectionMode(QtWidgets.QTableView.ExtendedSelection)
self.ui.tableView_globals.setSortingEnabled(True)
# Make it so the user can just start typing on an item to edit:
self.ui.tableView_globals.setEditTriggers(QtWidgets.QTableView.AnyKeyPressed |
QtWidgets.QTableView.EditKeyPressed)
# Ensure the clickable region of the delete button doesn't extend forever:
self.ui.tableView_globals.horizontalHeader().setStretchLastSection(False)
# Stretch the value column to fill available space:
self.ui.tableView_globals.horizontalHeader().setSectionResizeMode(
self.GLOBALS_COL_VALUE, QtWidgets.QHeaderView.Stretch
)
# Setup stuff for a custom context menu:
self.ui.tableView_globals.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Make the actions for the context menu:
self.action_globals_delete_selected = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/minus'), 'Delete selected global(s)', self.ui)
self.action_globals_set_selected_true = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box'), 'Set selected Booleans True', self.ui)
self.action_globals_set_selected_false = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'), 'Set selected Booleans False', self.ui)
self.connect_signals()
# Populate the model with globals from the h5 file:
self.populate_model()
# Set sensible column widths:
for col in range(self.globals_model.columnCount()):
if col != self.GLOBALS_COL_VALUE:
self.ui.tableView_globals.resizeColumnToContents(col)
if self.ui.tableView_globals.columnWidth(self.GLOBALS_COL_NAME) < 200:
self.ui.tableView_globals.setColumnWidth(self.GLOBALS_COL_NAME, 200)
if self.ui.tableView_globals.columnWidth(self.GLOBALS_COL_VALUE) < 200:
self.ui.tableView_globals.setColumnWidth(self.GLOBALS_COL_VALUE, 200)
if self.ui.tableView_globals.columnWidth(self.GLOBALS_COL_UNITS) < 100:
self.ui.tableView_globals.setColumnWidth(self.GLOBALS_COL_UNITS, 100)
if self.ui.tableView_globals.columnWidth(self.GLOBALS_COL_EXPANSION) < 100:
self.ui.tableView_globals.setColumnWidth(self.GLOBALS_COL_EXPANSION, 100)
self.ui.tableView_globals.resizeColumnToContents(self.GLOBALS_COL_DELETE)
# Error state of tab
self.tab_contains_errors = False
def connect_signals(self):
    """Wire up all signal/slot connections for the globals table and its
    context-menu actions."""
    view = self.ui.tableView_globals
    view.leftClicked.connect(self.on_tableView_globals_leftClicked)
    view.customContextMenuRequested.connect(
        self.on_tableView_globals_context_menu_requested)
    self.action_globals_set_selected_true.triggered.connect(
        lambda: self.on_globals_set_selected_bools_triggered('True'))
    self.action_globals_set_selected_false.triggered.connect(
        lambda: self.on_globals_set_selected_bools_triggered('False'))
    self.action_globals_delete_selected.triggered.connect(
        self.on_globals_delete_selected_triggered)
    self.globals_model.itemChanged.connect(self.on_globals_model_item_changed)
    # A context manager with which we can temporarily sever the itemChanged
    # connection above, so items can be modified without re-entering the
    # handler.
    self.globals_model_item_changed_disconnected = DisconnectContextManager(
        self.globals_model.itemChanged, self.on_globals_model_item_changed)
def set_file_and_group_name(self, globals_file, group_name):
    """Update labels, tab text and tooltip to reflect the given globals
    file and group name. Provided as a separate method so the main app
    can call it if the group gets renamed."""
    self.globals_file = globals_file
    self.group_name = group_name
    self.ui.label_globals_file.setText(globals_file)
    self.ui.label_group_name.setText(group_name)
    tab_index = self.tabWidget.indexOf(self.ui)
    self.tabWidget.setTabText(tab_index, group_name)
    tooltip = '%s\n(%s)' % (group_name, globals_file)
    self.tabWidget.setTabToolTip(tab_index, tooltip)
def set_tab_icon(self, icon_string):
    """Set this tab's icon (or clear it if icon_string is None), skipping
    the update when the icon is already the one requested."""
    index = self.tabWidget.indexOf(self.ui)
    icon = QtGui.QIcon() if icon_string is None else QtGui.QIcon(icon_string)
    # Only touch the tab bar when the icon actually differs:
    if self.tabWidget.tabIcon(index).cacheKey() != icon.cacheKey():
        logger.info('setting tab icon')
        self.tabWidget.setTabIcon(index, icon)
def populate_model(self):
    """Fill the model with this group's globals from the h5 file, append
    the dummy '<click to add global>' row, and sort by name."""
    group_globals = runmanager.get_globals(
        {self.group_name: self.globals_file})[self.group_name]
    for name, (value, units, expansion) in group_globals.items():
        row = self.make_global_row(name, value, units, expansion)
        self.globals_model.appendRow(row)
        # Initialise boolean styling and the expansion icon for the row:
        self.check_for_boolean_values(row[self.GLOBALS_COL_VALUE])
        self.on_globals_model_expansion_changed(row[self.GLOBALS_COL_EXPANSION])
    # Build the dummy row at the end. Its items carry a flag so later code
    # knows the row does not correspond to an actual global.
    dummy_name_item = QtGui.QStandardItem(self.GLOBALS_DUMMY_ROW_TEXT)
    dummy_name_item.setFont(QtGui.QFont(GLOBAL_MONOSPACE_FONT))
    dummy_name_item.setToolTip('Click to add global')
    dummy_name_item.setData(True, self.GLOBALS_ROLE_IS_DUMMY_ROW)
    dummy_name_item.setData(self.GLOBALS_DUMMY_ROW_TEXT, self.GLOBALS_ROLE_PREVIOUS_TEXT)
    # Enabled and editable but not selectable, so editing it creates a new
    # global:
    dummy_name_item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsEditable)
    # The other four cells of the dummy row are identical blank items:
    blank_items = []
    for _ in range(4):
        blank = QtGui.QStandardItem()
        blank.setData(True, self.GLOBALS_ROLE_IS_DUMMY_ROW)
        blank.setFlags(QtCore.Qt.NoItemFlags)
        blank.setToolTip('Click to add global')
        blank_items.append(blank)
    dummy_delete_item, dummy_value_item, dummy_units_item, dummy_expansion_item = blank_items
    self.globals_model.appendRow(
        [dummy_delete_item, dummy_name_item, dummy_value_item, dummy_units_item, dummy_expansion_item])
    # Sort by name:
    self.ui.tableView_globals.sortByColumn(self.GLOBALS_COL_NAME, QtCore.Qt.AscendingOrder)
def make_global_row(self, name, value='', units='', expansion=''):
    """Build and return the five QStandardItems for one global's row.

    Only the basic data is set here; colours and tooltips are filled in
    by update_parse_indication once runmanager has parsed everything.
    """
    logger.debug('%s:%s - make global row: %s ' % (self.globals_file, self.group_name, name))
    monospace = QtGui.QFont(GLOBAL_MONOSPACE_FONT)
    # Delete button cell:
    delete_item = QtGui.QStandardItem()
    delete_item.setIcon(QtGui.QIcon(':qtutils/fugue/minus'))
    # Sort data must be set to something so the dummy row doesn't get
    # sorted first:
    delete_item.setData(False, self.GLOBALS_ROLE_SORT_DATA)
    delete_item.setEditable(False)
    delete_item.setToolTip('Delete global from group.')
    # Name cell:
    name_item = QtGui.QStandardItem(name)
    name_item.setData(name, self.GLOBALS_ROLE_SORT_DATA)
    name_item.setData(name, self.GLOBALS_ROLE_PREVIOUS_TEXT)
    name_item.setToolTip(name)
    name_item.setFont(monospace)
    # Value cell:
    value_item = QtGui.QStandardItem(value)
    value_item.setData(value, self.GLOBALS_ROLE_SORT_DATA)
    value_item.setData(str(value), self.GLOBALS_ROLE_PREVIOUS_TEXT)
    value_item.setToolTip('Evaluating...')
    value_item.setFont(monospace)
    # Units cell (doubles as the boolean toggle when the value is a bool):
    units_item = QtGui.QStandardItem(units)
    units_item.setData(units, self.GLOBALS_ROLE_SORT_DATA)
    units_item.setData(units, self.GLOBALS_ROLE_PREVIOUS_TEXT)
    units_item.setData(False, self.GLOBALS_ROLE_IS_BOOL)
    units_item.setToolTip('')
    # Expansion cell:
    expansion_item = QtGui.QStandardItem(expansion)
    expansion_item.setData(expansion, self.GLOBALS_ROLE_SORT_DATA)
    expansion_item.setData(expansion, self.GLOBALS_ROLE_PREVIOUS_TEXT)
    expansion_item.setToolTip('')
    return [delete_item, name_item, value_item, units_item, expansion_item]
def on_tableView_globals_leftClicked(self, index):
    """Handle an unmodified left-click on the globals table.

    Clicking the dummy row begins creation of a new global; clicking a
    boolean's units cell toggles the value; clicking the delete button
    deletes the global; clicking any other cell enters edit mode.
    """
    if qapplication.keyboardModifiers() != QtCore.Qt.NoModifier:
        # Only handle mouseclicks with no keyboard modifiers.
        return
    item = self.globals_model.itemFromIndex(index)
    # The 'name' item in the same row:
    name_index = index.sibling(index.row(), self.GLOBALS_COL_NAME)
    name_item = self.globals_model.itemFromIndex(name_index)
    global_name = name_item.text()
    if item.data(self.GLOBALS_ROLE_IS_DUMMY_ROW):
        # They clicked on an 'add new global' row. Enter editing mode on
        # the name item so they can enter a name for the new global:
        self.ui.tableView_globals.setCurrentIndex(name_index)
        self.ui.tableView_globals.edit(name_index)
    elif item.data(self.GLOBALS_ROLE_IS_BOOL):
        # It's a bool indicator. Toggle it
        value_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_VALUE)
        if value_item.text() == 'True':
            value_item.setText('False')
        elif value_item.text() == 'False':
            value_item.setText('True')
        else:
            raise AssertionError('expected boolean value')
    elif item.column() == self.GLOBALS_COL_DELETE:
        # They clicked a delete button.
        self.delete_global(global_name)
    elif not item.data(self.GLOBALS_ROLE_IS_BOOL):
        # Edit whatever it is, unless it's already being edited.
        # Consistency fix: compare against QAbstractItemView.EditingState as
        # the rest of the file does (this previously named QTreeView, which
        # only worked because the enum is inherited unchanged).
        if (self.ui.tableView_globals.currentIndex() != index
                or self.ui.tableView_globals.state() != QtWidgets.QAbstractItemView.EditingState):
            self.ui.tableView_globals.setCurrentIndex(index)
            self.ui.tableView_globals.edit(index)
def on_globals_model_item_changed(self, item):
    """Dispatch an itemChanged signal to the handler for the item's
    column. Columns without a handler (the delete column) are ignored."""
    handlers = {
        self.GLOBALS_COL_NAME: self.on_globals_model_name_changed,
        self.GLOBALS_COL_VALUE: self.on_globals_model_value_changed,
        self.GLOBALS_COL_UNITS: self.on_globals_model_units_changed,
        self.GLOBALS_COL_EXPANSION: self.on_globals_model_expansion_changed,
    }
    handler = handlers.get(item.column())
    if handler is not None:
        handler(item)
def on_globals_model_name_changed(self, item):
    """Handles global renaming and creation of new globals due to the user
    editing the <click to add global> item"""
    item_text = item.text()
    if item.data(self.GLOBALS_ROLE_IS_DUMMY_ROW):
        # Editing the dummy row creates a new global — unless the text was
        # left as the placeholder:
        if item_text != self.GLOBALS_DUMMY_ROW_TEXT:
            self.new_global(item_text)
        return
    # Otherwise this may be a rename of an existing global. Only act if the
    # name actually changed, rather than something else about the item:
    previous_global_name = item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT)
    if item_text != previous_global_name:
        self.rename_global(previous_global_name, item_text)
def on_globals_model_value_changed(self, item):
    """Persist a change to a global's value text."""
    new_value = item.text()
    previous_value = item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT)
    # Only act if the value actually changed, rather than something else
    # about the item:
    if new_value == previous_value:
        return
    index = item.index()
    name_index = index.sibling(index.row(), self.GLOBALS_COL_NAME)
    global_name = self.globals_model.itemFromIndex(name_index).text()
    self.change_global_value(global_name, previous_value, new_value)
def on_globals_model_units_changed(self, item):
    """Keep a boolean global's checkbox in sync with its value, and
    persist a change to the units text."""
    index = item.index()
    new_units = item.text()
    previous_units = item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT)
    name_index = index.sibling(index.row(), self.GLOBALS_COL_NAME)
    global_name = self.globals_model.itemFromIndex(name_index).text()
    if item.data(self.GLOBALS_ROLE_IS_BOOL):
        # Boolean global: the units cell is a checkbox; make its check
        # state match the value text.
        value_text = self.get_global_item_by_name(
            global_name, self.GLOBALS_COL_VALUE).text()
        if value_text == 'True':
            item.setCheckState(QtCore.Qt.Checked)
        elif value_text == 'False':
            item.setCheckState(QtCore.Qt.Unchecked)
        else:
            raise AssertionError('expected boolean value')
    # Only act if the units actually changed, rather than something else
    # about the item:
    if new_units != previous_units:
        self.change_global_units(global_name, previous_units, new_units)
def on_globals_model_expansion_changed(self, item):
    """Update the expansion cell's icon and tooltip to match its text
    ('outer', a zip-group name, or empty), then persist the change if the
    expansion actually differs from its previous value."""
    index = item.index()
    new_expansion = item.text()
    previous_expansion = item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT)
    name_index = index.sibling(index.row(), self.GLOBALS_COL_NAME)
    name_item = self.globals_model.itemFromIndex(name_index)
    global_name = name_item.text()
    # Don't want icon changing to recurse - which happens even if it is
    # the same icon. So disconnect the signal temporarily:
    with self.globals_model_item_changed_disconnected:
        if new_expansion == 'outer':
            item.setIcon(QtGui.QIcon(':qtutils/custom/outer'))
            item.setToolTip('This global will be interpreted as a list of values, and will ' +
                            'be outer producted with other lists to form a larger parameter space.')
        elif new_expansion:
            # Any other non-empty string names a zip group:
            item.setIcon(QtGui.QIcon(':qtutils/custom/zip'))
            item.setToolTip('This global will be interpreted as a list of values, and will ' +
                            'be iterated over in lock-step with other globals in the ' +
                            '\'%s\' zip group.' % new_expansion)
        else:
            # Empty expansion: a plain scalar global. Clear the icon:
            item.setData(None, QtCore.Qt.DecorationRole)
            item.setToolTip('This global will be interpreted as a single value and passed to compilation as-is.')
    # Ensure the value actually changed, rather than something else about
    # the item:
    if new_expansion != previous_expansion:
        self.change_global_expansion(global_name, previous_expansion, new_expansion)
def on_tableView_globals_context_menu_requested(self, point):
    """Show the context menu with the set-boolean and delete actions."""
    menu = QtWidgets.QMenu(self.ui)
    for action in (self.action_globals_set_selected_true,
                   self.action_globals_set_selected_false,
                   self.action_globals_delete_selected):
        menu.addAction(action)
    # Pop up at the cursor rather than the widget-relative point:
    menu.exec_(QtGui.QCursor.pos())
def on_globals_delete_selected_triggered(self):
    """Delete all currently selected globals, confirming once for a
    multiple selection or per-global for a single one."""
    name_items = [self.globals_model.itemFromIndex(index)
                  for index in self.ui.tableView_globals.selectedIndexes()
                  if index.column() == self.GLOBALS_COL_NAME]
    # If multiple selected, show 'delete n globals?' once up front;
    # otherwise pass confirm=True to self.delete_global so it shows its
    # regular per-global message.
    confirm_multiple = len(name_items) > 1
    if confirm_multiple and not question_dialog("Delete %d globals?" % len(name_items)):
        return
    for item in name_items:
        self.delete_global(item.text(), confirm=not confirm_multiple)
def on_globals_set_selected_bools_triggered(self, state):
    """Set every selected boolean global's value text to `state` ('True'
    or 'False'); non-boolean rows in the selection are left alone."""
    model = self.globals_model
    selected = [model.itemFromIndex(index)
                for index in self.ui.tableView_globals.selectedIndexes()]
    value_items = [item for item in selected if item.column() == self.GLOBALS_COL_VALUE]
    units_items = [item for item in selected if item.column() == self.GLOBALS_COL_UNITS]
    # value/units items pair up row-for-row; only rows whose units cell is
    # flagged boolean are changed:
    for value_item, units_item in zip(value_items, units_items):
        if units_item.data(self.GLOBALS_ROLE_IS_BOOL):
            value_item.setText(state)
def close(self):
    """Remove this group's tab from the tab widget.

    It is up to the main runmanager class to drop references to this
    instance before or after calling this method, so that once the
    tabWidget no longer owns our widgets, both the widgets and the
    instance can be garbage collected.
    """
    self.tabWidget.removeTab(self.tabWidget.indexOf(self.ui))
def get_global_item_by_name(self, global_name, column, previous_name=None):
    """Returns an item from the row representing a global in the globals model.
    Which item is returned is set by the column argument.

    Args:
        global_name: current text of the row's name cell.
        column: which column's item of the matching row to return.
        previous_name: if given, only match rows whose name item's
            GLOBALS_ROLE_PREVIOUS_TEXT data equals this value.

    Raises:
        LookupError: if zero, or more than one, matching row is found.
    """
    possible_name_items = self.globals_model.findItems(global_name, column=self.GLOBALS_COL_NAME)
    if previous_name is not None:
        # Filter by previous name, useful for telling rows apart when a
        # rename is in progress and two rows may temporarily contain the
        # same name (though the rename code will throw an error and revert
        # it).
        possible_name_items = [item for item in possible_name_items
                               if item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT) == previous_name]
    elif global_name != self.GLOBALS_DUMMY_ROW_TEXT:
        # Don't return the dummy item unless they asked for it explicitly
        # - if a new global is being created, its name might be
        # simultaneously present in its own row and the dummy row too.
        possible_name_items = [item for item in possible_name_items
                               if not item.data(self.GLOBALS_ROLE_IS_DUMMY_ROW)]
    if len(possible_name_items) > 1:
        raise LookupError('Multiple items found')
    elif not possible_name_items:
        raise LookupError('No item found')
    name_item = possible_name_items[0]
    name_index = name_item.index()
    # Found the name item, get the sibling item for the column requested:
    item_index = name_index.sibling(name_index.row(), column)
    item = self.globals_model.itemFromIndex(item_index)
    return item
def do_model_sort(self):
    """Re-sort the table according to the header's current sort
    indicator (column and order)."""
    header = self.ui.tableView_globals.horizontalHeader()
    self.ui.tableView_globals.sortByColumn(
        header.sortIndicatorSection(), header.sortIndicatorOrder())
def new_global(self, global_name):
    """Create a new global named global_name in the h5 file, insert a row
    for it into the model, and begin editing its value cell. The dummy
    row's placeholder text is restored regardless of success."""
    logger.info('%s:%s - new global: %s', self.globals_file, self.group_name, global_name)
    # The dummy-row name item whose editing triggered this creation:
    item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_NAME,
                                        previous_name=self.GLOBALS_DUMMY_ROW_TEXT)
    try:
        runmanager.new_global(self.globals_file, self.group_name, global_name)
    except Exception as e:
        error_dialog(str(e))
    else:
        # Insert the newly created global into the model:
        global_row = self.make_global_row(global_name)
        last_index = self.globals_model.rowCount()
        # Insert it as the row before the last (dummy) row:
        self.globals_model.insertRow(last_index - 1, global_row)
        self.do_model_sort()
        # Go into edit mode on the 'value' item:
        value_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_VALUE,
                                                  previous_name=global_name)
        value_item_index = value_item.index()
        self.ui.tableView_globals.setCurrentIndex(value_item_index)
        self.ui.tableView_globals.edit(value_item_index)
        self.globals_changed()
    finally:
        # Set the dummy row's text back ready for another group to be created:
        item.setText(self.GLOBALS_DUMMY_ROW_TEXT)
def rename_global(self, previous_global_name, new_global_name):
    """Rename a global in the h5 file and update its model row to match,
    reverting the name cell if the rename fails."""
    logger.info('%s:%s - rename global: %s -> %s',
                self.globals_file, self.group_name, previous_global_name, new_global_name)
    # Match by previous name: during a rename two rows can momentarily
    # share the same displayed name.
    item = self.get_global_item_by_name(new_global_name, self.GLOBALS_COL_NAME,
                                        previous_name=previous_global_name)
    try:
        runmanager.rename_global(self.globals_file, self.group_name, previous_global_name, new_global_name)
    except Exception as e:
        error_dialog(str(e))
        # Set the item text back to the old name, since the rename failed:
        item.setText(previous_global_name)
    else:
        item.setData(new_global_name, self.GLOBALS_ROLE_PREVIOUS_TEXT)
        item.setData(new_global_name, self.GLOBALS_ROLE_SORT_DATA)
        self.do_model_sort()
        item.setToolTip(new_global_name)
        self.globals_changed()
        value_item = self.get_global_item_by_name(new_global_name, self.GLOBALS_COL_VALUE)
        value = value_item.text()
        if not value and self.ui.tableView_globals.state() != QtWidgets.QAbstractItemView.EditingState:
            # Go into editing the value item automatically if not already in edit mode:
            value_item_index = value_item.index()
            self.ui.tableView_globals.setCurrentIndex(value_item_index)
            self.ui.tableView_globals.edit(value_item_index)
        else:
            # If this changed the sort order, ensure the item is still visible:
            scroll_view_to_row_if_current(self.ui.tableView_globals, item)
def change_global_value(self, global_name, previous_value, new_value, interactive=True):
    """Begin changing a global's value. The cell is put in a 'pending'
    state (hourglass icon), and the actual write to the h5 file happens in
    complete_change_global_value — deferred via a QTimer when interactive
    so the GUI can repaint first.

    Args:
        global_name: name of the global being changed.
        previous_value: value text prior to this change (for reverting).
        new_value: the new value text.
        interactive: True if the change came from user editing; if False,
            this method sets the cell text itself and errors propagate to
            the caller instead of showing a dialog.
    """
    logger.info('%s:%s - change global value: %s = %s -> %s' %
                (self.globals_file, self.group_name, global_name, previous_value, new_value))
    item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_VALUE)
    if not interactive:
        # Value was not set interactively by the user, it is up to us to set it:
        with self.globals_model_item_changed_disconnected:
            item.setText(new_value)
    # Remember the cell's appearance so it can be restored on failure:
    previous_background = item.background()
    previous_icon = item.icon()
    item.setData(new_value, self.GLOBALS_ROLE_PREVIOUS_TEXT)
    item.setData(new_value, self.GLOBALS_ROLE_SORT_DATA)
    item.setData(None, QtCore.Qt.BackgroundRole)
    item.setIcon(QtGui.QIcon(':qtutils/fugue/hourglass'))
    args = global_name, previous_value, new_value, item, previous_background, previous_icon
    if interactive:
        QtCore.QTimer.singleShot(1, lambda: self.complete_change_global_value(*args))
    else:
        self.complete_change_global_value(*args, interactive=False)
def complete_change_global_value(self, global_name, previous_value, new_value, item, previous_background, previous_icon, interactive=True):
    """Second half of change_global_value: write the new value to the h5
    file, reverting the cell (and either showing a dialog or re-raising,
    depending on `interactive`) if the write fails."""
    try:
        runmanager.set_value(self.globals_file, self.group_name, global_name, new_value)
    except Exception as e:
        if interactive:
            error_dialog(str(e))
        # Set the item text back to the old name, since the change failed:
        with self.globals_model_item_changed_disconnected:
            item.setText(previous_value)
            item.setData(previous_value, self.GLOBALS_ROLE_PREVIOUS_TEXT)
            item.setData(previous_value, self.GLOBALS_ROLE_SORT_DATA)
            item.setData(previous_background, QtCore.Qt.BackgroundRole)
            item.setIcon(previous_icon)
        if not interactive:
            raise
    else:
        self.check_for_boolean_values(item)
        self.do_model_sort()
        item.setToolTip('Evaluating...')
        self.globals_changed()
        if not interactive:
            return
        units_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_UNITS)
        units = units_item.text()
        if not units and self.ui.tableView_globals.state() != QtWidgets.QAbstractItemView.EditingState:
            # Go into editing the units item automatically if not already in edit mode:
            units_item_index = units_item.index()
            self.ui.tableView_globals.setCurrentIndex(units_item_index)
            self.ui.tableView_globals.edit(units_item_index)
        else:
            # If this changed the sort order, ensure the item is still visible:
            scroll_view_to_row_if_current(self.ui.tableView_globals, item)
def change_global_units(self, global_name, previous_units, new_units):
    """Persist a units change to the h5 file, reverting the cell if the
    write fails."""
    logger.info('%s:%s - change units: %s = %s -> %s' %
                (self.globals_file, self.group_name, global_name, previous_units, new_units))
    item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_UNITS)
    try:
        runmanager.set_units(self.globals_file, self.group_name, global_name, new_units)
    except Exception as e:
        error_dialog(str(e))
        # The change failed; restore the previous units text:
        item.setText(previous_units)
        return
    item.setData(new_units, self.GLOBALS_ROLE_PREVIOUS_TEXT)
    item.setData(new_units, self.GLOBALS_ROLE_SORT_DATA)
    self.do_model_sort()
    # The sort order may have changed; keep the item visible:
    scroll_view_to_row_if_current(self.ui.tableView_globals, item)
def change_global_expansion(self, global_name, previous_expansion, new_expansion):
    """Persist an expansion change to the h5 file, reverting the cell if
    the write fails."""
    logger.info('%s:%s - change expansion: %s = %s -> %s' %
                (self.globals_file, self.group_name, global_name, previous_expansion, new_expansion))
    item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_EXPANSION)
    try:
        runmanager.set_expansion(self.globals_file, self.group_name, global_name, new_expansion)
    except Exception as e:
        error_dialog(str(e))
        # The change failed; restore the previous expansion text:
        item.setText(previous_expansion)
        return
    item.setData(new_expansion, self.GLOBALS_ROLE_PREVIOUS_TEXT)
    item.setData(new_expansion, self.GLOBALS_ROLE_SORT_DATA)
    self.do_model_sort()
    # Expansion affects parsing, so trigger a re-parse:
    self.globals_changed()
    # The sort order may have changed; keep the item visible:
    scroll_view_to_row_if_current(self.ui.tableView_globals, item)
def check_for_boolean_values(self, item):
    """Checks if the value is 'True' or 'False'. If either, makes the
    units cell checkable, uneditable, and coloured to indicate the state.
    The units cell can then be clicked to toggle the value."""
    index = item.index()
    value = item.text()
    name_item = self.globals_model.itemFromIndex(
        index.sibling(index.row(), self.GLOBALS_COL_NAME))
    units_item = self.globals_model.itemFromIndex(
        index.sibling(index.row(), self.GLOBALS_COL_UNITS))
    logger.debug('%s:%s - check for boolean values: %s' %
                 (self.globals_file, self.group_name, name_item.text()))
    if value in ('True', 'False'):
        is_true = value == 'True'
        units_item.setData(True, self.GLOBALS_ROLE_IS_BOOL)
        units_item.setText('Bool')
        # Sort keys '!1'/'!0' sort booleans ahead of ordinary unit strings:
        units_item.setData('!1' if is_true else '!0', self.GLOBALS_ROLE_SORT_DATA)
        units_item.setEditable(False)
        units_item.setCheckState(QtCore.Qt.Checked if is_true else QtCore.Qt.Unchecked)
        colour = self.COLOR_BOOL_ON if is_true else self.COLOR_BOOL_OFF
        units_item.setBackground(QtGui.QBrush(QtGui.QColor(colour)))
    else:
        was_bool = units_item.data(self.GLOBALS_ROLE_IS_BOOL)
        units_item.setData(False, self.GLOBALS_ROLE_IS_BOOL)
        units_item.setEditable(True)
        # The checkbox and background linger unless explicitly cleared:
        units_item.setData(None, QtCore.Qt.CheckStateRole)
        units_item.setData(None, QtCore.Qt.BackgroundRole)
        if was_bool:
            # If the item was a bool and now isn't, clear the units and go
            # into editing so the user can enter a new units string:
            units_item.setText('')
            self.ui.tableView_globals.setCurrentIndex(units_item.index())
            self.ui.tableView_globals.edit(units_item.index())
def globals_changed(self):
    """Called whenever something about a global has changed. Calls
    app.globals_changed to inform the main application that it needs to
    parse globals again. self.update_parse_indication will be called by
    the main app when parsing is done, and will set the colours and
    tooltips appropriately."""
    # Tell the main app about it:
    app.globals_changed()
def delete_global(self, global_name, confirm=True):
    """Delete a global from the h5 file and remove its row from the model,
    optionally asking the user for confirmation first."""
    logger.info('%s:%s - delete global: %s' %
                (self.globals_file, self.group_name, global_name))
    if confirm and not question_dialog("Delete the global '%s'?" % global_name):
        return
    runmanager.delete_global(self.globals_file, self.group_name, global_name)
    # Find the entry for this global in self.globals_model and remove it:
    name_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_NAME)
    self.globals_model.removeRow(name_item.row())
    self.globals_changed()
def update_parse_indication(self, active_groups, sequence_globals, evaled_globals):
    """Refresh per-global colours, icons and tooltips after a parse.

    Args:
        active_groups: mapping of active group name -> globals file, used
            to decide whether this tab's group took part in the parse.
        sequence_globals: mapping group name -> {global name:
            (value, units, expansion)} as stored in the h5 file.
        evaled_globals: mapping group name -> {global name: evaluated
            value, or an Exception instance if evaluation failed}.
    """
    # Check that we are an active group:
    if self.group_name in active_groups and active_groups[self.group_name] == self.globals_file:
        self.tab_contains_errors = False
        # for global_name, value in evaled_globals[self.group_name].items():
        for i in range(self.globals_model.rowCount()):
            name_item = self.globals_model.item(i, self.GLOBALS_COL_NAME)
            if name_item.data(self.GLOBALS_ROLE_IS_DUMMY_ROW):
                continue
            value_item = self.globals_model.item(i, self.GLOBALS_COL_VALUE)
            expansion_item = self.globals_model.item(i, self.GLOBALS_COL_EXPANSION)
            # value_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_VALUE)
            # expansion_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_EXPANSION)
            global_name = name_item.text()
            value = evaled_globals[self.group_name][global_name]
            ignore, ignore, expansion = sequence_globals[self.group_name][global_name]
            # Temporarily disconnect the item_changed signal on the model
            # so that we can set the expansion type without triggering
            # another preparse - the parsing has already been done with
            # the new expansion type.
            with self.globals_model_item_changed_disconnected:
                if expansion_item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT) != expansion:
                    # logger.info('expansion previous text set')
                    expansion_item.setData(expansion, self.GLOBALS_ROLE_PREVIOUS_TEXT)
                if expansion_item.data(self.GLOBALS_ROLE_SORT_DATA) != expansion:
                    # logger.info('sort data role set')
                    expansion_item.setData(expansion, self.GLOBALS_ROLE_SORT_DATA)
            # The next line will now trigger item_changed, but it will not
            # be detected as an actual change to the expansion type,
            # because previous_text will match text. So it will not look
            # like a change and will not trigger preparsing. However It is
            # still important that other triggers be processed, such as
            # setting the icon in the expansion item, so that will still
            # occur in the callback.
            expansion_item.setText(expansion)
            if isinstance(value, Exception):
                value_item.setBackground(QtGui.QBrush(QtGui.QColor(self.COLOR_ERROR)))
                value_item.setIcon(QtGui.QIcon(':qtutils/fugue/exclamation'))
                tooltip = '%s: %s' % (value.__class__.__name__, str(value))
                self.tab_contains_errors = True
            else:
                # Avoid gratuitous repaints: only update appearance when
                # it actually differs from the current state.
                if value_item.background().color().name().lower() != self.COLOR_OK.lower():
                    value_item.setBackground(QtGui.QBrush(QtGui.QColor(self.COLOR_OK)))
                if not value_item.icon().isNull():
                    # logger.info('clearing icon')
                    value_item.setData(None, QtCore.Qt.DecorationRole)
                tooltip = repr(value)
            if value_item.toolTip() != tooltip:
                # logger.info('tooltip_changed')
                value_item.setToolTip(tooltip)
        if self.tab_contains_errors:
            self.set_tab_icon(':qtutils/fugue/exclamation')
        else:
            self.set_tab_icon(None)
    else:
        # Clear everything:
        self.set_tab_icon(None)
        for row in range(self.globals_model.rowCount()):
            item = self.globals_model.item(row, self.GLOBALS_COL_VALUE)
            if item.data(self.GLOBALS_ROLE_IS_DUMMY_ROW):
                continue
            item.setData(None, QtCore.Qt.DecorationRole)
            item.setToolTip('Group inactive')
            item.setData(None, QtCore.Qt.BackgroundRole)
class RunmanagerMainWindow(QtWidgets.QMainWindow):
    """Main window subclass that reports its first paint and lets the
    application veto window close."""

    # Emitted once, the first time the window is painted, so deferred
    # startup work can wait until the GUI is visible.
    firstPaint = Signal()

    def __init__(self, *args, **kwargs):
        QtWidgets.QMainWindow.__init__(self, *args, **kwargs)
        self._previously_painted = False

    def closeEvent(self, event):
        # Give the application a chance to veto the close:
        if not app.on_close_event():
            event.ignore()
            return
        return QtWidgets.QMainWindow.closeEvent(self, event)

    def paintEvent(self, event):
        result = QtWidgets.QMainWindow.paintEvent(self, event)
        if not self._previously_painted:
            self._previously_painted = True
            self.firstPaint.emit()
        return result
class PoppedOutOutputBoxWindow(QtWidgets.QDialog):
    # Window that hosts the output box when it is popped out of the main
    # window.
    def closeEvent(self, event):
        # Closing the popped-out window toggles the output box back into
        # the main window rather than destroying it:
        app.on_output_popout_button_clicked()
class RunManager(object):
    """Main application object for runmanager (GUI, models, worker
    threads and the compiler subprocess)."""

    # Constants for the model in the axes tab:
    AXES_COL_NAME = 0
    AXES_COL_LENGTH = 1
    AXES_COL_SHUFFLE = 2
    # Custom item-data role storing the axis's name:
    AXES_ROLE_NAME = QtCore.Qt.UserRole + 1

    # Constants for the model in the groups tab:
    GROUPS_COL_NAME = 0
    GROUPS_COL_ACTIVE = 1
    GROUPS_COL_DELETE = 2
    GROUPS_COL_OPENCLOSE = 3
    # Custom item-data roles for group rows:
    GROUPS_ROLE_IS_DUMMY_ROW = QtCore.Qt.UserRole + 1
    GROUPS_ROLE_PREVIOUS_NAME = QtCore.Qt.UserRole + 2
    GROUPS_ROLE_SORT_DATA = QtCore.Qt.UserRole + 3
    GROUPS_ROLE_GROUP_IS_OPEN = QtCore.Qt.UserRole + 4
    # Placeholder text of the row used to create new groups:
    GROUPS_DUMMY_ROW_TEXT = '<Click to add group>'
def __init__(self):
    """Build the main window and models, start the worker threads and the
    compiler subprocess, then show the GUI (optionally autoloading a
    config file after the first paint)."""
    splash.update_text('loading graphical interface')
    loader = UiLoader()
    loader.registerCustomWidget(FingerTabWidget)
    loader.registerCustomWidget(TreeView)
    self.ui = loader.load(
        os.path.join(runmanager_dir, 'main.ui'), RunmanagerMainWindow()
    )
    self.output_box = OutputBox(self.ui.verticalLayout_output_tab)
    # Add a 'pop-out' button to the output tab:
    output_tab_index = self.ui.tabWidget.indexOf(self.ui.tab_output)
    self.output_popout_button = TabToolButton(self.ui.tabWidget.parent())
    self.output_popout_button.setIcon(QtGui.QIcon(':/qtutils/fugue/arrow-out'))
    self.output_popout_button.setToolTip('Toggle whether the output box is in a separate window')
    self.ui.tabWidget.tabBar().setTabButton(output_tab_index, QtWidgets.QTabBar.RightSide, self.output_popout_button)
    # Fix the first three tabs in place:
    for index in range(3):
        self.ui.tabWidget.tabBar().setMovable(False, index=index)
    # Whether or not the output box is currently popped out:
    self.output_box_is_popped_out = False
    # The window it will be moved to when popped out:
    self.output_box_window = PoppedOutOutputBoxWindow(self.ui, QtCore.Qt.WindowSystemMenuHint)
    self.output_box_window_verticalLayout = QtWidgets.QVBoxLayout(self.output_box_window)
    self.output_box_window_verticalLayout.setContentsMargins(0, 0, 0, 0)
    self.output_box_window.setWindowTitle('runmanager output')
    self.output_box_window.resize(800, 1000)
    self.setup_config()
    self.setup_axes_tab()
    self.setup_groups_tab()
    self.connect_signals()
    # The last location from which a labscript file was selected, defaults
    # to labscriptlib:
    self.last_opened_labscript_folder = self.exp_config.get('paths', 'labscriptlib')
    # The last location from which a globals file was selected, defaults
    # to experiment_shot_storage:
    self.last_opened_globals_folder = self.exp_config.get('paths', 'experiment_shot_storage')
    # The last file to which the user saved or loaded a configuration:
    self.last_save_config_file = None
    # The last manually selected shot output folder, defaults to
    # experiment_shot_storage:
    self.last_selected_shot_output_folder = self.exp_config.get('paths', 'experiment_shot_storage')
    self.shared_drive_prefix = self.exp_config.get('paths', 'shared_drive')
    self.experiment_shot_storage = self.exp_config.get('paths', 'experiment_shot_storage')
    # Store the currently open groups as {(globals_filename, group_name): GroupTab}
    self.currently_open_groups = {}
    # A thread that will evaluate globals when they change, allowing us to
    # show their values and any errors in the tabs they came from.
    self.preparse_globals_thread = threading.Thread(target=self.preparse_globals_loop)
    self.preparse_globals_thread.daemon = True
    # A Queue for informing the preparser thread when globals have changed, and thus
    # need parsing again. It is a queue rather than a threading.Event() so that
    # callers can call Queue.join() to wait for parsing to complete in a race-free
    # way
    self.preparse_globals_required = queue.Queue()
    self.preparse_globals_thread.start()
    # A flag telling the compilation thread to abort:
    self.compilation_aborted = threading.Event()
    # A few attributes for self.guess_expansion_modes() to keep track of
    # its state, and thus detect changes:
    self.previous_evaled_globals = {}
    self.previous_global_hierarchy = {}
    self.previous_expansion_types = {}
    self.previous_expansions = {}
    # The prospective number of shots resulting from compilation
    self.n_shots = None
    # Start the loop that allows compilations to be queued up:
    self.compile_queue = queue.Queue()
    self.compile_queue_thread = threading.Thread(target=self.compile_loop)
    self.compile_queue_thread.daemon = True
    self.compile_queue_thread.start()
    splash.update_text('starting compiler subprocess')
    # Start the compiler subprocess:
    self.to_child, self.from_child, self.child = process_tree.subprocess(
        os.path.join(runmanager_dir, 'batch_compiler.py'),
        output_redirection_port=self.output_box.port,
    )
    # Is blank until a labscript file is selected:
    self.previous_default_output_folder = ''
    # Start a thread to monitor the time of day and create new shot output
    # folders for each day:
    inthread(self.rollover_shot_output_folder)
    self.non_default_folder = None
    # The data from the last time we saved the configuration, so we can
    # know if something's changed:
    self.last_save_data = None
    # autoload a config file, if labconfig is set to do so:
    try:
        autoload_config_file = self.exp_config.get('runmanager', 'autoload_config_file')
    except (LabConfig.NoOptionError, LabConfig.NoSectionError):
        self.output_box.output('Ready.\n\n')
    else:
        self.ui.setEnabled(False)
        self.output_box.output('Loading default config file %s...' % autoload_config_file)

        def load_the_config_file():
            # Runs deferred (see the firstPaint connection below) so the
            # GUI appears promptly before the config load begins:
            try:
                self.load_configuration(autoload_config_file)
                self.output_box.output('done.\n')
            except Exception as e:
                self.output_box.output('\nCould not load config file: %s: %s\n\n' %
                                       (e.__class__.__name__, str(e)), red=True)
            else:
                self.output_box.output('Ready.\n\n')
            finally:
                self.ui.setEnabled(True)
        # Defer this until 50ms after the window has shown,
        # so that the GUI pops up faster in the meantime
        self.ui.firstPaint.connect(lambda: QtCore.QTimer.singleShot(50, load_the_config_file))

    splash.update_text('done')
    self.ui.show()
def setup_config(self):
required_config_params = {"DEFAULT": ["apparatus_name"],
"programs": ["text_editor",
"text_editor_arguments",
],
"ports": ['BLACS', 'runviewer'],
"paths": ["shared_drive",
"experiment_shot_storage",
"labscriptlib",
],
}
self.exp_config = LabConfig(required_params = required_config_params)
    def setup_axes_tab(self):
        """Create the model behind the axes treeview, its headers, context
        menu policy and (not-yet-wired) context-menu actions."""
        self.axes_model = QtGui.QStandardItemModel()
        # Setup the model columns and link to the treeview
        name_header_item = QtGui.QStandardItem('Name')
        name_header_item.setToolTip('The name of the global or zip group being iterated over')
        self.axes_model.setHorizontalHeaderItem(self.AXES_COL_NAME, name_header_item)
        length_header_item = QtGui.QStandardItem('Length')
        length_header_item.setToolTip('The number of elements in the axis of the parameter space')
        self.axes_model.setHorizontalHeaderItem(self.AXES_COL_LENGTH, length_header_item)
        shuffle_header_item = QtGui.QStandardItem('Shuffle')
        shuffle_header_item.setToolTip('Whether or not the order of the axis should be randomised')
        shuffle_header_item.setIcon(QtGui.QIcon(':qtutils/fugue/arrow-switch'))
        self.axes_model.setHorizontalHeaderItem(self.AXES_COL_SHUFFLE, shuffle_header_item)
        self.ui.treeView_axes.setModel(self.axes_model)
        # Setup stuff for a custom context menu:
        self.ui.treeView_axes.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        # Make the actions for the context menu:
        # NOTE(review): the handler on_treeView_axes_context_menu_requested is
        # not implemented yet, so these actions are currently unreachable.
        self.action_axes_check_selected = QtWidgets.QAction(QtGui.QIcon(':qtutils/fugue/ui-check-box'),
                                                            'Check selected', self.ui)
        self.action_axes_uncheck_selected = QtWidgets.QAction(QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'),
                                                              'Uncheck selected', self.ui)
        # setup header widths
        self.ui.treeView_axes.header().setStretchLastSection(False)
        self.ui.treeView_axes.header().setSectionResizeMode(self.AXES_COL_NAME, QtWidgets.QHeaderView.Stretch)
    def setup_groups_tab(self):
        """Create the model behind the groups treeview, configure sorting,
        editing, column sizing, and the context-menu actions."""
        self.groups_model = QtGui.QStandardItemModel()
        self.groups_model.setHorizontalHeaderLabels(['File/group name', 'Active', 'Delete', 'Open/Close'])
        self.groups_model.setSortRole(self.GROUPS_ROLE_SORT_DATA)
        self.ui.treeView_groups.setModel(self.groups_model)
        self.ui.treeView_groups.setAnimated(True)  # Pretty
        self.ui.treeView_groups.setSelectionMode(QtWidgets.QTreeView.ExtendedSelection)
        self.ui.treeView_groups.setSortingEnabled(True)
        self.ui.treeView_groups.sortByColumn(self.GROUPS_COL_NAME, QtCore.Qt.AscendingOrder)
        # Set column widths:
        self.ui.treeView_groups.setColumnWidth(self.GROUPS_COL_NAME, 400)
        # Make it so the user can just start typing on an item to edit:
        self.ui.treeView_groups.setEditTriggers(QtWidgets.QTreeView.AnyKeyPressed |
                                                QtWidgets.QTreeView.EditKeyPressed |
                                                QtWidgets.QTreeView.SelectedClicked)
        # Ensure the clickable region of the open/close button doesn't extend forever:
        self.ui.treeView_groups.header().setStretchLastSection(False)
        # Stretch the filpath/groupname column to fill available space:
        self.ui.treeView_groups.header().setSectionResizeMode(
            self.GROUPS_COL_NAME, QtWidgets.QHeaderView.Stretch
        )
        # Shrink columns other than the 'name' column to the size of their headers:
        for column in range(self.groups_model.columnCount()):
            if column != self.GROUPS_COL_NAME:
                self.ui.treeView_groups.resizeColumnToContents(column)
        self.ui.treeView_groups.setTextElideMode(QtCore.Qt.ElideMiddle)
        # Setup stuff for a custom context menu:
        self.ui.treeView_groups.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        # Make the actions for the context menu:
        self.action_groups_set_selection_active = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/ui-check-box'), 'Set selected group(s) active', self.ui)
        self.action_groups_set_selection_inactive = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'), 'Set selected group(s) inactive', self.ui)
        self.action_groups_delete_selected = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/minus'), 'Delete selected group(s)', self.ui)
        self.action_groups_open_selected = QtWidgets.QAction(
            QtGui.QIcon(':/qtutils/fugue/plus'), 'Open selected group(s)', self.ui)
        self.action_groups_close_selected_groups = QtWidgets.QAction(
            QtGui.QIcon(':/qtutils/fugue/cross'), 'Close selected group(s)', self.ui)
        self.action_groups_close_selected_files = QtWidgets.QAction(
            QtGui.QIcon(':/qtutils/fugue/cross'), 'Close selected file(s)', self.ui)
        # A counter for keeping track of the recursion depth of
        # self._groups_model_active_changed(). This is used so that some
        # actions can be taken in response to initial data changes, but not to
        # flow-on changes made by the method itself:
        self.on_groups_model_active_changed_recursion_depth = 0
    def connect_signals(self):
        """Wire up all Qt signal/slot connections for the main window:
        menu items, file/folder selection widgets, control buttons, the axes
        and groups tabs, and keyboard shortcuts."""
        # The button that pops the output box in and out:
        self.output_popout_button.clicked.connect(self.on_output_popout_button_clicked)
        # The menu items:
        self.ui.actionLoad_configuration.triggered.connect(self.on_load_configuration_triggered)
        self.ui.actionRevert_configuration.triggered.connect(self.on_revert_configuration_triggered)
        self.ui.actionSave_configuration.triggered.connect(self.on_save_configuration_triggered)
        self.ui.actionSave_configuration_as.triggered.connect(self.on_save_configuration_as_triggered)
        self.ui.actionQuit.triggered.connect(self.ui.close)
        # labscript file and folder selection stuff:
        self.ui.toolButton_select_labscript_file.clicked.connect(self.on_select_labscript_file_clicked)
        self.ui.toolButton_select_shot_output_folder.clicked.connect(self.on_select_shot_output_folder_clicked)
        self.ui.toolButton_edit_labscript_file.clicked.connect(self.on_edit_labscript_file_clicked)
        self.ui.toolButton_reset_shot_output_folder.clicked.connect(self.on_reset_shot_output_folder_clicked)
        self.ui.lineEdit_labscript_file.textChanged.connect(self.on_labscript_file_text_changed)
        self.ui.lineEdit_shot_output_folder.textChanged.connect(self.on_shot_output_folder_text_changed)
        # Control buttons; engage, abort, restart subprocess:
        self.ui.pushButton_engage.clicked.connect(self.on_engage_clicked)
        self.ui.pushButton_abort.clicked.connect(self.on_abort_clicked)
        self.ui.pushButton_restart_subprocess.clicked.connect(self.on_restart_subprocess_clicked)
        # shuffle master control
        # NOTE: despite its name, pushButton_shuffle supports checkbox-style
        # signals and tristate (see update_global_shuffle_state).
        self.ui.pushButton_shuffle.stateChanged.connect(self.on_master_shuffle_clicked)
        # Tab closebutton clicked:
        self.ui.tabWidget.tabCloseRequested.connect(self.on_tabCloseRequested)
        # Axes tab; right click menu, menu actions, reordering
        # self.ui.treeView_axes.customContextMenuRequested.connect(self.on_treeView_axes_context_menu_requested)
        self.action_axes_check_selected.triggered.connect(self.on_axes_check_selected_triggered)
        self.action_axes_uncheck_selected.triggered.connect(self.on_axes_uncheck_selected_triggered)
        self.ui.toolButton_axis_to_top.clicked.connect(self.on_axis_to_top_clicked)
        self.ui.toolButton_axis_up.clicked.connect(self.on_axis_up_clicked)
        self.ui.toolButton_axis_down.clicked.connect(self.on_axis_down_clicked)
        self.ui.toolButton_axis_to_bottom.clicked.connect(self.on_axis_to_bottom_clicked)
        # axes tab item changed handler
        self.axes_model.itemChanged.connect(self.on_axes_item_changed)
        self.axes_model.rowsRemoved.connect(self.update_global_shuffle_state)
        self.axes_model.rowsInserted.connect(self.update_global_shuffle_state)
        # Groups tab; right click menu, menu actions, open globals file, new globals file, diff globals file,
        self.ui.treeView_groups.customContextMenuRequested.connect(self.on_treeView_groups_context_menu_requested)
        self.action_groups_set_selection_active.triggered.connect(
            lambda: self.on_groups_set_selection_active_triggered(QtCore.Qt.Checked))
        self.action_groups_set_selection_inactive.triggered.connect(
            lambda: self.on_groups_set_selection_active_triggered(QtCore.Qt.Unchecked))
        self.action_groups_delete_selected.triggered.connect(self.on_groups_delete_selected_triggered)
        self.action_groups_open_selected.triggered.connect(self.on_groups_open_selected_triggered)
        self.action_groups_close_selected_groups.triggered.connect(self.on_groups_close_selected_groups_triggered)
        self.action_groups_close_selected_files.triggered.connect(self.on_groups_close_selected_files_triggered)
        self.ui.pushButton_open_globals_file.clicked.connect(self.on_open_globals_file_clicked)
        self.ui.pushButton_new_globals_file.clicked.connect(self.on_new_globals_file_clicked)
        self.ui.pushButton_diff_globals_file.clicked.connect(self.on_diff_globals_file_clicked)
        self.ui.treeView_groups.leftClicked.connect(self.on_treeView_groups_leftClicked)
        self.ui.treeView_groups.doubleLeftClicked.connect(self.on_treeView_groups_doubleLeftClicked)
        self.groups_model.itemChanged.connect(self.on_groups_model_item_changed)
        # A context manager with which we can temporarily disconnect the above connection.
        self.groups_model_item_changed_disconnected = DisconnectContextManager(
            self.groups_model.itemChanged, self.on_groups_model_item_changed)
        # Keyboard shortcuts:
        engage_shortcut = QtWidgets.QShortcut('F5', self.ui,
                                              lambda: self.ui.pushButton_engage.clicked.emit(False))
        engage_shortcut.setAutoRepeat(False)
        QtWidgets.QShortcut('ctrl+W', self.ui, self.close_current_tab)
        QtWidgets.QShortcut('ctrl+Tab', self.ui, lambda: self.switch_tabs(+1))
        QtWidgets.QShortcut('ctrl+shift+Tab', self.ui, lambda: self.switch_tabs(-1))
def on_close_event(self):
save_data = self.get_save_data()
if self.last_save_data is not None and save_data != self.last_save_data:
message = ('Current configuration (which groups are active/open and other GUI state) '
'has changed: save config file \'%s\'?' % self.last_save_config_file)
reply = QtWidgets.QMessageBox.question(self.ui, 'Quit runmanager', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
if reply == QtWidgets.QMessageBox.Cancel:
return False
if reply == QtWidgets.QMessageBox.Yes:
self.save_configuration(self.last_save_config_file)
self.to_child.put(['quit', None])
return True
def close_current_tab(self):
current_tab_widget = self.ui.tabWidget.currentWidget()
for (globals_file, group_name), tab in self.currently_open_groups.items():
if tab.ui is current_tab_widget:
self.close_group(globals_file, group_name)
def switch_tabs(self, change):
current_index = self.ui.tabWidget.currentIndex()
n_tabs = self.ui.tabWidget.count()
new_index = (current_index + change) % n_tabs
self.ui.tabWidget.setCurrentIndex(new_index)
def on_output_popout_button_clicked(self):
if self.output_box_is_popped_out:
self.ui.verticalLayout_output_tab.addWidget(self.output_box.output_textedit)
self.output_box_window.hide()
self.output_popout_button.setIcon(QtGui.QIcon(':/qtutils/fugue/arrow-out'))
else:
# pop it out
# self.ui.verticalLayout_output_tab.remove(self.output_box)
self.output_box_window_verticalLayout.addWidget(self.output_box.output_textedit)
self.output_popout_button.setIcon(QtGui.QIcon(':/qtutils/fugue/arrow-in'))
self.output_box_window.show()
self.output_box_is_popped_out = not self.output_box_is_popped_out
def on_select_labscript_file_clicked(self, checked):
labscript_file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
'Select labscript file',
self.last_opened_labscript_folder,
"Python files (*.py)")
if type(labscript_file) is tuple:
labscript_file, _ = labscript_file
if not labscript_file:
# User cancelled selection
return
# Convert to standard platform specific path, otherwise Qt likes forward slashes:
labscript_file = os.path.abspath(labscript_file)
if not os.path.isfile(labscript_file):
error_dialog("No such file %s." % labscript_file)
return
# Save the containing folder for use next time we open the dialog box:
self.last_opened_labscript_folder = os.path.dirname(labscript_file)
# Write the file to the lineEdit:
self.ui.lineEdit_labscript_file.setText(labscript_file)
def on_edit_labscript_file_clicked(self, checked):
# get path to text editor
editor_path = self.exp_config.get('programs', 'text_editor')
editor_args = self.exp_config.get('programs', 'text_editor_arguments')
# Get the current labscript file:
current_labscript_file = self.ui.lineEdit_labscript_file.text()
# Ignore if no file selected
if not current_labscript_file:
return
if not editor_path:
error_dialog("No editor specified in the labconfig.")
if '{file}' in editor_args:
# Split the args on spaces into a list, replacing {file} with the labscript file
editor_args = [arg if arg != '{file}' else current_labscript_file for arg in editor_args.split()]
else:
# Otherwise if {file} isn't already in there, append it to the other args:
editor_args = [current_labscript_file] + editor_args.split()
try:
subprocess.Popen([editor_path] + editor_args)
except Exception as e:
error_dialog("Unable to launch text editor specified in %s. Error was: %s" %
(self.exp_config.config_path, str(e)))
def on_select_shot_output_folder_clicked(self, checked):
shot_output_folder = QtWidgets.QFileDialog.getExistingDirectory(self.ui,
'Select shot output folder',
self.last_selected_shot_output_folder)
if type(shot_output_folder) is tuple:
shot_output_folder, _ = shot_output_folder
if not shot_output_folder:
# User cancelled selection
return
# Convert to standard platform specific path, otherwise Qt likes forward slashes:
shot_output_folder = os.path.abspath(shot_output_folder)
# Save the containing folder for use next time we open the dialog box:
self.last_selected_shot_output_folder = os.path.dirname(shot_output_folder)
# Write the file to the lineEdit:
self.ui.lineEdit_shot_output_folder.setText(shot_output_folder)
# Update our knowledge about whether this is the default output folder or not:
self.check_output_folder_update()
def on_reset_shot_output_folder_clicked(self, checked):
current_default_output_folder = self.get_default_output_folder()
if current_default_output_folder is None:
return
self.ui.lineEdit_shot_output_folder.setText(current_default_output_folder)
self.check_output_folder_update()
def on_labscript_file_text_changed(self, text):
# Blank out the 'edit labscript file' button if no labscript file is
# selected
enabled = bool(text)
self.ui.toolButton_edit_labscript_file.setEnabled(enabled)
# Blank out the 'select shot output folder' button if no labscript
# file is selected:
self.ui.toolButton_select_shot_output_folder.setEnabled(enabled)
self.ui.lineEdit_labscript_file.setToolTip(text)
# Check if the output folder needs to be updated:
self.check_output_folder_update()
def on_shot_output_folder_text_changed(self, text):
# Blank out the 'reset default output folder' button if the user is
# already using the default output folder
if text == self.get_default_output_folder():
self.non_default_folder = False
else:
self.non_default_folder = True
self.ui.toolButton_reset_shot_output_folder.setEnabled(self.non_default_folder)
self.ui.label_non_default_folder.setVisible(self.non_default_folder)
self.ui.lineEdit_shot_output_folder.setToolTip(text)
    def on_engage_clicked(self):
        """Compile shots for the current labscript file, globals and output
        folder, and queue them for the compiler subprocess.

        Parses the active globals (using the expansion order and per-axis
        shuffle settings from the axes tab), writes the per-shot h5 files,
        then enqueues the compile job. Any error is reported to the output
        box rather than raised.
        """
        logger.info('Engage')
        try:
            send_to_BLACS = self.ui.checkBox_run_shots.isChecked()
            send_to_runviewer = self.ui.checkBox_view_shots.isChecked()
            labscript_file = self.ui.lineEdit_labscript_file.text()
            # even though we shuffle on a per global basis, if ALL of the globals are set to shuffle, then we may as well shuffle again. This helps shuffle shots more randomly than just shuffling within each level (because without this, you would still do all shots with the outer most variable the same, etc)
            shuffle = self.ui.pushButton_shuffle.checkState() == QtCore.Qt.Checked
            if not labscript_file:
                raise Exception('Error: No labscript file selected')
            output_folder = self.ui.lineEdit_shot_output_folder.text()
            if not output_folder:
                raise Exception('Error: No output folder selected')
            BLACS_host = self.ui.lineEdit_BLACS_hostname.text()
            logger.info('Parsing globals...')
            active_groups = self.get_active_groups()
            # Get ordering of expansion globals
            expansion_order = {}
            for i in range(self.axes_model.rowCount()):
                item = self.axes_model.item(i, self.AXES_COL_NAME)
                shuffle_item = self.axes_model.item(i, self.AXES_COL_SHUFFLE)
                name = item.data(self.AXES_ROLE_NAME)
                expansion_order[name] = {'order':i, 'shuffle':shuffle_item.checkState()}
            try:
                sequenceglobals, shots, evaled_globals, global_hierarchy, expansions = self.parse_globals(active_groups, expansion_order=expansion_order)
            except Exception as e:
                raise Exception('Error parsing globals:\n%s\nCompilation aborted.' % str(e))
            logger.info('Making h5 files')
            labscript_file, run_files = self.make_h5_files(
                labscript_file, output_folder, sequenceglobals, shots, shuffle)
            # Enable the abort button now that a compilation is in flight:
            self.ui.pushButton_abort.setEnabled(True)
            self.compile_queue.put([labscript_file, run_files, send_to_BLACS, BLACS_host, send_to_runviewer])
        except Exception as e:
            # Report the failure to the user instead of raising:
            self.output_box.output('%s\n\n' % str(e), red=True)
        logger.info('end engage')
    def on_abort_clicked(self):
        """Signal the compile loop to abort the in-progress compilation."""
        self.compilation_aborted.set()
    def on_restart_subprocess_clicked(self):
        # Kill and restart the compilation subprocess
        self.to_child.put(['quit', None])
        # NOTE(review): this put on from_child presumably unblocks a pending
        # read in the compile loop so it doesn't hang waiting on the dying
        # child -- TODO confirm against compile_loop.
        self.from_child.put(['done', False])
        time.sleep(0.1)
        self.output_box.output('Asking subprocess to quit...')
        # Poll for the child's exit (escalating to terminate/kill if needed);
        # check_child_exited() re-schedules itself and restarts the child:
        timeout_time = time.time() + 2
        QtCore.QTimer.singleShot(50, lambda: self.check_child_exited(timeout_time, kill=False))
    def check_child_exited(self, timeout_time, kill=False):
        """Poll for the compiler subprocess exiting, escalating force.

        Re-schedules itself on a 50ms timer until the child exits or
        timeout_time passes. On timeout it escalates: first terminate() (with
        a fresh 2s timeout), then kill(). Once the child is gone, spawns a
        replacement subprocess.

        Args:
            timeout_time: absolute time.time() deadline for the current wait.
            kill: False while waiting after terminate(); True while waiting
                after kill() has been issued.
        """
        self.child.poll()
        if self.child.returncode is None and time.time() < timeout_time:
            # Still running and not yet timed out: check again shortly.
            QtCore.QTimer.singleShot(50, lambda: self.check_child_exited(timeout_time, kill))
            return
        elif self.child.returncode is None:
            # Timed out without exiting: escalate.
            if not kill:
                self.child.terminate()
                self.output_box.output('not responding.\n')
                # Give it another 2 seconds to respond to terminate():
                timeout_time = time.time() + 2
                QtCore.QTimer.singleShot(50, lambda: self.check_child_exited(timeout_time, kill=True))
                return
            else:
                self.child.kill()
                self.output_box.output('Killed\n', red=True)
        elif kill:
            # Exited after terminate() was sent:
            self.output_box.output('Terminated\n', red=True)
        else:
            # Exited cleanly on request:
            self.output_box.output('done.\n')
        # Replace the dead child with a fresh compiler subprocess:
        self.output_box.output('Spawning new compiler subprocess...')
        self.to_child, self.from_child, self.child = process_tree.subprocess(
            os.path.join(runmanager_dir, 'batch_compiler.py'),
            output_redirection_port=self.output_box.port,
        )
        self.output_box.output('done.\n')
        self.output_box.output('Ready.\n\n')
def on_tabCloseRequested(self, index):
tab_page = self.ui.tabWidget.widget(index)
for (globals_file, group_name), group_tab in self.currently_open_groups.items():
if group_tab.ui is tab_page:
self.close_group(globals_file, group_name)
break
def on_treeView_axes_context_menu_requested(self, point):
raise NotImplementedError
# menu = QtWidgets.QMenu(self.ui)
# menu.addAction(self.action_axes_check_selected)
# menu.addAction(self.action_axes_uncheck_selected)
# menu.exec_(QtGui.QCursor.pos())
pass
    def on_axes_check_selected_triggered(self, *args):
        # Placeholder for the axes context-menu 'Check selected' action; the
        # axes context menu itself is not implemented yet.
        raise NotImplementedError
    def on_axes_uncheck_selected_triggered(self, *args):
        # Placeholder for the axes context-menu 'Uncheck selected' action;
        # the axes context menu itself is not implemented yet.
        raise NotImplementedError
    def on_axis_to_top_clicked(self, checked):
        """Move the selected axis rows to the top of the axes model,
        preserving their relative order and keeping them selected."""
        # Get the selection model from the treeview
        selection_model = self.ui.treeView_axes.selectionModel()
        # Create a list of select row indices
        selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]
        # For each row selected
        for i,row in enumerate(selected_row_list):
            # only move the row while it is not element 0, and the row above it is not selected
            # (note that while a row above may have been initially selected, it should by now, be one row higher
            # since we start moving elements of the list upwards starting from the lowest index)
            while row > 0 and (row-1) not in selected_row_list:
                # Remove the selected row
                items = self.axes_model.takeRow(row)
                # Add the selected row into a position one above
                self.axes_model.insertRow(row-1,items)
                # Since it is now a newly inserted row, select it again
                selection_model.select(self.axes_model.indexFromItem(items[0]),QtCore.QItemSelectionModel.SelectCurrent|QtCore.QItemSelectionModel.Rows)
                # reupdate the list of selected indices to reflect this change
                selected_row_list[i] -= 1
                row -= 1
        self.update_axes_indentation()
    def on_axis_up_clicked(self, checked):
        """Move each selected axis row up one place (unless it is at the top
        or directly below another selected row), keeping it selected."""
        # Get the selection model from the treeview
        selection_model = self.ui.treeView_axes.selectionModel()
        # Create a list of select row indices
        selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]
        # For each row selected
        for i,row in enumerate(selected_row_list):
            # only move the row if it is not element 0, and the row above it is not selected
            # (note that while a row above may have been initially selected, it should by now, be one row higher
            # since we start moving elements of the list upwards starting from the lowest index)
            if row > 0 and (row-1) not in selected_row_list:
                # Remove the selected row
                items = self.axes_model.takeRow(row)
                # Add the selected row into a position one above
                self.axes_model.insertRow(row-1,items)
                # Since it is now a newly inserted row, select it again
                selection_model.select(self.axes_model.indexFromItem(items[0]),QtCore.QItemSelectionModel.SelectCurrent|QtCore.QItemSelectionModel.Rows)
                # reupdate the list of selected indices to reflect this change
                selected_row_list[i] -= 1
        self.update_axes_indentation()
    def on_axis_down_clicked(self, checked):
        """Move each selected axis row down one place (unless it is at the
        bottom or directly above another selected row), keeping it selected."""
        # Get the selection model from the treeview
        selection_model = self.ui.treeView_axes.selectionModel()
        # Create a list of select row indices (processed bottom-up)
        selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]
        # For each row selected
        for i,row in enumerate(selected_row_list):
            # only move the row if it is not the last element, and the row above it is not selected
            # (note that while a row below may have been initially selected, it should by now, be one row lower
            # since we start moving elements of the list upwards starting from the highest index)
            if row < self.axes_model.rowCount()-1 and (row+1) not in selected_row_list:
                # Remove the selected row
                items = self.axes_model.takeRow(row)
                # Add the selected row into a position one above
                self.axes_model.insertRow(row+1,items)
                # Since it is now a newly inserted row, select it again
                selection_model.select(self.axes_model.indexFromItem(items[0]),QtCore.QItemSelectionModel.SelectCurrent|QtCore.QItemSelectionModel.Rows)
                # reupdate the list of selected indices to reflect this change
                selected_row_list[i] += 1
        self.update_axes_indentation()
    def on_axis_to_bottom_clicked(self, checked):
        """Move the selected axis rows to the bottom of the axes model,
        preserving their relative order and keeping them selected."""
        selection_model = self.ui.treeView_axes.selectionModel()
        # Create a list of select row indices (processed bottom-up)
        selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]
        # For each row selected
        for i,row in enumerate(selected_row_list):
            # only move the row while it is not the last element, and the row above it is not selected
            # (note that while a row below may have been initially selected, it should by now, be one row lower
            # since we start moving elements of the list upwards starting from the highest index)
            while row < self.axes_model.rowCount()-1 and (row+1) not in selected_row_list:
                # Remove the selected row
                items = self.axes_model.takeRow(row)
                # Add the selected row into a position one above
                self.axes_model.insertRow(row+1,items)
                # Since it is now a newly inserted row, select it again
                selection_model.select(self.axes_model.indexFromItem(items[0]),QtCore.QItemSelectionModel.SelectCurrent|QtCore.QItemSelectionModel.Rows)
                # reupdate the list of selected indices to reflect this change
                selected_row_list[i] += 1
                row += 1
        self.update_axes_indentation()
def on_axes_item_changed(self, item):
if item.column() == self.AXES_COL_SHUFFLE:
self.update_global_shuffle_state()
    def update_global_shuffle_state(self, *args, **kwargs):
        """Set the master shuffle control to reflect the per-axis shuffle
        checkboxes: checked if all axes shuffle, unchecked if none do,
        partially checked for a mix. Extra args are ignored so this can be
        connected directly to model signals."""
        all_checked = True
        none_checked = True
        for i in range(self.axes_model.rowCount()):
            check_state = self.axes_model.item(i, self.AXES_COL_SHUFFLE).checkState() == QtCore.Qt.Checked
            all_checked = all_checked and check_state
            none_checked = none_checked and not check_state
        if not all_checked and not none_checked:
            # Mixed per-axis states: show the tristate 'partially checked'.
            self.ui.pushButton_shuffle.setTristate(True)
            self.ui.pushButton_shuffle.setCheckState(QtCore.Qt.PartiallyChecked)
        elif none_checked and not all_checked:
            self.ui.pushButton_shuffle.setTristate(False)
            self.ui.pushButton_shuffle.setCheckState(QtCore.Qt.Unchecked)
        elif all_checked and not none_checked:
            self.ui.pushButton_shuffle.setTristate(False)
            self.ui.pushButton_shuffle.setCheckState(QtCore.Qt.Checked)
        else:
            # No axes at all. If currently partially checked, collapse to
            # checked; otherwise leave the control alone:
            if self.ui.pushButton_shuffle.checkState() == QtCore.Qt.PartiallyChecked:
                self.ui.pushButton_shuffle.setTristate(False)
                self.ui.pushButton_shuffle.setCheckState(QtCore.Qt.Checked)
def on_master_shuffle_clicked(self, state):
if state in [QtCore.Qt.Checked, QtCore.Qt.Unchecked]:
self.ui.pushButton_shuffle.setTristate(False)
for i in range(self.axes_model.rowCount()):
item = self.axes_model.item(i, self.AXES_COL_SHUFFLE)
if item.checkState() != state:
self.axes_model.item(i, self.AXES_COL_SHUFFLE).setCheckState(state)
def on_treeView_groups_context_menu_requested(self, point):
menu = QtWidgets.QMenu(self.ui)
menu.addAction(self.action_groups_set_selection_active)
menu.addAction(self.action_groups_set_selection_inactive)
menu.addAction(self.action_groups_delete_selected)
menu.addAction(self.action_groups_open_selected)
menu.addAction(self.action_groups_close_selected_groups)
menu.addAction(self.action_groups_close_selected_files)
copy_menu = QtWidgets.QMenu('Copy selected group(s) to...', menu)
copy_menu.setIcon(QtGui.QIcon(':/qtutils/fugue/blue-document-copy'))
menu.addMenu(copy_menu)
move_menu = QtWidgets.QMenu('Move selected group(s) to...', menu)
move_menu.setIcon(QtGui.QIcon(':/qtutils/fugue/blue-document--arrow'))
menu.addMenu(move_menu)
# Create a dict of all filepaths -> filenames
filenames = {}
for index in range(self.groups_model.rowCount()):
filepath = self.groups_model.item(index, self.GROUPS_COL_NAME).text()
filenames[filepath] = filepath.split(os.sep)[-1]
# expand duplicate filenames until there is nomore duplicates
new_filename = {}
i = 2
while new_filename != filenames:
for filepath, filename in filenames.items():
if list(filenames.values()).count(filename) > 1:
new_filename[filepath] = os.sep.join(filepath.split(os.sep)[-i:])
else:
new_filename[filepath] = filename
filenames = new_filename
i += 1
# add all filenames to the copy and move submenu
for filepath, filename in filenames.items():
copy_menu.addAction(filename, lambda filepath=filepath: self.on_groups_copy_selected_groups_triggered(filepath, False))
move_menu.addAction(filename, lambda filepath=filepath: self.on_groups_copy_selected_groups_triggered(filepath, True))
menu.exec_(QtGui.QCursor.pos())
def on_groups_copy_selected_groups_triggered(self, dest_globals_file=None, delete_source_group=False):
selected_indexes = self.ui.treeView_groups.selectedIndexes()
selected_items = (self.groups_model.itemFromIndex(index) for index in selected_indexes)
name_items = [item for item in selected_items
if item.column() == self.GROUPS_COL_NAME
and item.parent() is not None]
for item in name_items:
source_globals_file = item.parent().text()
self.copy_group(source_globals_file, item.text(), dest_globals_file, delete_source_group)
def on_groups_set_selection_active_triggered(self, checked_state):
selected_indexes = self.ui.treeView_groups.selectedIndexes()
# Filter to only include the 'active' column:
selected_items = (self.groups_model.itemFromIndex(index) for index in selected_indexes)
active_items = (item for item in selected_items
if item.column() == self.GROUPS_COL_ACTIVE
and item.parent() is not None)
for item in active_items:
item.setCheckState(checked_state)
def on_groups_delete_selected_triggered(self):
selected_indexes = self.ui.treeView_groups.selectedIndexes()
selected_items = (self.groups_model.itemFromIndex(index) for index in selected_indexes)
name_items = [item for item in selected_items
if item.column() == self.GROUPS_COL_NAME
and item.parent() is not None]
# If multiple selected, show 'delete n groups?' message. Otherwise,
# pass confirm=True to self.delete_group so it can show the regular
# message.
confirm_multiple = (len(name_items) > 1)
if confirm_multiple:
if not question_dialog("Delete %d groups?" % len(name_items)):
return
for item in name_items:
globals_file = item.parent().text()
group_name = item.text()
self.delete_group(globals_file, group_name, confirm=not confirm_multiple)
def on_groups_open_selected_triggered(self):
selected_indexes = self.ui.treeView_groups.selectedIndexes()
selected_items = [self.groups_model.itemFromIndex(index) for index in selected_indexes]
name_items = [item for item in selected_items
if item.column() == self.GROUPS_COL_NAME
and item.parent() is not None]
# Include all grous of selected globals files:
for item in selected_items:
if item.parent() is None:
children = [item.child(i) for i in range(item.rowCount())]
# Exclude <add new group> item, which is not selectable
name_items += [child for child in children if child.isSelectable() ]
filenames = set(item.parent().text() for item in name_items)
for item in name_items:
globals_file = item.parent().text()
group_name = item.text()
if (globals_file, group_name) not in self.currently_open_groups:
self.open_group(globals_file, group_name, trigger_preparse=False)
if name_items:
self.globals_changed()
def on_groups_close_selected_groups_triggered(self):
selected_indexes = self.ui.treeView_groups.selectedIndexes()
selected_items = (self.groups_model.itemFromIndex(index) for index in selected_indexes)
name_items = [item for item in selected_items
if item.column() == self.GROUPS_COL_NAME
and item.parent() is not None]
for item in name_items:
globals_file = item.parent().text()
group_name = item.text()
if (globals_file, group_name) in self.currently_open_groups:
self.close_group(globals_file, group_name)
def on_groups_close_selected_files_triggered(self):
selected_indexes = self.ui.treeView_groups.selectedIndexes()
selected_items = (self.groups_model.itemFromIndex(index) for index in selected_indexes)
name_items = [item for item in selected_items
if item.column() == self.GROUPS_COL_NAME
and item.parent() is None]
child_openclose_items = [item.child(i, self.GROUPS_COL_OPENCLOSE)
for item in name_items
for i in range(item.rowCount())]
child_is_open = [child_item.data(self.GROUPS_ROLE_GROUP_IS_OPEN)
for child_item in child_openclose_items]
if any(child_is_open):
if not question_dialog('Close %d file(s)? This will close %d currently open group(s).' %
(len(name_items), child_is_open.count(True))):
return
for item in name_items:
globals_file = item.text()
self.close_globals_file(globals_file, confirm=False)
def on_open_globals_file_clicked(self):
globals_file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
'Select globals file',
self.last_opened_globals_folder,
"HDF5 files (*.h5)")
if type(globals_file) is tuple:
globals_file, _ = globals_file
if not globals_file:
# User cancelled selection
return
# Convert to standard platform specific path, otherwise Qt likes forward slashes:
globals_file = os.path.abspath(globals_file)
if not os.path.isfile(globals_file):
error_dialog("No such file %s." % globals_file)
return
# Save the containing folder for use next time we open the dialog box:
self.last_opened_globals_folder = os.path.dirname(globals_file)
# Open the file:
self.open_globals_file(globals_file)
def on_new_globals_file_clicked(self):
globals_file = QtWidgets.QFileDialog.getSaveFileName(self.ui,
'Create new globals file',
self.last_opened_globals_folder,
"HDF5 files (*.h5)")
if type(globals_file) is tuple:
globals_file, _ = globals_file
if not globals_file:
# User cancelled
return
# Convert to standard platform specific path, otherwise Qt likes
# forward slashes:
globals_file = os.path.abspath(globals_file)
# Save the containing folder for use next time we open the dialog box:
self.last_opened_globals_folder = os.path.dirname(globals_file)
# Create the new file and open it:
runmanager.new_globals_file(globals_file)
self.open_globals_file(globals_file)
def on_diff_globals_file_clicked(self):
    """Prompt for a globals file and print a diff of its groups against
    runmanager's currently active groups to the output tab."""
    chosen = QtWidgets.QFileDialog.getOpenFileName(
        self.ui,
        'Select globals file to compare',
        self.last_opened_globals_folder,
        "HDF5 files (*.h5)",
    )
    # Some Qt bindings return a (filename, selected_filter) tuple:
    if type(chosen) is tuple:
        chosen, _ = chosen
    if not chosen:
        # User cancelled
        return
    # Normalise to a platform-specific path; Qt prefers forward slashes:
    chosen = os.path.abspath(chosen)
    # Get runmanager's currently active groups:
    active_groups = self.get_active_groups()
    if active_groups is None:
        # Invalid group selection; get_active_groups already informed the user.
        return
    # Get the chosen file's globals groups:
    other_groups = runmanager.get_all_groups(chosen)
    # Show the output tab so the user can see the diff:
    self.ui.tabWidget.setCurrentWidget(self.ui.tab_output)
    self.output_box.output('Globals diff with:\n%s\n\n' % chosen)
    diff_table = runmanager.globals_diff_groups(active_groups, other_groups)
    self.output_box.output(diff_table)
    self.output_box.output('Ready.\n\n')
def on_treeView_groups_leftClicked(self, index):
    """Here we respond to user clicks on the treeview. We do the following:

    - If the user clicks on the <click to add group> dummy row, we go into
      edit mode on it so they can enter the name of the new group they
      want.
    - If the user clicks on the active column, we toggle the checkbox
      manually.
    - If the user clicks on the icon to open or close a globals file or a
      group, we call the appropriate open and close methods and update the
      open/close data role on the model.
    - If the user clicks delete on a globals group, we call a delete
      method, which deletes it after confirmation, and closes it if it was
      open.
    """
    if qapplication.keyboardModifiers() != QtCore.Qt.NoModifier:
        # Only handle mouseclicks with no keyboard modifiers.
        return
    item = self.groups_model.itemFromIndex(index)
    # The 'name' item in the same row:
    name_index = index.sibling(index.row(), self.GROUPS_COL_NAME)
    name_item = self.groups_model.itemFromIndex(name_index)
    # The parent item, None if there is no parent:
    parent_item = item.parent()
    # What kind of row did the user click on?
    # A globals file, a group, or a 'click to add group' row?
    if item.data(self.GROUPS_ROLE_IS_DUMMY_ROW):
        # They clicked on an 'add new group' row. Enter editing
        # mode on the name item so they can enter a name for
        # the new group:
        self.ui.treeView_groups.setCurrentIndex(name_index)
        self.ui.treeView_groups.edit(name_index)
    # BUGFIX: this was a plain 'if', so a dummy-row click also fell
    # through into the branches below. The cases are mutually exclusive,
    # as the docstring describes, so 'elif' is correct:
    elif item.column() == self.GROUPS_COL_ACTIVE:
        # They clicked on the active column. Toggle the checkbox. We do
        # this manually because setting the item checkable means the model
        # changes before we catch the mouse click. This is a pain because
        # we want the ensuing sorting (if the user is sorting by the
        # enabled column) to keep the the selection. If the user only
        # selected the column by clicking on it, then the sort happens
        # before they selected it, and the resort happens without a visual
        # indication of where the item went, because it never got
        # selected.
        state = item.checkState()
        if state in (QtCore.Qt.Unchecked, QtCore.Qt.PartiallyChecked):
            item.setCheckState(QtCore.Qt.Checked)
        elif state == QtCore.Qt.Checked:
            item.setCheckState(QtCore.Qt.Unchecked)
        else:
            raise AssertionError('Invalid Check state')
        # If this changed the sort order, ensure the item is still visible:
        scroll_view_to_row_if_current(self.ui.treeView_groups, item)
    elif parent_item is None:
        # They clicked on a globals file row.
        globals_file = name_item.text()
        # What column did they click on?
        if item.column() == self.GROUPS_COL_OPENCLOSE:
            # They clicked the close button. Close the file:
            self.close_globals_file(globals_file)
    else:
        # They clicked on a globals group row.
        globals_file = parent_item.text()
        group_name = name_item.text()
        # What column did they click on?
        if item.column() == self.GROUPS_COL_DELETE:
            # They clicked the delete button. Delete the group:
            self.delete_group(globals_file, group_name, confirm=True)
        elif item.column() == self.GROUPS_COL_OPENCLOSE:
            # They clicked the open/close button. Which is it, open or close?
            group_is_open = item.data(self.GROUPS_ROLE_GROUP_IS_OPEN)
            if group_is_open:
                self.close_group(globals_file, group_name)
            else:
                self.open_group(globals_file, group_name)
def on_treeView_groups_doubleLeftClicked(self, index):
    """Open (if necessary) and focus the tab for a double-clicked group name."""
    item = self.groups_model.itemFromIndex(index)
    if item.data(self.GROUPS_ROLE_IS_DUMMY_ROW):
        # The <click to add group> row: nothing to do on double-click.
        return
    parent_item = item.parent()
    if not parent_item or item.column() != self.GROUPS_COL_NAME:
        # Only group-name items (which have a file row as parent) respond.
        return
    globals_file = parent_item.text()
    group_name = item.text()
    key = (globals_file, group_name)
    if key not in self.currently_open_groups:
        self.open_group(globals_file, group_name)
    group_tab = self.currently_open_groups[key]
    # Bring the group's tab to the front:
    for tab_index in range(self.ui.tabWidget.count()):
        if self.ui.tabWidget.widget(tab_index) is group_tab.ui:
            self.ui.tabWidget.setCurrentIndex(tab_index)
            break
def on_groups_model_item_changed(self, item):
    """Dispatch model data changes to the per-column handler.

    The handlers mostly make further data changes for model consistency;
    group creation and renaming are handled in response to changes to the
    'name' column. When changing things elsewhere in the code, prefer to
    change only one thing and let the remaining updates be triggered from
    here. Recursing into this method is fine so long as the changes peter
    out; if recursion must be stopped, temporarily disconnect the signal
    with the context manager self.groups_model_item_changed_disconnected
    — but sparingly, lest required data updates be skipped.
    """
    handlers = {
        self.GROUPS_COL_NAME: self.on_groups_model_name_changed,
        self.GROUPS_COL_ACTIVE: self.on_groups_model_active_changed,
        self.GROUPS_COL_OPENCLOSE: self.on_groups_model_openclose_changed,
    }
    handler = handlers.get(item.column())
    if handler is not None:
        handler(item)
def on_groups_model_name_changed(self, item):
    """Handle changes to the 'name' column: create a new group when the
    <click to add group> dummy row was edited, otherwise rename the
    group whose name changed."""
    parent_item = item.parent()
    # File rows are supposed to be uneditable; only group rows (which
    # have a parent) should ever reach this handler:
    assert parent_item is not None
    if item.data(self.GROUPS_ROLE_IS_DUMMY_ROW):
        entered_text = item.text()
        if entered_text != self.GROUPS_DUMMY_ROW_TEXT:
            # The user typed a name into the dummy row: make that group.
            self.new_group(parent_item.text(), entered_text)
    else:
        new_name = item.text()
        old_name = item.data(self.GROUPS_ROLE_PREVIOUS_NAME)
        # Only act on a genuine rename, not on other changes to the item:
        if new_name != old_name:
            self.rename_group(parent_item.text(), old_name, new_name)
def on_groups_model_active_changed(self, item):
    """Sets the sort data for the item in response to its check state
    changing. Also, if this is the first time this function has been
    called on the stack, that is, the change was initiated externally
    instead of via recursion from this function itself, then set the check
    state of other items for consistency. This entails checking/unchecking
    all group rows in response to the file row's check state changing, or
    changing the file row's check state to reflect the check state of the
    child group rows. That's why we need to keep track of the recursion
    depth - so that those changes we make don't in turn cause further
    changes. But we don't disconnect the on_changed signal altogether,
    because we still want to do the update of the sort data, and anything
    else that might be added in future."""
    self.on_groups_model_active_changed_recursion_depth += 1
    try:
        check_state = item.checkState()
        # Ensure sort data matches active state:
        item.setData(check_state, self.GROUPS_ROLE_SORT_DATA)
        if self.on_groups_model_active_changed_recursion_depth > 1:
            # Prevent all below code from running in response to data changes
            # initiated from within this method itself. The code above this
            # check still runs in response to all changes.
            return
        parent_item = item.parent()
        if parent_item is not None:
            # A 'group active' checkbox changed due to external action (not from this method itself).
            # Update the parent file checkbox to reflect the state of its children
            children = [parent_item.child(i, self.GROUPS_COL_ACTIVE) for i in range(parent_item.rowCount())]
            # The dummy <click to add group> row has no meaningful check
            # state, so it is excluded from the tally:
            child_states = [child.checkState() for child in children
                            if not child.data(self.GROUPS_ROLE_IS_DUMMY_ROW)]
            parent_active_index = parent_item.index().sibling(parent_item.index().row(), self.GROUPS_COL_ACTIVE)
            parent_active_item = self.groups_model.itemFromIndex(parent_active_index)
            # All checked -> checked; all unchecked -> unchecked;
            # otherwise the file row shows a partial check:
            if all(state == QtCore.Qt.Checked for state in child_states):
                parent_active_item.setCheckState(QtCore.Qt.Checked)
            elif all(state == QtCore.Qt.Unchecked for state in child_states):
                parent_active_item.setCheckState(QtCore.Qt.Unchecked)
            else:
                parent_active_item.setCheckState(QtCore.Qt.PartiallyChecked)
        else:
            # A 'file active' checkbox changed due to external action (not from this method itself).
            # Update the check state of all children to match.
            name_index = item.index().sibling(item.index().row(), self.GROUPS_COL_NAME)
            name_item = self.groups_model.itemFromIndex(name_index)
            checkstate = item.checkState()
            children = [name_item.child(i, self.GROUPS_COL_ACTIVE) for i in range(name_item.rowCount())]
            for child in children:
                if not child.data(self.GROUPS_ROLE_IS_DUMMY_ROW):
                    child.setCheckState(checkstate)
    finally:
        self.on_groups_model_active_changed_recursion_depth -= 1
        # Only resort and reparse once the outermost call is unwinding:
        if self.on_groups_model_active_changed_recursion_depth == 0:
            self.do_model_sort()
            # Trigger a preparse to occur:
            self.globals_changed()
def on_groups_model_openclose_changed(self, item):
    """Sets item sort data and icon in response to the open/close state of a group
    changing."""
    parent_item = item.parent()
    # The open/close state of a globals group changed. It is definitely a
    # group, not a file, as the open/close state of a file shouldn't be
    # changing.
    assert parent_item is not None  # Just to be sure.
    # Ensure the sort data matches the open/close state:
    group_is_open = item.data(self.GROUPS_ROLE_GROUP_IS_OPEN)
    item.setData(group_is_open, self.GROUPS_ROLE_SORT_DATA)
    # Set the appropriate icon and tooltip. Changing the icon causes
    # itemChanged to be emitted, even if it is the same icon, and even if we
    # were to use the same QIcon instance. So to avoid infinite recursion
    # we temporarily disconnect the signal whilst we set the icons.
    with self.groups_model_item_changed_disconnected:
        if group_is_open:
            # Open group: the button shows a cross, meaning 'close':
            item.setIcon(QtGui.QIcon(':qtutils/fugue/cross'))
            item.setToolTip('Close globals group.')
        else:
            # Closed group: the button shows a plus, meaning 'open':
            item.setIcon(QtGui.QIcon(':qtutils/fugue/plus'))
            item.setToolTip('Load globals group into runmanager.')
    self.do_model_sort()
    # If this changed the sort order, ensure the item is still visible:
    scroll_view_to_row_if_current(self.ui.treeView_groups, item)
@inmain_decorator()
def get_default_output_folder(self):
    """Return what the default output folder would be right now, based on
    the current date and the selected labscript file. Returns an empty
    string if no labscript file is selected. Neither creates the default
    output folder nor checks that it exists."""
    labscript_file = self.ui.lineEdit_labscript_file.text()
    if not labscript_file:
        return ''
    # Only the output folder is needed; sequence details are discarded.
    _, default_output_folder, _ = runmanager.new_sequence_details(
        labscript_file,
        config=self.exp_config,
        increment_sequence_index=False,
    )
    return os.path.normpath(default_output_folder)
def rollover_shot_output_folder(self):
    """Runs in a thread, checking every 30 seconds if the default output folder has
    changed, likely because the date has changed, but also possible because another
    instance of runmanager has incremented the sequence index. If the default output
    folder has changed, and if runmanager is configured to use the default output
    folder, sets the folder in which compiled shots will be put. Does not create the
    folder if it does not already exist, this will be done at compile-time."""
    while True:
        time.sleep(30)
        try:
            self.check_output_folder_update()
        # Fix: the exception object was bound to an unused name; drop it —
        # logger.exception already records the active traceback.
        except Exception:
            # Don't stop the thread.
            logger.exception("error checking default output folder")
@inmain_decorator()
def check_output_folder_update(self):
    """Do a single check of whether the output folder needs updating. This
    is implemented as a separate function to the above loop so that the
    whole check happens at once in the Qt main thread and hence is atomic
    and can't be interfered with by other Qt calls in the program."""
    current_default_output_folder = self.get_default_output_folder()
    # BUGFIX: get_default_output_folder returns '' (not None) when no
    # labscript file is selected, so the previous 'is None' check never
    # fired and an empty path could be written into the line edit. Test
    # for falsiness instead:
    if not current_default_output_folder:
        # No labscript file selected:
        return
    currently_selected_output_folder = self.ui.lineEdit_shot_output_folder.text()
    if current_default_output_folder != self.previous_default_output_folder:
        # It's a new day, or a new labscript file.
        # Is the user using default folders?
        if currently_selected_output_folder == self.previous_default_output_folder:
            # Yes they are. In that case, update to use the new folder:
            self.ui.lineEdit_shot_output_folder.setText(current_default_output_folder)
        self.previous_default_output_folder = current_default_output_folder
@inmain_decorator()
def globals_changed(self):
    """Called from either self, a GroupTab, or the RemoteServer to inform
    runmanager that something about globals has changed and they need
    parsing again."""
    # Disable Engage until the preparse loop finishes and re-enables it:
    self.ui.pushButton_engage.setEnabled(False)
    # Wake the preparse thread:
    self.preparse_globals_required.put(None)
def update_axes_indentation(self):
    """Indent each axis name by its row position, visually conveying the
    nesting order of the axes."""
    for row in range(self.axes_model.rowCount()):
        name_item = self.axes_model.item(row, self.AXES_COL_NAME)
        stripped = name_item.text().lstrip()
        name_item.setText(' ' * row + stripped)
@inmain_decorator() # Is called by preparser thread
def update_axes_tab(self, expansions, dimensions):
    """Synchronise the axes model with the current global expansions.

    `expansions` maps global names to their expansion setting ('outer',
    a zip-group name, or empty for none); `dimensions` maps axis names
    to lengths. Rows whose axis no longer exists are removed, lengths of
    surviving rows are refreshed, and rows for new axes are appended.
    """
    # get set of expansions
    expansion_list = []
    for global_name, expansion in expansions.items():
        if expansion:
            if expansion == 'outer':
                expansion_list.append('outer '+global_name)
            else:
                # Zipped globals share a single axis named after the zip
                # group, hence 'expansion' here rather than 'global_name':
                expansion_list.append('zip '+expansion)
    # Deduplicate (several globals can share one zip axis):
    expansion_list = set(expansion_list)
    # find items to delete. Iterate in reverse so removing a row does not
    # shift the indices of rows we have yet to visit:
    for i in reversed(range(self.axes_model.rowCount())):
        item = self.axes_model.item(i, self.AXES_COL_NAME)
        name = item.data(self.AXES_ROLE_NAME)
        if name not in expansion_list:
            item = self.axes_model.takeRow(i)
            del item
        else:
            length_item = self.axes_model.item(i, self.AXES_COL_LENGTH)
            if name in dimensions:
                length_item.setText("{}".format(dimensions[name]))
            else:
                length_item.setText('Unknown')
            # remove from expansions list so we don't add it again
            expansion_list.remove(name)
    # add new rows
    for expansion_name in expansion_list:
        # New rows inherit the current state of the global shuffle button:
        shuffle = self.ui.pushButton_shuffle.checkState() != QtCore.Qt.Unchecked
        self.add_item_to_axes_model(expansion_name, shuffle, dimensions)
    self.update_axes_indentation()
def add_item_to_axes_model(self, expansion_name, shuffle, dimensions = None):
    """Append one row to the axes model for the given expansion.

    `expansion_name` is '<type> <name>' where type is 'outer' or 'zip';
    `shuffle` sets the initial state of the row's shuffle checkbox;
    `dimensions` optionally maps expansion names to axis lengths.
    """
    if dimensions is None:
        dimensions = {}
    expansion_type, name = expansion_name.split()
    name_item = QtGui.QStandardItem(name)
    name_item.setData(expansion_name, self.AXES_ROLE_NAME)
    # Icon distinguishes outer-product axes from zipped axes:
    icon_path = ':qtutils/custom/outer' if expansion_type == 'outer' else ':qtutils/custom/zip'
    name_item.setIcon(QtGui.QIcon(icon_path))
    if expansion_name in dimensions:
        length_text = "{}".format(dimensions[expansion_name])
    else:
        length_text = 'Unknown'
    length_item = QtGui.QStandardItem(length_text)
    shuffle_item = QtGui.QStandardItem()
    shuffle_item.setCheckable(True)
    shuffle_item.setCheckState(QtCore.Qt.Checked if shuffle else QtCore.Qt.Unchecked)
    self.axes_model.appendRow([name_item, length_item, shuffle_item])
@inmain_decorator() # Is called by preparser thread
def update_tabs_parsing_indication(self, active_groups, sequence_globals, evaled_globals, n_shots):
    """Push preparse results to every open group tab and re-enable the
    Engage button with the shot count in its label."""
    for tab in self.currently_open_groups.values():
        tab.update_parse_indication(active_groups, sequence_globals, evaled_globals)
    self.ui.pushButton_engage.setEnabled(True)
    suffix = '(1 shot)' if n_shots == 1 else '({} shots)'.format(n_shots)
    self.ui.pushButton_engage.setText('Engage {}'.format(suffix))
def preparse_globals(self):
    """Evaluate all active globals and update the GUI with the results.

    Parses first without expanding globals, re-guessing expansion modes
    after each pass; once the expansion settings are stable, does a final
    parse with expansion to count shots, then updates the group tabs and
    the axes tab. Runs in the preparse thread.
    """
    active_groups = self.get_active_groups()
    if active_groups is None:
        # There was an error, get_active_groups has already shown
        # it to the user.
        return
    # Expansion mode is automatically updated when the global's
    # type changes. If this occurs, we will have to parse again to
    # include the change:
    while True:
        results = self.parse_globals(active_groups, raise_exceptions=False, expand_globals=False, return_dimensions = True)
        sequence_globals, shots, evaled_globals, global_hierarchy, expansions, dimensions = results
        self.n_shots = len(shots)
        expansions_changed = self.guess_expansion_modes(
            active_groups, evaled_globals, global_hierarchy, expansions)
        if not expansions_changed:
            # Now expand globals while parsing to calculate the number of shots.
            # this must only be done after the expansion type guessing has been updated to avoid exceptions
            # when changing a zip group from a list to a single value
            results = self.parse_globals(active_groups, raise_exceptions=False, expand_globals=True, return_dimensions = True)
            sequence_globals, shots, evaled_globals, global_hierarchy, expansions, dimensions = results
            self.n_shots = len(shots)
            break
    self.update_tabs_parsing_indication(active_groups, sequence_globals, evaled_globals, self.n_shots)
    self.update_axes_tab(expansions, dimensions)
def preparse_globals_loop(self):
    """Runs in a thread, waiting on a threading.Event that tells us when
    some globals have changed, and calls parse_globals to evaluate them
    all before feeding the results back to the relevant tabs to be
    displayed."""
    while True:
        try:
            # Wait until we're needed:
            self.preparse_globals_required.get()
            # Count requests so every caller blocked in join() is released:
            n_requests = 1
            # Wait until the main thread is idle before clearing the queue of
            # requests. This way if preparsing is triggered multiple times within
            # the main thread before it becomes idle, we can respond to this all at
            # once, once they are all done, rather than starting too early and
            # having to preparse again.
            with qtlock:
                while True:
                    try:
                        self.preparse_globals_required.get(block=False)
                        n_requests += 1
                    except queue.Empty:
                        break
            # Do some work:
            self.preparse_globals()
            # Tell any callers calling preparse_globals_required.join() that we are
            # done with their request:
            for _ in range(n_requests):
                self.preparse_globals_required.task_done()
        except Exception:
            # Raise the error, but keep going so we don't take down the
            # whole thread if there is a bug.
            exc_info = sys.exc_info()
            raise_exception_in_thread(exc_info)
def wait_until_preparse_complete(self):
    """Block until the preparse loop has finished pending work"""
    # Every request put on the queue is matched by a task_done() call in
    # preparse_globals_loop, so join() returns once all are processed.
    self.preparse_globals_required.join()
def get_group_item_by_name(self, globals_file, group_name, column, previous_name=None):
    """Returns an item from the row representing a globals group in the
    groups model. Which item is returned is set by the column argument.

    Raises LookupError if no matching row is found, or if more than one
    matches after filtering.
    """
    # The file row that must be the group's parent:
    parent_item = self.groups_model.findItems(globals_file, column=self.GROUPS_COL_NAME)[0]
    possible_name_items = self.groups_model.findItems(group_name, QtCore.Qt.MatchRecursive,
                                                      column=self.GROUPS_COL_NAME)
    # Don't accidentally match on other groups or files with the same name
    # as this group:
    possible_name_items = [item for item in possible_name_items if item.parent() == parent_item]
    if previous_name is not None:
        # Also filter by previous name, useful for telling rows apart when
        # a rename is in progress and two rows may temporarily contain the
        # same name (though the rename code will throw an error and revert
        # it).
        possible_name_items = [item for item in possible_name_items
                               if item.data(self.GROUPS_ROLE_PREVIOUS_NAME) == previous_name]
    elif group_name != self.GROUPS_DUMMY_ROW_TEXT:
        # Don't return the dummy item unless they asked for it explicitly
        # - if a new group is being created, its name might be
        # simultaneously present in its own row and the dummy row too.
        possible_name_items = [item for item in possible_name_items
                               if not item.data(self.GROUPS_ROLE_IS_DUMMY_ROW)]
    if len(possible_name_items) > 1:
        raise LookupError('Multiple items found')
    elif not possible_name_items:
        raise LookupError('No item found')
    name_item = possible_name_items[0]
    name_index = name_item.index()
    # Found the name item, get the sibling item for the column requested:
    item_index = name_index.sibling(name_index.row(), column)
    item = self.groups_model.itemFromIndex(item_index)
    return item
def do_model_sort(self):
    """Re-sort the groups view using its current sort column and order."""
    header_view = self.ui.treeView_groups.header()
    self.ui.treeView_groups.sortByColumn(
        header_view.sortIndicatorSection(), header_view.sortIndicatorOrder()
    )
@inmain_decorator() # Can be called from a non-main thread
def get_active_groups(self, interactive=True):
    """Return the active groups as {group_name: globals_file}.

    Selected groups must be uniquely named: on a duplicate, shows an
    error dialog and returns None when interactive=True, otherwise
    raises RuntimeError."""
    active_groups = {}
    for file_row in range(self.groups_model.rowCount()):
        file_name_item = self.groups_model.item(file_row, self.GROUPS_COL_NAME)
        globals_file = file_name_item.text()
        for group_row in range(file_name_item.rowCount()):
            active_item = file_name_item.child(group_row, self.GROUPS_COL_ACTIVE)
            if active_item.checkState() != QtCore.Qt.Checked:
                continue
            group_name = file_name_item.child(group_row, self.GROUPS_COL_NAME).text()
            if group_name in active_groups:
                msg = (
                    'There are two active groups named %s. ' % group_name
                    + 'Active groups must have unique names.'
                )
                if interactive:
                    error_dialog(msg)
                    return
                raise RuntimeError(msg)
            active_groups[group_name] = globals_file
    return active_groups
def open_globals_file(self, globals_file):
    """Add a globals file and all of its groups to the groups model.

    Builds the file's top-level row, one child row per group, and the
    trailing <Click to add group> dummy row, then expands the file's
    subtree and triggers a preparse. Does nothing if the file is already
    open.
    """
    # Do nothing if this file is already open:
    if self.groups_model.findItems(globals_file, column=self.GROUPS_COL_NAME):
        return
    # Get the groups:
    groups = runmanager.get_grouplist(globals_file)
    # Add the parent row:
    file_name_item = QtGui.QStandardItem(globals_file)
    file_name_item.setEditable(False)
    file_name_item.setToolTip(globals_file)
    # Sort column by name:
    file_name_item.setData(globals_file, self.GROUPS_ROLE_SORT_DATA)
    file_active_item = QtGui.QStandardItem()
    file_active_item.setCheckState(QtCore.Qt.Unchecked)
    # Sort column by CheckState - must keep this updated when checkstate changes:
    file_active_item.setData(QtCore.Qt.Unchecked, self.GROUPS_ROLE_SORT_DATA)
    file_active_item.setEditable(False)
    file_active_item.setToolTip('Check to set all the file\'s groups as active.')
    file_delete_item = QtGui.QStandardItem() # Blank, only groups have a delete button
    file_delete_item.setEditable(False)
    # Must be set to something so that the dummy row doesn't get sorted first:
    file_delete_item.setData(False, self.GROUPS_ROLE_SORT_DATA)
    file_close_item = QtGui.QStandardItem()
    file_close_item.setIcon(QtGui.QIcon(':qtutils/fugue/cross'))
    file_close_item.setEditable(False)
    file_close_item.setToolTip('Close globals file.')
    self.groups_model.appendRow([file_name_item, file_active_item, file_delete_item, file_close_item])
    # Add the groups as children:
    for group_name in groups:
        row = self.make_group_row(group_name)
        file_name_item.appendRow(row)
    # Finally, add the <Click to add group> row at the bottom:
    dummy_name_item = QtGui.QStandardItem(self.GROUPS_DUMMY_ROW_TEXT)
    dummy_name_item.setToolTip('Click to add group')
    # This lets later code know that this row does
    # not correspond to an actual globals group:
    dummy_name_item.setData(True, self.GROUPS_ROLE_IS_DUMMY_ROW)
    dummy_name_item.setData(self.GROUPS_DUMMY_ROW_TEXT, self.GROUPS_ROLE_PREVIOUS_NAME)
    dummy_name_item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsEditable) # Clears the 'selectable' flag
    dummy_active_item = QtGui.QStandardItem()
    dummy_active_item.setData(True, self.GROUPS_ROLE_IS_DUMMY_ROW)
    dummy_active_item.setFlags(QtCore.Qt.NoItemFlags)
    dummy_delete_item = QtGui.QStandardItem()
    dummy_delete_item.setData(True, self.GROUPS_ROLE_IS_DUMMY_ROW)
    dummy_delete_item.setFlags(QtCore.Qt.NoItemFlags)
    dummy_open_close_item = QtGui.QStandardItem()
    dummy_open_close_item.setData(True, self.GROUPS_ROLE_IS_DUMMY_ROW)
    dummy_open_close_item.setFlags(QtCore.Qt.NoItemFlags)
    # Not setting anything as the above items' sort role has the effect of
    # ensuring this row is always sorted to the end of the list, without
    # us having to implement any custom sorting methods or subclassing
    # anything, yay.
    file_name_item.appendRow([dummy_name_item, dummy_active_item, dummy_delete_item, dummy_open_close_item])
    # Expand the child items to be visible:
    self.ui.treeView_groups.setExpanded(file_name_item.index(), True)
    self.globals_changed()
    self.do_model_sort()
    # If this changed the sort order, ensure the file item is visible:
    scroll_view_to_row_if_current(self.ui.treeView_groups, file_name_item)
def make_group_row(self, group_name):
    """Build and return the four items of a new group row for the groups
    tab, ready to be inserted into the model."""
    # Name column. The previous name is stored so that renames can be
    # detected later, and the sort data keeps name-sorting working:
    name_item = QtGui.QStandardItem(group_name)
    name_item.setData(group_name, self.GROUPS_ROLE_PREVIOUS_NAME)
    name_item.setData(group_name, self.GROUPS_ROLE_SORT_DATA)
    # Active column. Sort data mirrors the check state and must be kept
    # up to date whenever the checkstate changes:
    active_item = QtGui.QStandardItem()
    active_item.setEditable(False)
    active_item.setCheckState(QtCore.Qt.Unchecked)
    active_item.setData(QtCore.Qt.Unchecked, self.GROUPS_ROLE_SORT_DATA)
    active_item.setToolTip(
        'Whether or not the globals within this group should be used by runmanager for compilation.')
    # Delete column. Sort data must be set to something so the dummy row
    # doesn't get sorted first:
    delete_item = QtGui.QStandardItem()
    delete_item.setEditable(False)
    delete_item.setIcon(QtGui.QIcon(':qtutils/fugue/minus'))
    delete_item.setData(False, self.GROUPS_ROLE_SORT_DATA)
    delete_item.setToolTip('Delete globals group from file.')
    # Open/close column. Starts closed; both the open/close role and the
    # sort data must be kept manually updated when the state changes:
    openclose_item = QtGui.QStandardItem()
    openclose_item.setEditable(False)
    openclose_item.setIcon(QtGui.QIcon(':qtutils/fugue/plus'))
    openclose_item.setData(False, self.GROUPS_ROLE_GROUP_IS_OPEN)
    openclose_item.setData(False, self.GROUPS_ROLE_SORT_DATA)
    openclose_item.setToolTip('Load globals group into runmananger.')
    return [name_item, active_item, delete_item, openclose_item]
def close_globals_file(self, globals_file, confirm=True):
    """Close a globals file: close any of its open group tabs (after
    confirmation, unless confirm=False) and remove its row from the
    model."""
    file_item = self.groups_model.findItems(globals_file, column=self.GROUPS_COL_NAME)[0]
    # Collect the names of this file's currently open groups:
    open_group_names = []
    for row in range(file_item.rowCount()):
        openclose_item = file_item.child(row, self.GROUPS_COL_OPENCLOSE)
        if openclose_item.data(self.GROUPS_ROLE_GROUP_IS_OPEN):
            open_group_names.append(file_item.child(row, self.GROUPS_COL_NAME).text())
    if confirm and open_group_names:
        if not question_dialog('Close %s? This will close %d currently open group(s).' %
                               (globals_file, len(open_group_names))):
            return
    for group_name in open_group_names:
        self.close_group(globals_file, group_name)
    # Remove the globals file from the model:
    self.groups_model.removeRow(file_item.row())
    self.globals_changed()
def copy_group(self, source_globals_file, source_group_name, dest_globals_file=None, delete_source_group=False):
    """This function copies a group of globals with the name source_group_name from the file
    source_globals_file to a new file dest_globals_file. If delete_source_group is True
    the source group is deleted after copying (making this a move)."""
    # Moving a group within the same file is a no-op:
    if delete_source_group and source_globals_file == dest_globals_file:
        return
    try:
        dest_group_name = runmanager.copy_group(source_globals_file, source_group_name, dest_globals_file, delete_source_group)
    except Exception as e:
        error_dialog(str(e))
    else:
        # Insert the newly created globals group into the model, as a
        # child row of the new globals file.
        if dest_globals_file is None:
            dest_globals_file = source_globals_file
        # find the new groups parent row by filepath
        # NOTE(review): if dest_globals_file is not currently open in the
        # model, no row matches and parent_row is left unbound below,
        # raising NameError. Presumably callers only pass files that are
        # open — verify.
        for index in range(self.groups_model.rowCount()):
            if self.groups_model.item(index, self.GROUPS_COL_NAME).text() == dest_globals_file:
                parent_row = self.groups_model.item(index)
                break
        last_index = parent_row.rowCount()
        # Insert it as the row before the last (dummy) row:
        group_row = self.make_group_row(dest_group_name)
        parent_row.insertRow(last_index - 1, group_row)
        self.do_model_sort()
        # Open the group
        self.open_group(dest_globals_file, dest_group_name)
        name_item = group_row[self.GROUPS_COL_NAME]
        self.globals_changed()
        self.ui.treeView_groups.setCurrentIndex(name_item.index())
        # delete original
        if delete_source_group:
            self.delete_group(source_globals_file, source_group_name, confirm=False)
        # If this changed the sort order, ensure the group item is still visible:
        scroll_view_to_row_if_current(self.ui.treeView_groups, name_item)
def new_group(self, globals_file, group_name):
    """Create a new globals group in response to the dummy row being edited.

    Creates the group in the h5 file, inserts a row for it in the model,
    opens it and marks it active. On failure an error dialog is shown;
    either way the dummy row's text is reset so another group can be
    created.
    """
    # The dummy row's name item, identified via its previous-name role:
    item = self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_NAME,
                                       previous_name=self.GROUPS_DUMMY_ROW_TEXT)
    try:
        runmanager.new_group(globals_file, group_name)
    except Exception as e:
        error_dialog(str(e))
    else:
        # Insert the newly created globals group into the model, as a
        # child row of the globals file it belong to.
        group_row = self.make_group_row(group_name)
        last_index = item.parent().rowCount()
        # Insert it as the row before the last (dummy) row:
        item.parent().insertRow(last_index - 1, group_row)
        self.do_model_sort()
        # Open the group and mark it active:
        self.open_group(globals_file, group_name)
        active_item = group_row[self.GROUPS_COL_ACTIVE]
        name_item = group_row[self.GROUPS_COL_NAME]
        active_item.setCheckState(QtCore.Qt.Checked)
        self.globals_changed()
        self.ui.treeView_groups.setCurrentIndex(name_item.index())
        # If this changed the sort order, ensure the group item is still visible:
        scroll_view_to_row_if_current(self.ui.treeView_groups, name_item)
    finally:
        # Set the dummy row's text back ready for another group to be created:
        item.setText(self.GROUPS_DUMMY_ROW_TEXT)
def open_group(self, globals_file, group_name, trigger_preparse=True):
    """Open a globals group in a new tab and mark it open in the model."""
    key = (globals_file, group_name)
    assert key not in self.currently_open_groups # sanity check
    self.currently_open_groups[key] = GroupTab(self.ui.tabWidget, globals_file, group_name)
    # Mark the group as open in the groups_model. This emits itemChanged,
    # and self.on_groups_model_item_changed then updates the other data
    # roles, icons etc:
    openclose_item = self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_OPENCLOSE)
    openclose_item.setData(True, self.GROUPS_ROLE_GROUP_IS_OPEN)
    # Trigger a preparse in light of this. Callers opening many groups at
    # once can pass trigger_preparse=False to avoid repeated preparses,
    # in which case they must call self.globals_changed() themselves:
    if trigger_preparse:
        self.globals_changed()
def rename_group(self, globals_file, previous_group_name, new_group_name):
    """Rename a globals group in the h5 file and update the model and any
    open tab. If the rename fails, shows an error dialog and reverts the
    item's text to the previous name.
    """
    # The row already shows the new name; find it via the previous-name role:
    item = self.get_group_item_by_name(globals_file, new_group_name, self.GROUPS_COL_NAME,
                                       previous_name=previous_group_name)
    try:
        runmanager.rename_group(globals_file, previous_group_name, new_group_name)
    except Exception as e:
        error_dialog(str(e))
        # Set the item text back to the old name, since the rename failed:
        item.setText(previous_group_name)
    else:
        item.setData(new_group_name, self.GROUPS_ROLE_PREVIOUS_NAME)
        item.setData(new_group_name, self.GROUPS_ROLE_SORT_DATA)
        self.do_model_sort()
        # If this changed the sort order, ensure the group item is still visible:
        scroll_view_to_row_if_current(self.ui.treeView_groups, item)
        group_tab = self.currently_open_groups.pop((globals_file, previous_group_name), None)
        if group_tab is not None:
            # Change labels and tooltips appropriately if the group is open:
            group_tab.set_file_and_group_name(globals_file, new_group_name)
            # Re-add it to the dictionary under the new name:
            self.currently_open_groups[globals_file, new_group_name] = group_tab
def close_group(self, globals_file, group_name):
    """Close the tab of an open group and mark it closed in the model."""
    tab = self.currently_open_groups.pop((globals_file, group_name), None)
    assert tab is not None  # Just in case
    tab.close()
    # Record the closed state in the groups model:
    openclose_item = self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_OPENCLOSE)
    openclose_item.setData(False, self.GROUPS_ROLE_GROUP_IS_OPEN)
def delete_group(self, globals_file, group_name, confirm=True):
    """Delete a group from its globals file, closing its tab first if open.

    If confirm is True the user is asked first and may cancel.
    """
    if confirm and not question_dialog("Delete the group '%s'?" % group_name):
        return
    # Close the group's tab if it is currently open:
    if (globals_file, group_name) in self.currently_open_groups:
        self.close_group(globals_file, group_name)
    runmanager.delete_group(globals_file, group_name)
    # Remove the group's row from self.groups_model:
    name_item = self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_NAME)
    name_item.parent().removeRow(name_item.row())
    self.globals_changed()
def on_save_configuration_triggered(self):
    """Save to the last used config file, or prompt for one if none yet."""
    if self.last_save_config_file is not None:
        self.save_configuration(self.last_save_config_file)
        return
    # No file chosen yet: behave as 'save as', then enable the actions
    # that only make sense once a config file exists:
    self.on_save_configuration_as_triggered()
    self.ui.actionSave_configuration_as.setEnabled(True)
    self.ui.actionRevert_configuration.setEnabled(True)
def on_revert_configuration_triggered(self):
    """Reload the last saved configuration, discarding unsaved GUI state.

    Asks for confirmation first, and does nothing if the user cancels.
    If the GUI state is unchanged since the last save (or nothing has
    been saved), reports that there is nothing to revert.
    """
    save_data = self.get_save_data()
    if self.last_save_data is not None and save_data != self.last_save_data:
        message = 'Revert configuration to the last saved state in \'%s\'?' % self.last_save_config_file
        reply = QtWidgets.QMessageBox.question(self.ui, 'Load configuration', message,
                                               QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.Cancel)
        if reply == QtWidgets.QMessageBox.Cancel:
            return
        elif reply == QtWidgets.QMessageBox.Yes:
            self.load_configuration(self.last_save_config_file)
    else:
        error_dialog('no changes to revert')
def on_save_configuration_as_triggered(self):
    """Prompt for a file and save the current runmanager configuration to it."""
    # Start the dialog at the previously used file, or a sensible default:
    default = self.last_save_config_file
    if default is None:
        default = os.path.join(self.exp_config.get('paths', 'experiment_shot_storage'), 'runmanager.ini')
    save_file = QtWidgets.QFileDialog.getSaveFileName(self.ui,
                                                      'Select file to save current runmanager configuration',
                                                      default,
                                                      "config files (*.ini)")
    # Some Qt bindings return (filename, filter):
    if type(save_file) is tuple:
        save_file, _ = save_file
    if not save_file:
        # User cancelled
        return
    # Convert to standard platform specific path, otherwise Qt likes
    # forward slashes:
    save_file = os.path.abspath(save_file)
    self.save_configuration(save_file)
    self.ui.actionSave_configuration.setText('Save configuration {}'.format(save_file))
def get_save_data(self):
    """Collect the current GUI state into a dict for saving to a config file.

    Returns a dict with keys: h5_files_open, active_groups, groups_open,
    current_labscript_file, shot_output_folder,
    is_using_default_shot_output_folder, send_to_runviewer, send_to_blacs,
    shuffle, axes and blacs_host.
    """
    # Get the currently open files and active groups:
    h5_files_open = []
    active_groups = []
    for i in range(self.groups_model.rowCount()):
        file_name_item = self.groups_model.item(i, self.GROUPS_COL_NAME)
        globals_file_name = file_name_item.text()
        h5_files_open.append(globals_file_name)
        for j in range(file_name_item.rowCount()):
            group_name_item = file_name_item.child(j, self.GROUPS_COL_NAME)
            group_name = group_name_item.text()
            group_active_item = file_name_item.child(j, self.GROUPS_COL_ACTIVE)
            if group_active_item.checkState() == QtCore.Qt.Checked:
                active_groups.append((globals_file_name, group_name))
    # Get the currently open groups, in tab order so that order is
    # preserved when restoring:
    groups_open = []
    for i in range(self.ui.tabWidget.count()):
        tab_page = self.ui.tabWidget.widget(i)
        for (globals_file_name, group_name), group_tab in self.currently_open_groups.items():
            if group_tab.ui is tab_page:
                groups_open.append((globals_file_name, group_name))
                break
    # Get the labscript file, output folder, and whether the output folder
    # is default:
    current_labscript_file = self.ui.lineEdit_labscript_file.text()
    shot_output_folder = self.ui.lineEdit_shot_output_folder.text()
    is_using_default_shot_output_folder = (shot_output_folder == self.get_default_output_folder())
    # Only save the shot output folder if not using the default, that way
    # the folder updating as the day rolls over will not be detected as a
    # change to the save data:
    if is_using_default_shot_output_folder:
        shot_output_folder = ''
    # Get the server hostnames:
    blacs_host = self.ui.lineEdit_BLACS_hostname.text()
    send_to_runviewer = self.ui.checkBox_view_shots.isChecked()
    send_to_blacs = self.ui.checkBox_run_shots.isChecked()
    shuffle = self.ui.pushButton_shuffle.isChecked()
    # Axes tab information: (name, shuffle-flag) pairs in display order:
    axes = []
    for i in range(self.axes_model.rowCount()):
        name_item = self.axes_model.item(i, self.AXES_COL_NAME)
        shuffle_item = self.axes_model.item(i, self.AXES_COL_SHUFFLE)
        shuffle_state = shuffle_item.checkState()
        axes.append((name_item.data(self.AXES_ROLE_NAME), 1 if shuffle_state == QtCore.Qt.Checked else 0))
    save_data = {'h5_files_open': h5_files_open,
                 'active_groups': active_groups,
                 'groups_open': groups_open,
                 'current_labscript_file': current_labscript_file,
                 'shot_output_folder': shot_output_folder,
                 'is_using_default_shot_output_folder': is_using_default_shot_output_folder,
                 'send_to_runviewer': send_to_runviewer,
                 'send_to_blacs': send_to_blacs,
                 'shuffle': shuffle,
                 'axes': axes,
                 'blacs_host': blacs_host}
    return save_data
def save_configuration(self, save_file):
    """Write the current GUI state to save_file and remember it so later
    loads/saves can detect whether anything has changed."""
    data = self.get_save_data()
    self.last_save_config_file = save_file
    self.last_save_data = data
    save_appconfig(save_file, {'runmanager_state': data})
def on_load_configuration_triggered(self):
    """Prompt for a configuration file and load it.

    If the current GUI state differs from the last saved state, first
    offers to save it (Yes/No/Cancel; Cancel aborts the load).
    """
    save_data = self.get_save_data()
    if self.last_save_data is not None and save_data != self.last_save_data:
        message = ('Current configuration (which groups are active/open and other GUI state) '
                   'has changed: save config file \'%s\'?' % self.last_save_config_file)
        reply = QtWidgets.QMessageBox.question(self.ui, 'Load configuration', message,
                                               QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
        if reply == QtWidgets.QMessageBox.Cancel:
            return
        if reply == QtWidgets.QMessageBox.Yes:
            self.save_configuration(self.last_save_config_file)
    # Start the dialog at the previously used file, or a sensible default:
    if self.last_save_config_file is not None:
        default = self.last_save_config_file
    else:
        default = os.path.join(self.exp_config.get('paths', 'experiment_shot_storage'), 'runmanager.ini')
    file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
                                                 'Select runmanager configuration file to load',
                                                 default,
                                                 "config files (*.ini)")
    # Some Qt bindings return (filename, filter):
    if type(file) is tuple:
        file, _ = file
    if not file:
        # User cancelled
        return
    # Convert to standard platform specific path, otherwise Qt likes
    # forward slashes:
    file = os.path.abspath(file)
    self.load_configuration(file)
def load_configuration(self, filename):
    """Restore GUI state from a runmanager .ini configuration file.

    Closes all currently open globals files, then re-opens files, restores
    active/open groups, the labscript file, output folder, server
    hostnames, shuffle state and axes order. Missing files/groups are
    reported as warnings rather than errors.
    """
    self.last_save_config_file = filename
    self.ui.actionSave_configuration.setText('Save configuration %s'%filename)
    # Close all files:
    save_data = self.get_save_data()
    for globals_file in save_data['h5_files_open']:
        self.close_globals_file(globals_file, confirm=False)
    # Ensure folder exists, if this was opened programmatically we are
    # creating the file, so the directory had better exist!
    runmanager_config = load_appconfig(filename).get('runmanager_state', {})
    # Emit a leading blank line before the first warning only:
    has_been_a_warning = [False]
    def warning(message):
        if not has_been_a_warning[0]:
            has_been_a_warning[0] = True
            self.output_box.output('\n')
        self.output_box.output('Warning: %s\n' % message, red=True)
    for globals_file in runmanager_config.get('h5_files_open', []):
        if os.path.exists(globals_file):
            try:
                self.open_globals_file(globals_file)
                self.last_opened_globals_folder = os.path.dirname(globals_file)
            except Exception:
                # Report the error but continue restoring other files:
                raise_exception_in_thread(sys.exc_info())
                continue
        else:
            self.output_box.output('\nWarning: globals file %s no longer exists\n' % globals_file, red=True)
    for globals_file, group_name in runmanager_config.get('active_groups', []):
        try:
            group_active_item = self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_ACTIVE)
            group_active_item.setCheckState(QtCore.Qt.Checked)
        except LookupError:
            warning("previously active group '%s' in %s no longer exists" % (group_name, globals_file))
    for globals_file, group_name in runmanager_config.get('groups_open', []):
        # First check if it exists:
        try:
            self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_NAME)
        except LookupError:
            warning("previously open group '%s' in %s no longer exists" % (group_name, globals_file))
        else:
            self.open_group(globals_file, group_name)
    current_labscript_file = runmanager_config.get('current_labscript_file')
    if current_labscript_file is not None:
        if os.path.exists(current_labscript_file):
            self.ui.lineEdit_labscript_file.setText(current_labscript_file)
            self.last_opened_labscript_folder = os.path.dirname(current_labscript_file)
        elif current_labscript_file:
            warning('previously selected labscript file %s no longer exists' % current_labscript_file)
    shot_output_folder = runmanager_config.get('shot_output_folder')
    if shot_output_folder is not None:
        self.ui.lineEdit_shot_output_folder.setText(shot_output_folder)
        self.last_selected_shot_output_folder = os.path.dirname(shot_output_folder)
    if runmanager_config.get('is_using_default_shot_output_folder', False):
        default_output_folder = self.get_default_output_folder()
        self.ui.lineEdit_shot_output_folder.setText(default_output_folder)
        self.last_selected_shot_output_folder = os.path.dirname(default_output_folder)
    send_to_runviewer = runmanager_config.get('send_to_runviewer')
    if send_to_runviewer is not None:
        self.ui.checkBox_view_shots.setChecked(send_to_runviewer)
    send_to_blacs = runmanager_config.get('send_to_blacs')
    if send_to_blacs is not None:
        self.ui.checkBox_run_shots.setChecked(send_to_blacs)
    # Clear the axes model first:
    if self.axes_model.rowCount():
        self.axes_model.removeRows(0, self.axes_model.rowCount())
    # Set the state of the global shuffle button. This ensures that if no axes items get loaded afterwards
    # (e.g. because the globals in the .ini file are no longer expansion globals), then we still have
    # an approximate state for the shuffle button that will apply to whatever globals are to be expanded.
    if runmanager_config.get('shuffle', False):
        self.ui.pushButton_shuffle.setChecked(True)
    # Now load the axes states (order and shuffle). This will also ensure the shuffle button matches the
    # state of these items (since we don't save/restore the tri-state nature of the global shuffle button).
    axes = runmanager_config.get('axes')
    if axes is not None and isinstance(axes, list):
        for name, shuffle in axes:
            self.add_item_to_axes_model(name, shuffle)
        self.update_axes_indentation()
    blacs_host = runmanager_config.get('blacs_host')
    if blacs_host is not None:
        self.ui.lineEdit_BLACS_hostname.setText(blacs_host)
    # Set as self.last_save_data so 'has anything changed' checks compare
    # against the freshly loaded state:
    save_data = self.get_save_data()
    self.last_save_data = save_data
    self.ui.actionSave_configuration_as.setEnabled(True)
    self.ui.actionRevert_configuration.setEnabled(True)
def compile_loop(self):
    """Worker loop: compile queued sequences one shot file at a time.

    Pulls (labscript_file, run_files, send_to_BLACS, BLACS_host,
    send_to_runviewer) jobs from self.compile_queue forever. Each run file
    is compiled by the child process; on success it is optionally sent to
    BLACS and runviewer. Compilation of the remaining files stops if
    self.compilation_aborted is set.
    """
    while True:
        try:
            labscript_file, run_files, send_to_BLACS, BLACS_host, send_to_runviewer = self.compile_queue.get()
            run_files = iter(run_files)  # Should already be in iterator but just in case
            while True:
                if self.compilation_aborted.is_set():
                    self.output_box.output('Compilation aborted.\n\n', red=True)
                    break
                try:
                    try:
                        # We do next() instead of looping over run_files
                        # so that if compilation is aborted we won't
                        # create an extra file unnecessarily.
                        run_file = next(run_files)
                    except StopIteration:
                        self.output_box.output('Ready.\n\n')
                        break
                    else:
                        # Ask the child process to compile this shot and
                        # wait for its result:
                        self.to_child.put(['compile', [labscript_file, run_file]])
                        signal, success = self.from_child.get()
                        assert signal == 'done'
                        if not success:
                            self.compilation_aborted.set()
                            continue
                        if send_to_BLACS:
                            self.send_to_BLACS(run_file, BLACS_host)
                        if send_to_runviewer:
                            self.send_to_runviewer(run_file)
                except Exception as e:
                    self.output_box.output(str(e) + '\n', red=True)
                    self.compilation_aborted.set()
            # Sequence finished (or aborted): reset the abort button/flag.
            inmain(self.ui.pushButton_abort.setEnabled, False)
            self.compilation_aborted.clear()
        except Exception:
            # Raise it so whatever bug it is gets seen, but keep going so
            # the thread keeps functioning:
            exc_info = sys.exc_info()
            raise_exception_in_thread(exc_info)
            continue
def parse_globals(self, active_groups, raise_exceptions=True, expand_globals=True, expansion_order=None, return_dimensions=False):
    """Evaluate the globals of the given active groups and optionally
    expand them into per-shot values.

    Returns (sequence_globals, shots, evaled_globals, global_hierarchy,
    expansions), with a trailing dimensions dict appended when
    return_dimensions is True. If expand_globals is False, shots is empty
    (and dimensions, if requested, is an empty dict).
    """
    sequence_globals = runmanager.get_globals(active_groups)
    evaled_globals, global_hierarchy, expansions = runmanager.evaluate_globals(
        sequence_globals, raise_exceptions)
    shots = []
    dimensions = {}
    if expand_globals:
        if return_dimensions:
            shots, dimensions = runmanager.expand_globals(
                sequence_globals, evaled_globals, expansion_order,
                return_dimensions=return_dimensions)
        else:
            shots = runmanager.expand_globals(
                sequence_globals, evaled_globals, expansion_order)
    result = (sequence_globals, shots, evaled_globals, global_hierarchy, expansions)
    if return_dimensions:
        return result + (dimensions,)
    return result
def guess_expansion_modes(self, active_groups, evaled_globals, global_hierarchy, expansions):
    """This function is designed to be called iteratively. It changes the
    expansion type of globals that reference other globals - such that
    globals referencing an iterable global will be zipped with it, rather
    than outer producted. Each time this method is called,
    self.parse_globals should also be called, so that the globals are
    evaluated with their new expansion modes, if they changed. This should
    be performed repeatedly until there are no more changes. Note that
    this method does not return what expansion types it thinks globals
    should have - it *actually writes them to the globals HDF5 file*. So
    it is up to later code to ensure it re-reads the expansion mode from
    the HDF5 file before proceeding. At present this method is only called
    from self.preparse_globals(), so see there to see how it fits in with
    everything else. This method uses four instance attributes to store
    state: self.previous_evaled_globals, self.previous_global_hierarchy,
    self.previous_expansion_types and self.previous_expansions. This is
    necessary so that it can detect changes.

    Returns True if any expansion type was changed (so the caller should
    re-evaluate), False otherwise."""
    # Do nothing if there were exceptions:
    for group_name in evaled_globals:
        for global_name in evaled_globals[group_name]:
            value = evaled_globals[group_name][global_name]
            if isinstance(value, Exception):
                # Let ExpansionErrors through, as they occur
                # when the user has changed the value without changing
                # the expansion type:
                if isinstance(value, runmanager.ExpansionError):
                    continue
                return False
    # Did the guessed expansion type for any of the globals change?
    expansion_types_changed = False
    expansion_types = {}
    for group_name in evaled_globals:
        for global_name in evaled_globals[group_name]:
            new_value = evaled_globals[group_name][global_name]
            try:
                previous_value = self.previous_evaled_globals[group_name][global_name]
            except KeyError:
                # This variable is used to guess the expansion type
                #
                # If we already have an expansion specified for this, but
                # don't have a previous value, then we should use the
                # new_value for the guess as we are likely loading from HDF5
                # file for the first time (and either way, don't want to
                # overwrite what the user has put in the expansion type)
                #
                # If we don't have an expansion...
                # then we set it to '0' which will result in an
                # expansion type guess of '' (empty string). This will
                # either result in nothing being done to the expansion
                # type or the expansion type being found to be 'outer',
                # which will then make it go through the machinery below
                if global_name in expansions and expansions[global_name]:
                    previous_value = new_value
                else:
                    previous_value = 0
            new_guess = runmanager.guess_expansion_type(new_value)
            previous_guess = runmanager.guess_expansion_type(previous_value)
            if new_guess == 'outer':
                expansion_types[global_name] = {'previous_guess': previous_guess,
                                                'new_guess': new_guess,
                                                'group_name': group_name,
                                                'value': new_value
                                                }
            elif new_guess != previous_guess:
                filename = active_groups[group_name]
                runmanager.set_expansion(filename, group_name, global_name, new_guess)
                expansions[global_name] = new_guess
                expansion_types_changed = True

    # Recursively find dependencies and add them to a zip group!
    def find_dependencies(global_name, global_hierarchy, expansion_types):
        results = set()
        for name, dependencies in global_hierarchy.items():
            if name in expansion_types and global_name in dependencies:
                results.add(name)
                results = results.union(find_dependencies(name, global_hierarchy, expansion_types))
        return results

    # True if any global this one depends on has a truthy expansion set
    # (note: returns None, a falsy value, when there are none):
    def global_depends_on_global_with_outer_product(global_name, global_hierarchy, expansions):
        if global_name not in global_hierarchy:
            return False
        else:
            for dependency in global_hierarchy[global_name]:
                if expansions[dependency]:
                    return True

    def set_expansion_type_guess(expansion_types, expansions, global_name, expansion_to_set, new=True):
        if new:
            key = 'new_guess'
        else:
            key = 'previous_guess'
        # debug logging
        log_if_global(global_name, [], 'setting expansion type for new dependency' if new else 'setting expansion type for old dependencies')
        # Only do this if the expansion is *not* already set to a specific zip group:
        if global_name in expansions and expansions[global_name] != '' and expansions[global_name] != 'outer':
            expansion_types[global_name][key] = expansions[global_name]
            # debug logging
            log_if_global(global_name, [], 'Using existing expansion %s for %s'%(expansions[global_name], global_name))
        else:
            expansion_types[global_name][key] = expansion_to_set
            expansions[global_name] = expansion_to_set
            # debug logging
            log_if_global(global_name, [], 'Using existing expansion %s for %s'%(expansion_to_set, global_name))

    for global_name in sorted(expansion_types):
        # We have a global that does not depend on anything that has an
        # expansion type of 'outer':
        if (not global_depends_on_global_with_outer_product(global_name, global_hierarchy, expansions)
                and not isinstance(expansion_types[global_name]['value'], runmanager.ExpansionError)):
            current_dependencies = find_dependencies(global_name, global_hierarchy, expansion_types)
            # If this global has other globals that use it, then add them
            # all to a zip group with the name of this global:
            if current_dependencies:
                for dependency in current_dependencies:
                    set_expansion_type_guess(expansion_types, expansions, dependency, str(global_name))
                set_expansion_type_guess(expansion_types, expansions, global_name, str(global_name))

    for global_name in sorted(self.previous_expansion_types):
        if (not global_depends_on_global_with_outer_product(
                global_name, self.previous_global_hierarchy, self.previous_expansions)
                and not isinstance(self.previous_expansion_types[global_name]['value'], runmanager.ExpansionError)):
            old_dependencies = find_dependencies(global_name, self.previous_global_hierarchy, self.previous_expansion_types)
            # If this global has other globals that use it, then add them
            # all to a zip group with the name of this global:
            if old_dependencies:
                for dependency in old_dependencies:
                    if dependency in expansion_types:
                        set_expansion_type_guess(expansion_types, self.previous_expansions, dependency, str(global_name), new=False)
                if global_name in expansion_types:
                    set_expansion_type_guess(expansion_types, self.previous_expansions, global_name, str(global_name), new=False)

    # Write out any guesses that changed between the previous and new state:
    for global_name, guesses in expansion_types.items():
        if guesses['new_guess'] != guesses['previous_guess']:
            filename = active_groups[guesses['group_name']]
            runmanager.set_expansion(
                filename, str(guesses['group_name']), str(global_name), str(guesses['new_guess']))
            expansions[global_name] = guesses['new_guess']
            expansion_types_changed = True

    # Now check everything that has an expansion type not equal to outer.
    # If it has one, but is not iterable, remove it from the zip group:
    for group_name in evaled_globals:
        for global_name in evaled_globals[group_name]:
            if expansions[global_name] and expansions[global_name] != 'outer':
                try:
                    iter(evaled_globals[group_name][global_name])
                except Exception:
                    filename = active_groups[group_name]
                    runmanager.set_expansion(filename, group_name, global_name, '')
                    expansion_types_changed = True

    # Remember this evaluation so the next call can detect changes:
    self.previous_evaled_globals = evaled_globals
    self.previous_global_hierarchy = global_hierarchy
    self.previous_expansion_types = expansion_types
    self.previous_expansions = expansions
    return expansion_types_changed
def make_h5_files(self, labscript_file, output_folder, sequence_globals, shots, shuffle):
    """Create the per-shot HDF5 files for a sequence.

    Increments the sequence index, then writes one run file per shot
    (optionally shuffled) into output_folder. Returns
    (labscript_file, run_files) where run_files is an iterable of the
    created file paths.
    """
    sequence_attrs, default_output_dir, filename_prefix = runmanager.new_sequence_details(
        labscript_file, config=self.exp_config, increment_sequence_index=True
    )
    if output_folder == self.previous_default_output_folder:
        # The user is using the default output folder. Just in case the sequence
        # index has been updated or the date has changed, use the default_output dir
        # obtained from new_sequence_details, as it is race-free, whereas the one
        # from the UI may be out of date since we only update it once a second.
        output_folder = default_output_dir
    self.check_output_folder_update()
    run_files = runmanager.make_run_files(
        output_folder,
        sequence_globals,
        shots,
        sequence_attrs,
        filename_prefix,
        shuffle,
    )
    logger.debug(run_files)
    return labscript_file, run_files
def send_to_BLACS(self, run_file, BLACS_hostname):
    """Submit a compiled run file to the BLACS control server.

    On any failure (network error or a reply other than
    'added successfully') the error is reported and compilation of the
    remaining shots is aborted.
    """
    port = int(self.exp_config.get('ports', 'BLACS'))
    agnostic_path = shared_drive.path_to_agnostic(run_file)
    self.output_box.output('Submitting run file %s.\n' % os.path.basename(run_file))
    try:
        reply = zmq_get(port, BLACS_hostname, data=agnostic_path)
        if 'added successfully' not in reply:
            raise Exception(reply)
        self.output_box.output(reply)
    except Exception as e:
        self.output_box.output('Couldn\'t submit job to control server: %s\n' % str(e), red=True)
        self.compilation_aborted.set()
def send_to_runviewer(self, run_file):
    """Submit a run file to runviewer, launching runviewer if it is not running.

    First pings runviewer; if there is no reply, starts it as a detached
    process (DETACHED_PROCESS on Windows, double-fork + setsid elsewhere)
    and waits up to 15 seconds for it to answer before submitting.
    """
    runviewer_port = int(self.exp_config.get('ports', 'runviewer'))
    agnostic_path = shared_drive.path_to_agnostic(run_file)
    try:
        # Ping to see if runviewer is already running:
        response = zmq_get(runviewer_port, 'localhost', data='hello', timeout=1)
        if 'hello' not in response:
            raise Exception(response)
    except Exception as e:
        logger.info('runviewer not running, attempting to start...')
        # Runviewer not running, start it:
        if os.name == 'nt':
            creationflags = 0x00000008  # DETACHED_PROCESS from the win32 API
            scripts_dir = desktop_app.environment.get_scripts_dir('runviewer')
            subprocess.Popen([str(scripts_dir / 'runviewer-gui')],
                             creationflags=creationflags, stdout=None, stderr=None,
                             close_fds=True)
        else:
            devnull = open(os.devnull, 'w')
            if not os.fork():
                # Child: detach from our session so runviewer outlives us:
                os.setsid()
                subprocess.Popen([sys.executable, '-m', 'runviewer'],
                                 stdin=devnull, stdout=devnull, stderr=devnull, close_fds=True)
                os._exit(0)
        try:
            # Wait (up to 15s) for the freshly started runviewer to respond:
            zmq_get(runviewer_port, 'localhost', data='hello', timeout=15)
        except Exception as e:
            self.output_box.output('Couldn\'t submit shot to runviewer: %s\n\n' % str(e), red=True)
    try:
        response = zmq_get(runviewer_port, 'localhost', data=agnostic_path, timeout=0.5)
        if 'ok' not in response:
            raise Exception(response)
        else:
            self.output_box.output('Shot %s sent to runviewer.\n' % os.path.basename(run_file))
    except Exception as e:
        self.output_box.output('Couldn\'t submit shot to runviewer: %s\n\n' % str(e), red=True)
class RemoteServer(ZMQServer):
    """ZMQ server exposing runmanager's GUI state and actions to remote clients.

    Requests are dispatched by name to handle_* methods (see handler()).
    Methods decorated with @inmain_decorator run in the Qt main thread.
    """
    def __init__(self):
        # Port from the experiment config, falling back to the library default:
        port = app.exp_config.getint(
            'ports', 'runmanager', fallback=runmanager.remote.DEFAULT_PORT
        )
        ZMQServer.__init__(self, port=port)

    ### Added handling of lambda expressions - Avikar
    def handle_get_globals(self, raw=False):
        """Return a dict of all globals from active groups.

        If raw is True, the unevaluated expression strings are returned;
        otherwise the evaluated values. Callable values are replaced by
        the placeholder string "<function>" in both cases, since they
        cannot be serialized for transport.
        """
        active_groups = inmain(app.get_active_groups, interactive=False)
        sequence_globals = runmanager.get_globals(active_groups)
        all_globals = {}
        if raw:
            for group_globals in sequence_globals.values():
                values_only = {name: val if not callable(val) else "<function>" for name, (val, _, _) in group_globals.items()}
                all_globals.update(values_only)
        else:
            evaled_globals, global_hierarchy, expansions = runmanager.evaluate_globals(
                sequence_globals, raise_exceptions=False
            )
            for group_globals in evaled_globals.values():
                all_globals.update(group_globals)
            for name, val in all_globals.items():
                if callable(val):
                    all_globals[name] = "<function>"
        return all_globals

    @inmain_decorator()
    def handle_set_globals(self, globals, raw=False):
        """Set the values of existing globals in active groups.

        globals: mapping of global name -> new value. Unless raw is True,
        values are repr()'d before being written; with raw=True they must
        already be strings. Raises if a global is in multiple active
        groups, or in none. Always triggers a globals preparse afterwards.
        """
        active_groups = app.get_active_groups(interactive=False)
        sequence_globals = runmanager.get_globals(active_groups)
        try:
            for global_name, new_value in globals.items():
                # Unless raw=True, convert to str representation for saving to the GUI
                # or file. If this does not result in an object the user can actually
                # use, evaluation will error and the caller will find out about it later
                if not raw:
                    new_value = repr(new_value)
                elif not isinstance(new_value, (str, bytes)):
                    msg = "global %s must be a string if raw=True, not %s"
                    raise TypeError(msg % (global_name, new_value.__class__.__name__))
                # Find the group this global is in:
                for group_name, group_globals in sequence_globals.items():
                    globals_file = active_groups[group_name]
                    if global_name in group_globals:
                        # Confirm it's not also in another group:
                        for other_name, other_globals in sequence_globals.items():
                            if other_globals is not group_globals:
                                if global_name in other_globals:
                                    msg = """Cannot set global %s, it is defined in
                                        multiple active groups: %s and %s"""
                                    msg = msg % (global_name, group_name, other_name)
                                    raise RuntimeError(dedent(msg))
                        previous_value, _, _ = sequence_globals[group_name][global_name]
                        # Append expression-final comments in the previous expression to
                        # the new one:
                        comments = runmanager.find_comments(previous_value)
                        if comments:
                            # Only the final comment
                            comment_start, comment_end = comments[-1]
                            # Only if the comment is the last thing in the expression:
                            if comment_end == len(previous_value):
                                new_value += previous_value[comment_start:comment_end]
                        try:
                            # Is the group open?
                            group_tab = app.currently_open_groups[
                                globals_file, group_name
                            ]
                        except KeyError:
                            # Group is not open. Change the global value on disk:
                            runmanager.set_value(
                                globals_file, group_name, global_name, new_value
                            )
                        else:
                            # Group is open. Change the global value via the GUI:
                            group_tab.change_global_value(
                                global_name,
                                previous_value,
                                new_value,
                                interactive=False,
                            )
                        break
                else:
                    # Global was not found.
                    msg = "Global %s not found in any active group" % global_name
                    raise ValueError(msg)
        finally:
            # Trigger preparsing of globals to occur so that changes in globals not in
            # open tabs are reflected in the GUI, such as n_shots, errors on other
            # globals that depend on them, etc.
            app.globals_changed()

    def handle_engage(self):
        """Wait for preparsing to finish, then press 'Engage'."""
        app.wait_until_preparse_complete()
        inmain(app.on_engage_clicked)

    @inmain_decorator()
    def handle_abort(self):
        """Press 'Abort'."""
        app.on_abort_clicked()

    @inmain_decorator()
    def handle_get_run_shots(self):
        """Return whether 'run shots' (send to BLACS) is checked."""
        return app.ui.checkBox_run_shots.isChecked()

    @inmain_decorator()
    def handle_set_run_shots(self, value):
        """Set the 'run shots' (send to BLACS) checkbox."""
        app.ui.checkBox_run_shots.setChecked(value)

    @inmain_decorator()
    def handle_get_view_shots(self):
        """Return whether 'view shots' (send to runviewer) is checked."""
        return app.ui.checkBox_view_shots.isChecked()

    @inmain_decorator()
    def handle_set_view_shots(self, value):
        """Set the 'view shots' (send to runviewer) checkbox."""
        app.ui.checkBox_view_shots.setChecked(value)

    @inmain_decorator()
    def handle_get_shuffle(self):
        """Return whether the shuffle button is checked."""
        return app.ui.pushButton_shuffle.isChecked()

    @inmain_decorator()
    def handle_set_shuffle(self, value):
        """Set the shuffle button state."""
        app.ui.pushButton_shuffle.setChecked(value)

    def handle_n_shots(self):
        """Return the number of shots the current globals would produce."""
        # Wait until any current preparsing is done, to ensure this is not racy w.r.t
        # previous remote calls:
        app.wait_until_preparse_complete()
        return app.n_shots

    @inmain_decorator()
    def handle_get_labscript_file(self):
        """Return the absolute path of the selected labscript file."""
        labscript_file = app.ui.lineEdit_labscript_file.text()
        return os.path.abspath(labscript_file)

    @inmain_decorator()
    def handle_set_labscript_file(self, value):
        """Set the labscript file (stored as an absolute path)."""
        labscript_file = os.path.abspath(value)
        app.ui.lineEdit_labscript_file.setText(labscript_file)

    @inmain_decorator()
    def handle_get_shot_output_folder(self):
        """Return the absolute path of the shot output folder."""
        shot_output_folder = app.ui.lineEdit_shot_output_folder.text()
        return os.path.abspath(shot_output_folder)

    @inmain_decorator()
    def handle_set_shot_output_folder(self, value):
        """Set the shot output folder (stored as an absolute path)."""
        shot_output_folder = os.path.abspath(value)
        app.ui.lineEdit_shot_output_folder.setText(shot_output_folder)

    def handle_error_in_globals(self):
        """Return True if the active groups conflict or any global fails to evaluate."""
        try:
            # This will raise an exception if there are multiple active groups of the
            # same name:
            active_groups = inmain(app.get_active_groups, interactive=False)
            sequence_globals = runmanager.get_globals(active_groups)
            # This will raise an exception if any of the globals can't be evaluated:
            runmanager.evaluate_globals(sequence_globals, raise_exceptions=True)
        except Exception:
            return True
        return False

    def handle_is_output_folder_default(self):
        """Return True if the shot output folder is the default one."""
        return not app.non_default_folder

    @inmain_decorator()
    def handle_reset_shot_output_folder(self):
        """Reset the shot output folder to the default."""
        app.on_reset_shot_output_folder_clicked(None)

    def handler(self, request_data):
        """Dispatch a remote request (cmd, args, kwargs) to handle_<cmd>.

        On error, returns an exception instance carrying the formatted
        traceback; the client library is expected to raise it.
        """
        cmd, args, kwargs = request_data
        if cmd == 'hello':
            return 'hello'
        elif cmd == '__version__':
            return runmanager.__version__
        try:
            return getattr(self, 'handle_' + cmd)(*args, **kwargs)
        except Exception as e:
            msg = traceback.format_exc()
            msg = "Runmanager server returned an exception:\n" + msg
            return e.__class__(msg)
if __name__ == "__main__":
    # Application entry point: set up logging, the Qt application, the main
    # RunManager window and the remote-control server, then run the event loop.
    logger = setup_logging('runmanager')
    labscript_utils.excepthook.set_logger(logger)
    logger.info('\n\n===============starting===============\n')
    qapplication = QtWidgets.QApplication.instance()
    if qapplication is None:
        qapplication = QtWidgets.QApplication(sys.argv)
    qapplication.setAttribute(QtCore.Qt.AA_DontShowIconsInMenus, False)
    app = RunManager()
    splash.update_text('Starting remote server')
    remote_server = RemoteServer()
    splash.hide()
    # Let the interpreter run every 500ms so it sees Ctrl-C interrupts:
    timer = QtCore.QTimer()
    timer.start(500)
    timer.timeout.connect(lambda: None)
    # Upon seeing a ctrl-c interrupt, quit the event loop
    signal.signal(signal.SIGINT, lambda *args: qapplication.exit())
    qapplication.exec_()
    remote_server.shutdown()
|
vnrpc.py | # encoding: UTF-8
import threading
import traceback
import signal
import zmq
from msgpack import packb, unpackb
from json import dumps, loads
import cPickle
pDumps = cPickle.dumps
pLoads = cPickle.loads
# 实现Ctrl-c中断recv
signal.signal(signal.SIGINT, signal.SIG_DFL)
########################################################################
class RpcObject(object):
    """Base class providing pluggable serialization for RPC endpoints.

    pack/unpack are rebound at runtime to one of three serializer pairs:

    - msgpack: fastest, but requires the msgpack package to be installed.
    - json: slightly slower but the most interoperable; most languages
      have a built-in library for it.
    - cPickle: Python-only, but can transport arbitrary Python objects
      directly, which is very convenient.

    Prefer msgpack where possible; use json when talking to a language
    without msgpack support, and cPickle when the payload contains many
    custom Python objects. Add further serializers as needed.
    """

    def __init__(self):
        """Constructor: default to pickle serialization."""
        # self.useMsgpack()
        self.usePickle()

    def pack(self, data):
        """Serialize data. Placeholder; rebound by the use*() methods."""
        pass

    def unpack(self, data):
        """Deserialize data. Placeholder; rebound by the use*() methods."""
        pass

    def __jsonPack(self, data):
        # Serialize with json
        return dumps(data)

    def __jsonUnpack(self, data):
        # Deserialize with json
        return loads(data)

    def __msgpackPack(self, data):
        # Serialize with msgpack
        return packb(data)

    def __msgpackUnpack(self, data):
        # Deserialize with msgpack
        return unpackb(data)

    def __picklePack(self, data):
        # Serialize with cPickle
        return pDumps(data)

    def __pickleUnpack(self, data):
        # Deserialize with cPickle
        return pLoads(data)

    def useJson(self):
        """Select json as the serializer."""
        print( 'Use Json Serialization')
        self.pack, self.unpack = self.__jsonPack, self.__jsonUnpack

    def useMsgpack(self):
        """Select msgpack as the serializer."""
        print( 'Use MsgPack Serialization')
        self.pack, self.unpack = self.__msgpackPack, self.__msgpackUnpack

    def usePickle(self):
        """Select cPickle as the serializer."""
        print( 'Use Pickle Serialization')
        self.pack, self.unpack = self.__picklePack, self.__pickleUnpack
########################################################################
class RpcServer(RpcObject):
    """RPC server.

    Binds a zmq REP socket for request/reply calls and a PUB socket for
    broadcasting data to subscribed clients.  Provides start/stop for the
    worker thread, the run loop, publish(), and register().
    """

    #----------------------------------------------------------------------
    def __init__(self, repAddress, pubAddress):
        """Constructor: bind sockets and prepare the worker thread.

        :param repAddress: zmq endpoint for the request/reply socket
        :param pubAddress: zmq endpoint for the broadcast socket
        """
        # RpcObject.__init__ installs the default serializer (pickle).
        super(RpcServer, self).__init__()
        # Registered callables, keyed by function name.
        self.__functions = {}
        # zmq sockets.
        self.__context = zmq.Context()
        self.__socketREP = self.__context.socket(zmq.REP)   # request/reply socket
        self.__socketREP.bind(repAddress)
        self.__socketPUB = self.__context.socket(zmq.PUB)   # broadcast socket
        self.__socketPUB.bind(pubAddress)
        # Worker thread state.
        self.__active = False                               # server running flag
        self.__thread = threading.Thread(target=self.run)   # worker thread

    #----------------------------------------------------------------------
    def start(self):
        """Start the server's worker thread."""
        self.__active = True
        # FIX: Thread.isAlive() was removed in Python 3.9; is_alive() is
        # the portable spelling (available since Python 2.6).
        if not self.__thread.is_alive():
            self.__thread.start()

    #----------------------------------------------------------------------
    def stop(self):
        """Stop the server and wait for the worker thread to exit."""
        self.__active = False
        if self.__thread.is_alive():
            self.__thread.join()

    #----------------------------------------------------------------------
    def run(self):
        """Worker loop: serve RPC requests until stop() is called."""
        while self.__active:
            # Poll with a 1 second timeout so stop() is noticed promptly.
            if not self.__socketREP.poll(1000):
                continue
            # Receive and deserialize the request: [name, args, kwargs].
            reqb = self.__socketREP.recv()
            req = self.unpack(reqb)
            name, args, kwargs = req
            # Invoke the registered function; on failure send back the
            # traceback text instead of crashing the server.
            try:
                func = self.__functions[name]
                r = func(*args, **kwargs)
                rep = [True, r]
            except Exception:
                rep = [False, traceback.format_exc()]
            # Serialize and return the reply over the REP socket.
            repb = self.pack(rep)
            self.__socketREP.send(repb)

    #----------------------------------------------------------------------
    def publish(self, topic, data):
        """Broadcast *data* to subscribers.

        :param topic: topic string ('' broadcasts without a topic prefix)
        :param data: payload, serialized with the active serializer
        """
        datab = self.pack(data)
        if len(topic) > 0:
            topic = topic.encode('utf-8')
        else:
            # FIX: pyzmq on Python 3 requires bytes frames; the original
            # left an empty topic as str, which raises TypeError.
            topic = b''
        self.__socketPUB.send_multipart([topic, datab])

    #----------------------------------------------------------------------
    def register(self, func):
        """Register *func* so clients can invoke it by name."""
        self.__functions[func.__name__] = func
########################################################################
class RpcClient(RpcObject):
    """RPC client.

    Connects a zmq REQ socket for remote calls and a SUB socket for
    receiving server broadcasts.  Remote calls are proxied through
    __getattr__; broadcast data is dispatched to callback().
    """

    #----------------------------------------------------------------------
    def __init__(self, reqAddress, subAddress):
        """Constructor: remember endpoints, create sockets and worker thread."""
        super(RpcClient, self).__init__()
        # zmq endpoints and sockets.
        self.__reqAddress = reqAddress
        self.__subAddress = subAddress
        self.__context = zmq.Context()
        self.__socketREQ = self.__context.socket(zmq.REQ)   # request socket
        self.__socketSUB = self.__context.socket(zmq.SUB)   # subscription socket
        # Worker thread that handles data pushed by the server.
        self.__active = False                               # client running flag
        self.__thread = threading.Thread(target=self.run)   # worker thread

    #----------------------------------------------------------------------
    def __getattr__(self, name):
        """Proxy unknown attribute access as a remote procedure call."""
        def dorpc(*args, **kwargs):
            # Build, serialize and send the request [name, args, kwargs].
            req = [name, args, kwargs]
            reqb = self.pack(req)
            self.__socketREQ.send(reqb)
            # Wait for and deserialize the reply [ok, payload].
            repb = self.__socketREQ.recv()
            rep = self.unpack(repb)
            # Return the payload on success; raise the remote traceback
            # text on failure.
            if rep[0]:
                return rep[1]
            raise RemoteException(rep[1])
        return dorpc

    #----------------------------------------------------------------------
    def start(self):
        """Connect both sockets and start the worker thread."""
        print('conenct to req:{0}'.format(self.__reqAddress))
        self.__socketREQ.connect(self.__reqAddress)
        print('connect to sub:{0}'.format(self.__subAddress))
        self.__socketSUB.connect(self.__subAddress)
        self.__active = True
        # FIX: Thread.isAlive() was removed in Python 3.9; use is_alive().
        if not self.__thread.is_alive():
            self.__thread.start()

    #----------------------------------------------------------------------
    def stop(self):
        """Stop the client and wait for the worker thread to exit."""
        self.__active = False
        if self.__thread.is_alive():
            self.__thread.join()

    #----------------------------------------------------------------------
    def run(self):
        """Worker loop: receive broadcasts and dispatch them to callback()."""
        while self.__active:
            # Poll with a 1 second timeout so stop() is noticed promptly.
            if not self.__socketSUB.poll(1000):
                continue
            # Receive [topic, payload] from the subscription socket.
            topic, datab = self.__socketSUB.recv_multipart()
            if len(topic) > 0:
                topic = topic.decode("utf-8")
            data = self.unpack(datab)
            self.callback(topic, data)

    #----------------------------------------------------------------------
    def callback(self, topic, data):
        """Broadcast handler; must be implemented by the user."""
        raise NotImplementedError

    #----------------------------------------------------------------------
    def subscribeTopic(self, topic):
        """Subscribe to broadcasts for *topic*; use topic='' for everything.

        FIX: pyzmq on Python 3 requires a bytes prefix for SUBSCRIBE; the
        original left an empty topic as str, which raises TypeError.
        Encoding handles both the empty and the non-empty case.
        """
        self.__socketSUB.setsockopt(zmq.SUBSCRIBE, topic.encode('utf-8'))
########################################################################
class RemoteException(Exception):
    """RPC remote exception: wraps the traceback text sent by the server."""

    #----------------------------------------------------------------------
    def __init__(self, value):
        """Constructor.

        FIX: also call Exception.__init__ so .args is populated and the
        exception reprs/pickles correctly (the original left args empty).
        str(exc) is unchanged.
        """
        super(RemoteException, self).__init__(value)
        self.__value = value

    #----------------------------------------------------------------------
    def __str__(self):
        """Return the remote error description."""
        return self.__value
|
trustedcoin.py | #!/usr/bin/env python
#
# Electrum - Lightweight SmartCash Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
import os
import requests
import json
from urllib.parse import urljoin
from urllib.parse import quote
import electrum_smart as electrum
from electrum_smart import bitcoin
from electrum_smart import constants
from electrum_smart import keystore
from electrum_smart.bitcoin import *
from electrum_smart.mnemonic import Mnemonic
from electrum_smart import version
from electrum_smart.wallet import Multisig_Wallet, Deterministic_Wallet
from electrum_smart.i18n import _
from electrum_smart.plugins import BasePlugin, hook
from electrum_smart.util import NotEnoughFunds
from electrum_smart.storage import STO_EV_USER_PW
# signing_xpub is hardcoded so that the wallet can be restored from seed, without TrustedCoin's server
def get_signing_xpub():
    """Hardcoded TrustedCoin signing xpub for the active network.

    Hardcoding lets a wallet be restored from seed without the server.
    """
    if constants.net.TESTNET:
        return "tpubD6NzVbkrYhZ4XdmyJQcCPjQfg6RXVUzGFhPjZ7uvRC8JLcS7Hw1i7UTpyhp9grHpak4TyK2hzBJrujDVLXQ6qB5tNpVx9rC6ixijUXadnmY"
    return "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
def get_billing_xpub():
    """Hardcoded TrustedCoin billing xpub for the active network."""
    if constants.net.TESTNET:
        return "tpubD6NzVbkrYhZ4X11EJFTJujsYbUmVASAYY7gXsEt4sL97AMBdypiH1E9ZVTpdXXEy3Kj9Eqd1UkxdGtvDt5z23DKsh6211CfNJo8bLLyem5r"
    return "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
# Seed prefix identifying 2FA-type Electrum seeds.
SEED_PREFIX = version.SEED_PREFIX_2FA

# Paragraphs shown to the user before enabling the TrustedCoin service.
DISCLAIMER = [
    _("Two-factor authentication is a service provided by TrustedCoin. "
      "It uses a multi-signature wallet, where you own 2 of 3 keys. "
      "The third key is stored on a remote server that signs transactions on "
      "your behalf. To use this service, you will need a smartphone with "
      "Google Authenticator installed."),
    _("A small fee will be charged on each transaction that uses the "
      "remote server. You may check and modify your billing preferences "
      "once the installation is complete."),
    _("Note that your coins are not locked in this service. You may withdraw "
      "your funds at any time and at no cost, without the remote server, by "
      "using the 'restore wallet' option with your wallet seed."),
    _("The next step will generate the seed of your wallet. This seed will "
      "NOT be saved in your computer, and it must be stored on paper. "
      "To be safe from malware, you may want to do this on an offline "
      "computer, and move your wallet later to an online computer."),
]

# Prompt used by the restore flow.
RESTORE_MSG = _("Enter the seed for your 2-factor wallet:")
class TrustedCoinException(Exception):
    """Error reported by the TrustedCoin HTTP API."""

    def __init__(self, message, status_code=0):
        """Keep the server's message plus the HTTP status code (0 if unknown)."""
        super().__init__(message)
        self.status_code = status_code
class TrustedCoinCosignerClient(object):
    """Thin HTTP client for the TrustedCoin cosigner REST API."""

    def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/'):
        """Remember the API endpoint and an optional User-Agent string."""
        self.base_url = base_url
        self.debug = False
        self.user_agent = user_agent

    def send_request(self, method, relative_url, data=None, headers=None):
        """Issue an HTTP request against the API and decode the response.

        :param method: HTTP verb, 'get' or 'post'
        :param relative_url: path relative to base_url
        :param data: query parameters (get) or JSON body (post)
        :param headers: optional extra HTTP headers.  New backward-compatible
            parameter — FIX: transfer_credit() already passed headers, which
            the old 3-argument signature rejected with a TypeError.
        :raises TrustedCoinException: on any non-200 response
        """
        kwargs = {'headers': {}}
        if headers:
            kwargs['headers'].update(headers)
        if self.user_agent:
            kwargs['headers']['user-agent'] = self.user_agent
        if method == 'get' and data:
            kwargs['params'] = data
        elif method == 'post' and data:
            kwargs['data'] = json.dumps(data)
            kwargs['headers']['content-type'] = 'application/json'
        url = urljoin(self.base_url, relative_url)
        if self.debug:
            print('%s %s %s' % (method, url, data))
        response = requests.request(method, url, **kwargs)
        if self.debug:
            print(response.text)
        if response.status_code != 200:
            # Prefer the server's JSON error message when available.
            message = str(response.text)
            if response.headers.get('content-type') == 'application/json':
                r = response.json()
                if 'message' in r:
                    message = r['message']
            raise TrustedCoinException(message, response.status_code)
        if response.headers.get('content-type') == 'application/json':
            return response.json()
        else:
            return response.text

    def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'):
        """
        Returns the TOS for the given billing plan as a plain/text unicode string.
        :param billing_plan: the plan to return the terms for
        """
        payload = {'billing_plan': billing_plan}
        return self.send_request('get', 'tos', payload)

    def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'):
        """
        Creates a new cosigner resource.
        :param xpubkey1: a bip32 extended public key (customarily the hot key)
        :param xpubkey2: a bip32 extended public key (customarily the cold key)
        :param email: a contact email
        :param billing_plan: the billing plan for the cosigner
        """
        payload = {
            'email': email,
            'xpubkey1': xpubkey1,
            'xpubkey2': xpubkey2,
            'billing_plan': billing_plan,
        }
        return self.send_request('post', 'cosigner', payload)

    def auth(self, id, otp):
        """
        Attempt to authenticate for a particular cosigner.
        :param id: the id of the cosigner
        :param otp: the one time password
        """
        payload = {'otp': otp}
        return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)

    def get(self, id):
        """ Get billing info """
        return self.send_request('get', 'cosigner/%s' % quote(id))

    def get_challenge(self, id):
        """ Get challenge to reset Google Auth secret """
        return self.send_request('get', 'cosigner/%s/otp_secret' % quote(id))

    def reset_auth(self, id, challenge, signatures):
        """ Reset Google Auth secret """
        payload = {'challenge': challenge, 'signatures': signatures}
        return self.send_request('post', 'cosigner/%s/otp_secret' % quote(id), payload)

    def sign(self, id, transaction, otp):
        """
        Attempt to authenticate for a particular cosigner.
        :param id: the id of the cosigner
        :param transaction: the hex encoded [partially signed] compact transaction to sign
        :param otp: the one time password
        """
        payload = {
            'otp': otp,
            'transaction': transaction
        }
        return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload)

    def transfer_credit(self, id, recipient, otp, signature_callback):
        """
        Tranfer a cosigner's credits to another cosigner.
        :param id: the id of the sending cosigner
        :param recipient: the id of the recipient cosigner
        :param otp: the one time password (of the sender)
        :param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
        """
        # FIX: this module does not import `time` at the top level, so the
        # timestamp below would raise NameError; import it locally.
        import time
        payload = {
            'otp': otp,
            'recipient': recipient,
            'timestamp': int(time.time()),
        }
        relative_url = 'cosigner/%s/transfer' % quote(id)
        full_url = urljoin(self.base_url, relative_url)
        headers = {
            'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
        }
        # send_request now accepts headers (previously a TypeError).
        return self.send_request('post', relative_url, payload, headers=headers)
# Module-level API client shared by the plugin and the 2FA wallet code.
server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.ELECTRUM_VERSION)
class Wallet_2fa(Multisig_Wallet):
    """2-of-3 multisig wallet whose third key is held by TrustedCoin."""
    wallet_type = '2fa'

    def __init__(self, storage):
        """Constructor: fixed 2-of-3 scheme; billing info is fetched lazily."""
        self.m, self.n = 2, 3
        # NOTE(review): calls Deterministic_Wallet.__init__ directly instead
        # of Multisig_Wallet.__init__ — presumably to skip re-reading m/n
        # from storage; confirm against the electrum base classes.
        Deterministic_Wallet.__init__(self, storage)
        self.is_billing = False
        self.billing_info = None

    def can_sign_without_server(self):
        """True when the second keystore holds a private key (restored wallet)."""
        return not self.keystores['x2/'].is_watching_only()

    def get_user_id(self):
        """Return the (long_id, short_id) TrustedCoin user id pair."""
        return get_user_id(self.storage)

    def min_prepay(self):
        """Smallest prepay bundle offered by the server's price list."""
        return min(self.price_per_tx.keys())

    def num_prepay(self, config):
        """User-configured prepay bundle size, clamped to an offered one."""
        default = self.min_prepay()
        n = config.get('trustedcoin_prepay', default)
        if n not in self.price_per_tx:
            n = default
        return n

    def extra_fee(self, config):
        """TrustedCoin fee (satoshis) to add to the next transaction, or 0."""
        if self.can_sign_without_server():
            return 0
        if self.billing_info is None:
            # Billing info not cached yet; fetch asynchronously and charge
            # nothing for this transaction.
            self.plugin.start_request_thread(self)
            return 0
        if self.billing_info.get('tx_remaining'):
            return 0
        if self.is_billing:
            return 0
        n = self.num_prepay(config)
        price = int(self.price_per_tx[n])
        # Sanity bound on the server-quoted price.
        assert price <= 100000 * n
        return price

    def make_unsigned_transaction(self, coins, outputs, config, fixed_fee=None,
                                  change_addr=None, is_sweep=False):
        """Build an unsigned tx, appending the TrustedCoin fee output if due."""
        mk_tx = lambda o: Multisig_Wallet.make_unsigned_transaction(
            self, coins, o, config, fixed_fee, change_addr)
        fee = self.extra_fee(config) if not is_sweep else 0
        if fee:
            address = self.billing_info['billing_address']
            fee_output = (TYPE_ADDRESS, address, fee)
            try:
                tx = mk_tx(outputs + [fee_output])
            except NotEnoughFunds:
                # trustedcoin won't charge if the total inputs is
                # lower than their fee
                tx = mk_tx(outputs)
                if tx.input_value() >= fee:
                    raise
                self.print_error("not charging for this tx")
        else:
            tx = mk_tx(outputs)
        return tx

    def sign_transaction(self, tx, password):
        """Sign locally, then ask the TrustedCoin server for its signature."""
        Multisig_Wallet.sign_transaction(self, tx, password)
        if tx.is_complete():
            return
        if not self.auth_code:
            self.print_error("sign_transaction: no auth code")
            return
        long_user_id, short_id = self.get_user_id()
        tx_dict = tx.as_dict()
        raw_tx = tx_dict["hex"]
        r = server.sign(short_id, raw_tx, self.auth_code)
        if r:
            raw_tx = r.get('transaction')
            tx.update(raw_tx)
        self.print_error("twofactor: is complete", tx.is_complete())
        # reset billing_info
        self.billing_info = None
# Utility functions
def get_user_id(storage):
    """Derive the TrustedCoin user id pair from the wallet's two xpubs.

    Returns (long_id, short_id): long_id is the hash of the sorted,
    concatenated xpubs; short_id is its hex-encoded SHA256.
    """
    hot = storage.get('x1/')['xpub']
    cold = storage.get('x2/')['xpub']
    long_id = bitcoin.sha256(''.join(sorted([hot, cold])))
    short_id = hashlib.sha256(long_id).hexdigest()
    return long_id, short_id
def make_xpub(xpub, s):
    """Return a child xpub of *xpub* derived with material *s*.

    Renames the local previously called `version`, which shadowed the
    module-level `version` import.
    """
    ver, _, _, _, c, cK = deserialize_xpub(xpub)
    cK2, c2 = bitcoin._CKD_pub(cK, c, s)
    return bitcoin.serialize_xpub(ver, c2, cK2)
def make_billing_address(wallet, num):
    """Derive billing address *num* from the billing xpub and the wallet id."""
    long_id, _short_id = wallet.get_user_id()
    billing_xpub = make_xpub(get_billing_xpub(), long_id)
    _, _, _, _, c, cK = deserialize_xpub(billing_xpub)
    cK, c = bitcoin.CKD_pub(cK, c, num)
    return bitcoin.public_key_to_p2pkh(cK)
class TrustedCoinPlugin(BasePlugin):
    """Wallet plugin implementing TrustedCoin 2-of-3 two-factor wallets."""
    wallet_class = Wallet_2fa

    def __init__(self, parent, config, name):
        """Constructor: register this plugin instance on the wallet class."""
        BasePlugin.__init__(self, parent, config, name)
        self.wallet_class.plugin = self
        # Guards against launching multiple billing-info request threads.
        self.requesting = False

    @staticmethod
    def is_valid_seed(seed):
        """True if *seed* is a valid 2FA-type Electrum seed."""
        return bitcoin.is_new_seed(seed, SEED_PREFIX)

    def is_available(self):
        return True

    def is_enabled(self):
        return True

    def can_user_disable(self):
        # 2FA is structural to the wallet type; it cannot be toggled off.
        return False

    @hook
    def get_tx_extra_fee(self, wallet, tx):
        """Return (address, amount) of the TrustedCoin fee output in *tx*, if any."""
        # NOTE(review): exact-type check (not isinstance) — presumably to
        # exclude subclasses; confirm intent.
        if type(wallet) != Wallet_2fa:
            return
        if wallet.billing_info is None:
            assert wallet.can_sign_without_server()
            return None
        address = wallet.billing_info['billing_address']
        for _type, addr, amount in tx.outputs():
            if _type == TYPE_ADDRESS and addr == address:
                return address, amount

    def request_billing_info(self, wallet):
        """Fetch billing info from the server and cache it on *wallet*."""
        if wallet.can_sign_without_server():
            return
        self.print_error("request billing info")
        billing_info = server.get(wallet.get_user_id()[1])
        billing_address = make_billing_address(wallet, billing_info['billing_index'])
        # The server-reported address must match our own derivation.
        assert billing_address == billing_info['billing_address']
        wallet.billing_info = billing_info
        wallet.price_per_tx = dict(billing_info['price_per_tx'])
        # Drop the 1-tx bundle from the selectable prepay options.
        wallet.price_per_tx.pop(1)
        self.requesting = False
        return True

    def start_request_thread(self, wallet):
        """Fetch billing info on a daemon thread (at most one in flight)."""
        from threading import Thread
        if self.requesting is False:
            self.requesting = True
            t = Thread(target=self.request_billing_info, args=(wallet,))
            t.setDaemon(True)
            t.start()
            return t

    def make_seed(self):
        """Create a fresh 128-bit seed of the 2FA type."""
        return Mnemonic('english').make_seed(seed_type='2fa', num_bits=128)

    @hook
    def do_clear(self, window):
        """Reset the billing flag when the send form is cleared."""
        window.wallet.is_billing = False

    def show_disclaimer(self, wizard):
        """Wizard entry point: show the TrustedCoin disclaimer."""
        wizard.set_icon(':icons/trustedcoin-wizard.png')
        wizard.stack = []
        wizard.confirm_dialog(title='Disclaimer', message='\n\n'.join(DISCLAIMER), run_next = lambda x: wizard.run('choose_seed'))

    def choose_seed(self, wizard):
        """Ask whether to create a new seed or restore an existing one."""
        title = _('Create or restore')
        message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
        choices = [
            ('create_seed', _('Create a new seed')),
            ('restore_wallet', _('I already have a seed')),
        ]
        wizard.choice_dialog(title=title, message=message, choices=choices, run_next=wizard.run)

    def create_seed(self, wizard):
        """Generate and display a new seed, then ask for a passphrase."""
        seed = self.make_seed()
        f = lambda x: wizard.request_passphrase(seed, x)
        wizard.show_seed_dialog(run_next=f, seed_text=seed)

    @classmethod
    def get_xkeys(self, seed, passphrase, derivation):
        """Derive (xprv, xpub) at *derivation* from the mnemonic seed."""
        # NOTE: classmethod whose first parameter is conventionally `cls`;
        # kept as `self` to avoid touching code in a doc-only pass.
        from electrum_smart.mnemonic import Mnemonic
        from electrum_smart.keystore import bip32_root, bip32_private_derivation
        bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
        xprv, xpub = bip32_root(bip32_seed, 'standard')
        xprv, xpub = bip32_private_derivation(xprv, "m/", derivation)
        return xprv, xpub

    @classmethod
    def xkeys_from_seed(self, seed, passphrase):
        """Derive the two user key pairs (x1, x2) from the wallet seed."""
        words = seed.split()
        n = len(words)
        # old version use long seed phrases
        if n >= 24:
            # Long seeds: two independent 12-word halves, one per key.
            assert passphrase == ''
            xprv1, xpub1 = self.get_xkeys(' '.join(words[0:12]), '', "m/")
            xprv2, xpub2 = self.get_xkeys(' '.join(words[12:]), '', "m/")
        elif n==12:
            # Current seeds: one mnemonic, two hardened derivation branches.
            xprv1, xpub1 = self.get_xkeys(seed, passphrase, "m/0'/")
            xprv2, xpub2 = self.get_xkeys(seed, passphrase, "m/1'/")
        else:
            raise Exception('unrecognized seed length: {} words'.format(n))
        return xprv1, xpub1, xprv2, xpub2

    def create_keystore(self, wizard, seed, passphrase):
        # this overloads the wizard's method
        # x1 keeps its private key; x2 is stored watching-only so the
        # server's cosignature is required.
        xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
        k1 = keystore.from_xprv(xprv1)
        k2 = keystore.from_xpub(xpub2)
        wizard.request_password(run_next=lambda pw, encrypt: self.on_password(wizard, pw, encrypt, k1, k2))

    def on_password(self, wizard, password, encrypt_storage, k1, k2):
        """Persist the two keystores and prompt to continue online."""
        k1.update_password(None, password)
        wizard.storage.set_keystore_encryption(bool(password))
        if encrypt_storage:
            wizard.storage.set_password(password, enc_version=STO_EV_USER_PW)
        wizard.storage.put('x1/', k1.dump())
        wizard.storage.put('x2/', k2.dump())
        wizard.storage.write()
        msg = [
            _("Your wallet file is: {}.").format(os.path.abspath(wizard.storage.path)),
            _("You need to be online in order to complete the creation of "
              "your wallet. If you generated your seed on an offline "
              'computer, click on "{}" to close this window, move your '
              "wallet file to an online computer, and reopen it with "
              "Electrum.").format(_('Cancel')),
            _('If you are online, click on "{}" to continue.').format(_('Next'))
        ]
        msg = '\n\n'.join(msg)
        wizard.stack = []
        wizard.confirm_dialog(title='', message=msg, run_next = lambda x: wizard.run('create_remote_key'))

    def restore_wallet(self, wizard):
        """Start the restore-from-seed flow."""
        wizard.opt_bip39 = False
        wizard.opt_ext = True
        title = _("Restore two-factor Wallet")
        f = lambda seed, is_bip39, is_ext: wizard.run('on_restore_seed', seed, is_ext)
        wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)

    def on_restore_seed(self, wizard, seed, is_ext):
        """Optionally collect an extension passphrase, then choose keep/disable."""
        f = lambda x: self.restore_choice(wizard, seed, x)
        wizard.passphrase_dialog(run_next=f) if is_ext else f('')

    def restore_choice(self, wizard, seed, passphrase):
        """Ask whether to keep 2FA or convert to a plain 2-key multisig."""
        wizard.set_icon(':icons/trustedcoin-wizard.png')
        wizard.stack = []
        title = _('Restore 2FA wallet')
        msg = ' '.join([
            'You are going to restore a wallet protected with two-factor authentication.',
            'Do you want to keep using two-factor authentication with this wallet,',
            'or do you want to disable it, and have two master private keys in your wallet?'
        ])
        choices = [('keep', 'Keep'), ('disable', 'Disable')]
        f = lambda x: self.on_choice(wizard, seed, passphrase, x)
        wizard.choice_dialog(choices=choices, message=msg, title=title, run_next=f)

    def on_choice(self, wizard, seed, passphrase, x):
        """Branch on the keep/disable decision."""
        if x == 'disable':
            f = lambda pw, encrypt: wizard.run('on_restore_pw', seed, passphrase, pw, encrypt)
            wizard.request_password(run_next=f)
        else:
            self.create_keystore(wizard, seed, passphrase)

    def on_restore_pw(self, wizard, seed, passphrase, password, encrypt_storage):
        """Restore with 2FA disabled: keep both private keys locally."""
        storage = wizard.storage
        xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
        k1 = keystore.from_xprv(xprv1)
        k2 = keystore.from_xprv(xprv2)
        k1.add_seed(seed)
        k1.update_password(None, password)
        k2.update_password(None, password)
        storage.put('x1/', k1.dump())
        storage.put('x2/', k2.dump())
        # The third (server) key is derivable without the server.
        long_user_id, short_id = get_user_id(storage)
        xpub3 = make_xpub(get_signing_xpub(), long_user_id)
        k3 = keystore.from_xpub(xpub3)
        storage.put('x3/', k3.dump())
        storage.set_keystore_encryption(bool(password))
        if encrypt_storage:
            storage.set_password(password, enc_version=STO_EV_USER_PW)
        wizard.wallet = Wallet_2fa(storage)
        wizard.create_addresses()

    def create_remote_key(self, wizard):
        """Register the wallet with TrustedCoin and obtain the OTP secret."""
        email = self.accept_terms_of_use(wizard)
        xpub1 = wizard.storage.get('x1/')['xpub']
        xpub2 = wizard.storage.get('x2/')['xpub']
        # Generate third key deterministically.
        long_user_id, short_id = get_user_id(wizard.storage)
        xpub3 = make_xpub(get_signing_xpub(), long_user_id)
        # secret must be sent by the server
        try:
            r = server.create(xpub1, xpub2, email)
        except socket.error:
            wizard.show_message('Server not reachable, aborting')
            return
        except TrustedCoinException as e:
            # 409: the cosigner already exists server-side; proceed without
            # a fresh secret and let check_otp handle it.
            if e.status_code == 409:
                r = None
            else:
                wizard.show_message(str(e))
                return
        if r is None:
            otp_secret = None
        else:
            otp_secret = r.get('otp_secret')
            if not otp_secret:
                wizard.show_message(_('Error'))
                return
            _xpub3 = r['xpubkey_cosigner']
            _id = r['id']
            # Verify the server agrees with our deterministic derivation.
            try:
                assert _id == short_id, ("user id error", _id, short_id)
                assert xpub3 == _xpub3, ("xpub3 error", xpub3, _xpub3)
            except Exception as e:
                wizard.show_message(str(e))
                return
        self.check_otp(wizard, short_id, otp_secret, xpub3)

    def check_otp(self, wizard, short_id, otp_secret, xpub3):
        """Ask for an OTP; authenticate, or start the secret-reset flow."""
        otp, reset = self.request_otp_dialog(wizard, short_id, otp_secret)
        if otp:
            self.do_auth(wizard, short_id, otp, xpub3)
        elif reset:
            wizard.opt_bip39 = False
            wizard.opt_ext = True
            f = lambda seed, is_bip39, is_ext: wizard.run('on_reset_seed', short_id, seed, is_ext, xpub3)
            wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)

    def on_reset_seed(self, wizard, short_id, seed, is_ext, xpub3):
        """Optionally collect the extension passphrase before resetting auth."""
        f = lambda passphrase: wizard.run('on_reset_auth', short_id, seed, passphrase, xpub3)
        wizard.passphrase_dialog(run_next=f) if is_ext else f('')

    def do_auth(self, wizard, short_id, otp, xpub3):
        """Validate the OTP with the server and finalize the wallet."""
        try:
            server.auth(short_id, otp)
        except:
            # NOTE(review): bare except — any failure is reported as a bad OTP.
            wizard.show_message(_('Incorrect password'))
            return
        k3 = keystore.from_xpub(xpub3)
        wizard.storage.put('x3/', k3.dump())
        wizard.storage.put('use_trustedcoin', True)
        wizard.storage.write()
        wizard.wallet = Wallet_2fa(wizard.storage)
        wizard.run('create_addresses')

    def on_reset_auth(self, wizard, short_id, seed, passphrase, xpub3):
        """Prove seed ownership to the server and obtain a new OTP secret."""
        xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
        try:
            assert xpub1 == wizard.storage.get('x1/')['xpub']
            assert xpub2 == wizard.storage.get('x2/')['xpub']
        except:
            wizard.show_message(_('Incorrect seed'))
            return
        r = server.get_challenge(short_id)
        challenge = r.get('challenge')
        message = 'TRUSTEDCOIN CHALLENGE: ' + challenge
        def f(xprv):
            # Sign the challenge with key path 0/0 of the given xprv.
            _, _, _, _, c, k = deserialize_xprv(xprv)
            pk = bip32_private_key([0, 0], k, c)
            key = regenerate_key(pk)
            sig = key.sign_message(message, True)
            return base64.b64encode(sig).decode()
        signatures = [f(x) for x in [xprv1, xprv2]]
        r = server.reset_auth(short_id, challenge, signatures)
        new_secret = r.get('otp_secret')
        if not new_secret:
            wizard.show_message(_('Request rejected by server'))
            return
        self.check_otp(wizard, short_id, new_secret, xpub3)

    @hook
    def get_action(self, storage):
        """Tell the wizard which setup step this wallet file still needs."""
        if storage.get('wallet_type') != '2fa':
            return
        if not storage.get('x1/'):
            return self, 'show_disclaimer'
        if not storage.get('x2/'):
            return self, 'show_disclaimer'
        if not storage.get('x3/'):
            return self, 'create_remote_key'
|
tune_vitis.py | import os, sys, re, time, json
from options import options
import uptune as ut
import threading, random, subprocess
from collections import OrderedDict
import argparse
from multiprocessing import Process
# Target Alveo U280 platform file and the HLS design/top function under tune.
platform = "/opt/xilinx/platforms/xilinx_u280_xdma_201920_1/xilinx_u280_xdma_201920_1.xpfm"
design = "optical_flow.cpp"
top = "optical_flow"
def run_process(cmd, pattern=None, env=None, debug=True):
    """Run *cmd* in a shell and return its stdout (or regex matches).

    :param cmd: shell command line (trusted, locally constructed strings only)
    :param pattern: optional regex; when given, return re.findall over stdout
    :param env: optional environment mapping for the child process
    :param debug: echo stdout when True
    :raises RuntimeError: if the command exits with a non-zero status
    """
    print("[ DEBUG ] Running commands: \n{}\n".format(cmd))
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, env=env)
    out, err = p.communicate()
    # FIX: the original tested `err`, which is always None because stderr
    # was never piped — the failure branch was dead code.  Checking the
    # exit status makes command failures visible without treating benign
    # stderr warnings as fatal.
    if p.returncode != 0:
        raise RuntimeError("Command failed with exit code {}: {}".format(p.returncode, cmd))
    if pattern:
        return re.findall(pattern, out.decode("utf-8"))
    if debug:
        print("[ DEBUG ] Commands outputs: \n{}\n".format(out.decode("utf-8")))
    return out.decode("utf-8")
# Create config files for vitis
def config(option):
    """Write hls_config.ini / link_config.ini from the tuned *option* dict.

    Keys are either "Frequency" or Vivado implementation-step properties:
      - "<stage>.ARGS.MORE.<flag>": on/off extra flags, collected per stage
      - "<stage>.IS_ENABLED": false disables the stage entirely
      - "<stage>.ARGS.DIRECTIVE": optimization directive for the stage
    Flags are only emitted for stages that are enabled and have no directive.
    """
    params_temp = "prop=run.impl_1.STEPS.{}={}\n"
    flags_temp = "prop=run.impl_1.{{STEPS.{}.MORE OPTIONS}}="
    with open("hls_config.ini", "w") as fp:
        fp.write("kernel_frequency={}\n".format(option["Frequency"]))
    with open("link_config.ini", "w") as fp:
        fp.write("kernel_frequency={}\n".format(option["Frequency"]))
        fp.write("[vivado]\n")
        stage_options = dict()          # stage -> list of enabled extra flags
        disabled_stage = list()         # stages turned off by IS_ENABLED=false
        stage_with_directive = list()   # stages already driven by a directive
        for key, value in option.items():
            if key == "Frequency": continue
            if "MORE" in key:
                # Collect per-stage extra flags; emitted after the scan.
                stage, flag = key.split(".ARGS.MORE.")
                if stage not in stage_options:
                    stage_options[stage] = []
                if value == "on":
                    stage_options[stage].append(flag)
            elif "IS_ENABLED" in key:
                stage, _ = key.split(".")
                if value == "false":
                    disabled_stage.append(stage)
                else:
                    fp.write(params_temp.format(key, value))
            # optimization mode
            elif "ARGS.DIRECTIVE" in key:
                stage = key.split(".")[0]
                if stage not in disabled_stage:
                    if value != "Disabled":
                        fp.write(params_temp.format(key, value))
                        stage_with_directive.append(stage)
        # write stage flags
        for stage, flags in stage_options.items():
            if len(flags) == 0:
                continue
            if stage in disabled_stage:
                continue
            # A directive and MORE OPTIONS are mutually exclusive per stage.
            if stage in stage_with_directive:
                continue
            item = flags_temp.format(stage)
            item += "{"
            delim = ""
            for flag in flags:
                item += delim + "-" + flag
                delim = " "
            item += "}\n"
            fp.write(item)
def execute(pwd):
    """Launch the Vitis build via run.sh for the configured design."""
    run_process("./run.sh {} {} {} {}".format(pwd, platform, design, top))
def cleanup():
    """Remove build artifacts, logs and generated config files."""
    run_process("make clean; rm -rf *log _x.hw* build_dir.hw.* link_config.ini hls_config.ini")
def main(parse_only=False):
    """One tuning iteration: pick knob values, build with Vitis, report QoR.

    QoR is the effective clock period (target period minus WNS); lower is
    better.  With parse_only=True the tune/build phase is skipped.
    """
    # Create an EDA option pool
    option = OrderedDict()
    # NOTE(review): indentation reconstructed from a flattened dump — the
    # tune/config/build steps are grouped under the parse_only guard,
    # following their sequential order here; confirm against upstream.
    if not parse_only:
        cleanup()
        for key, values in options.items():
            # First element of each option list is the default value.
            option[key] = ut.tune(values[0], values, name=key)
        config(option)
        pwd = os.getcwd()
        # Run the build in a child process so a crash/hang stays isolated.
        t1 = Process(target=execute, args=(pwd,))
        t1.daemon = True
        t1.start()
        t1.join()
    # Extract QoR result
    qor = 0; index = "default"
    work_path = os.path.abspath("./")
    # TODO: Parse the data
    # We just copy the rpt to separate folder
    if os.getenv("UT_TUNE_START"):
        index = ut.get_global_id()
        work_path = os.path.join(ut.get_meta_data("UT_WORK_DIR"), "ut.temp")
        index = "ut.rpt.{}".format(index)
    rpt_folder = os.path.join(work_path, str(index))
    cmd = "mkdir -p {}; cp build_dir.hw.xilinx_u280_xdma_201920_1/reports/link/imp/* {}"\
        .format(rpt_folder, rpt_folder)
    run_process(cmd)
    # cp the vivado log and config files
    cmd = "cp build_dir.hw.xilinx_u280_xdma_201920_1/link/vivado/vpl/vivado.log {}; cp *config.ini {}"\
        .format(rpt_folder, rpt_folder)
    run_process(cmd)
    # Read frequency
    rpt = "{}/xilinx_u280_xdma_201920_1_bb_locked_timing_summary_postroute_physopted.rpt".format(rpt_folder)
    if os.path.isfile(rpt):
        with open(rpt, "r") as fp:
            content = fp.readlines()
        index = 0
        for line in content:
            if "Design Timing Summary" in line:
                # WNS/TNS sit 6 lines below the section header in the table.
                numbers = content[index+6].strip().split()
                wns = float(numbers[0])
                tns = float(numbers[1])
                # Effective period = target period (ns) minus the WNS slack.
                qor = (1000 / float(option["Frequency"])) - wns
                break
            index += 1
    else:
        print("Cannot find vivado timing report...")
        qor = float("inf")
    # Remove temp in profiling phase
    if os.getenv("UT_BEFORE_RUN_PROFILE"):
        cleanup()
    # Set the target
    ut.target(qor, "min")
if __name__ == "__main__":
    # Run the full tune-build-parse flow; flip to True to only parse reports.
    parse_only = False
    main(parse_only)
|
start.py | #!/usr/bin/python3
import os
import glob
import shutil
import multiprocessing
import logging as log
import sys
from podop import run_server
from socrate import system, conf
# Log to stderr; verbosity comes from LOG_LEVEL (default WARNING).
log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
def start_podop():
    """Run the Podop lookup server that proxies postfix maps to the admin API."""
    # Drop privileges before serving (uid 100 is the postfix user here).
    os.setuid(100)
    url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/postfix/"
    # TODO: Remove verbosity setting from Podop?
    run_server(0, "postfix", "/tmp/podop.socket", [
        ("transport", "url", url + "transport/§"),
        ("alias", "url", url + "alias/§"),
        ("domain", "url", url + "domain/§"),
        ("mailbox", "url", url + "mailbox/§"),
        ("recipientmap", "url", url + "recipient/map/§"),
        ("sendermap", "url", url + "sender/map/§"),
        ("senderaccess", "url", url + "sender/access/§"),
        ("senderlogin", "url", url + "sender/login/§")
    ])
def is_valid_postconf_line(line):
    """True for lines worth feeding to postconf (skips comments and blanks)."""
    return bool(line) and not line.startswith("#")
# Actual startup script
# Resolve peer-service addresses from the environment / DNS.
os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
os.environ["ANTISPAM_MILTER_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM_MILTER", "antispam:11332")
os.environ["LMTP_ADDRESS"] = system.get_host_address_from_environment("LMTP", "imap:2525")
# First HOSTNAMES entry is used as the outbound-cleanup domain.
os.environ["OUTCLEAN"] = os.environ["HOSTNAMES"].split(",")[0]
try:
    os.environ["OUTCLEAN_ADDRESS"] = system.resolve_hostname(os.environ["OUTCLEAN"])
except:
    # NOTE(review): bare except keeps startup going on resolution failure;
    # the placeholder address is presumably never routed — confirm.
    os.environ["OUTCLEAN_ADDRESS"] = "10.10.10.10"
# Render each postfix config template with the current environment.
for postfix_file in glob.glob("/conf/*.cf"):
    conf.jinja(postfix_file, os.environ, os.path.join("/etc/postfix", os.path.basename(postfix_file)))
# Apply user overrides to main.cf and master.cf.
if os.path.exists("/overrides/postfix.cf"):
    for line in open("/overrides/postfix.cf").read().strip().split("\n"):
        if is_valid_postconf_line(line):
            os.system('postconf -e "{}"'.format(line))
if os.path.exists("/overrides/postfix.master"):
    for line in open("/overrides/postfix.master").read().strip().split("\n"):
        if is_valid_postconf_line(line):
            os.system('postconf -Me "{}"'.format(line))
# Compile user-provided lookup maps, then remove the plaintext source.
for map_file in glob.glob("/overrides/*.map"):
    destination = os.path.join("/etc/postfix", os.path.basename(map_file))
    shutil.copyfile(map_file, destination)
    os.system("postmap {}".format(destination))
    os.remove(destination)
# Optional SASL credentials for an upstream relay.
if "RELAYUSER" in os.environ:
    path = "/etc/postfix/sasl_passwd"
    conf.jinja("/conf/sasl_passwd", os.environ, path)
    os.system("postmap {}".format(path))
# Run Podop and Postfix
multiprocessing.Process(target=start_podop).start()
os.system("/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing")
# Before starting postfix, we need to check permissions on /queue
# in the event that postfix,postdrop id have changed
os.system("postfix set-permissions")
os.system("postfix start-fg")
|
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15          # seconds to sleep after an RPC error
MAX_NONCE = 1000000L    # nonces scanned per getwork unit (Python 2 long)
settings = {}           # populated from the config file at startup
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for bitcoind (Python 2, httplib-based)."""
    # Request id counter; `self.OBJID += 1` below creates a per-instance
    # copy, so each connection numbers its requests independently.
    OBJID = 1

    def __init__(self, host, port, username, password):
        """Open a persistent HTTP connection using Basic auth."""
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # strict=False, 30 second timeout.
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """POST one JSON-RPC call; return its result, the error dict, or None."""
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        # Per JSON-RPC 1.1: an error object means the call failed.
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']

    def getblockcount(self):
        """Shortcut for the getblockcount RPC."""
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        """Shortcut for getwork: fetch work, or submit a solution when *data* given."""
        return self.rpc('getwork', data)
def uint32(x):
	# Truncate a (possibly long) integer to its low 32 bits.
	return x & 0xffffffffL
def bytereverse(x):
	# Reverse the byte order of a 32-bit word (endianness swap).
	return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
			(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
	# Byte-swap each 32-bit word of the buffer in place (buffer length
	# is assumed to be a multiple of 4); returns a new string.
	out_words = []
	for i in range(0, len(in_buf), 4):
		word = struct.unpack('@I', in_buf[i:i+4])[0]
		out_words.append(struct.pack('@I', bytereverse(word)))
	return ''.join(out_words)
def wordreverse(in_buf):
	# Reverse the order of the 32-bit words in the buffer; the bytes
	# inside each word keep their order.
	out_words = []
	for i in range(0, len(in_buf), 4):
		out_words.append(in_buf[i:i+4])
	out_words.reverse()
	return ''.join(out_words)
class Miner:
	"""Scans getwork nonce ranges and submits winning shares upstream."""
	def __init__(self, id):
		self.id = id
		# How many nonces to try per getwork; retuned by iterate().
		self.max_nonce = MAX_NONCE
	def work(self, datastr, targetstr):
		"""Scan nonces for one work unit.

		Returns (hashes_done, nonce_bin) where nonce_bin is the packed
		winning nonce, or None if the range was exhausted.
		"""
		# decode work data hex string to binary
		static_data = datastr.decode('hex')
		static_data = bufreverse(static_data)
		# the first 76b of 80b do not change
		blk_hdr = static_data[:76]
		# decode 256-bit target value
		targetbin = targetstr.decode('hex')
		targetbin = targetbin[::-1]	# byte-swap and dword-swap
		targetbin_str = targetbin.encode('hex')
		target = long(targetbin_str, 16)
		# pre-hash first 76b of block header
		static_hash = hashlib.sha256()
		static_hash.update(blk_hdr)
		for nonce in xrange(self.max_nonce):
			# encode 32-bit nonce value
			nonce_bin = struct.pack("<I", nonce)
			# hash final 4b, the nonce value
			hash1_o = static_hash.copy()
			hash1_o.update(nonce_bin)
			hash1 = hash1_o.digest()
			# sha256 hash of sha256 hash
			hash_o = hashlib.sha256()
			hash_o.update(hash1)
			hash = hash_o.digest()
			# quick test for winning solution: high 32 bits zero?
			if hash[-4:] != '\0\0\0\0':
				continue
			# convert binary hash to 256-bit Python long
			hash = bufreverse(hash)
			hash = wordreverse(hash)
			hash_str = hash.encode('hex')
			l = long(hash_str, 16)
			# proof-of-work test: hash < target
			if l < target:
				print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
				return (nonce + 1, nonce_bin)
			else:
				print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
				# return (nonce + 1, nonce_bin)
		return (nonce + 1, None)
	def submit_work(self, rpc, original_data, nonce_bin):
		# The nonce occupies bytes 76..80 of the header, i.e. hex chars
		# 152..160 of the byte-swapped getwork data string.
		nonce_bin = bufreverse(nonce_bin)
		nonce = nonce_bin.encode('hex')
		solution = original_data[:152] + nonce + original_data[160:256]
		param_arr = [ solution ]
		result = rpc.getwork(param_arr)
		print time.asctime(), "--> Upstream RPC result:", result
	def iterate(self, rpc):
		"""Fetch one work unit, scan it, and submit any solution."""
		work = rpc.getwork()
		if work is None:
			time.sleep(ERR_SLEEP)
			return
		if 'data' not in work or 'target' not in work:
			time.sleep(ERR_SLEEP)
			return
		time_start = time.time()
		(hashes_done, nonce_bin) = self.work(work['data'],
			work['target'])
		time_end = time.time()
		time_diff = time_end - time_start
		# Retune the scan size so one pass takes ~settings['scantime']
		# seconds, clamped below the 32-bit nonce limit.
		self.max_nonce = long(
			(hashes_done * settings['scantime']) / time_diff)
		if self.max_nonce > 0xfffffffaL:
			self.max_nonce = 0xfffffffaL
		if settings['hashmeter']:
			print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
					self.id, hashes_done,
					(hashes_done / 1000.0) / time_diff)
		if nonce_bin is not None:
			self.submit_work(rpc, work['data'], nonce_bin)
	def loop(self):
		"""Run iterate() forever against the configured RPC endpoint."""
		rpc = BitcoinRPC(settings['host'], settings['port'],
				settings['rpcuser'], settings['rpcpass'])
		# NOTE(review): a constructor never returns None, so this guard
		# is dead code.
		if rpc is None:
			return
		while True:
			self.iterate(rpc)
def miner_thread(id):
	# Worker-process entry point: run one Miner's loop forever.
	miner = Miner(id)
	miner.loop()
if __name__ == '__main__':
	if len(sys.argv) != 2:
		print "Usage: pyminer.py CONFIG-FILE"
		sys.exit(1)
	# Parse the config file: '#' lines are comments, everything else is
	# key=value pairs collected into the module-level settings dict.
	f = open(sys.argv[1])
	for line in f:
		# skip comment lines
		m = re.search('^\s*#', line)
		if m:
			continue
		# parse key=value lines
		m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
		if m is None:
			continue
		settings[m.group(1)] = m.group(2)
	f.close()
	# Fill in defaults for the optional settings.
	if 'host' not in settings:
		settings['host'] = '127.0.0.1'
	if 'port' not in settings:
		settings['port'] = 41124
	if 'threads' not in settings:
		settings['threads'] = 1
	if 'hashmeter' not in settings:
		settings['hashmeter'] = 0
	if 'scantime' not in settings:
		settings['scantime'] = 30L
	if 'rpcuser' not in settings or 'rpcpass' not in settings:
		print "Missing username and/or password in cfg file"
		sys.exit(1)
	# Values read from the file are strings; normalize the numeric ones.
	settings['port'] = int(settings['port'])
	settings['threads'] = int(settings['threads'])
	settings['hashmeter'] = int(settings['hashmeter'])
	settings['scantime'] = long(settings['scantime'])
	# Start one miner process per configured thread.
	thr_list = []
	for thr_id in range(settings['threads']):
		p = Process(target=miner_thread, args=(thr_id,))
		p.start()
		thr_list.append(p)
		time.sleep(1)			# stagger threads
	print settings['threads'], "mining threads started"
	print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
	try:
		for thr_proc in thr_list:
			thr_proc.join()
	except KeyboardInterrupt:
		pass
	print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
processes.py | # -*- coding: utf-8 -*-
import atexit
import heapq
import sys
import time
from threading import Thread
from plumbum.lib import IS_WIN32, six
if sys.version_info >= (3,):
from io import StringIO
from queue import Empty as QueueEmpty
from queue import Queue
else:
from cStringIO import StringIO
from Queue import Empty as QueueEmpty
from Queue import Queue
# ===================================================================================================
# utility functions
# ===================================================================================================
def _check_process(proc, retcode, timeout, stdout, stderr):
proc.verify(retcode, timeout, stdout, stderr)
return proc.returncode, stdout, stderr
def _iter_lines_posix(proc, decode, linesize, line_timeout=None):
    """Yield ``(fd, line)`` pairs from *proc*'s stdout (fd 0) and stderr
    (fd 1) as they become readable, decoding each line with *decode*.

    Raises :class:`ProcessLineTimedOut` when *line_timeout* elapses with
    nothing readable. After the process exits, any buffered remainder of
    both streams is drained.
    """
    try:
        from selectors import EVENT_READ, DefaultSelector
    except ImportError:
        # Pre Python 3.4 implementation
        from select import select
        def selector():
            while True:
                rlist, _, _ = select([proc.stdout, proc.stderr], [], [], line_timeout)
                if not rlist and line_timeout:
                    # select() timed out with no stream readable
                    raise ProcessLineTimedOut(
                        "popen line timeout expired",
                        getattr(proc, "argv", None),
                        getattr(proc, "machine", None),
                    )
                for stream in rlist:
                    # True/False tag works as the 0/1 fd index
                    yield (stream is proc.stderr), decode(stream.readline(linesize))
    else:
        # Python 3.4 implementation
        def selector():
            sel = DefaultSelector()
            # key.data tags the stream: 0 = stdout, 1 = stderr
            sel.register(proc.stdout, EVENT_READ, 0)
            sel.register(proc.stderr, EVENT_READ, 1)
            while True:
                ready = sel.select(line_timeout)
                if not ready and line_timeout:
                    raise ProcessLineTimedOut(
                        "popen line timeout expired",
                        getattr(proc, "argv", None),
                        getattr(proc, "machine", None),
                    )
                for key, mask in ready:
                    yield key.data, decode(key.fileobj.readline(linesize))
    for ret in selector():
        yield ret
        # stop multiplexing once the process has exited
        if proc.poll() is not None:
            break
    # drain whatever is still buffered in either stream
    for line in proc.stdout:
        yield 0, decode(line)
    for line in proc.stderr:
        yield 1, decode(line)
def _iter_lines_win32(proc, decode, linesize, line_timeout=None):
    """Windows variant of line iteration: select() does not work on pipes
    there, so one daemon reader thread per stream pushes decoded lines
    into a shared queue which this generator polls."""
    class Piper(Thread):
        # Reader thread: forwards (fd, line) pairs into the shared queue.
        def __init__(self, fd, pipe):
            super().__init__(name="PlumbumPiper%sThread" % fd)
            self.pipe = pipe
            self.fd = fd
            self.empty = False
            self.daemon = True
            super().start()
        def read_from_pipe(self):
            return self.pipe.readline(linesize)
        def run(self):
            # iter(..., b"") stops at EOF of the pipe
            for line in iter(self.read_from_pipe, b""):
                queue.put((self.fd, decode(line)))
            # self.pipe.close()
    if line_timeout is None:
        # no timeout requested: make the deadline check never fire
        line_timeout = float("inf")
    queue = Queue()
    pipers = [Piper(0, proc.stdout), Piper(1, proc.stderr)]
    last_line_ts = time.time()
    empty = True
    while True:
        try:
            yield queue.get_nowait()
            last_line_ts = time.time()
            empty = False
        except QueueEmpty:
            empty = True
        if time.time() - last_line_ts > line_timeout:
            raise ProcessLineTimedOut(
                "popen line timeout expired",
                getattr(proc, "argv", None),
                getattr(proc, "machine", None),
            )
        if proc.poll() is not None:
            break
        if empty:
            # avoid a busy-wait while the queue is empty
            time.sleep(0.1)
    for piper in pipers:
        piper.join()
    # drain anything the reader threads queued after the last poll
    while True:
        try:
            yield queue.get_nowait()
        except QueueEmpty:
            break
# Pick the platform's line-iteration strategy once at import time: the
# win32 variant uses reader threads, the posix variant uses select().
if IS_WIN32:
    _iter_lines = _iter_lines_win32
else:
    _iter_lines = _iter_lines_posix
# ===================================================================================================
# Exceptions
# ===================================================================================================
class ProcessExecutionError(EnvironmentError):
    """Represents the failure of a process. When the exit code of a terminated process does not
    match the expected result, this exception is raised by :func:`run_proc
    <plumbum.commands.run_proc>`. It contains the process' return code, stdout, and stderr, as
    well as the command line used to create the process (``argv``)
    """
    def __init__(self, argv, retcode, stdout, stderr, message=None):
        Exception.__init__(self, argv, retcode, stdout, stderr)
        self.message = message
        self.argv = argv
        self.retcode = retcode
        # On Python 3, coerce bytes output to an ASCII-escaped str so the
        # exception renders cleanly whatever the stream encoding was.
        if six.PY3 and isinstance(stdout, six.bytes):
            stdout = six.ascii(stdout)
        if six.PY3 and isinstance(stderr, six.bytes):
            stderr = six.ascii(stderr)
        self.stdout = stdout
        self.stderr = stderr
    def __str__(self):
        # avoid an import cycle
        from plumbum.commands.base import shquote_list
        # indent multi-line output/command under a leading pipe marker
        stdout = "\n | ".join(str(self.stdout).splitlines())
        stderr = "\n | ".join(str(self.stderr).splitlines())
        cmd = " ".join(shquote_list(self.argv))
        lines = []
        if self.message:
            lines = [self.message, "\nReturn code: | ", str(self.retcode)]
        else:
            lines = ["Unexpected exit code: ", str(self.retcode)]
        cmd = "\n | ".join(cmd.splitlines())
        lines += ["\nCommand line: | ", cmd]
        if stdout:
            lines += ["\nStdout: | ", stdout]
        if stderr:
            lines += ["\nStderr: | ", stderr]
        return "".join(lines)
class ProcessTimedOut(Exception):
    """Raised by :func:`run_proc <plumbum.commands.run_proc>` when a
    ``timeout`` was specified and it elapsed before the process
    terminated."""
    def __init__(self, msg, argv):
        super(ProcessTimedOut, self).__init__(msg, argv)
        self.argv = argv
class ProcessLineTimedOut(Exception):
    """Raised by :func:`iter_lines <plumbum.commands.iter_lines>` when a
    ``line_timeout`` was specified and it elapsed before the process
    yielded another line."""
    def __init__(self, msg, argv, machine):
        super(ProcessLineTimedOut, self).__init__(msg, argv, machine)
        self.argv = argv
        self.machine = machine
class CommandNotFound(AttributeError):
    """Raised by :func:`local.which <plumbum.machines.local.LocalMachine.which>`
    and :func:`RemoteMachine.which <plumbum.machines.remote.RemoteMachine.which>`
    when a command cannot be found in the system's ``PATH``."""
    def __init__(self, program, path):
        super(CommandNotFound, self).__init__(program, path)
        self.program = program
        self.path = path
# ===================================================================================================
# Timeout thread
# ===================================================================================================
class MinHeap(object):
    """A thin min-heap wrapper over :mod:`heapq`, used by the timeout
    watchdog thread to order processes by their kill deadline."""
    def __init__(self, items=()):
        self._items = list(items)
        heapq.heapify(self._items)
    def __len__(self):
        return len(self._items)
    def push(self, item):
        """Add *item* to the heap."""
        heapq.heappush(self._items, item)
    def pop(self):
        """Remove the smallest item and return it.

        Bug fix: the original discarded :func:`heapq.heappop`'s return
        value (always returning ``None``), forcing callers to peek()
        before pop(). Returning the item is backward compatible.
        """
        return heapq.heappop(self._items)
    def peek(self):
        """Return the smallest item without removing it."""
        return self._items[0]
# Queue through which (proc, absolute_kill_time) pairs — and the shutdown
# sentinel — are handed to the watchdog thread, plus the shutdown flag.
_timeout_queue = Queue()
_shutting_down = False
def _timeout_thread_func():
    """Watchdog loop: kill registered processes whose deadline passed.

    Pairs arrive on ``_timeout_queue`` and are kept in a min-heap ordered
    by deadline; a ``SystemExit`` sentinel (pushed by
    ``_shutdown_bg_threads``) terminates the loop.
    """
    waiting = MinHeap()
    try:
        while not _shutting_down:
            if waiting:
                # block at most until the earliest pending deadline
                ttk, _ = waiting.peek()
                timeout = max(0, ttk - time.time())
            else:
                timeout = None
            try:
                proc, time_to_kill = _timeout_queue.get(timeout=timeout)
                if proc is SystemExit:
                    # terminate
                    return
                waiting.push((time_to_kill, proc))
            except QueueEmpty:
                pass
            now = time.time()
            # kill every process whose deadline has already passed
            while waiting:
                ttk, proc = waiting.peek()
                if ttk > now:
                    break
                waiting.pop()
                try:
                    if proc.poll() is None:
                        proc.kill()
                        # mark it so verify() can report a timeout
                        proc._timed_out = True
                except EnvironmentError:
                    pass
    except Exception:
        if _shutting_down:
            # to prevent all sorts of exceptions during interpreter shutdown
            pass
        else:
            raise
# Start the timeout watchdog as a daemon thread so it never blocks
# interpreter exit. Passing daemon=True at construction replaces the
# Thread.setDaemon() call, deprecated since Python 3.10.
bgthd = Thread(target=_timeout_thread_func, name="PlumbumTimeoutThread", daemon=True)
bgthd.start()
def _register_proc_timeout(proc, timeout):
    """Hand *proc* to the watchdog thread to be killed *timeout* seconds
    from now; a ``None`` timeout registers nothing."""
    if timeout is None:
        return
    _timeout_queue.put((proc, time.time() + timeout))
def _shutdown_bg_threads():
    """atexit hook: signal the watchdog thread to stop and wait briefly."""
    global _shutting_down
    _shutting_down = True
    # Make sure this still exists (don't throw error in atexit!)
    if _timeout_queue:
        # SystemExit is the sentinel _timeout_thread_func exits on
        _timeout_queue.put((SystemExit, 0))
    # grace period
    bgthd.join(0.1)
atexit.register(_shutdown_bg_threads)
# ===================================================================================================
# run_proc
# ===================================================================================================
def run_proc(proc, retcode, timeout=None):
    """Waits for the given process to terminate, with the expected exit code

    :param proc: a running Popen-like object, with all the expected methods.
    :param retcode: the expected return (exit) code of the process. It defaults to 0 (the
                    convention for success). If ``None``, the return code is ignored.
                    It may also be a tuple (or any object that supports ``__contains__``)
                    of expected return codes.
    :param timeout: the number of seconds (a ``float``) to allow the process to run, before
                    forcefully terminating it. If ``None``, no timeout is imposed; otherwise
                    the process is expected to terminate within that timeout value, or it will
                    be killed and :class:`ProcessTimedOut <plumbum.cli.ProcessTimedOut>`
                    will be raised
    :returns: A tuple of (return code, stdout, stderr)
    """
    # arm the watchdog before blocking on the process
    _register_proc_timeout(proc, timeout)
    stdout, stderr = proc.communicate()
    proc._end_time = time.time()
    # normalize empty/None output to empty bytes
    if not stdout:
        stdout = six.b("")
    if not stderr:
        stderr = six.b("")
    # decode to text only when the process declares an encoding
    if getattr(proc, "custom_encoding", None):
        stdout = stdout.decode(proc.custom_encoding, "ignore")
        stderr = stderr.decode(proc.custom_encoding, "ignore")
    return _check_process(proc, retcode, timeout, stdout, stderr)
# ===================================================================================================
# iter_lines
# ===================================================================================================
# Sentinels selecting iter_lines' yield shape: BY_POSITION yields
# (out_line_or_None, err_line_or_None) tuples; BY_TYPE yields
# (stream_tag, line) where 1 = stdout and 2 = stderr.
BY_POSITION = object()
BY_TYPE = object()
DEFAULT_ITER_LINES_MODE = BY_POSITION
def iter_lines(
    proc,
    retcode=0,
    timeout=None,
    linesize=-1,
    line_timeout=None,
    mode=None,
    _iter_lines=_iter_lines,
):
    """Runs the given process (equivalent to run_proc()) and yields a tuples of (out, err) line pairs.
    If the exit code of the process does not match the expected one, :class:`ProcessExecutionError
    <plumbum.commands.ProcessExecutionError>` is raised.

    :param retcode: The expected return code of this process (defaults to 0).
                    In order to disable exit-code validation, pass ``None``. It may also
                    be a tuple (or any iterable) of expected exit codes.
    :param timeout: The maximal amount of time (in seconds) to allow the process to run.
                    ``None`` means no timeout is imposed; otherwise, if the process hasn't
                    terminated after that many seconds, the process will be forcefully
                    terminated an exception will be raised
    :param linesize: Maximum number of characters to read from stdout/stderr at each iteration.
                    ``-1`` (default) reads until a b'\\n' is encountered.
    :param line_timeout: The maximal amount of time (in seconds) to allow between consecutive lines in either stream.
                    Raise an :class:`ProcessLineTimedOut <plumbum.commands.ProcessLineTimedOut>` if the timeout has
                    been reached. ``None`` means no timeout is imposed.
    :returns: An iterator of (out, err) line tuples.
    """
    if mode is None:
        mode = DEFAULT_ITER_LINES_MODE
    assert mode in (BY_POSITION, BY_TYPE)
    # decode with replacement so undecodable bytes never abort iteration
    encoding = getattr(proc, "custom_encoding", None) or "utf-8"
    decode = lambda s: s.decode(encoding, errors="replace").rstrip()
    _register_proc_timeout(proc, timeout)
    # accumulate full stdout/stderr for the final retcode check below
    buffers = [StringIO(), StringIO()]
    for t, line in _iter_lines(proc, decode, linesize, line_timeout):
        # verify that the proc hasn't timed out yet
        proc.verify(timeout=timeout, retcode=None, stdout=None, stderr=None)
        buffers[t].write(line + "\n")
        if mode is BY_POSITION:
            ret = [None, None]
            ret[t] = line
            yield tuple(ret)
        elif mode is BY_TYPE:
            yield (t + 1), line  # 1=stdout, 2=stderr
    # this will take care of checking return code and timeouts
    _check_process(proc, retcode, timeout, *(s.getvalue() for s in buffers))
|
Checker.py | import requests, socket
import time
import gevent
import threading
from Utils.redisdb import RedisClient
from Valid_check.Headers import headers
from proxy_spider.proxyspider import run_spider
from gevent import monkey;monkey.patch_all()
requests.adapters.DEFAULT_RETRIES = 5  # maximum number of connection retries
socket.setdefaulttimeout(20)  # default socket timeout, in seconds
# Websites used to verify proxy usability
verifyWeb = ["http://httpbin.org/ip",
             "http://www.qq.com",
             "https://www.zhihu.com",
             "https://www.douban.com",
             ]
"""添加验证网页"""
def addVerify(web):
    """Add *web* to the verification site list unless it is already there."""
    if web in verifyWeb:
        print("该网站已存在")
    else:
        verifyWeb.append(web)
"""删除验证网页"""
def delVerify(web):
    """Remove *web* from the verification site list if it is present."""
    if web not in verifyWeb:
        print("该网站不存在")
    else:
        verifyWeb.remove(web)
"""
可用性验证函数
IP:str,需要验证的IP,格式为"IP:port"
web:str,目标验证网站,格式为"http/https://xxx.xxxx.xxx"
ips:IP实例,来自Utils.IP的class IP,进行增减分数操作对象
IPct:RedisClient实例,来自Utils.redisdb的class RedisClient,包含增减分数操作函数
"""
def validIP(IP,web,ips,IPct):
    """Check whether proxy *IP* can reach *web* and adjust its score.

    IP:   proxy address string, "host:port"
    web:  target URL, "http(s)://..."
    ips:  IP record object (Utils.IP.IP) whose score is adjusted
    IPct: RedisClient instance providing increase/decrease operations
    """
    # Route both plain and TLS traffic through the candidate proxy.
    proxies = {
        'http': IP,
        'https': IP,
    }
    '''开始校验'''
    try:
        if web == "https://www.zhihu.com":
            # Zhihu rejects requests without browser-like headers.
            r = requests.get(web, proxies=proxies, headers=headers["https://www.zhihu.com"], timeout=(10, 5))
        else:
            r = requests.get(web, proxies=proxies, timeout=(10, 5))  # common case
        if r.status_code == 200:
            back = r.elapsed.seconds  # score by response latency
            if back < 1:
                IPct.increase(ips, 3)
            elif 1 <= back < 3:
                IPct.increase(ips, 2)
            elif 3 <= back < 5:
                IPct.increase(ips, 1)
            else:
                IPct.decrease(ips, 1)
    except Exception:
        # Bug fix: the original bare `except:` also swallowed SystemExit /
        # KeyboardInterrupt (and gevent's BaseException-derived control
        # exceptions); narrow to Exception so only real request failures
        # demote the proxy.
        IPct.decrease(ips, 1)
def runCheck():
    """Continuously validate pooled proxies so the pool stays usable.

    Each round spawns one gevent greenlet per (proxy, site) pair; when the
    pool drops below 300 entries, a spider thread is started to refill it.
    """
    IPct = RedisClient()
    while 1:
        # Validate every pooled proxy against each target site in turn.
        for web in verifyWeb:
            js = []  # greenlets for this round
            num = IPct.count()  # number of proxies currently pooled
            ip = IPct.batch(0, num)  # fetch them all
            for ips in ip:
                IP = ips.getAddress() + ":" + str(ips.getPort())
                t = gevent.spawn(validIP, IP, web, ips, IPct)
                js.append(t)
            gevent.joinall(js)
            # Report pool state after this validation round.
            num = IPct.count()
            print("当前验证网站为:%s" % web)
            print("当前剩余可用IP:%s" % num)
            print(IPct.ipstatus())
            time.sleep(10)
        # ============== refill the pool with a background spider when low ==============
        num = IPct.count()
        if num < 300:
            # Bug fix: the original passed target=run_spider(), which CALLED
            # the spider synchronously in this thread and handed its return
            # value to Thread; pass the callable itself instead.
            s = threading.Thread(target=run_spider, args=())
            s.start()
            time.sleep(30)
if __name__ == "__main__":
    runCheck()
|
application.py | import logging
from threading import Thread, Lock
import time
import socket
from units import format_bits_as_measure, format_bits_as_measure_per_second
class Application:
    """Base class for the UDP bandwidth sender/receiver tools.

    Owns a UDP socket, a mutex-protected transfer counter, and a daemon
    thread that logs how much data was transfered each interval. Child
    classes implement :meth:`handle`.
    """
    def __init__(self, port, interval, log_format, verbose, is_json):
        self.port = port
        self.interval = interval        # seconds between log lines
        self.log_format = log_format    # unit name passed to the units helpers
        self.is_json = is_json          # emit JSON objects instead of log lines
        self.logger = self.__get_logger(verbose)
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.__bits_transfered_last_interval = 0
        self.__bits_transfered_last_second_mutex = Lock()
        self.__logging_thread = None
    def run(self):
        '''
        run is the main function to run the application.
        run will call the handle method and handle OS interruptions.
        '''
        try:
            self.handle()
        except KeyboardInterrupt:
            self.logger.info('CTRL+c pressed, now Exiting application...See you soon :-)')
    def handle(self):
        '''
        handle is main method to be implemented by each class.
        handle should always be implemented by child class.
        '''
        raise NotImplementedError('Abstract method <handle> should be implemented by child class.')
    def start_daemon_logging_thread(self, mode):
        '''
        start_daemon_logging_thread starts a daemon thread which logs the amount of data transfered each interval.
        mode - The transfer mode as string: usually just "sent" or "received"
        '''
        self.logger.debug('Creating daemon thread with parameter {}'.format(mode))
        # daemon=True at construction: Thread.setDaemon() is deprecated
        # since Python 3.10.
        self.__logging_thread = Thread(target=self.__run_logging_thread, args=(mode,), daemon=True)
        self.__logging_thread.start()
    def increase_transfered_data(self, incr_by):
        '''
        increase_transfered_data thread safely increases the amount of transfered data by incr_by
        incr_by - The value to increase the amount of data transfered
        '''
        self.__bits_transfered_last_second_mutex.acquire()
        try:
            self.__bits_transfered_last_interval += incr_by
        except Exception as e:
            self.logger.critical('Error when handling __bits_transfered_last_interval increase. ' + str(e))
        finally:
            self.__bits_transfered_last_second_mutex.release()
    def get_ip_address(self):
        '''
        Return the local IP address used to reach the internet.
        Connecting a UDP socket sends no traffic; it only selects the
        local interface/address for the route to 8.8.8.8.
        '''
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # Bug fix: the original leaked this probe socket; always close it.
        try:
            s.connect(("8.8.8.8", 80))
            return s.getsockname()[0]
        finally:
            s.close()
    def __run_logging_thread(self, mode):
        # Periodically format, log, and reset the transfer counter.
        self.logger.debug('Logging thread called with parameter {}'.format(mode))
        while True:
            time.sleep(self.interval)
            self.__bits_transfered_last_second_mutex.acquire()
            self.logger.debug('Total bits {} last interval: {}'.format(mode, self.__bits_transfered_last_interval))
            total_transfered_with_unit = format_bits_as_measure(self.__bits_transfered_last_interval, self.log_format)
            bits_per_second_transfered = self.__bits_transfered_last_interval / self.interval
            transfered_per_second_with_unit = format_bits_as_measure_per_second(bits_per_second_transfered, self.log_format)
            self.__log_transfered_info(mode, total_transfered_with_unit, transfered_per_second_with_unit)
            # This is ugly. Logging thread ALSO resets for the next printing iteration.
            self.logger.debug('Cleaning up bits sent in last interval mode {}'.format(mode))
            self.__bits_transfered_last_interval = 0
            self.__bits_transfered_last_second_mutex.release()
    def __log_transfered_info(self, mode, total_transfered_with_unit, transfered_per_second_with_unit):
        # Emit one record, either as JSON or as a plain log line.
        if self.is_json:
            # Bug fix: the original emitted invalid JSON (unquoted string
            # values and a missing closing quote after "bandwidth").
            print('{')
            print(' "mode":"{}",\n "transfered":"{}",\n "bandwidth":"{}"'.format(mode, total_transfered_with_unit, transfered_per_second_with_unit))
            print('}')
        else:
            self.logger.info('| {} | Transfered {} | Bandwidth {}'.format(mode, total_transfered_with_unit, transfered_per_second_with_unit))
    def __get_logger(self, verbose):
        # NOTE(review): each Application instance adds another stream
        # handler to the shared 'application' logger, so constructing
        # several instances duplicates log lines — confirm before guarding.
        logger = logging.getLogger('application')
        ch = logging.StreamHandler()
        log_level = logging.DEBUG if verbose else logging.INFO
        logger.setLevel(log_level)
        ch.setLevel(log_level)
        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
        ch.setFormatter(formatter)
        logger.addHandler(ch)
        return logger
|
speedtest.py | # -*- coding: utf-8 -*-
"""
Perform a bandwidth test with speedtest-cli.
Configuration parameters:
button_share: mouse button to share an URL (default None)
format: display format for this module
*(default "speedtest[\?if=elapsed&color=elapsed_time "
"{elapsed_time}s][ [\?color=download ↓{download}Mbps] "
"[\?color=upload ↑{upload}Mbps]]")*
thresholds: specify color thresholds to use
*(default {"upload": [(0, "violet")], "ping": [(0, "#fff381")],
"download": [(0, "cyan")], "elapsed_time": [(0, "#1cbfff")]})*
Control placeholders:
{elapsed} elapsed time state, eg False, True
Format placeholders:
{bytes_sent} bytes sent during test (in MB), eg 52.45
{bytes_received} bytes received during test (in MB), eg 70.23
{client_country} client country code, eg FR
{client_ip} client ip, eg 78.194.13.7
{client_isp} client isp, eg Free SAS
{client_ispdlavg} client isp download average, eg 0
{client_isprating} client isp rating, eg 3.7
{client_ispulavg} client isp upload average, eg 0
{client_lat} client latitude, eg 48.8534
{client_loggedin} client logged in, eg 0
{client_lon} client longitude, eg 2.3487999999999998
{client_rating} client rating, eg 0
{download} download speed (in MB), eg 20.23
{elapsed_time} elapsed time since speedtest start
{ping} ping time in ms to speedtest server
{server_cc} server country code, eg FR
{server_country} server country, eg France
{server_d} server distance, eg 2.316599376968091
{server_host} server host, eg speedtest.telecom-paristech.fr:8080
{server_id} server id, eg 11977
{share} share, eg share url
{timestamp} timestamp, eg 2018-08-30T16:27:25.318212Z
{server_lat} server latitude, eg 48.8742
{server_latency} server latency, eg 8.265
{server_lon} server longitude, eg 2.3470
{server_name} server name, eg Paris
{server_sponsor} server sponsor, eg Télécom ParisTech
{server_url} server url, eg http://speedtest.telecom-paristech...
{upload} upload speed (in MB), eg 20.23
Color thresholds:
xxx: print a color based on the value of `xxx` placeholder
Requires:
speedtest-cli: Command line interface for testing Internet bandwidth
@author Cyril Levis (@cyrinux)
Examples:
```
# show detailed elapsed_time|download/upload
speedtest {
format = "speedtest[\?soft ][\?if=elapsed [\?color=darkgray [time "
format += "[\?color=elapsed_time {elapsed_time} s]]]|[\?color=darkgray "
# format += "ping [\?color=ping {ping} ms] "
format += "download [\?color=download {download}Mbps] "
format += "upload [\?color=upload {upload}Mbps]]]"
}
# show everything
speedtest {
format = "speedtest[\?soft ][\?color=darkgray "
format += "[time [\?color=elapsed_time {elapsed_time} s]][\?soft ]"
format += "[ping [\?color=ping {ping} ms] "
format += "download [\?color=download {download}Mbps] "
format += "upload [\?color=upload {upload}Mbps]]]"
}
# minimal
speedtest {
format = "speedtest[\?soft ][\?if=elapsed "
format += "[\?color=elapsed_time {elapsed_time}]|"
# format += "[\?color=ping {ping}] "
format += "[[\?color=download {download}] [\?color=upload {upload}]]]"
}
# don't hide data on reset
speedtest {
format = "speedtest[\?soft ][\?color=darkgray time "
format += "[\?color=elapsed_time {elapsed_time} s] "
# format += "ping [\?color=ping {ping} ms] "
format += "download [\?color=download {download}Mbps] "
format += "upload [\?color=upload {upload}Mbps]]"
}
# don't hide data on reset, minimal
speedtest {
format = "speedtest[\?soft ][[\?color=elapsed_time {elapsed_time}] "
# format += "[\?color=ping {ping}] "
format += "[\?color=download {download}] [\?color=upload {upload}]]"
}
```
SAMPLE OUTPUT
[
{"full_text": "speedtest "},
{"full_text": "19.76Mbps ", "color": "#00ffff"},
{"full_text": "3.86Mbps", "color": "#ee82ee"},
]
time+ping
[
{"full_text": "speedtest "},
{"full_text": "time ", "color": "#a9a9a9"},
{"full_text": "24.65 s ", "color": "#1cbfff"},
{"full_text": "ping ", "color": "#a9a9a9"},
{"full_text": "28.27 ms", "color": "#ffff00"},
]
details
[
{"full_text": "speedtest "},
{"full_text": "download ", "color": "#a9a9a9"},
{"full_text": "18.2Mbps ", "color": "#00ffff"},
{"full_text": "upload ", "color": "#a9a9a9"},
{"full_text": "19.2Mbps", "color": "#ee82ee"},
]
"""
from json import loads
from threading import Thread
from time import time
STRING_NOT_INSTALLED = "not installed"
class Py3status:
    """
    """

    # available configuration parameters
    button_share = None
    format = (
        u"speedtest[\?if=elapsed&color=elapsed_time "
        u"{elapsed_time}s][ [\?color=download ↓{download}Mbps] "
        u"[\?color=upload ↑{upload}Mbps]]"
    )
    thresholds = {
        "download": [(0, "cyan")],
        "elapsed_time": [(0, "#1cbfff")],
        "ping": [(0, "#fff381")],
        "upload": [(0, "violet")],
    }

    class Meta:
        update_config = {
            "update_placeholder_format": [
                {
                    "format_strings": ["format"],
                    "placeholder_formats": {
                        "bytes_received": ":.2f",
                        "bytes_sent": ":.2f",
                        "download": ":.2f",
                        "elapsed_time": ":.2f",
                        "ping": ":.2f",
                        "server_d": ":.2f",
                        "upload": ":.2f",
                    },
                }
            ]
        }

    def post_config_hook(self):
        """Build the speedtest-cli command line and initialize state."""
        self.speedtest_command = "speedtest-cli --json --secure"
        if not self.py3.check_commands(self.speedtest_command.split()[0]):
            raise Exception(STRING_NOT_INSTALLED)
        # init
        self.button_refresh = 2
        self.placeholders = self.py3.get_placeholders_list(self.format)
        self.speedtest_data = self.py3.storage_get("speedtest_data") or {}
        self.thread = None
        self.thresholds_init = self.py3.get_color_names_list(self.format)
        # elapsed_time is computed locally, not parsed from speedtest output
        if "elapsed_time" in self.placeholders:
            self.placeholders.remove("elapsed_time")
        # request a share URL only when the user configured a share button
        if self.button_share:
            self.speedtest_command += " --share"
        # skip download/upload tests whose placeholders are unused
        tests = ["download", "upload"]
        if any(x in tests for x in self.placeholders):
            for x in tests:
                if x not in self.placeholders:
                    self.speedtest_command += " --no-{}".format(x)

    def _set_speedtest_data(self):
        """Run speedtest-cli (blocking; executed in a worker thread)."""
        # start: mark the test in progress so speedtest() keeps refreshing
        self.start_time = time()
        self.speedtest_data["elapsed"] = True
        try:
            self.speedtest_data = self.py3.flatten_dict(
                loads(self.py3.command_output(self.speedtest_command)), delimiter="_"
            )
            for x in ["download", "upload", "bytes_received", "bytes_sent"]:
                if x not in self.placeholders or x not in self.speedtest_data:
                    continue
                # rates use SI units; byte counters use binary units
                si = "bytes" not in x
                self.speedtest_data[x], unit = self.py3.format_units(
                    self.speedtest_data[x], unit="MB", si=si
                )
        except self.py3.CommandError:
            pass
        # end
        self.speedtest_data["elapsed"] = False
        self.speedtest_data["elapsed_time"] = time() - self.start_time

    def speedtest(self):
        """py3status display callback: render the last known results."""
        if self.speedtest_data.get("elapsed"):
            # a test is running: refresh every tick so elapsed_time counts up
            cached_until = 0
            self.speedtest_data["elapsed_time"] = time() - self.start_time
        else:
            cached_until = self.py3.CACHE_FOREVER
            self.py3.storage_set("speedtest_data", self.speedtest_data)
        # thresholds
        for x in self.thresholds_init:
            if x in self.speedtest_data:
                self.py3.threshold_get_color(self.speedtest_data[x], x)
        return {
            "cached_until": self.py3.time_in(cached_until),
            "full_text": self.py3.safe_format(self.format, self.speedtest_data),
        }

    def on_click(self, event):
        """Handle mouse clicks: share URL or (re)start a test thread."""
        button = event["button"]
        if button == self.button_share:
            share = self.speedtest_data.get("share")
            if share:
                self.py3.command_run("xdg-open {}".format(share))
        if button == self.button_refresh:
            # Bug fix: Thread.isAlive() was removed in Python 3.9;
            # use the is_alive() name available since Python 2.6.
            if self.thread and not self.thread.is_alive():
                self.thread = None
            if self.thread is None:
                self.thread = Thread(target=self._set_speedtest_data)
                self.thread.daemon = True
                self.thread.start()
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    # module_test drives the module standalone, outside the py3status bar
    from py3status.module_test import module_test
    module_test(Py3status)
|
test_main.py | # -*- coding: utf-8 -*-
import msvcrt
from time import sleep
import sys
import threading
import os
def pwd_input():
    """Read a password from the console, echoing '*' per character.

    Windows/cmd only (uses msvcrt); when getch() fails (e.g. not a real
    console) it falls back to plain, unmasked input().
    """
    chars = []
    while True:
        try:
            newChar = msvcrt.getch().decode(encoding="utf-8")
        except:
            return input("你很可能不是在cmd命令行下运行,密码输入将不能隐藏:")
        if newChar in '\r\n':  # newline: input is finished
            break
        elif newChar == '\b':  # backspace: drop last char and erase one star
            if chars:
                del chars[-1]
                msvcrt.putch('\b'.encode(encoding='utf-8'))  # move cursor back one cell
                msvcrt.putch(' '.encode(encoding='utf-8'))   # overwrite the star with a space
                msvcrt.putch('\b'.encode(encoding='utf-8'))  # back again, ready for new input
        else:
            chars.append(newChar)
            msvcrt.putch('*'.encode(encoding='utf-8'))  # echo a star
    return (''.join(chars))
def t1():
    # pwd = input("please input password...\n")
    # print("\nyour password is:{}".format(pwd))
    # Print "1" every 2 seconds, forever.
    while 1:
        print("1")
        sleep(2)
    # NOTE(review): unreachable — the loop above never exits, so this
    # hard process exit never runs.
    os._exit(0)
def t2():
    # Print "2" every second, forever.
    while 1:
        print("2")
        sleep(1)
if __name__ == '__main__':
    # daemon=False (the default, made explicit) keeps the process alive
    # while the worker threads run. Passing daemon= at construction
    # replaces Thread.setDaemon(), deprecated since Python 3.10.
    thread2 = threading.Thread(target=t2, daemon=False)
    thread1 = threading.Thread(target=t1, daemon=False)
    thread1.start()
    thread2.start()
|
gam.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GAM
#
# Copyright 2015, LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
u"""GAM is a command line tool which allows Administrators to control their Google Apps domain and accounts.
With GAM you can programatically create users, turn on/off services for users like POP and Forwarding and much more.
For more information, see http://git.io/gam
"""
__author__ = u'Jay Lee <jay0lee@gmail.com>'
__version__ = u'3.62'
__license__ = u'Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0)'
import sys, os, time, datetime, random, socket, csv, platform, re, calendar, base64, string, StringIO, subprocess
import json
import httplib2
import googleapiclient
import googleapiclient.discovery
import googleapiclient.errors
import googleapiclient.http
import oauth2client.client
import oauth2client.file
import oauth2client.tools
import mimetypes
import ntpath
# Project home page and identification string sent with API requests.
GAM_URL = u'http://git.io/gam'
# sys.version_info[3] is the Python release level (e.g. 'final').
GAM_INFO = u'GAM {0} - {1} / {2} / Python {3}.{4}.{5} {6} / {7} {8} /'.format(__version__, GAM_URL,
__author__,
sys.version_info[0], sys.version_info[1], sys.version_info[2],
sys.version_info[3],
platform.platform(), platform.machine())
GAM_RELEASES = u'https://github.com/jay0lee/GAM/releases'
GAM_WIKI = u'https://github.com/jay0lee/GAM/wiki'
GAM_WIKI_CREATE_CLIENT_SECRETS = GAM_WIKI+u'/CreatingClientSecretsFile#creating-your-own-oauth2servicejson'
# Endpoints used by the weekly update check (doGAMCheckForUpdates).
GAM_APPSPOT = u'https://gam-update.appspot.com'
GAM_APPSPOT_LATEST_VERSION = GAM_APPSPOT+u'/latest-version.txt?v='+__version__
GAM_APPSPOT_LATEST_VERSION_ANNOUNCEMENT = GAM_APPSPOT+u'/latest-version-announcement.txt?v='+__version__
TRUE = u'true'
FALSE = u'false'
# Spellings accepted on the command line for boolean values.
true_values = [u'on', u'yes', u'enabled', u'true', u'1']
false_values = [u'off', u'no', u'disabled', u'false', u'0']
# Keywords accepted where a collection of users/entities is expected.
usergroup_types = [u'user', u'users', u'group', u'ou', u'org',
u'ou_and_children', u'ou_and_child', u'query',
u'license', u'licenses', u'licence', u'licences', u'file', u'all',
u'cros']
# Prefixes used on stderr diagnostics.
ERROR = u'ERROR'
ERROR_PREFIX = ERROR+u': '
WARNING = u'WARNING'
WARNING_PREFIX = WARNING+u': '
# Well-known file names, resolved relative to the config directory.
FN_EXTRA_ARGS_TXT = u'extra-args.txt'
FN_GAMSCOPES_JSON = u'gamscopes.json'
FN_LAST_UPDATE_CHECK_TXT = u'lastupdatecheck.txt'
FN_OAUTH2SERVICE_JSON = u'oauth2service.json'
MY_CUSTOMER = u'my_customer'
UNKNOWN = u'Unknown'
#
# Global variables
#
# The following GM_XXX constants are arbitrary but must be unique
# Most errors print a message and bail out with a return code
# Some commands want to set a non-zero return code but not bail
GM_SYSEXITRC = u'sxrc'
# Path to gam
GM_GAM_PATH = u'gpth'
# Are we on Windows?
GM_WINDOWS = u'wndo'
# Encodings
GM_SYS_ENCODING = u'syen'
# Shared by batch_worker and run_batch
GM_BATCH_QUEUE = u'batq'
# Extra arguments to pass to GAPI functions
GM_EXTRA_ARGS_DICT = u'exad'
# Scopes retrieved from gamscopes.json
GM_GAMSCOPES_LIST = u'scop'
# GAM admin user
GM_ADMIN = u'admin'
# Current API user
GM_CURRENT_API_USER = u'capu'
# Current API scope
GM_CURRENT_API_SCOPES = u'caps'
# Values retrieved from oauth2service.json
GM_OAUTH2SERVICE_KEY = u'oauk'
GM_OAUTH2SERVICE_ACCOUNT_EMAIL = u'oaae'
GM_OAUTH2SERVICE_ACCOUNT_CLIENT_ID = u'oaci'
# File containing time of last GAM update check
GM_LAST_UPDATE_CHECK_TXT = u'lupc'
# Dictionary mapping OrgUnit ID to Name
GM_MAP_ORGUNIT_ID_TO_NAME = u'oi2n'
# Dictionary mapping Role ID to Name
GM_MAP_ROLE_ID_TO_NAME = u'ri2n'
# Dictionary mapping Role Name to ID
GM_MAP_ROLE_NAME_TO_ID = u'rn2i'
# Dictionary mapping User ID to Name
GM_MAP_USER_ID_TO_NAME = u'ui2n'
#
# Single mutable dictionary holding all process-wide state, keyed by the
# GM_* constants above; populated/updated at runtime (e.g. SetGlobalVariables).
GM_Globals = {
  GM_SYSEXITRC: 0,
  GM_GAM_PATH: os.path.dirname(os.path.realpath(__file__)),
  GM_WINDOWS: os.name == u'nt',
  GM_SYS_ENCODING: sys.getfilesystemencoding() if os.name == u'nt' else u'utf-8',
  GM_BATCH_QUEUE: None,
  GM_EXTRA_ARGS_DICT: {u'prettyPrint': False},
  GM_GAMSCOPES_LIST: [],
  GM_ADMIN: None,
  GM_CURRENT_API_USER: None,
  GM_CURRENT_API_SCOPES: [],
  GM_OAUTH2SERVICE_KEY: None,
  GM_OAUTH2SERVICE_ACCOUNT_EMAIL: None,
  GM_OAUTH2SERVICE_ACCOUNT_CLIENT_ID: None,
  GM_LAST_UPDATE_CHECK_TXT: u'',
  GM_MAP_ORGUNIT_ID_TO_NAME: None,
  GM_MAP_ROLE_ID_TO_NAME: None,
  GM_MAP_ROLE_NAME_TO_ID: None,
  GM_MAP_USER_ID_TO_NAME: None,
  }
#
# Global variables defined by environment variables/signal files
#
# When retrieving lists of Google Drive activities from API, how many should be retrieved in each chunk
GC_ACTIVITY_MAX_RESULTS = u'activity_max_results'
# Automatically generate gam batch command if number of users specified in gam users xxx command exceeds this number
# Default: 0, don't automatically generate gam batch commands
GC_AUTO_BATCH_MIN = u'auto_batch_min'
# GAM cache directory. If no_cache is specified, this variable will be set to None
GC_CACHE_DIR = u'cache_dir'
# Character set of batch, csv, data files
GC_CHARSET = u'charset'
# GAM config directory containing client_secrets.json, oauth2.txt, oauth2service.json, extra_args.txt
GC_CONFIG_DIR = u'config_dir'
# customerId from gam.cfg or retrieved from Google
GC_CUSTOMER_ID = u'customer_id'
# If debug_level > 0: extra_args[u'prettyPrint'] = True, httplib2.debuglevel = gam_debug_level, appsObj.debug = True
GC_DEBUG_LEVEL = u'debug_level'
# When retrieving lists of ChromeOS/Mobile devices from API, how many should be retrieved in each chunk
GC_DEVICE_MAX_RESULTS = u'device_max_results'
# Domain obtained from gam.cfg or oauth2.txt
GC_DOMAIN = u'domain'
# Google Drive download directory
GC_DRIVE_DIR = u'drive_dir'
# When retrieving lists of Drive files/folders from API, how many should be retrieved in each chunk
GC_DRIVE_MAX_RESULTS = u'drive_max_results'
# Path to extra_args.txt
GC_EXTRA_ARGS = u'extra_args'
# Path to gamscopes.json
GC_GAMSCOPES_JSON = u'gamscopes_json'
# If no_browser is False, output_csv won't open a browser when todrive is set
GC_NO_BROWSER = u'no_browser'
# Disable GAM API caching
GC_NO_CACHE = u'no_cache'
# Disable GAM update check
GC_NO_UPDATE_CHECK = u'no_update_check'
# Disable SSL certificate validation
GC_NO_VERIFY_SSL = u'no_verify_ssl'
# Number of threads for gam batch
GC_NUM_THREADS = u'num_threads'
# Path to oauth2service.json
GC_OAUTH2SERVICE_JSON = u'oauth2service_json'
# GAM config directory containing admin-settings-v1.json, cloudprint-v2.json
GC_SITE_DIR = u'site_dir'
# When retrieving lists of Users from API, how many should be retrieved in each chunk
GC_USER_MAX_RESULTS = u'user_max_results'
# Built-in default for every config item; overridden from the environment and
# signal files by SetGlobalVariables before GC_Values is populated.
GC_Defaults = {
  GC_ACTIVITY_MAX_RESULTS: 100,
  GC_AUTO_BATCH_MIN: 0,
  GC_CACHE_DIR: u'',
  GC_CHARSET: u'utf-8',
  GC_CONFIG_DIR: u'',
  GC_CUSTOMER_ID: MY_CUSTOMER,
  GC_DEBUG_LEVEL: 0,
  GC_DEVICE_MAX_RESULTS: 500,
  GC_DOMAIN: u'',
  GC_DRIVE_DIR: u'',
  GC_DRIVE_MAX_RESULTS: 1000,
  GC_EXTRA_ARGS: u'',
  GC_GAMSCOPES_JSON: FN_GAMSCOPES_JSON,
  GC_NO_BROWSER: FALSE,
  GC_NO_CACHE: FALSE,
  GC_NO_UPDATE_CHECK: FALSE,
  GC_NO_VERIFY_SSL: FALSE,
  GC_NUM_THREADS: 5,
  GC_OAUTH2SERVICE_JSON: FN_OAUTH2SERVICE_JSON,
  GC_SITE_DIR: u'',
  GC_USER_MAX_RESULTS: 500,
  }
# Effective configuration, filled in by SetGlobalVariables.
GC_Values = {}
# Value-type tags used in GC_VAR_INFO.
GC_TYPE_BOOLEAN = u'bool'
GC_TYPE_CHOICE = u'choi'
GC_TYPE_DIRECTORY = u'dire'
GC_TYPE_EMAIL = u'emai'
GC_TYPE_FILE = u'file'
GC_TYPE_INTEGER = u'inte'
GC_TYPE_LANGUAGE = u'lang'
GC_TYPE_STRING = u'stri'
# Keys of the per-item descriptor dictionaries in GC_VAR_INFO.
GC_VAR_TYPE_KEY = u'type'
GC_VAR_ENVVAR_KEY = u'enva'
GC_VAR_LIMITS_KEY = u'lmit'
GC_VAR_SFFT_KEY = u'sfft'
# Describes each config item: its value type, the environment variable or
# signal-file name that sets it, optional (min, max) clamping limits for
# integers, and for signal-file items the (file-absent, file-present) values.
GC_VAR_INFO = {
  GC_ACTIVITY_MAX_RESULTS: {GC_VAR_TYPE_KEY: GC_TYPE_INTEGER, GC_VAR_ENVVAR_KEY: u'GAM_ACTIVITY_MAX_RESULTS', GC_VAR_LIMITS_KEY: (1, 500)},
  GC_AUTO_BATCH_MIN: {GC_VAR_TYPE_KEY: GC_TYPE_INTEGER, GC_VAR_ENVVAR_KEY: u'GAM_AUTOBATCH', GC_VAR_LIMITS_KEY: (0, None)},
  GC_CACHE_DIR: {GC_VAR_TYPE_KEY: GC_TYPE_DIRECTORY, GC_VAR_ENVVAR_KEY: u'GAMCACHEDIR'},
  GC_CHARSET: {GC_VAR_TYPE_KEY: GC_TYPE_STRING, GC_VAR_ENVVAR_KEY: u'GAM_CHARSET'},
  GC_CONFIG_DIR: {GC_VAR_TYPE_KEY: GC_TYPE_DIRECTORY, GC_VAR_ENVVAR_KEY: u'GAMUSERCONFIGDIR'},
  GC_CUSTOMER_ID: {GC_VAR_TYPE_KEY: GC_TYPE_STRING, GC_VAR_ENVVAR_KEY: u'CUSTOMER_ID'},
  GC_DEBUG_LEVEL: {GC_VAR_TYPE_KEY: GC_TYPE_INTEGER, GC_VAR_ENVVAR_KEY: u'debug.gam', GC_VAR_LIMITS_KEY: (0, None), GC_VAR_SFFT_KEY: (0, 4)},
  GC_DEVICE_MAX_RESULTS: {GC_VAR_TYPE_KEY: GC_TYPE_INTEGER, GC_VAR_ENVVAR_KEY: u'GAM_DEVICE_MAX_RESULTS', GC_VAR_LIMITS_KEY: (1, 1000)},
  GC_DOMAIN: {GC_VAR_TYPE_KEY: GC_TYPE_STRING, GC_VAR_ENVVAR_KEY: u'GA_DOMAIN'},
  GC_DRIVE_DIR: {GC_VAR_TYPE_KEY: GC_TYPE_DIRECTORY, GC_VAR_ENVVAR_KEY: u'GAMDRIVEDIR'},
  GC_DRIVE_MAX_RESULTS: {GC_VAR_TYPE_KEY: GC_TYPE_INTEGER, GC_VAR_ENVVAR_KEY: u'GAM_DRIVE_MAX_RESULTS', GC_VAR_LIMITS_KEY: (1, 1000)},
  GC_EXTRA_ARGS: {GC_VAR_TYPE_KEY: GC_TYPE_FILE, GC_VAR_ENVVAR_KEY: FN_EXTRA_ARGS_TXT, GC_VAR_SFFT_KEY: (u'', FN_EXTRA_ARGS_TXT)},
  GC_GAMSCOPES_JSON: {GC_VAR_TYPE_KEY: GC_TYPE_FILE, GC_VAR_ENVVAR_KEY: u'GAMSCOPESFILE'},
  GC_NO_BROWSER: {GC_VAR_TYPE_KEY: GC_TYPE_BOOLEAN, GC_VAR_ENVVAR_KEY: u'nobrowser.txt', GC_VAR_SFFT_KEY: (False, True)},
  GC_NO_CACHE: {GC_VAR_TYPE_KEY: GC_TYPE_BOOLEAN, GC_VAR_ENVVAR_KEY: u'nocache.txt', GC_VAR_SFFT_KEY: (False, True)},
  GC_NO_UPDATE_CHECK: {GC_VAR_TYPE_KEY: GC_TYPE_BOOLEAN, GC_VAR_ENVVAR_KEY: u'noupdatecheck.txt', GC_VAR_SFFT_KEY: (False, True)},
  GC_NO_VERIFY_SSL: {GC_VAR_TYPE_KEY: GC_TYPE_BOOLEAN, GC_VAR_ENVVAR_KEY: u'noverifyssl.txt', GC_VAR_SFFT_KEY: (False, True)},
  GC_NUM_THREADS: {GC_VAR_TYPE_KEY: GC_TYPE_INTEGER, GC_VAR_ENVVAR_KEY: u'GAM_THREADS', GC_VAR_LIMITS_KEY: (1, None)},
  GC_OAUTH2SERVICE_JSON: {GC_VAR_TYPE_KEY: GC_TYPE_FILE, GC_VAR_ENVVAR_KEY: u'OAUTHSERVICEFILE'},
  GC_SITE_DIR: {GC_VAR_TYPE_KEY: GC_TYPE_DIRECTORY, GC_VAR_ENVVAR_KEY: u'GAMSITECONFIGDIR'},
  GC_USER_MAX_RESULTS: {GC_VAR_TYPE_KEY: GC_TYPE_INTEGER, GC_VAR_ENVVAR_KEY: u'GAM_USER_MAX_RESULTS', GC_VAR_LIMITS_KEY: (1, 500)},
  }
# User-facing message templates; {n} placeholders are filled with .format().
MESSAGE_BATCH_CSV_DASH_DEBUG_INCOMPATIBLE = u'"gam {0} - ..." is not compatible with debugging. Disable debugging by deleting debug.gam and try again.'
MESSAGE_API_ACCESS_CONFIG = u'API access is configured in your Control Panel under: Security-Show more-Advanced settings-Manage API client access'
MESSAGE_API_ACCESS_DENIED = u'API access denied.\n\nPlease make sure the Service account Client ID: {0} is authorized for the API Scope(s): {1}\n\nPlease make sure the Admin email address: {2} is valid'
MESSAGE_GAM_EXITING_FOR_UPDATE = u'GAM is now exiting so that you can overwrite this old version with the latest release'
MESSAGE_GAM_OUT_OF_MEMORY = u'GAM has run out of memory. If this is a large Google Apps instance, you should use a 64-bit version of GAM on Windows or a 64-bit version of Python on other systems.'
MESSAGE_HEADER_NOT_FOUND_IN_CSV_HEADERS = u'Header "{0}" not found in CSV headers of "{1}".'
MESSAGE_HIT_CONTROL_C_TO_UPDATE = u'\n\nHit CTRL+C to visit the GAM website and download the latest release or wait 15 seconds continue with this boring old version. GAM won\'t bother you with this announcement for 1 week or you can create a file named noupdatecheck.txt in the same location as gam.py or gam.exe and GAM won\'t ever check for updates.'
MESSAGE_INVALID_JSON = u'The file {0} has an invalid format.'
MESSAGE_NO_DISCOVERY_INFORMATION = u'No online discovery doc and {0} does not exist locally'
MESSAGE_NO_PYTHON_SSL = u'You don\'t have the Python SSL module installed so we can\'t verify SSL Certificates. You can fix this by installing the Python SSL module or you can live on the edge and turn SSL validation off by creating a file named noverifyssl.txt in the same location as gam.exe / gam.py'
MESSAGE_NO_SCOPES_FOR_API = u'There are no scopes authorized for the {0}; please run gam oauth create'
MESSAGE_NO_TRANSFER_LACK_OF_DISK_SPACE = u'Cowardly refusing to perform migration due to lack of target drive space. Source size: {0}mb Target Free: {1}mb'
MESSAGE_OAUTH2SERVICE_JSON_INVALID = u'The file {0} is missing required keys (client_email, client_id or private_key).'
MESSAGE_PLEASE_AUTHORIZE_SERVICE_ACCOUNT = u'Please authorize your Service account Client ID for the {0} scopes:\n\n{1}'
MESSAGE_REQUEST_COMPLETED_NO_FILES = u'Request completed but no results/files were returned, try requesting again'
MESSAGE_REQUEST_NOT_COMPLETE = u'Request needs to be completed before downloading, current status is: {0}'
MESSAGE_RESULTS_TOO_LARGE_FOR_GOOGLE_SPREADSHEET = u'Results are too large for Google Spreadsheets. Uploading as a regular CSV file.'
MESSAGE_SERVICE_NOT_APPLICABLE = u'Service not applicable for this address: {0}'
MESSAGE_WIKI_INSTRUCTIONS_OAUTH2SERVICE_JSON = u'Please follow the instructions at this site to setup a Service account.'
# oauth2client error strings recognized by handleOAuthTokenError as
# authorization (rather than transient) failures.
OAUTH_TOKEN_ERRORS = [u'access_denied', u'unauthorized_client: Unauthorized client or scope in request.', u'access_denied: Requested client not authorized.', u'invalid_grant: Not a valid email.', u'invalid_request: Invalid impersonation prn email address.']
def convertUTF8(data):
  """Recursively encode unicode strings in data to the system encoding (Python 2).

  On Windows (GM_WINDOWS) unicode objects are returned unchanged; elsewhere
  they are encoded with GM_SYS_ENCODING. Mappings and iterables are rebuilt
  with converted elements; anything else passes through untouched.
  """
  import collections
  if isinstance(data, str):
    return data
  if isinstance(data, unicode):  # Python 2 only: 'unicode' does not exist in Python 3
    if GM_Globals[GM_WINDOWS]:
      return data
    return data.encode(GM_Globals[GM_SYS_ENCODING])
  if isinstance(data, collections.Mapping):
    # iteritems() yields (key, value) tuples; each tuple is converted via
    # the Iterable branch below, then reassembled into a dict.
    return dict(map(convertUTF8, data.iteritems()))
  if isinstance(data, collections.Iterable):
    return type(data)(map(convertUTF8, data))
  return data
def win32_unicode_argv():
  """Return the command-line arguments as unicode on Windows.

  Uses GetCommandLineW/CommandLineToArgvW so arguments keep their unicode
  characters instead of being mangled by the ANSI code page.
  NOTE(review): implicitly returns None when argc.value == 0 -- callers
  should be prepared for that.
  """
  from ctypes import POINTER, byref, cdll, c_int, windll
  from ctypes.wintypes import LPCWSTR, LPWSTR
  GetCommandLineW = cdll.kernel32.GetCommandLineW
  GetCommandLineW.argtypes = []
  GetCommandLineW.restype = LPCWSTR
  CommandLineToArgvW = windll.shell32.CommandLineToArgvW
  CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(c_int)]
  CommandLineToArgvW.restype = POINTER(LPWSTR)
  cmd = GetCommandLineW()
  argc = c_int(0)
  argv = CommandLineToArgvW(cmd, byref(argc))
  if argc.value > 0:
    # Skip the interpreter/launcher entries so the slice lines up with sys.argv
    start = argc.value - len(sys.argv)
    return [argv[i] for i in range(start, argc.value)]
from HTMLParser import HTMLParser
from re import sub
from sys import stderr
from traceback import print_exc
class _DeHTMLParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.__text = []
def handle_data(self, data):
text = data.strip()
if len(text) > 0:
text = sub(u'[ \t\r\n]+', u' ', text)
self.__text.append(text + u' ')
def handle_starttag(self, tag, attrs):
if tag == u'p':
self.__text.append(u'\n\n')
elif tag == u'br':
self.__text.append(u'\n')
def handle_startendtag(self, tag, attrs):
if tag == u'br':
self.__text.append(u'\n\n')
def text(self):
return ''.join(self.__text).strip()
def dehtml(text):
  """Convert an HTML fragment to plain text via _DeHTMLParser.

  Best-effort: if parsing fails, the traceback is printed to stderr and
  the original text is returned unchanged.
  """
  try:
    parser = _DeHTMLParser()
    parser.feed(text.encode(u'utf-8'))
    parser.close()
    return parser.text()
  except Exception:
    # Was a bare 'except:', which also swallowed SystemExit and
    # KeyboardInterrupt; 'except Exception' keeps the deliberate
    # best-effort behavior without masking interpreter exits.
    print_exc(file=stderr)
    return text
def showUsage():
  """Print version information followed by brief usage examples."""
  doGAMVersion()
  print u'''
Usage: gam [OPTIONS]...
GAM. Retrieve or set Google Apps domain,
user, group and alias settings. Exhaustive list of commands
can be found at: https://github.com/jay0lee/GAM/wiki
Examples:
gam info domain
gam create user jsmith firstname John lastname Smith password secretpass
gam update user jsmith suspended on
gam.exe update group announcements add member jsmith
...
'''
#
# Error handling
#
def systemErrorExit(sysRC, message):
  """Terminate the process with return code sysRC.

  When message is truthy it is written to stderr with the ERROR: prefix
  before exiting.
  """
  if message:
    formatted = u'\n{0}{1}\n'.format(ERROR_PREFIX, message)
    sys.stderr.write(formatted)
  sys.exit(sysRC)
def invalidJSONExit(fileName):
  """Exit (rc 17) reporting that fileName contains invalid JSON."""
  systemErrorExit(17, MESSAGE_INVALID_JSON.format(fileName))
def noPythonSSLExit():
  """Exit (rc 8) because the Python SSL module is unavailable."""
  systemErrorExit(8, MESSAGE_NO_PYTHON_SSL)
# Invalid CSV ~Header or ~~Header~~
def csvFieldErrorExit(fieldName, fieldNames):
  """Exit (rc 3) because fieldName is not among the CSV headers fieldNames."""
  systemErrorExit(3, MESSAGE_HEADER_NOT_FOUND_IN_CSV_HEADERS.format(fieldName, u','.join(fieldNames)))
def printLine(message):
  """Write message plus a trailing newline to stdout."""
  sys.stdout.write(u'{0}\n'.format(message))
#
# Open a file
#
def openFile(filename, mode=u'rb'):
  """Open filename and return the file object; u'-' means stdin/stdout.

  For u'-': read modes wrap stdin's full contents in a StringIO (Python 2
  StringIO module); any other mode returns sys.stdout. Exits (rc 6) on
  IOError.
  """
  try:
    if filename != u'-':
      return open(filename, mode)
    if mode.startswith(u'r'):
      return StringIO.StringIO(unicode(sys.stdin.read()))
    return sys.stdout
  except IOError as e:
    systemErrorExit(6, e)
#
# Close a file
#
def closeFile(f):
  """Close f; return True on success, False (after logging) on IOError."""
  try:
    f.close()
  except IOError as e:
    sys.stderr.write(u'{0}{1}\n'.format(ERROR_PREFIX, e))
    return False
  return True
#
# Read a file
#
def readFile(filename, mode=u'rb', continueOnError=False, displayError=True):
  """Return the contents of filename; u'-' reads all of stdin.

  On IOError: when continueOnError, optionally write a WARNING to stderr
  and return None; otherwise exit (rc 6).
  """
  try:
    if filename != u'-':
      with open(filename, mode) as f:
        return f.read()
    else:
      return unicode(sys.stdin.read())
  except IOError as e:
    if continueOnError:
      if displayError:
        sys.stderr.write(u'{0}{1}\n'.format(WARNING_PREFIX, e))
      return None
    systemErrorExit(6, e)
#
# Write a file
#
def writeFile(filename, data, mode=u'wb', continueOnError=False, displayError=True):
  """Write data to filename; return True on success.

  On IOError: when continueOnError, optionally write an ERROR to stderr
  and return False; otherwise exit (rc 6).
  """
  try:
    with open(filename, mode) as f:
      f.write(data)
    return True
  except IOError as e:
    if not continueOnError:
      systemErrorExit(6, e)
    if displayError:
      sys.stderr.write(u'{0}{1}\n'.format(ERROR_PREFIX, e))
    return False
# Get global domain from global admin email address
#
def getDomainFromAdmin():
  """Set GC_Values[GC_DOMAIN] to the domain part of GM_Globals[GM_ADMIN].

  Does nothing when no admin is set or the address has no '@' after the
  first character.
  """
  admin_email = GM_Globals[GM_ADMIN]
  if admin_email:
    at_loc = admin_email.find(u'@')
    if at_loc > 0:
      GC_Values[GC_DOMAIN] = admin_email[at_loc+1:]
# Set global variables
# Check for GAM updates based on status of noupdatecheck.txt
#
def SetGlobalVariables():
  """Populate GC_Defaults/GC_Values from the environment and signal files.

  Order matters: directory items are resolved first so file items can be
  anchored at GC_CONFIG_DIR. Also loads gamscopes.json, derives the domain,
  warns about missing paths, and optionally checks for GAM updates.
  Always returns True.
  """
  def _getDefault(itemName, itemEntry):
    # Signal-file items (GC_VAR_SFFT_KEY) pick their value by whether the
    # file exists in the config dir; other items come from an environment
    # variable, with integer items clamped to their (min, max) limits.
    if GC_VAR_SFFT_KEY in itemEntry:
      GC_Defaults[itemName] = itemEntry[GC_VAR_SFFT_KEY][os.path.isfile(os.path.join(GC_Defaults[GC_CONFIG_DIR], itemEntry[GC_VAR_ENVVAR_KEY]))]
    else:
      value = os.environ.get(itemEntry[GC_VAR_ENVVAR_KEY], GC_Defaults[itemName])
      if itemEntry[GC_VAR_TYPE_KEY] == GC_TYPE_INTEGER:
        try:
          number = int(value)
          minVal, maxVal = itemEntry[GC_VAR_LIMITS_KEY]
          if number < minVal:
            number = minVal
          elif maxVal and (number > maxVal):
            number = maxVal
        except ValueError:
          number = GC_Defaults[itemName]
        value = str(number)
      GC_Defaults[itemName] = value
  def _getScopesAdminDomainFromGamScopesJson():
    # Load scopes/admin/domain from gamscopes.json; a missing file is OK,
    # malformed JSON is fatal (invalidJSONExit).
    GM_Globals[GM_GAMSCOPES_LIST] = []
    json_string = readFile(GC_Values[GC_GAMSCOPES_JSON], continueOnError=True, displayError=False)
    if json_string == None:
      return
    try:
      json_data = json.loads(json_string)
    except ValueError:
      invalidJSONExit(GC_Values[GC_GAMSCOPES_JSON])
    scopes = json_data.get(u'scopes', None)
    if not isinstance(scopes, list):
      invalidJSONExit(GC_Values[GC_GAMSCOPES_JSON])
    GM_Globals[GM_GAMSCOPES_LIST] = list(set(scopes))
    if not GM_Globals[GM_ADMIN]:
      GM_Globals[GM_ADMIN] = json_data.get(u'admin', None)
    if not GC_Values[GC_DOMAIN]:
      GC_Values[GC_DOMAIN] = json_data.get(u'domain', GC_Defaults[GC_DOMAIN])
  def _getCfgDirectory(itemName):
    # Directory values are taken as-is from the resolved defaults.
    return GC_Defaults[itemName]
  def _getCfgFile(itemName):
    # File values are ~-expanded and, unless absolute, anchored at GC_CONFIG_DIR.
    value = os.path.expanduser(GC_Defaults[itemName])
    if (not value) and (itemName == GC_EXTRA_ARGS):
      return value
    if not os.path.isabs(value):
      value = os.path.expanduser(os.path.join(GC_Values[GC_CONFIG_DIR], value))
    return value
  def _chkCfgDirectories():
    # Warn (do not exit) about configured directories that do not exist.
    for itemName in GC_VAR_INFO:
      if GC_VAR_INFO[itemName][GC_VAR_TYPE_KEY] == GC_TYPE_DIRECTORY:
        dirPath = GC_Values[itemName]
        if not os.path.isdir(dirPath):
          sys.stderr.write(u'{0}{1}={2}, Invalid Path\n'.format(WARNING_PREFIX, GC_VAR_INFO[itemName][GC_VAR_ENVVAR_KEY], dirPath))
  def _chkCfgFiles():
    # Warn (do not exit) about configured files that do not exist.
    for itemName in GC_VAR_INFO:
      if GC_VAR_INFO[itemName][GC_VAR_TYPE_KEY] == GC_TYPE_FILE:
        fileName = GC_Values[itemName]
        if (not fileName) and (itemName == GC_EXTRA_ARGS):
          continue
        if not os.path.isfile(fileName):
          sys.stderr.write(u'{0}{1}={2}, Not Found\n'.format(WARNING_PREFIX, GC_VAR_INFO[itemName][GC_VAR_ENVVAR_KEY], fileName))
  GC_Defaults[GC_CONFIG_DIR] = GM_Globals[GM_GAM_PATH]
  GC_Defaults[GC_CACHE_DIR] = os.path.join(GM_Globals[GM_GAM_PATH], u'gamcache')
  GC_Defaults[GC_DRIVE_DIR] = GM_Globals[GM_GAM_PATH]
  GC_Defaults[GC_SITE_DIR] = GM_Globals[GM_GAM_PATH]
  # Resolve directory defaults before everything else; signal-file items
  # depend on GC_CONFIG_DIR being final.
  for itemName, itemEntry in GC_VAR_INFO.items():
    if itemEntry[GC_VAR_TYPE_KEY] == GC_TYPE_DIRECTORY:
      _getDefault(itemName, itemEntry)
  for itemName, itemEntry in GC_VAR_INFO.items():
    if itemEntry[GC_VAR_TYPE_KEY] != GC_TYPE_DIRECTORY:
      _getDefault(itemName, itemEntry)
  if GC_Defaults[GC_OAUTH2SERVICE_JSON].find(u'.') == -1:
    GC_Defaults[GC_OAUTH2SERVICE_JSON] += u'.json'
  # Assign directories first
  for itemName in GC_VAR_INFO:
    if GC_VAR_INFO[itemName][GC_VAR_TYPE_KEY] == GC_TYPE_DIRECTORY:
      GC_Values[itemName] = _getCfgDirectory(itemName)
  for itemName in GC_VAR_INFO:
    varType = GC_VAR_INFO[itemName][GC_VAR_TYPE_KEY]
    if varType == GC_TYPE_FILE:
      GC_Values[itemName] = _getCfgFile(itemName)
    else:
      GC_Values[itemName] = GC_Defaults[itemName]
  GM_Globals[GM_LAST_UPDATE_CHECK_TXT] = os.path.join(GC_Values[GC_CONFIG_DIR], FN_LAST_UPDATE_CHECK_TXT)
  if not GC_Values[GC_NO_UPDATE_CHECK]:
    doGAMCheckForUpdates()
  # Globals derived from config file values
  httplib2.debuglevel = GC_Values[GC_DEBUG_LEVEL]
  GM_Globals[GM_EXTRA_ARGS_DICT] = {u'prettyPrint': GC_Values[GC_DEBUG_LEVEL] > 0}
  if GC_Values[GC_EXTRA_ARGS]:
    import ConfigParser
    ea_config = ConfigParser.ConfigParser()
    ea_config.optionxform = str
    ea_config.read(GC_Values[GC_EXTRA_ARGS])
    GM_Globals[GM_EXTRA_ARGS_DICT].update(dict(ea_config.items(u'extra-args')))
  GM_Globals[GM_OAUTH2SERVICE_KEY] = None
  GM_Globals[GM_OAUTH2SERVICE_ACCOUNT_EMAIL] = None
  GM_Globals[GM_OAUTH2SERVICE_ACCOUNT_CLIENT_ID] = None
  _getScopesAdminDomainFromGamScopesJson()
  if not GC_Values[GC_DOMAIN]:
    getDomainFromAdmin()
  _chkCfgDirectories()
  _chkCfgFiles()
  if GC_Values[GC_NO_CACHE]:
    GC_Values[GC_CACHE_DIR] = None
  return True
def doGAMCheckForUpdates(forceCheck=False):
import urllib2
try:
current_version = float(__version__)
except ValueError:
return
now_time = calendar.timegm(time.gmtime())
if not forceCheck:
last_check_time = readFile(GM_Globals[GM_LAST_UPDATE_CHECK_TXT], continueOnError=True, displayError=forceCheck)
if last_check_time == None:
last_check_time = 0
if last_check_time > now_time-604800:
return
try:
c = urllib2.urlopen(GAM_APPSPOT_LATEST_VERSION)
try:
latest_version = float(c.read())
except ValueError:
return
if forceCheck or (latest_version > current_version):
print u'Version: Check, Current: {0:.2f}, Latest: {1:.2f}'.format(current_version, latest_version)
if latest_version <= current_version:
writeFile(GM_Globals[GM_LAST_UPDATE_CHECK_TXT], str(now_time), continueOnError=True, displayError=forceCheck)
return
a = urllib2.urlopen(GAM_APPSPOT_LATEST_VERSION_ANNOUNCEMENT)
announcement = a.read()
sys.stderr.write(announcement)
try:
printLine(MESSAGE_HIT_CONTROL_C_TO_UPDATE)
time.sleep(15)
except KeyboardInterrupt:
import webbrowser
webbrowser.open(GAM_RELEASES)
printLine(MESSAGE_GAM_EXITING_FOR_UPDATE)
sys.exit(0)
writeFile(GM_Globals[GM_LAST_UPDATE_CHECK_TXT], str(now_time), continueOnError=True, displayError=forceCheck)
return
except (urllib2.HTTPError, urllib2.URLError):
return
def doGAMVersion():
  """Print GAM, Python (with pointer-size bitness), API client and platform details."""
  import struct
  # struct.calcsize(u'P')*8 yields 32 or 64 depending on the interpreter build.
  print u'GAM {0} - {1}\n{2}\nPython {3}.{4}.{5} {6}-bit {7}\ngoogle-api-python-client {8}\n{9} {10}\nPath: {11}'.format(__version__, GAM_URL,
__author__,
sys.version_info[0], sys.version_info[1], sys.version_info[2],
struct.calcsize(u'P')*8, sys.version_info[3],
googleapiclient.__version__,
platform.platform(), platform.machine(),
GM_Globals[GM_GAM_PATH])
def handleOAuthTokenError(e, soft_errors):
  """Handle an oauth2client token refresh error.

  Known authorization failures (OAUTH_TOKEN_ERRORS) terminate with guidance
  (rc 12), or rc 19 when impersonating a user. Other errors return None
  when soft_errors, else exit (rc 18).
  """
  if e.message in OAUTH_TOKEN_ERRORS:  # Python 2: exceptions expose .message
    if not GM_Globals[GM_CURRENT_API_USER]:
      sys.stderr.write(u'{0}{1}\n'.format(ERROR_PREFIX, MESSAGE_API_ACCESS_DENIED.format(GM_Globals[GM_OAUTH2SERVICE_ACCOUNT_CLIENT_ID],
u','.join(GM_Globals[GM_CURRENT_API_SCOPES]), GM_Globals[GM_ADMIN])))
      systemErrorExit(12, MESSAGE_API_ACCESS_CONFIG)
    else:
      systemErrorExit(19, MESSAGE_SERVICE_NOT_APPLICABLE.format(GM_Globals[GM_CURRENT_API_USER]))
  if soft_errors:
    sys.stderr.write(u'{0}Authentication Token Error - {1}\n'.format(ERROR_PREFIX, e))
    return None
  systemErrorExit(18, u'Authentication Token Error - {0}'.format(e))
def getGDataOAuthToken(gdataObject):
  """Fetch a service-account access token and attach it to gdataObject.

  Builds SignedJwtAssertionCredentials for the current scopes impersonating
  GM_ADMIN, refreshes them, and sets the Authorization header on the GData
  object. Returns True on success; exits (rc 4) if the server is unreachable.
  """
  credentials = oauth2client.client.SignedJwtAssertionCredentials(GM_Globals[GM_OAUTH2SERVICE_ACCOUNT_EMAIL],
GM_Globals[GM_OAUTH2SERVICE_KEY],
scope=GM_Globals[GM_CURRENT_API_SCOPES], user_agent=GAM_INFO, sub=GM_Globals[GM_ADMIN])
  http = httplib2.Http(disable_ssl_certificate_validation=GC_Values[GC_NO_VERIFY_SSL],
cache=GC_Values[GC_CACHE_DIR])
  try:
    credentials.refresh(http)
  except httplib2.ServerNotFoundError as e:
    systemErrorExit(4, e)
  except oauth2client.client.AccessTokenRefreshError, e:
    return handleOAuthTokenError(e, False)
  gdataObject.additional_headers[u'Authorization'] = u'Bearer {0}'.format(credentials.access_token)
  return True
def checkGDataError(e, service):
  """Classify a GData AppsForYourDomainException.

  Returns False for transient conditions worth retrying (refreshing the
  token first when it merely expired), or a human-readable error string
  for terminating errors.
  """
  # First check for errors that need special handling
  if e[0].get(u'reason', u'') in [u'Token invalid - Invalid token: Stateless token expired', u'Token invalid - Invalid token: Token not found']:
    getGDataOAuthToken(service)
    return False
  if e[0][u'body'].startswith(u'Required field must not be blank:') or e[0][u'body'].startswith(u'These characters are not allowed:'):
    return e[0][u'body']
  # NOTE(review): 'and' binds tighter than 'or' here, so any 'Bad Gateway'
  # reason triggers a retry regardless of error_code -- presumably intended.
  if e.error_code == 600 and e[0][u'body'] == u'Quota exceeded for the current request' or e[0][u'reason'] == u'Bad Gateway':
    return False
  if e.error_code == 600 and e[0][u'reason'] == u'Token invalid - Invalid token: Token disabled, revoked, or expired.':
    return u'403 - Token disabled, revoked, or expired. Please delete and re-create oauth.txt'
  # We got a "normal" error, define the mapping below
  error_code_map = {
    1000: False,
    1001: False,
    1002: u'Unauthorized and forbidden',
    1100: u'User deleted recently',
    1200: u'Domain user limit exceeded',
    1201: u'Domain alias limit exceeded',
    1202: u'Domain suspended',
    1203: u'Domain feature unavailable',
    1300: u'Entity %s exists' % getattr(e, u'invalidInput', u'<unknown>'),
    1301: u'Entity %s Does Not Exist' % getattr(e, u'invalidInput', u'<unknown>'),
    1302: u'Entity Name Is Reserved',
    1303: u'Entity %s name not valid' % getattr(e, u'invalidInput', u'<unknown>'),
    1306: u'%s has members. Cannot delete.' % getattr(e, u'invalidInput', u'<unknown>'),
    1400: u'Invalid Given Name',
    1401: u'Invalid Family Name',
    1402: u'Invalid Password',
    1403: u'Invalid Username',
    1404: u'Invalid Hash Function Name',
    1405: u'Invalid Hash Digest Length',
    1406: u'Invalid Email Address',
    1407: u'Invalid Query Parameter Value',
    1408: u'Invalid SSO Signing Key',
    1409: u'Invalid Encryption Public Key',
    1410: u'Feature Unavailable For User',
    1500: u'Too Many Recipients On Email List',
    1501: u'Too Many Aliases For User',
    1502: u'Too Many Delegates For User',
    1601: u'Duplicate Destinations',
    1602: u'Too Many Destinations',
    1603: u'Invalid Route Address',
    1700: u'Group Cannot Contain Cycle',
    1800: u'Group Cannot Contain Cycle',
    1801: u'Invalid value %s' % getattr(e, u'invalidInput', u'<unknown>'),
  }
  return u'{0} - {1}'.format(e.error_code, error_code_map.get(e.error_code, u'Unknown Error: {0}'.format(str(e))))
def callGData(service, function, soft_errors=False, throw_errors=[], **kwargs):
  """Invoke a GData service method with retry/backoff.

  Retries up to 10 times while checkGDataError classifies the failure as
  transient (returns False); re-raises error codes listed in throw_errors;
  otherwise prints the error and exits with it (or returns None when
  soft_errors). Note: throw_errors defaults to a shared list but is only
  read here.
  """
  import gdata.apps.service
  method = getattr(service, function)
  retries = 10
  for n in range(1, retries+1):
    try:
      return method(**kwargs)
    except gdata.apps.service.AppsForYourDomainException, e:
      terminating_error = checkGDataError(e, service)
      if e.error_code in throw_errors:
        raise
      if not terminating_error and n != retries:
        # Capped exponential backoff with up to 1s of jitter.
        wait_on_fail = (2 ** n) if (2 ** n) < 60 else 60
        randomness = float(random.randint(1, 1000)) / 1000
        wait_on_fail = wait_on_fail + randomness
        if n > 3:
          sys.stderr.write(u'Temp error. Backing off %s seconds...' % (int(wait_on_fail)))
        time.sleep(wait_on_fail)
        if n > 3:
          sys.stderr.write(u'attempt %s/%s\n' % (n+1, retries))
        continue
      sys.stderr.write(u'{0}{1}\n'.format(ERROR_PREFIX, terminating_error))
      if soft_errors:
        if n != 1:
          sys.stderr.write(u' - Giving up.\n')
        return None
      sys.exit(int(e.error_code))
    except oauth2client.client.AccessTokenRefreshError as e:
      return handleOAuthTokenError(e, soft_errors)
def callGAPI(service, function, silent_errors=False, soft_errors=False, throw_reasons=[], retry_reasons=[], **kwargs):
  """Invoke a Google API client method (method(**kwargs).execute()) with retries.

  kwargs are merged with GM_EXTRA_ARGS_DICT. Retries up to 10 times on
  rate-limit/backend reasons (plus retry_reasons) with capped exponential
  backoff; re-raises the HttpError when its reason is in throw_reasons;
  otherwise prints the error and exits (or returns None when soft_errors).
  """
  method = getattr(service, function)
  retries = 10
  parameters = dict(kwargs.items() + GM_Globals[GM_EXTRA_ARGS_DICT].items())
  for n in range(1, retries+1):
    try:
      return method(**parameters).execute()
    except googleapiclient.errors.HttpError, e:
      try:
        error = json.loads(e.content)
      except ValueError:
        # Non-JSON error body: try refreshing credentials a couple of
        # times, wait out the quota message, then give up.
        if n < 3:
          service._http.request.credentials.refresh(httplib2.Http(disable_ssl_certificate_validation=GC_Values[GC_NO_VERIFY_SSL]))
          continue
        if (e.resp[u'status'] == u'503') and (e.content == u'Quota exceeded for the current request'):
          time.sleep(1)
          continue
        if not silent_errors:
          sys.stderr.write(u'{0}{1}\n'.format(ERROR_PREFIX, e.content))
        if soft_errors:
          return None
        sys.exit(5)
      http_status = error[u'error'][u'code']
      message = error[u'error'][u'errors'][0][u'message']
      try:
        reason = error[u'error'][u'errors'][0][u'reason']
      except KeyError:
        reason = http_status
      if reason in throw_reasons:
        raise e
      if n != retries and (reason in [u'quotaExceeded', u'rateLimitExceeded', u'userRateLimitExceeded', u'backendError', u'internalError'] or reason in retry_reasons):
        # Capped exponential backoff with up to 1s of jitter.
        wait_on_fail = (2 ** n) if (2 ** n) < 60 else 60
        randomness = float(random.randint(1, 1000)) / 1000
        wait_on_fail = wait_on_fail + randomness
        if n > 3:
          sys.stderr.write(u'Temp error %s. Backing off %s seconds...' % (reason, int(wait_on_fail)))
        time.sleep(wait_on_fail)
        if n > 3:
          sys.stderr.write(u'attempt %s/%s\n' % (n+1, retries))
        continue
      sys.stderr.write(u'{0}{1}: {2} - {3}\n'.format(ERROR_PREFIX, http_status, message, reason))
      if soft_errors:
        if n != 1:
          sys.stderr.write(u' - Giving up.\n')
        return None
      sys.exit(int(http_status))
    except oauth2client.client.AccessTokenRefreshError, e:
      return handleOAuthTokenError(e, soft_errors)
    except httplib2.CertificateValidationUnsupported:
      noPythonSSLExit()
    except TypeError, e:
      systemErrorExit(4, e)
def callGAPIpages(service, function, items, page_message=None, message_attribute=None, **kwargs):
  """Page through a list-style API call and return all items in one list.

  items: key of the per-page result list in each response.
  page_message: optional progress template written to stderr; supports
    %%num_items%% / %%total_items%% and, with message_attribute, the
    %%first_item%% / %%last_item%% placeholders.
  """
  pageToken = None
  all_pages = list()
  total_items = 0
  while True:
    this_page = callGAPI(service, function, pageToken=pageToken, **kwargs)
    if this_page:
      pageToken = this_page.get(u'nextPageToken')
      if items in this_page:
        page_items = len(this_page[items])
        total_items += page_items
        all_pages.extend(this_page[items])
      else:
        this_page = {items: []}
        page_items = 0
    else:
      pageToken = None
      this_page = {items: []}
      page_items = 0
    if page_message:
      show_message = page_message.replace(u'%%num_items%%', str(page_items))
      show_message = show_message.replace(u'%%total_items%%', str(total_items))
      if message_attribute:
        try:
          show_message = show_message.replace(u'%%first_item%%', str(this_page[items][0][message_attribute]))
          show_message = show_message.replace(u'%%last_item%%', str(this_page[items][-1][message_attribute]))
        except (IndexError, KeyError):
          show_message = show_message.replace(u'%%first_item%%', u'')
          show_message = show_message.replace(u'%%last_item%%', u'')
      # \r rewinds so successive progress messages overwrite each other.
      sys.stderr.write(u'\r')
      sys.stderr.flush()
      sys.stderr.write(show_message)
    if not pageToken:
      if page_message and (page_message[-1] != u'\n'):
        sys.stderr.write(u'\r\n')
        sys.stderr.flush()
      return all_pages
# Maps each GAM API nickname to the discovery version it should use;
# anything not listed defaults to u'v1'.
API_VER_MAPPING = {
  u'admin-settings': u'v1',
  u'appsactivity': u'v1',
  u'calendar': u'v3',
  u'classroom': u'v1',
  u'cloudprint': u'v2',
  u'datatransfer': u'datatransfer_v1',
  u'directory': u'directory_v1',
  u'drive': u'v2',
  u'email-audit': u'v1',
  u'email-settings': u'v1',
  u'gmail': u'v1',
  u'groupssettings': u'v1',
  u'licensing': u'v1',
  u'reports': u'reports_v1',
  u'siteVerification': u'v1',
  }
def getAPIVersion(api):
  """Resolve api to (service_name, version, 'service_name-version').

  The directory/reports/datatransfer APIs all live under the 'admin'
  discovery service, so their service name is rewritten accordingly.
  """
  version = API_VER_MAPPING.get(api, u'v1')
  service = u'admin' if api in (u'directory', u'reports', u'datatransfer') else api
  return (service, version, u'{0}-{1}'.format(service, version))
def getOAuth2ServiceDetails():
  """Lazily load oauth2service.json into GM_Globals (email, client id, key).

  No-op when the key is already loaded. Exits with Service-account setup
  instructions when the file is missing (rc 6) or malformed.
  """
  if not GM_Globals[GM_OAUTH2SERVICE_KEY]:
    json_string = readFile(GC_Values[GC_OAUTH2SERVICE_JSON], continueOnError=True, displayError=True)
    if not json_string:
      printLine(MESSAGE_WIKI_INSTRUCTIONS_OAUTH2SERVICE_JSON)
      printLine(GAM_WIKI_CREATE_CLIENT_SECRETS)
      systemErrorExit(6, None)
    try:
      json_data = json.loads(json_string)
      GM_Globals[GM_OAUTH2SERVICE_ACCOUNT_EMAIL] = json_data[u'client_email']
      GM_Globals[GM_OAUTH2SERVICE_ACCOUNT_CLIENT_ID] = json_data[u'client_id']
      GM_Globals[GM_OAUTH2SERVICE_KEY] = json_data[u'private_key']
    except (ValueError, KeyError):
      printLine(MESSAGE_WIKI_INSTRUCTIONS_OAUTH2SERVICE_JSON)
      printLine(GAM_WIKI_CREATE_CLIENT_SECRETS)
      invalidJSONExit(GC_Values[GC_OAUTH2SERVICE_JSON])
def readDiscoveryFile(api_version):
  """Load the local API discovery document '<api_version>.json'.

  Looks in GC_SITE_DIR first, then in a PyInstaller bundle (sys._MEIPASS).
  Returns (path, parsed_json); exits when missing (rc 11) or invalid JSON.
  """
  disc_filename = u'%s.json' % (api_version)
  disc_file = os.path.join(GC_Values[GC_SITE_DIR], disc_filename)
  if hasattr(sys, u'_MEIPASS'):
    # Running from a PyInstaller bundle; discovery docs ship inside it.
    pyinstaller_disc_file = os.path.join(sys._MEIPASS, disc_filename)
  else:
    pyinstaller_disc_file = None
  if os.path.isfile(disc_file):
    json_string = readFile(disc_file)
  elif pyinstaller_disc_file:
    json_string = readFile(pyinstaller_disc_file)
  else:
    systemErrorExit(11, MESSAGE_NO_DISCOVERY_INFORMATION.format(disc_file))
  try:
    discovery = json.loads(json_string)
    return (disc_file, discovery)
  except ValueError:
    invalidJSONExit(disc_file)
def getAPIversionHttpService(api):
  """Return (api_version, http, service) for api.

  Builds an httplib2.Http honoring the SSL/cache settings, tries online
  discovery first, then falls back to the local discovery document when
  the discovery service does not know the API name/version.
  """
  getOAuth2ServiceDetails()
  api, version, api_version = getAPIVersion(api)
  http = httplib2.Http(disable_ssl_certificate_validation=GC_Values[GC_NO_VERIFY_SSL],
cache=GC_Values[GC_CACHE_DIR])
  try:
    service = googleapiclient.discovery.build(api, version, http=http, cache_discovery=False)
    return (api_version, http, service)
  except httplib2.ServerNotFoundError as e:
    systemErrorExit(4, e)
  except googleapiclient.errors.UnknownApiNameOrVersion:
    pass  # fall through to the local discovery file
  disc_file, discovery = readDiscoveryFile(api_version)
  try:
    service = googleapiclient.discovery.build_from_document(discovery, http=http)
    return (api_version, http, service)
  except (ValueError, KeyError):
    invalidJSONExit(disc_file)
def buildGAPIObject(api, act_as=None, soft_errors=False):
  """Return an authorized Google API service object for api.

  act_as: email address to impersonate; defaults to the stored admin.
  soft_errors: passed through to the OAuth token error handler.
  Exits if GAM's authorized scopes do not intersect the API's scopes.
  """
  impersonate = act_as if act_as else GM_Globals[GM_ADMIN]
  _, http, service = getAPIversionHttpService(api)
  GM_Globals[GM_CURRENT_API_USER] = act_as
  # Restrict to the scopes both declared by the API and authorized for GAM.
  GM_Globals[GM_CURRENT_API_SCOPES] = list(set(service._rootDesc[u'auth'][u'oauth2'][u'scopes'].keys()).intersection(GM_Globals[GM_GAMSCOPES_LIST]))
  if not GM_Globals[GM_CURRENT_API_SCOPES]:
    systemErrorExit(15, MESSAGE_NO_SCOPES_FOR_API.format(service._rootDesc[u'title']))
  credentials = oauth2client.client.SignedJwtAssertionCredentials(GM_Globals[GM_OAUTH2SERVICE_ACCOUNT_EMAIL],
                                                                  GM_Globals[GM_OAUTH2SERVICE_KEY],
                                                                  scope=GM_Globals[GM_CURRENT_API_SCOPES], user_agent=GAM_INFO, sub=impersonate)
  try:
    service._http = credentials.authorize(http)
  except httplib2.ServerNotFoundError as e:
    systemErrorExit(4, e)
  except oauth2client.client.AccessTokenRefreshError as e:
    return handleOAuthTokenError(e, soft_errors)
  return service
def initGDataObject(gdataObj, api):
  """Authorize and configure a GData service object for api.

  Sets the OAuth token, domain, source identification and debug flag on
  gdataObj and returns it. Exits if none of GAM's authorized scopes match
  the scopes declared in the API's discovery file.
  """
  getOAuth2ServiceDetails()
  _, _, api_version = getAPIVersion(api)
  disc_file, discovery = readDiscoveryFile(api_version)
  GM_Globals[GM_CURRENT_API_USER] = None
  try:
    # Restrict to the scopes both declared by the API and authorized for GAM.
    GM_Globals[GM_CURRENT_API_SCOPES] = list(set(discovery[u'auth'][u'oauth2'][u'scopes'].keys()).intersection(GM_Globals[GM_GAMSCOPES_LIST]))
  except KeyError:
    invalidJSONExit(disc_file)
  if not GM_Globals[GM_CURRENT_API_SCOPES]:
    systemErrorExit(15, MESSAGE_NO_SCOPES_FOR_API.format(discovery.get(u'title', api_version)))
  getGDataOAuthToken(gdataObj)
  gdataObj.domain = GC_Values[GC_DOMAIN]
  #Identify GAM to Google's Servers
  gdataObj.source = GAM_INFO
  #Show debugging output if debug.gam exists
  if GC_Values[GC_DEBUG_LEVEL] > 0:
    gdataObj.debug = True
  return gdataObj
def getAdminSettingsObject():
  """Return an authorized GData AdminSettingsService."""
  import gdata.apps.adminsettings.service
  svc = gdata.apps.adminsettings.service.AdminSettingsService()
  return initGDataObject(svc, u'admin-settings')
def getAuditObject():
  """Return an authorized GData AuditService."""
  import gdata.apps.audit.service
  svc = gdata.apps.audit.service.AuditService()
  return initGDataObject(svc, u'email-audit')
def getEmailSettingsObject():
  """Return an authorized GData EmailSettingsService."""
  import gdata.apps.emailsettings.service
  svc = gdata.apps.emailsettings.service.EmailSettingsService()
  return initGDataObject(svc, u'email-settings')
def geturl(url, dst):
import urllib2
u = urllib2.urlopen(url)
f = openFile(dst, u'wb')
meta = u.info()
try:
file_size = int(meta.getheaders(u'Content-Length')[0])
except IndexError:
file_size = -1
file_size_dl = 0
block_sz = 8192
while True:
filebuff = u.read(block_sz)
if not filebuff:
break
file_size_dl += len(filebuff)
f.write(filebuff)
if file_size != -1:
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
else:
status = r"%10d [unknown size]" % (file_size_dl)
status = status + chr(8)*(len(status)+1)
print status,
closeFile(f)
def showReport():
  """Display a usage/activity report: gam report <type> [options].

  Report types: users/user, customer(s)/domain, or an Admin SDK Reports
  activity application (doc/drive, calendar, login, admin, token, groups,
  mobile). Results are written as CSV, optionally uploaded to Drive when
  the todrive option is given.
  """
  rep = buildGAPIObject(u'reports')
  report = sys.argv[2].lower()
  customerId = GC_Values[GC_CUSTOMER_ID]
  if customerId == MY_CUSTOMER:
    customerId = None
  date = filters = parameters = actorIpAddress = startTime = endTime = eventName = None
  to_drive = False
  userKey = u'all'
  i = 3
  # Parse command-line options.
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'date':
      date = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'start':
      startTime = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'end':
      endTime = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'event':
      eventName = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'user':
      userKey = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() in [u'filter', u'filters']:
      filters = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() in [u'fields', u'parameters']:
      parameters = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'ip':
      actorIpAddress = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'todrive':
      to_drive = True
      i += 1
    else:
      print u'ERROR: %s is not a valid argument to "gam report"' % sys.argv[i]
      sys.exit(2)
  try_date = date
  if try_date == None:
    try_date = datetime.date.today()
  if report in [u'users', u'user']:
    # Usage data lags real time; on an 'invalid' error, parse the latest
    # available date out of the API's error message and retry with it.
    while True:
      try:
        page_message = u'Got %%num_items%% users\n'
        usage = callGAPIpages(rep.userUsageReport(), u'get', u'usageReports', page_message=page_message, throw_reasons=[u'invalid'],
                              date=str(try_date), userKey=userKey, customerId=customerId, filters=filters, parameters=parameters)
        break
      except googleapiclient.errors.HttpError, e:
        error = json.loads(e.content)
        try:
          message = error[u'error'][u'errors'][0][u'message']
        except KeyError:
          raise
        match_date = re.match(u'Data for dates later than (.*) is not yet available. Please check back later', message)
        if not match_date:
          print u'Error: %s' % message
          sys.exit(4)
        else:
          try_date = match_date.group(1)
    user_attributes = []
    titles = [u'email', u'date']
    for user_report in usage:
      row = {u'email': user_report[u'entity'][u'userEmail'], u'date': str(try_date)}
      try:
        # Each parameter dict holds a name and one value; collect column
        # titles as they are first seen.
        for report_item in user_report[u'parameters']:
          items = report_item.values()
          name = items[1]
          value = items[0]
          if not name in titles:
            titles.append(name)
          row[name] = value
      except KeyError:
        pass
      user_attributes.append(row)
    header = {}
    for title in titles:
      header[title] = title
    user_attributes.insert(0, header)
    output_csv(user_attributes, titles, u'User Reports - %s' % try_date, to_drive)
  elif report in [u'customer', u'customers', u'domain']:
    # Same retry-on-stale-date dance as the user report above.
    while True:
      try:
        usage = callGAPIpages(rep.customerUsageReports(), u'get', u'usageReports', throw_reasons=[u'invalid'],
                              customerId=customerId, date=str(try_date), parameters=parameters)
        break
      except googleapiclient.errors.HttpError, e:
        error = json.loads(e.content)
        try:
          message = error[u'error'][u'errors'][0][u'message']
        except KeyError:
          raise
        match_date = re.match(u'Data for dates later than (.*) is not yet available. Please check back later', message)
        if not match_date:
          print u'Error: %s' % message
          sys.exit(4)
        else:
          try_date = match_date.group(1)
    cust_attributes = [{u'name': u'name', u'value': u'value', u'client_id': u'client_id'}]
    titles = [u'name', u'value', u'client_id']
    auth_apps = list()
    for item in usage[0][u'parameters']:
      name = item[u'name']
      try:
        value = item[u'intValue']
      except KeyError:
        # Non-integer parameter: the authorized-apps list is a msgValue;
        # expand each app into its own row, appended after the others.
        if name == u'accounts:authorized_apps':
          for subitem in item[u'msgValue']:
            app = dict()
            for an_item in subitem:
              if an_item == u'client_name':
                app[u'name'] = u'App: %s' % subitem[an_item]
              elif an_item == u'num_users':
                app[u'value'] = u'%s users' % subitem[an_item]
              elif an_item == u'client_id':
                app[u'client_id'] = subitem[an_item]
            auth_apps.append(app)
        continue
      cust_attributes.append({u'name': name, u'value': value})
    for app in auth_apps: # put apps at bottom
      cust_attributes.append(app)
    output_csv(csv_list=cust_attributes, titles=titles, list_type=u'Customer Report - %s' % try_date, todrive=to_drive)
  elif report in [u'doc', u'docs', u'drive',
                  u'calendar', u'calendars',
                  u'login', u'logins',
                  u'admin',
                  u'token', u'tokens',
                  u'group', u'groups',
                  u'mobile']:
    # Normalize synonyms to the application names the Reports API expects.
    if report in [u'doc', u'docs']:
      report = u'drive'
    elif report in [u'calendars']:
      report = u'calendar'
    elif report == u'logins':
      report = u'login'
    elif report == u'tokens':
      report = u'token'
    elif report == u'group':
      report = u'groups'
    page_message = u'Got %%num_items%% items\n'
    activities = callGAPIpages(rep.activities(), u'list', u'items', page_message=page_message, applicationName=report,
                               userKey=userKey, customerId=customerId, actorIpAddress=actorIpAddress,
                               startTime=startTime, endTime=endTime, eventName=eventName, filters=filters)
    if len(activities) > 0:
      attrs = []
      titles = []
      # Each activity may contain several events; emit one flattened row
      # per event, merged with the activity-level fields.
      for activity in activities:
        events = activity[u'events']
        del activity[u'events']
        activity_row = flatten_json(activity)
        for event in events:
          row = flatten_json(event)
          row.update(activity_row)
          for item in row:
            if item not in titles:
              titles.append(item)
          attrs.append(row)
      header = {}
      # Sort columns alphabetically but keep 'name' first.
      titles.remove(u'name')
      titles = sorted(titles)
      titles.insert(0, u'name')
      for title in titles:
        header[title] = title
      attrs.insert(0, header)
      cap_report = u'%s%s' % (report[0].upper(), report[1:])
      output_csv(attrs, titles, u'%s Activity Report' % cap_report, to_drive)
def doDelegates(users):
  """Give a delegate access to each user's mailbox: gam <users> delegate to <delegate>.

  When delegate and delegator are in different domains, a temporary alias
  for the delegate is created in the delegator's domain (the Email
  Settings API requires same-domain addresses) and deleted afterwards.
  Retries the create call with exponential backoff, distinguishing
  already-delegated, suspended and change-password-pending conditions.
  """
  import gdata.apps.service
  emailsettings = getEmailSettingsObject()
  if sys.argv[4].lower() == u'to':
    delegate = sys.argv[5].lower()
    # Qualify a bare delegate name with the default domain.
    if not delegate.find(u'@') > 0:
      delegate_domain = GC_Values[GC_DOMAIN].lower()
      delegate_email = u'%s@%s' % (delegate, delegate_domain)
    else:
      delegate_domain = delegate[delegate.find(u'@')+1:].lower()
      delegate_email = delegate
  else:
    print u'ERROR: %s is not a valid argument for "gam <users> delegate", expected to' % sys.argv[4]
    sys.exit(2)
  count = len(users)
  i = 1
  for delegator in users:
    # Split delegator into local part + domain; default domain if bare.
    if delegator.find(u'@') > 0:
      delegator_domain = delegator[delegator.find(u'@')+1:].lower()
      delegator_email = delegator
      delegator = delegator[:delegator.find(u'@')]
    else:
      delegator_domain = GC_Values[GC_DOMAIN].lower()
      delegator_email = u'%s@%s' % (delegator, delegator_domain)
    emailsettings.domain = delegator_domain
    print u"Giving %s delegate access to %s (%s of %s)" % (delegate_email, delegator_email, i, count)
    i += 1
    delete_alias = False
    if delegate_domain == delegator_domain:
      use_delegate_address = delegate_email
    else:
      # Need to use an alias in delegator domain, first check to see if delegate already has one...
      cd = buildGAPIObject(u'directory')
      aliases = callGAPI(cd.users().aliases(), u'list', userKey=delegate_email)
      found_alias_in_delegator_domain = False
      try:
        for alias in aliases[u'aliases']:
          alias_domain = alias[u'alias'][alias[u'alias'].find(u'@')+1:].lower()
          if alias_domain == delegator_domain:
            use_delegate_address = alias[u'alias']
            print u'  Using existing alias %s for delegation' % use_delegate_address
            found_alias_in_delegator_domain = True
            break
      except KeyError:
        pass
      if not found_alias_in_delegator_domain:
        # No usable alias: create a random temporary one and remember to
        # delete it after the delegation is in place.
        delete_alias = True
        use_delegate_address = u'%s@%s' % (u''.join(random.sample(u'abcdefghijklmnopqrstuvwxyz0123456789', 25)), delegator_domain)
        print u'  Giving %s temporary alias %s for delegation' % (delegate_email, use_delegate_address)
        callGAPI(cd.users().aliases(), u'insert', userKey=delegate_email, body={u'alias': use_delegate_address})
        # Give the new alias time to propagate before using it.
        time.sleep(5)
    retries = 10
    for n in range(1, retries+1):
      try:
        callGData(emailsettings, u'CreateDelegate', throw_errors=[600, 1000, 1001], delegate=use_delegate_address, delegator=delegator)
        break
      except gdata.apps.service.AppsForYourDomainException, e:
        # 1st check to see if delegation already exists (causes 1000 error on create when using alias)
        get_delegates = callGData(emailsettings, u'GetDelegates', delegator=delegator)
        for get_delegate in get_delegates:
          if get_delegate[u'address'].lower() == delegate_email: # Delegation is already in place
            print u'That delegation is already in place...'
            if delete_alias:
              print u'  Deleting temporary alias...'
              doDeleteAlias(alias_email=use_delegate_address)
            sys.exit(0) # Emulate functionality of duplicate delegation between users in same domain, returning clean
        # Now check if either user account is suspended or requires password change
        cd = buildGAPIObject(u'directory')
        delegate_user_details = callGAPI(cd.users(), u'get', userKey=delegate_email)
        delegator_user_details = callGAPI(cd.users(), u'get', userKey=delegator_email)
        if delegate_user_details[u'suspended'] == True:
          sys.stderr.write(u'ERROR: User %s is suspended. You must unsuspend for delegation.\n' % delegate_email)
          if delete_alias:
            doDeleteAlias(alias_email=use_delegate_address)
          sys.exit(5)
        if delegator_user_details[u'suspended'] == True:
          sys.stderr.write(u'ERROR: User %s is suspended. You must unsuspend for delegation.\n' % delegator_email)
          if delete_alias:
            doDeleteAlias(alias_email=use_delegate_address)
          sys.exit(5)
        if delegate_user_details[u'changePasswordAtNextLogin'] == True:
          sys.stderr.write(u'ERROR: User %s is required to change password at next login. You must change password or clear changepassword flag for delegation.\n' % delegate_email)
          if delete_alias:
            doDeleteAlias(alias_email=use_delegate_address)
          sys.exit(5)
        if delegator_user_details[u'changePasswordAtNextLogin'] == True:
          sys.stderr.write(u'ERROR: User %s is required to change password at next login. You must change password or clear changepassword flag for delegation.\n' % delegator_email)
          if delete_alias:
            doDeleteAlias(alias_email=use_delegate_address)
          sys.exit(5)
        # Guess it was just a normal backoff error then?
        if n == retries:
          sys.stderr.write(u' - giving up.')
          sys.exit(e.error_code)
        # Exponential backoff capped at 60s, with jitter.
        wait_on_fail = (2 ** n) if (2 ** n) < 60 else 60
        randomness = float(random.randint(1, 1000)) / 1000
        wait_on_fail = wait_on_fail + randomness
        if n > 3:
          sys.stderr.write(u'Temp error. Backing off %s seconds...' % (int(wait_on_fail)))
        time.sleep(wait_on_fail)
        if n > 3:
          sys.stderr.write(u'attempt %s/%s\n' % (n+1, retries))
    time.sleep(10) # on success, sleep 10 seconds before exiting or moving on to next user to prevent ghost delegates
    if delete_alias:
      doDeleteAlias(alias_email=use_delegate_address)
def gen_sha512_hash(password):
  """Return a SHA-512 crypt hash of password (passlib sha512_crypt, 5000 rounds, random salt)."""
  from passlib.handlers.sha2_crypt import sha512_crypt
  return sha512_crypt.encrypt(password, rounds=5000)
def getDelegates(users):
emailsettings = getEmailSettingsObject()
csv_format = False
i = 5
while i < len(sys.argv):
if sys.argv[i].lower() == u'csv':
csv_format = True
i += 1
else:
print u'ERROR: %s is not a valid argument for "gam <users> show delegates"' % sys.argv[i]
sys.exit(2)
for user in users:
if user.find(u'@') > 0:
emailsettings.domain = user[user.find(u'@')+1:]
user = user[:user.find(u'@')]
else:
emailsettings.domain = GC_Values[GC_DOMAIN]
sys.stderr.write(u"Getting delegates for %s...\n" % (user + u'@' + emailsettings.domain))
delegates = callGData(emailsettings, u'GetDelegates', soft_errors=True, delegator=user)
try:
for delegate in delegates:
if csv_format:
print u'%s,%s,%s' % (user + u'@' + emailsettings.domain, delegate[u'address'], delegate[u'status'])
else:
print convertUTF8(u"Delegator: %s\n Delegate: %s\n Status: %s\n Delegate Email: %s\n Delegate ID: %s\n" % (user, delegate[u'delegate'], delegate[u'status'], delegate[u'address'], delegate[u'delegationId']))
except TypeError:
pass
def deleteDelegate(users):
emailsettings = getEmailSettingsObject()
delegate = sys.argv[5]
if not delegate.find(u'@') > 0:
if users[0].find(u'@') > 0:
delegatedomain = users[0][users[0].find(u'@')+1:]
else:
delegatedomain = GC_Values[GC_DOMAIN]
delegate = delegate+u'@'+delegatedomain
count = len(users)
i = 1
for user in users:
if user.find(u'@') > 0:
emailsettings.domain = user[user.find(u'@')+1:]
user = user[:user.find(u'@')]
else:
emailsettings.domain = GC_Values[GC_DOMAIN] #make sure it's back at default domain
print u"Deleting %s delegate access to %s (%s of %s)" % (delegate, user+u'@'+emailsettings.domain, i, count)
i += 1
callGData(emailsettings, u'DeleteDelegate', delegate=delegate, delegator=user)
def doAddCourseParticipant():
  """Add a teacher, student or alias to a course: gam course <id> add <type> <id>."""
  croom = buildGAPIObject(u'classroom')
  courseId = sys.argv[2]
  body_attribute = u'userId'
  # Course ids are numeric; anything else is treated as an alias and given
  # the d: (domain-scoped alias) prefix unless a prefix is already present.
  if len(courseId) < 3 or (not courseId.isdigit() and courseId[:2] != u'd:'):
    courseId = u'd:%s' % courseId
  participant_type = sys.argv[4].lower()
  new_id = sys.argv[5]
  if participant_type in [u'teacher', u'teachers']:
    service = croom.courses().teachers()
  elif participant_type in [u'students', u'student']:
    service = croom.courses().students()
  elif participant_type in [u'alias']:
    service = croom.courses().aliases()
    body_attribute = u'alias'
    # NOTE(review): only the second character is checked for a scope prefix
    # (e.g. 'd:'); a one-character alias would raise IndexError — confirm.
    if new_id[1] != u':':
      new_id = u'd:%s' % new_id
  else:
    print u'ERROR: %s is not a valid argument to "gam course ID add"' % participant_type
    sys.exit(2)
  body = {body_attribute: new_id}
  callGAPI(service, u'create', courseId=courseId, body=body)
  # Strip the d: prefix again for the confirmation message.
  if courseId[:2] == u'd:':
    courseId = courseId[2:]
  if new_id[:2] == u'd:':
    new_id = new_id[2:]
  print u'Added %s as a %s of course %s' % (new_id, participant_type, courseId)
def doSyncCourseParticipants():
  """Sync course membership against another entity: gam course <id> sync <type> <entity_type> <entity>.

  Computes the difference between current course participants and the
  target entity's members, then batch-runs gam add/remove commands to
  make them match.
  """
  courseId = sys.argv[2]
  # Non-numeric course ids are aliases and need the d: prefix.
  if not courseId.isdigit() and courseId[:2] != u'd:':
    courseId = u'd:%s' % courseId
  participant_type = sys.argv[4].lower()
  diff_entity_type = sys.argv[5]
  diff_entity = sys.argv[6]
  current_course_users = getUsersToModify(entity_type=participant_type, entity=courseId)
  print
  # Compare case-insensitively: email addresses are not case-significant.
  current_course_users = [x.lower() for x in current_course_users]
  if diff_entity_type == u'courseparticipants':
    diff_entity_type = participant_type
  diff_against_users = getUsersToModify(entity_type=diff_entity_type, entity=diff_entity)
  print
  diff_against_users = [x.lower() for x in diff_against_users]
  to_add = list(set(diff_against_users) - set(current_course_users))
  to_remove = list(set(current_course_users) - set(diff_against_users))
  gam_commands = []
  for add_email in to_add:
    gam_commands.append([u'course', courseId, u'add', participant_type, add_email])
  for remove_email in to_remove:
    gam_commands.append([u'course', courseId, u'remove', participant_type, remove_email])
  run_batch(gam_commands)
def doDelCourseParticipant():
  """Remove a teacher, student or alias from a course: gam course <id> remove <type> <id>."""
  croom = buildGAPIObject(u'classroom')
  courseId = sys.argv[2]
  # Non-numeric course ids are aliases and need the d: prefix.
  if not courseId.isdigit() and courseId[:2] != u'd:':
    courseId = u'd:%s' % courseId
  participant_type = sys.argv[4].lower()
  remove_id = sys.argv[5]
  kwargs = {}
  if participant_type in [u'teacher', u'teachers']:
    service = croom.courses().teachers()
    kwargs[u'userId'] = remove_id
  elif participant_type in [u'student', u'students']:
    service = croom.courses().students()
    kwargs[u'userId'] = remove_id
  elif participant_type in [u'alias']:
    service = croom.courses().aliases()
    # NOTE(review): only the second character is checked for a scope prefix
    # (e.g. 'd:'); a one-character alias would raise IndexError — confirm.
    if remove_id[1] != u':':
      remove_id = u'd:%s' % remove_id
    kwargs[u'alias'] = remove_id
  else:
    print u'ERROR: %s is not a valid argument to "gam course ID delete"' % participant_type
    sys.exit(2)
  callGAPI(service, u'delete', courseId=courseId, **kwargs)
  # Strip the d: prefix again for the confirmation message.
  if courseId[:2] == u'd:':
    courseId = courseId[2:]
  if remove_id[:2] == u'd:':
    remove_id = remove_id[2:]
  print u'Removed %s as a %s of course %s' % (remove_id, participant_type, courseId)
def doDelCourse():
croom = buildGAPIObject(u'classroom')
courseId = sys.argv[3]
if not courseId.isdigit():
courseId = u'd:%s' % courseId
callGAPI(croom.courses(), u'delete', id=courseId)
print u'Deleted Course %s' % courseId
def doUpdateCourse():
croom = buildGAPIObject(u'classroom')
courseId = sys.argv[3]
if not courseId.isdigit():
courseId = u'd:%s' % courseId
body = {}
i = 4
while i < len(sys.argv):
if sys.argv[i].lower() == u'name':
body[u'name'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'section':
body[u'section'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'heading':
body[u'descriptionHeading'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'description':
body[u'description'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'room':
body[u'room'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() in [u'state', u'status']:
body[u'courseState'] = sys.argv[i+1].upper()
if body[u'courseState'] not in [u'ACTIVE', u'ARCHIVED', u'PROVISIONED', u'DECLINED']:
print u'ERROR: course state can be active or archived. Got %s' % body[u'courseState']
sys.exit(2)
i += 2
else:
print u'ERROR: %s is not a valid argument to "gam update course"' % sys.argv[i]
sys.exit(2)
updateMask = u','.join(body.keys())
body[u'id'] = courseId
result = callGAPI(croom.courses(), u'patch', id=courseId, body=body, updateMask=updateMask)
print u'Updated Course %s' % result[u'id']
def doCreateDomain():
cd = buildGAPIObject(u'directory')
domain_name = sys.argv[3]
body = {u'domainName': domain_name}
callGAPI(cd.domains(), u'insert', customer=GC_Values[GC_CUSTOMER_ID], body=body)
print u'Added domain %s' % domain_name
def doCreateDomainAlias():
  """Add a domain alias: gam create domainalias <alias> <parent_domain>."""
  cd = buildGAPIObject(u'directory')
  body = {u'domainAliasName': sys.argv[3],
          u'parentDomainName': sys.argv[4]}
  callGAPI(cd.domainAliases(), u'insert', customer=GC_Values[GC_CUSTOMER_ID], body=body)
def doUpdateDomain():
cd = buildGAPIObject(u'directory')
domain_name = sys.argv[3]
i = 4
body = {}
while i < len(sys.argv):
if sys.argv[i].lower() == u'primary':
body[u'customerDomain'] = domain_name
i += 1
else:
print u'ERROR: %s is not a valid argument for "gam update domain"' % sys.argv[i]
sys.exit(2)
callGAPI(cd.customers(), u'update', customerKey=GC_Values[GC_CUSTOMER_ID], body=body)
print u'%s is now the primary domain.' % domain_name
def doGetDomainInfo():
  """Show info for a domain: gam info domain [domain_name]."""
  # With no domain argument (or 'logo'), show instance-level info instead.
  if (len(sys.argv) < 4) or (sys.argv[3] == u'logo'):
    doGetInstanceInfo()
    return
  cd = buildGAPIObject(u'directory')
  domainName = sys.argv[3]
  result = callGAPI(cd.domains(), u'get', customer=GC_Values[GC_CUSTOMER_ID], domainName=domainName)
  # API timestamps are epoch milliseconds; render as readable datetimes.
  if u'creationTime' in result:
    result[u'creationTime'] = unicode(datetime.datetime.fromtimestamp(int(result[u'creationTime'])/1000))
  for alias_domain in result.get(u'domainAliases', []):
    if u'creationTime' in alias_domain:
      alias_domain[u'creationTime'] = unicode(datetime.datetime.fromtimestamp(int(alias_domain[u'creationTime'])/1000))
  print_json(None, result)
def doGetDomainAliasInfo():
  """Show info for a domain alias: gam info domainalias <alias>."""
  cd = buildGAPIObject(u'directory')
  alias_name = sys.argv[3]
  result = callGAPI(cd.domainAliases(), u'get', customer=GC_Values[GC_CUSTOMER_ID], domainAliasName=alias_name)
  # API timestamps are epoch milliseconds; render as a readable datetime.
  if u'creationTime' in result:
    result[u'creationTime'] = unicode(datetime.datetime.fromtimestamp(int(result[u'creationTime'])/1000))
  print_json(None, result)
def doGetCustomerInfo():
  """Show customer (account-level) info: gam info customer."""
  cd = buildGAPIObject(u'directory')
  print_json(None, callGAPI(cd.customers(), u'get', customerKey=GC_Values[GC_CUSTOMER_ID]))
def doUpdateCustomer():
cd = buildGAPIObject(u'directory')
body = {}
i = 3
while i < len(sys.argv):
myarg = sys.argv[i].lower().replace(u'_', u'')
if myarg == u'alternateemail':
body[u'alternateEmail'] = sys.argv[i+1]
i += 2
elif myarg in [u'contactname', u'organizationname', u'locality', u'region', u'countrycode', u'addressline1', u'addressline2', u'addressline3', u'postalcode']:
if u'postalAddress' not in body:
body[u'postalAddress'] = {}
if myarg == u'contactname':
myarg = u'contactName'
elif myarg == u'organizationname':
myarg = u'organizationName'
elif myarg == u'countrycode':
myarg = u'countryCode'
elif myarg == u'addressline1':
myarg = u'addressLine1'
elif myarg == u'addressline2':
myarg = u'addressLine2'
elif myarg == u'addressline3':
myarg = u'addressLine3'
elif myarg == u'postalcode':
myarg = u'postalCode'
body[u'postalAddress'][myarg] = sys.argv[i+1]
i += 2
elif myarg in [u'phone', u'phonenumber']:
body[u'phoneNumber'] = sys.argv[i+1]
i += 2
elif myarg == u'language':
body[u'language'] = sys.argv[i+1]
i += 2
else:
print u'ERROR: %s is not a valid argument for "gam update customer"' % myarg
sys.exit(2)
callGAPI(cd.customers(), u'update', customerKey=GC_Values[GC_CUSTOMER_ID], body=body)
print u'Updated customer'
def doDelDomain():
  """Delete a secondary domain: gam delete domain <domain>."""
  cd = buildGAPIObject(u'directory')
  callGAPI(cd.domains(), u'delete', customer=GC_Values[GC_CUSTOMER_ID], domainName=sys.argv[3])
def doDelDomainAlias():
  """Delete a domain alias: gam delete domainalias <alias>."""
  cd = buildGAPIObject(u'directory')
  callGAPI(cd.domainAliases(), u'delete', customer=GC_Values[GC_CUSTOMER_ID], domainAliasName=sys.argv[3])
def doPrintDomains():
  """Print all domains and domain aliases as CSV: gam print domains [todrive].

  Emits one row per domain plus one row per alias; the header row (first
  element of domains_attributes) is built incrementally as attributes are
  first seen.
  """
  cd = buildGAPIObject(u'directory')
  titles = []
  domains_attributes = [{}]
  todrive = False
  domains = callGAPI(cd.domains(), u'list', customer=GC_Values[GC_CUSTOMER_ID])
  i = 3
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'todrive':
      todrive = True
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam print domains".' % sys.argv[i]
      sys.exit(2)
  for domain in domains[u'domains']:
    domain_attributes = {}
    # Replace the isPrimary boolean with a readable type column.
    if domain[u'isPrimary'] == True:
      domain[u'type'] = u'primary'
    else:
      domain[u'type'] = u'secondary'
    for attr in domain:
      if attr in [u'kind', u'domainAliases', u'etag', u'etags', u'isPrimary']:
        continue
      elif attr in [u'creationTime',]:
        # Epoch milliseconds -> readable datetime.
        domain[attr] = unicode(datetime.datetime.fromtimestamp(int(domain[attr])/1000))
      if attr not in titles:
        titles.append(attr)
        domains_attributes[0][attr] = attr
      domain_attributes[attr] = domain[attr]
    domains_attributes.append(domain_attributes)
    # Aliases are emitted as extra rows right after their parent domain.
    if u'domainAliases' in domain:
      for aliasdomain in domain[u'domainAliases']:
        aliasdomain[u'domainName'] = aliasdomain[u'domainAliasName']
        del aliasdomain[u'domainAliasName']
        aliasdomain[u'type'] = u'alias'
        aliasdomain_attributes = {}
        for attr in aliasdomain:
          if attr in [u'kind', u'etag']:
            continue
          elif attr in [u'creationTime',]:
            aliasdomain[attr] = unicode(datetime.datetime.fromtimestamp(int(aliasdomain[attr])/1000))
          if attr not in titles:
            titles.append(attr)
            domains_attributes[0][attr] = attr
          aliasdomain_attributes[attr] = aliasdomain[attr]
        domains_attributes.append(aliasdomain_attributes)
  output_csv(domains_attributes, titles, u'Domains', todrive)
def doDelAdmin():
cd = buildGAPIObject(u'directory')
roleAssignmentId = sys.argv[3]
print u'Deleting Admin Role Assignment %s' % roleAssignmentId
callGAPI(cd.roleAssignments(), u'delete',
customer=GC_Values[GC_CUSTOMER_ID], roleAssignmentId=roleAssignmentId)
def doCreateAdmin():
cd = buildGAPIObject(u'directory')
body = {}
user = sys.argv[3]
if user[:4].lower() == u'uid:':
body[u'assignedTo'] = user[4:]
else:
print user[:3]
body[u'assignedTo'] = callGAPI(cd.users(), u'get',
userKey=user, projection=u'basic', fields=u'id')[u'id']
role = sys.argv[4]
if role[:4].lower() == u'uid:':
body[u'roleId'] = role[4:]
else:
body[u'roleId'] = roleid_from_role(role)
if not body[u'roleId']:
print u'ERROR: %s is not a valid role. Please ensure role name is exactly as shown in admin console.' % role
sys.exit(4)
body[u'scopeType'] = sys.argv[5].upper()
if body[u'scopeType'] not in [u'CUSTOMER', u'ORG_UNIT']:
print u'ERROR: scope type must be customer or org_unit, got %s' % body[u'scopeType']
sys.exit(3)
if body[u'scopeType'] == u'ORG_UNIT':
orgUnit = sys.argv[6]
if orgUnit[:3] == u'id:':
body[u'orgUnitId'] = orgUnit[3:]
elif orgUnit[:4] == u'uid:':
body[u'orgUnitId'] = orgUnit[4:]
else:
if orgUnit[0] == u'/':
orgUnit = orgUnit[1:]
body[u'orgUnitId'] = callGAPI(cd.orgunits(), u'get',
customerId=GC_Values[GC_CUSTOMER_ID], orgUnitPath=orgUnit,
fields=u'orgUnitId')[u'orgUnitId'][3:]
if body[u'scopeType'] == u'CUSTOMER':
scope = u'customer'
else:
scope = orgUnit
print u'Giving %s admin role %s for %s' % (user, role, scope)
callGAPI(cd.roleAssignments(), u'insert',
customer=GC_Values[GC_CUSTOMER_ID], body=body)
def doPrintAdminRoles():
  """Print all admin roles as CSV: gam print adminroles [todrive].

  Only scalar (string/bool) role attributes are included; the header row
  (roles_attrib[0]) is built incrementally as attributes are first seen.
  """
  cd = buildGAPIObject(u'directory')
  roles = callGAPIpages(cd.roles(), u'list', u'items',
                        customer=GC_Values[GC_CUSTOMER_ID])
  roles_attrib = [{}]
  todrive = False
  i = 3
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'todrive':
      todrive = True
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam print adminroles".' % sys.argv[i]
      sys.exit(2)
  for role in roles:
    role_attrib = {}
    for key, value in role.items():
      if key in [u'kind', u'etag', u'etags']:
        continue
      # Skip list/dict values (e.g. privilege lists) - not CSV-friendly.
      if not isinstance(value, (str, unicode, bool)):
        continue
      if key not in roles_attrib[0]:
        roles_attrib[0][key] = key
      role_attrib[key] = value
    roles_attrib.append(role_attrib)
  output_csv(roles_attrib, roles_attrib[0], u'Admin Roles', todrive)
def doPrintAdmins():
  """Print admin role assignments as CSV: gam print admins [user <user>] [role <role>] [todrive].

  Adds derived columns (assignedToUser, role, orgUnit) by resolving the
  raw ids via the cached lookup maps. The header row (admins_attrib[0])
  is built incrementally as attributes are first seen.
  """
  cd = buildGAPIObject(u'directory')
  roleId = None
  userKey = None
  todrive = False
  i = 3
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'user':
      userKey = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'role':
      role = sys.argv[i+1]
      if role[:4].lower() == u'uid:':
        roleId = role[4:]
      else:
        roleId = roleid_from_role(role)
        if not roleId:
          print u'ERROR: %s is not a valid role' % role
          sys.exit(5)
      i += 2
    elif sys.argv[i].lower() == u'todrive':
      todrive = True
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam print admins".' % sys.argv[i]
      sys.exit(2)
  admins = callGAPIpages(cd.roleAssignments(), u'list', u'items',
                         customer=GC_Values[GC_CUSTOMER_ID], userKey=userKey, roleId=roleId, maxResults=200)
  admins_attrib = [{}]
  for admin in admins:
    admin_attrib = {}
    for key, value in admin.items():
      if key in [u'kind', u'etag']:
        continue
      if key not in admins_attrib[0]:
        admins_attrib[0][key] = key
      # Resolve raw ids into extra human-readable columns.
      if key == u'assignedTo':
        assignedToUser = user_from_userid(value)
        if u'assignedToUser' not in admins_attrib[0]:
          admins_attrib[0][u'assignedToUser'] = u'assignedToUser'
        admin_attrib[u'assignedToUser'] = assignedToUser
      elif key == u'roleId':
        role = role_from_roleid(value)
        if u'role' not in admins_attrib[0]:
          admins_attrib[0][u'role'] = u'role'
        admin_attrib[u'role'] = role
      elif key == u'orgUnitId':
        # The lookup map is keyed by the id: prefixed form.
        value = u'id:%s' % value
        orgUnit = orgunit_from_orgunitid(value)
        if u'orgUnit' not in admins_attrib[0]:
          admins_attrib[0][u'orgUnit'] = u'orgUnit'
        admin_attrib[u'orgUnit'] = orgUnit
      admin_attrib[key] = value
    admins_attrib.append(admin_attrib)
  output_csv(admins_attrib, admins_attrib[0], u'Admins', todrive)
def buildOrgUnitIdToNameMap():
  """Populate the cached orgUnitId -> orgUnitPath lookup map."""
  cd = buildGAPIObject(u'directory')
  result = callGAPI(cd.orgunits(), u'list',
                    customerId=GC_Values[GC_CUSTOMER_ID],
                    fields=u'organizationUnits(orgUnitPath,orgUnitId)', type=u'all')
  GM_Globals[GM_MAP_ORGUNIT_ID_TO_NAME] = dict(
    (ou[u'orgUnitId'], ou[u'orgUnitPath']) for ou in result[u'organizationUnits'])
def orgunit_from_orgunitid(orgunitid):
  """Translate an orgUnitId to its orgUnitPath, building the cache on first use."""
  if not GM_Globals[GM_MAP_ORGUNIT_ID_TO_NAME]:
    buildOrgUnitIdToNameMap()
  id_to_name = GM_Globals[GM_MAP_ORGUNIT_ID_TO_NAME]
  return id_to_name[orgunitid]
def buildRoleIdToNameToIdMap():
  """Populate the cached roleId -> roleName and roleName -> roleId maps."""
  cd = buildGAPIObject(u'directory')
  result = callGAPIpages(cd.roles(), u'list', u'items',
                         customer=GC_Values[GC_CUSTOMER_ID],
                         fields=u'nextPageToken,items(roleId,roleName)',
                         maxResults=100)
  id_to_name = {}
  name_to_id = {}
  for role in result:
    id_to_name[role[u'roleId']] = role[u'roleName']
    name_to_id[role[u'roleName']] = role[u'roleId']
  GM_Globals[GM_MAP_ROLE_ID_TO_NAME] = id_to_name
  GM_Globals[GM_MAP_ROLE_NAME_TO_ID] = name_to_id
def role_from_roleid(roleid):
  """Translate a roleId to its roleName, building the cache on first use."""
  if not GM_Globals[GM_MAP_ROLE_ID_TO_NAME]:
    buildRoleIdToNameToIdMap()
  id_to_name = GM_Globals[GM_MAP_ROLE_ID_TO_NAME]
  return id_to_name[roleid]
def roleid_from_role(role):
  """Translate a roleName to its roleId (None if unknown), building the cache on first use."""
  if not GM_Globals[GM_MAP_ROLE_NAME_TO_ID]:
    buildRoleIdToNameToIdMap()
  name_to_id = GM_Globals[GM_MAP_ROLE_NAME_TO_ID]
  return name_to_id.get(role, None)
def buildUserIdToNameMap():
  """Populate the cached user id -> primaryEmail lookup map."""
  cd = buildGAPIObject(u'directory')
  result = callGAPIpages(cd.users(), u'list', u'users',
                         customer=GC_Values[GC_CUSTOMER_ID],
                         fields=u'nextPageToken,users(id,primaryEmail)',
                         maxResults=GC_Values[GC_USER_MAX_RESULTS])
  GM_Globals[GM_MAP_USER_ID_TO_NAME] = dict(
    (user[u'id'], user[u'primaryEmail']) for user in result)
def user_from_userid(userid):
  """Translate a user id to the primary email ('' if unknown), building the cache on first use."""
  if not GM_Globals[GM_MAP_USER_ID_TO_NAME]:
    buildUserIdToNameMap()
  id_to_name = GM_Globals[GM_MAP_USER_ID_TO_NAME]
  return id_to_name.get(userid, u'')
# Static map of well-known service names to their data transfer application IDs.
# Fixed: the Google+ ID was a byte string while every other literal in this
# file is unicode; made the values consistent.
SERVICE_NAME_TO_ID_MAP = {
  u'Drive': u'55656082996',
  u'Google+': u'553547912911',
  }
def appID2app(dt, appID):
for serviceName, serviceID in SERVICE_NAME_TO_ID_MAP.items():
if appID == serviceID:
return serviceName
online_services = callGAPIpages(dt.applications(), u'list', u'applications', customerId=GC_Values[GC_CUSTOMER_ID])
for online_service in online_services:
if appID == online_service[u'id']:
return online_service[u'name']
print u'ERROR: %s is not a valid app ID for data transfer.' % appID
sys.exit(2)
# Map of user-typed service aliases to the canonical service names used
# as keys in SERVICE_NAME_TO_ID_MAP.
SERVICE_NAME_CHOICES_MAP = {
  u'googleplus': u'Google+',
  u'gplus': u'Google+',
  u'g+': u'Google+',
  u'googledrive': u'Drive',
  u'gdrive': u'Drive',
  }
def app2appID(dt, app):
serviceName = app.lower()
if serviceName in SERVICE_NAME_CHOICES_MAP:
return (SERVICE_NAME_CHOICES_MAP[serviceName], SERVICE_NAME_TO_ID_MAP[SERVICE_NAME_CHOICES_MAP[serviceName]])
online_services = callGAPIpages(dt.applications(), u'list', u'applications', customerId=GC_Values[GC_CUSTOMER_ID])
for online_service in online_services:
if serviceName == online_service[u'name'].lower():
return online_service[u'id']
print u'ERROR: %s is not a valid service for data transfer.' % app
sys.exit(2)
def convertToUserID(user):
if user[:4].lower() == u'uid:':
return user[4:]
cd = buildGAPIObject(u'directory')
if user.find(u'@') == -1:
user = u'%s@%s' % (user, GC_Values[GC_DOMAIN])
try:
return callGAPI(cd.users(), u'get', throw_reasons=[u'notFound'], userKey=user, fields=u'id')[u'id']
except googleapiclient.errors.HttpError:
print u'ERROR: no such user %s' % user
sys.exit(3)
def convertUserIDtoEmail(uid):
cd = buildGAPIObject(u'directory')
try:
return callGAPI(cd.users(), u'get', throw_reasons=[u'notFound'], userKey=uid, fields=u'primaryEmail')[u'primaryEmail']
except googleapiclient.errors.HttpError:
print u'ERROR: no such user %s' % id
sys.exit(3)
def doCreateDataTranfer():
dt = buildGAPIObject(u'datatransfer')
body = {}
old_owner = sys.argv[3]
body[u'oldOwnerUserId'] = convertToUserID(old_owner)
service = sys.argv[4]
new_owner = sys.argv[5]
body[u'newOwnerUserId'] = convertToUserID(new_owner)
parameters = {}
i = 6
while i < len(sys.argv):
parameters[sys.argv[i].upper()] = sys.argv[i+1].upper().split(u',')
i += 2
body[u'applicationDataTransfers'] = [{u'applicationId': app2appID(dt, service)}]
for key in parameters:
if u'applicationDataTransferParams' not in body[u'applicationDataTransfers'][0]:
body[u'applicationDataTransfers'][0][u'applicationTransferParams'] = []
body[u'applicationDataTransfers'][0][u'applicationTransferParams'].append({u'key': key, u'value': parameters[key]})
result = callGAPI(dt.transfers(), u'insert', body=body, fields=u'id')[u'id']
print u'Submitted request id %s to transfer %s from %s to %s' % (result, service, old_owner, new_owner)
def doPrintTransferApps():
  """Print every application available for data transfer as pretty JSON."""
  dt = buildGAPIObject(u'datatransfer')
  transfer_apps = callGAPIpages(dt.applications(), u'list', u'applications', customerId=GC_Values[GC_CUSTOMER_ID])
  for transfer_app in transfer_apps:
    print_json(None, transfer_app)
    print
def doPrintDataTransfers():
  """Print a CSV of data transfer requests (gam print transfers).

  Optional argv filters: olduser/oldowner, newuser/newowner, status;
  'todrive' uploads the CSV to Drive instead of printing it.
  """
  dt = buildGAPIObject(u'datatransfer')
  i = 3
  newOwnerUserId = None
  oldOwnerUserId = None
  status = None
  todrive = False
  while i < len(sys.argv):
    if sys.argv[i].lower().replace(u'_', u'') in [u'olduser', u'oldowner']:
      oldOwnerUserId = convertToUserID(sys.argv[i+1])
      i += 2
    elif sys.argv[i].lower().replace(u'_', u'') in [u'newuser', u'newowner']:
      newOwnerUserId = convertToUserID(sys.argv[i+1])
      i += 2
    elif sys.argv[i].lower() == u'status':
      status = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'todrive':
      todrive = True
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam print transfers"' % sys.argv[i]
      sys.exit(2)
  transfers_attributes = [{}]
  # NOTE(review): both u'items' and u'dataTransfers' are passed positionally;
  # the items key for this API is u'dataTransfers' -- confirm against the
  # callGAPIpages signature that the extra u'items' argument is intentional.
  transfers = callGAPIpages(dt.transfers(), u'list', u'items',
                            u'dataTransfers', customerId=GC_Values[GC_CUSTOMER_ID], status=status,
                            newOwnerUserId=newOwnerUserId, oldOwnerUserId=oldOwnerUserId)
  # Each per-application transfer becomes one CSV row; row 0 holds titles.
  for transfer in transfers:
    for i in range(0, len(transfer[u'applicationDataTransfers'])):
      a_transfer = dict()
      a_transfer[u'oldOwnerUserEmail'] = convertUserIDtoEmail(transfer[u'oldOwnerUserId'])
      a_transfer[u'newOwnerUserEmail'] = convertUserIDtoEmail(transfer[u'newOwnerUserId'])
      a_transfer[u'requestTime'] = transfer[u'requestTime']
      a_transfer[u'applicationId'] = transfer[u'applicationDataTransfers'][i][u'applicationId']
      a_transfer[u'application'] = appID2app(dt, a_transfer[u'applicationId'])
      a_transfer[u'status'] = transfer[u'applicationDataTransfers'][i][u'applicationTransferStatus']
      a_transfer[u'id'] = transfer[u'id']
      if u'applicationTransferParams' in transfer[u'applicationDataTransfers'][i]:
        for param in transfer[u'applicationDataTransfers'][i][u'applicationTransferParams']:
          a_transfer[param[u'key']] = u','.join(param[u'value'])
      for title in a_transfer:
        if title not in transfers_attributes[0]:
          transfers_attributes[0][title] = title
      transfers_attributes.append(a_transfer)
  output_csv(transfers_attributes, transfers_attributes[0], u'Data Transfers', todrive)
def doGetDataTransferInfo():
  """Print the details of a single data transfer request (gam info transfer <id>)."""
  dt = buildGAPIObject(u'datatransfer')
  dtId = sys.argv[3]
  transfer = callGAPI(dt.transfers(), u'get', dataTransferId=dtId)
  print u'Old Owner: %s' % convertUserIDtoEmail(transfer[u'oldOwnerUserId'])
  print u'New Owner: %s' % convertUserIDtoEmail(transfer[u'newOwnerUserId'])
  print u'Request Time: %s' % transfer[u'requestTime']
  # One section per application included in the transfer.
  for app in transfer[u'applicationDataTransfers']:
    print u'Application: %s' % appID2app(dt, app[u'applicationId'])
    print u'Status: %s' % app[u'applicationTransferStatus']
    print u'Parameters:'
    if u'applicationTransferParams' in app:
      for param in app[u'applicationTransferParams']:
        print u' %s: %s' % (param[u'key'], u','.join(param[u'value']))
    else:
      print u' None'
    print
def doCreateCourse():
croom = buildGAPIObject(u'classroom')
body = dict()
i = 3
while i < len(sys.argv):
if sys.argv[i].lower() == u'name':
body[u'name'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() in [u'alias', u'id']:
body[u'id'] = u'd:%s' % sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'section':
body[u'section'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'heading':
body[u'descriptionHeading'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'description':
body[u'description'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'room':
body[u'room'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'teacher':
body[u'ownerId'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() in [u'state', u'status']:
body[u'courseState'] = sys.argv[i+1].upper()
if body[u'courseState'] not in [u'ACTIVE', u'ARCHIVED', u'PROVISIONED', u'DECLINED']:
print u'ERROR: course state can be active or archived. Got %s' % body[u'courseState']
sys.exit(2)
i += 2
else:
print u'ERROR: %s is not a valid argument for "gam create course".' % sys.argv[i]
sys.exit(2)
if not u'ownerId' in body:
body[u'ownerId'] = u'me'
if not u'name' in body:
body[u'name'] = u'Unknown Course'
result = callGAPI(croom.courses(), u'create', body=body)
print u'Created course %s' % result[u'id']
def doGetCourseInfo():
  """Print a Classroom course's details, aliases, teachers and students."""
  croom = buildGAPIObject(u'classroom')
  courseId = sys.argv[3]
  # Non-numeric identifiers are treated as domain aliases ("d:" prefix).
  if not courseId.isdigit():
    courseId = u'd:%s' % courseId
  info = callGAPI(croom.courses(), u'get', id=courseId)
  print_json(None, info)
  teachers = callGAPIpages(croom.courses().teachers(), u'list', u'teachers', courseId=courseId)
  students = callGAPIpages(croom.courses().students(), u'list', u'students', courseId=courseId)
  try:
    aliases = callGAPIpages(croom.courses().aliases(), u'list', u'aliases', throw_reasons=[u'notImplemented'], courseId=courseId)
  except googleapiclient.errors.HttpError:
    # Alias listing is not implemented for all course types; show none.
    aliases = []
  if aliases:
    print u'Aliases:'
    for alias in aliases:
      # Strip the 2-character alias-scope prefix (e.g. "d:") for display.
      print u' %s' % alias[u'alias'][2:]
  print u'Participants:'
  print u' Teachers:'
  for teacher in teachers:
    try:
      print convertUTF8(u' %s - %s' % (teacher[u'profile'][u'name'][u'fullName'], teacher[u'profile'][u'emailAddress']))
    except KeyError:
      # Some profiles expose no email address.
      print convertUTF8(u' %s' % teacher[u'profile'][u'name'][u'fullName'])
  print u' Students:'
  for student in students:
    try:
      print convertUTF8(u' %s - %s' % (student[u'profile'][u'name'][u'fullName'], student[u'profile'][u'emailAddress']))
    except KeyError:
      print convertUTF8(u' %s' % student[u'profile'][u'name'][u'fullName'])
def doPrintCourses():
  """Print a CSV of Classroom courses, optionally with their aliases.

  Optional argv: teacher <id>, student <id> (server-side filters),
  alias/aliases (adds an Aliases column), todrive (upload CSV to Drive).
  """
  croom = buildGAPIObject(u'classroom')
  croom_attributes = [{}]  # row 0 maps column name -> title
  titles = []
  todrive = False
  teacherId = None
  studentId = None
  get_aliases = False
  i = 3
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'teacher':
      teacherId = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'student':
      studentId = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'todrive':
      todrive = True
      i += 1
    elif sys.argv[i].lower() in [u'alias', u'aliases']:
      get_aliases = True
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam print courses"' % sys.argv[i]
      sys.exit(2)
  sys.stderr.write(u'Retrieving courses for organization (may take some time for large accounts)...\n')
  page_message = u'Got %%num_items%% courses...\n'
  all_courses = callGAPIpages(croom.courses(), u'list', u'courses', page_message=page_message, teacherId=teacherId, studentId=studentId)
  for course in all_courses:
    croom_attributes.append(flatten_json(course))
    for item in croom_attributes[-1]:
      if item not in titles:
        titles.append(item)
        croom_attributes[0][item] = item
  if get_aliases:
    # Aliases require one extra API call per course.
    titles.append(u'Aliases')
    croom_attributes[0].update(Aliases=u'Aliases')
    num_courses = len(croom_attributes[1:])
    i = 1
    for course in croom_attributes[1:]:
      sys.stderr.write(u'Getting aliases for course %s (%s/%s)\n' % (course[u'id'], i, num_courses))
      course_aliases = callGAPIpages(croom.courses().aliases(), u'list', u'aliases', courseId=course[u'id'])
      my_aliases = []
      for alias in course_aliases:
        my_aliases.append(alias[u'alias'][2:])
      course.update(Aliases=u' '.join(my_aliases))
      i += 1
  output_csv(croom_attributes, titles, u'Courses', todrive)
def doPrintCourseParticipants():
  """Print a CSV of teachers and students across one or more Classroom courses.

  Optional argv: course/class <id> (repeatable; otherwise all courses are
  listed), teacher <id>, student <id>, todrive.
  """
  croom = buildGAPIObject(u'classroom')
  participants_attributes = [{}]  # row 0 maps column name -> title
  titles = []
  todrive = False
  courses = []
  teacherId = None
  studentId = None
  i = 3
  while i < len(sys.argv):
    if sys.argv[i].lower() in [u'course', u'class']:
      course = sys.argv[i+1]
      if not course.isdigit():
        course = u'd:%s' % course
      courses.append(course)
      i += 2
    elif sys.argv[i].lower() == u'teacher':
      teacherId = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'student':
      studentId = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'todrive':
      todrive = True
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam print course-participants"' % sys.argv[i]
      sys.exit(2)
  sys.stderr.write(u'Retrieving courses for organization (may take some time for large accounts)...\n')
  if len(courses) == 0:
    # No explicit courses: enumerate all (optionally filtered) courses.
    page_message = u'Got %%num_items%% courses...\n'
    all_courses = callGAPIpages(croom.courses(), u'list', u'courses', page_message=page_message, teacherId=teacherId, studentId=studentId)
    for course in all_courses:
      courses.append(course[u'id'])
  else:
    all_courses = []
    for course in courses:
      all_courses.append(callGAPI(croom.courses(), u'get', id=course))
  y = 1
  num_courses = len(all_courses)
  for course in all_courses:
    course_id = course[u'id']
    teacher_message = u' got %%%%num_items%%%% teachers for course %s (%s/%s)' % (course_id, y, num_courses)
    student_message = u' got %%%%num_items%%%% students for course %s (%s/%s)' % (course_id, y, num_courses)
    teachers = callGAPIpages(croom.courses().teachers(), u'list', u'teachers', page_message=teacher_message, courseId=course_id)
    students = callGAPIpages(croom.courses().students(), u'list', u'students', page_message=student_message, courseId=course_id)
    for teacher in teachers:
      participant = flatten_json(teacher)
      participant[u'courseId'] = course_id
      participant[u'courseName'] = course[u'name']
      participant[u'userRole'] = u'TEACHER'
      participants_attributes.append(participant)
      for item in participant:
        if item not in titles:
          titles.append(item)
          participants_attributes[0][item] = item
    for student in students:
      participant = flatten_json(student)
      participant[u'courseId'] = course_id
      participant[u'courseName'] = course[u'name']
      participant[u'userRole'] = u'STUDENT'
      participants_attributes.append(participant)
      for item in participant:
        if item not in titles:
          titles.append(item)
          participants_attributes[0][item] = item
    y += 1
  output_csv(participants_attributes, titles, u'Course Participants', todrive)
# Maps the user-facing "orderby" argument to the Cloud Print sortorder value.
PRINTJOB_ASCENDINGORDER_MAP = {
  u'createtime': u'CREATE_TIME',
  u'status': u'STATUS',
  u'title': u'TITLE',
  }
# Maps an ascending sortorder to its descending counterpart.
PRINTJOB_DESCENDINGORDER_MAP = {
  u'CREATE_TIME': u'CREATE_TIME_DESC',
  u'STATUS': u'STATUS_DESC',
  u'TITLE': u'TITLE_DESC',
  }
def doPrintPrintJobs():
  """Print a CSV of Cloud Print jobs (gam print printjobs).

  Optional argv: todrive, olderthan/newerthan <N>[m|h|d], query, status,
  ascending/descending, orderby, printer/printerid, owner/user.
  """
  cp = buildGAPIObject(u'cloudprint')
  job_attributes = [{}]  # row 0 maps column name -> title
  titles = []
  todrive = False
  printerid = None
  owner = None
  status = None
  sortorder = None
  descending = False
  query = None
  i = 3
  age = None
  older_or_newer = None
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'todrive':
      todrive = True
      i += 1
    elif sys.argv[i].lower().replace(u'_', u'') in [u'olderthan', u'newerthan']:
      if sys.argv[i].lower().replace(u'_', u'') == u'olderthan':
        older_or_newer = u'older'
      else:
        older_or_newer = u'newer'
      # Age is <number><unit>; convert to an absolute epoch-seconds cutoff.
      age_number = sys.argv[i+1][:-1]
      if not age_number.isdigit():
        print u'ERROR: expected a number, got %s' % age_number
        sys.exit(2)
      age_unit = sys.argv[i+1][-1].lower()
      if age_unit == u'm':
        age = int(time.time()) - (int(age_number) * 60)
      elif age_unit == u'h':
        age = int(time.time()) - (int(age_number) * 60 * 60)
      elif age_unit == u'd':
        age = int(time.time()) - (int(age_number) * 60 * 60 * 24)
      else:
        print u'ERROR: expected m (minutes), h (hours) or d (days), got %s' % age_unit
        sys.exit(2)
      i += 2
    elif sys.argv[i].lower() == u'query':
      query = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'status':
      status = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'ascending':
      descending = False
      i += 1
    elif sys.argv[i].lower() == u'descending':
      descending = True
      i += 1
    elif sys.argv[i].lower() == u'orderby':
      sortorder = sys.argv[i+1].lower().replace(u'_', u'')
      if sortorder not in PRINTJOB_ASCENDINGORDER_MAP:
        print u'ERROR: orderby must be one of %s. Got %s' % (u','.join(PRINTJOB_ASCENDINGORDER_MAP), sortorder)
        sys.exit(2)
      sortorder = PRINTJOB_ASCENDINGORDER_MAP[sortorder]
      i += 2
    elif sys.argv[i].lower() in [u'printer', u'printerid']:
      printerid = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() in [u'owner', u'user']:
      owner = sys.argv[i+1]
      i += 2
    else:
      print u'ERROR: %s is not a valid argument for "gam print printjobs"' % sys.argv[i]
      sys.exit(2)
  if sortorder and descending:
    sortorder = PRINTJOB_DESCENDINGORDER_MAP[sortorder]
  jobs = callGAPI(cp.jobs(), u'list', q=query, status=status, sortorder=sortorder, printerid=printerid, owner=owner)
  checkCloudPrintResult(jobs)
  for job in jobs[u'jobs']:
    # Cloud Print timestamps are epoch milliseconds.
    createTime = int(job[u'createTime'])/1000
    if older_or_newer:
      # Age filtering happens client-side, after the API call.
      if older_or_newer == u'older' and createTime > age:
        continue
      elif older_or_newer == u'newer' and createTime < age:
        continue
    updateTime = int(job[u'updateTime'])/1000
    job[u'createTime'] = datetime.datetime.fromtimestamp(createTime).strftime(u'%Y-%m-%d %H:%M:%S')
    job[u'updateTime'] = datetime.datetime.fromtimestamp(updateTime).strftime(u'%Y-%m-%d %H:%M:%S')
    job[u'tags'] = u' '.join(job[u'tags'])
    job_attributes.append(flatten_json(job))
    for item in job_attributes[-1]:
      if item not in titles:
        titles.append(item)
        job_attributes[0][item] = item
  output_csv(job_attributes, titles, u'Print Jobs', todrive)
def doPrintPrinters():
  """Print a CSV of Cloud Print printers (gam print printers).

  Optional argv: query, type, status, extra_fields, todrive.
  """
  cp = buildGAPIObject(u'cloudprint')
  printer_attributes = [{}]  # row 0 maps column name -> title
  titles = []
  todrive = False
  query = None
  printer_type = None
  connection_status = None
  extra_fields = None
  i = 3
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'query':
      query = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'type':
      printer_type = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'status':
      connection_status = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower().replace(u'_', u'') == u'extrafields':
      extra_fields = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'todrive':
      todrive = True
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam print printers"' % sys.argv[i]
      sys.exit(2)
  printers = callGAPI(cp.printers(), u'list', q=query, type=printer_type, connection_status=connection_status, extra_fields=extra_fields)
  checkCloudPrintResult(printers)
  for printer in printers[u'printers']:
    # Cloud Print timestamps are epoch milliseconds; render as local time.
    createTime = int(printer[u'createTime'])/1000
    accessTime = int(printer[u'accessTime'])/1000
    updateTime = int(printer[u'updateTime'])/1000
    printer[u'createTime'] = datetime.datetime.fromtimestamp(createTime).strftime(u'%Y-%m-%d %H:%M:%S')
    printer[u'accessTime'] = datetime.datetime.fromtimestamp(accessTime).strftime(u'%Y-%m-%d %H:%M:%S')
    printer[u'updateTime'] = datetime.datetime.fromtimestamp(updateTime).strftime(u'%Y-%m-%d %H:%M:%S')
    printer[u'tags'] = u' '.join(printer[u'tags'])
    printer_attributes.append(flatten_json(printer))
    for item in printer_attributes[-1]:
      if item not in titles:
        titles.append(item)
        printer_attributes[0][item] = item
  output_csv(printer_attributes, titles, u'Printers', todrive)
def changeCalendarAttendees(users):
cal = buildGAPIObject(u'calendar', users[0])
do_it = True
i = 5
allevents = False
start_date = end_date = None
while len(sys.argv) > i:
if sys.argv[i].lower() == u'csv':
csv_file = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'dryrun':
do_it = False
i += 1
elif sys.argv[i].lower() == u'start':
start_date = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'end':
end_date = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'allevents':
allevents = True
i += 1
else:
print u'ERROR: %s is not a valid argument for "gam <users> update calattendees"' % sys.argv[i]
sys.exit(2)
attendee_map = dict()
csvfile = csv.reader(open(csv_file, u'rb'))
for row in csvfile:
attendee_map[row[0].lower()] = row[1].lower()
for user in users:
sys.stdout.write(u'Checking user %s\n' % user)
if user.find(u'@') == -1:
user = u'%s@%s' % (user, GC_Values[GC_DOMAIN])
cal = buildGAPIObject(u'calendar', user)
page_token = None
while True:
events_page = callGAPI(cal.events(), u'list', calendarId=user, pageToken=page_token, timeMin=start_date, timeMax=end_date, showDeleted=False, showHiddenInvitations=False)
print u'Got %s items' % len(events_page.get(u'items', []))
for event in events_page.get(u'items', []):
if event[u'status'] == u'cancelled':
#print u' skipping cancelled event'
continue
try:
event_summary = convertUTF8(event[u'summary'])
except (KeyError, UnicodeEncodeError, UnicodeDecodeError):
event_summary = event[u'id']
try:
if not allevents and event[u'organizer'][u'email'].lower() != user:
#print u' skipping not-my-event %s' % event_summary
continue
except KeyError:
pass # no email for organizer
needs_update = False
try:
for attendee in event[u'attendees']:
try:
if attendee[u'email'].lower() in attendee_map:
old_email = attendee[u'email'].lower()
new_email = attendee_map[attendee[u'email'].lower()]
print u' SWITCHING attendee %s to %s for %s' % (old_email, new_email, event_summary)
event[u'attendees'].remove(attendee)
event[u'attendees'].append({u'email': new_email})
needs_update = True
except KeyError: # no email for that attendee
pass
except KeyError:
continue # no attendees
if needs_update:
body = dict()
body[u'attendees'] = event[u'attendees']
print u'UPDATING %s' % event_summary
if do_it:
callGAPI(cal.events(), u'patch', calendarId=user, eventId=event[u'id'], sendNotifications=False, body=body)
else:
print u' not pulling the trigger.'
#else:
# print u' no update needed for %s' % event_summary
try:
page_token = events_page[u'nextPageToken']
except KeyError:
break
def deleteCalendar(users):
  """Unsubscribe each user from the calendar named in argv[5].

  Bare calendar names and bare usernames get the GAM domain appended.
  """
  cal = buildGAPIObject(u'calendar', users[0])
  calendarId = sys.argv[5]
  if u'@' not in calendarId:
    calendarId = u'%s@%s' % (calendarId, GC_Values[GC_DOMAIN])
  for user in users:
    if u'@' not in user:
      user = u'%s@%s' % (user, GC_Values[GC_DOMAIN])
    cal = buildGAPIObject(u'calendar', user)
    callGAPI(cal.calendarList(), u'delete', calendarId=calendarId)
def addCalendar(users):
  """Subscribe each user to the calendar named in argv[5].

  Optional argv pairs: selected, hidden, reminder <method> <minutes>
  (repeatable), summary, colorindex, backgroundcolor, foregroundcolor.
  """
  cal = buildGAPIObject(u'calendar', users[0])
  body = dict()
  body[u'defaultReminders'] = list()
  body[u'id'] = sys.argv[5]
  if body[u'id'].find(u'@') == -1:
    body[u'id'] = u'%s@%s' % (body[u'id'], GC_Values[GC_DOMAIN])
  body[u'selected'] = True
  body[u'hidden'] = False
  colorRgbFormat = False
  i = 6
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'selected':
      if sys.argv[i+1].lower() in true_values:
        body[u'selected'] = True
      elif sys.argv[i+1].lower() in false_values:
        body[u'selected'] = False
      else:
        print u'ERROR: Value for selected must be true or false, not %s' % sys.argv[i+1]
        sys.exit(2)
      i += 2
    elif sys.argv[i].lower() == u'hidden':
      if sys.argv[i+1].lower() in true_values:
        body[u'hidden'] = True
      elif sys.argv[i+1].lower() in false_values:
        body[u'hidden'] = False
      else:
        print u'ERROR: Value for hidden must be true or false, not %s' % sys.argv[i+1]
        sys.exit(2)
      i += 2
    elif sys.argv[i].lower() == u'reminder':
      method = sys.argv[i+1].lower()
      try:
        minutes = int(sys.argv[i+2])
      except ValueError:
        print u'ERROR: Reminder time must be specified in minutes, got %s' % sys.argv[i+2]
        sys.exit(2)
      if method != u'email' and method != u'sms' and method != u'popup':
        print u'ERROR: Method must be email, sms or popup. Got %s' % method
        sys.exit(2)
      body[u'defaultReminders'].append({u'method': method, u'minutes': minutes})
      i = i + 3
    elif sys.argv[i].lower() == u'summary':
      body[u'summaryOverride'] = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'colorindex':
      body[u'colorId'] = str(sys.argv[i+1])
      i += 2
    elif sys.argv[i].lower() == u'backgroundcolor':
      body[u'backgroundColor'] = sys.argv[i+1]
      # RGB colors require the colorRgbFormat flag on the API call.
      colorRgbFormat = True
      i += 2
    elif sys.argv[i].lower() == u'foregroundcolor':
      body[u'foregroundColor'] = sys.argv[i+1]
      colorRgbFormat = True
      i += 2
    else:
      print u'ERROR: %s is not a valid argument for "gam add calendar"' % sys.argv[i]
      sys.exit(2)
  i = 1
  count = len(users)
  for user in users:
    if user.find(u'@') == -1:
      user = u'%s@%s' % (user, GC_Values[GC_DOMAIN])
    print u"Subscribing %s to %s calendar (%s of %s)" % (user, body[u'id'], i, count)
    cal = buildGAPIObject(u'calendar', user)
    callGAPI(cal.calendarList(), u'insert', body=body, colorRgbFormat=colorRgbFormat)
    i += 1
def updateCalendar(users):
  """Update each user's subscription settings for the calendar in argv[5].

  Accepts the same optional argv pairs as addCalendar: selected, hidden,
  summary, colorindex, backgroundcolor, foregroundcolor, reminder.
  """
  calendarId = sys.argv[5]
  i = 6
  body = dict()
  body[u'id'] = calendarId
  colorRgbFormat = False
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'selected':
      if sys.argv[i+1].lower() in true_values:
        body[u'selected'] = True
      elif sys.argv[i+1].lower() in false_values:
        body[u'selected'] = False
      else:
        print u'ERROR: Value for selected must be true or false, not %s' % sys.argv[i+1]
        sys.exit(2)
      i += 2
    elif sys.argv[i].lower() == u'hidden':
      if sys.argv[i+1].lower() in true_values:
        body[u'hidden'] = True
      elif sys.argv[i+1].lower() in false_values:
        body[u'hidden'] = False
      else:
        print u'ERROR: Value for hidden must be true or false, not %s' % sys.argv[i+1]
        sys.exit(2)
      i += 2
    elif sys.argv[i].lower() == u'summary':
      body[u'summaryOverride'] = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'colorindex':
      body[u'colorId'] = str(sys.argv[i+1])
      i += 2
    elif sys.argv[i].lower() == u'backgroundcolor':
      body[u'backgroundColor'] = sys.argv[i+1]
      # RGB colors require the colorRgbFormat flag on the API call.
      colorRgbFormat = True
      i += 2
    elif sys.argv[i].lower() == u'foregroundcolor':
      body[u'foregroundColor'] = sys.argv[i+1]
      colorRgbFormat = True
      i += 2
    elif sys.argv[i].lower() == u'reminder':
      method = sys.argv[i+1].lower()
      try:
        minutes = int(sys.argv[i+2])
      except ValueError:
        print u'ERROR: Reminder time must be specified in minutes, got %s' % sys.argv[i+2]
        sys.exit(2)
      if method != u'email' and method != u'sms' and method != u'popup':
        print u'ERROR: Method must be email, sms or popup. Got %s' % method
        sys.exit(2)
      try:
        body[u'defaultReminders'].append({u'method': method, u'minutes': minutes})
      except KeyError:
        # First reminder: create the list.
        body[u'defaultReminders'] = [{u'method': method, u'minutes': minutes}]
      i = i + 3
    else:
      print u'ERROR: %s is not a valid argument for "gam update calendar"' % sys.argv[i]
      sys.exit(2)
  i = 1
  count = len(users)
  # NOTE(review): unlike addCalendar, bare usernames are not domain-qualified
  # here -- confirm callers always pass full email addresses.
  for user in users:
    print u"Updating %s's subscription to calendar %s (%s of %s)" % (user, calendarId, i, count)
    cal = buildGAPIObject(u'calendar', user)
    callGAPI(cal.calendarList(), u'update', calendarId=calendarId, body=body, colorRgbFormat=colorRgbFormat)
def doPrinterShowACL():
  """Print the ACL entries of a Cloud Print printer as JSON blocks."""
  cp = buildGAPIObject(u'cloudprint')
  printerid = sys.argv[2]
  printer_info = callGAPI(cp.printers(), u'get', printerid=printerid)
  checkCloudPrintResult(printer_info)
  for acl_entry in printer_info[u'printers'][0][u'access']:
    if u'key' in acl_entry:
      # Key-scoped entries get a shareable public-printer URL.
      acl_entry[u'accessURL'] = u'https://www.google.com/cloudprint/addpublicprinter.html?printerid=%s&key=%s' % (printerid, acl_entry[u'key'])
    print_json(None, acl_entry)
    print
def doPrinterAddACL():
cp = buildGAPIObject(u'cloudprint')
printer = sys.argv[2]
role = sys.argv[4].upper()
scope = sys.argv[5]
public = None
skip_notification = True
if scope.lower() == u'public':
public = True
scope = None
role = None
skip_notification = None
elif scope.find(u'@') == -1:
scope = u'/hd/domain/%s' % scope
result = callGAPI(cp.printers(), u'share', printerid=printer, role=role, scope=scope, public=public, skip_notification=skip_notification)
checkCloudPrintResult(result)
who = scope
if who == None:
who = u'public'
role = u'user'
print u'Added %s %s' % (role, who)
def doPrinterDelACL():
cp = buildGAPIObject(u'cloudprint')
printer = sys.argv[2]
scope = sys.argv[4]
public = None
if scope.lower() == u'public':
public = True
scope = None
elif scope.find(u'@') == -1:
scope = u'/hd/domain/%s' % scope
result = callGAPI(cp.printers(), u'unshare', printerid=printer, scope=scope, public=public)
checkCloudPrintResult(result)
who = scope
if who == None:
who = u'public'
print u'Removed %s' % who
def encode_multipart(fields, files, boundary=None):
  """Encode form fields and file attachments as a multipart/form-data body.

  fields: dict of field name -> value; the special key u'tags' holds a list
    that is expanded into repeated u'tag' fields.
  files: dict of field name -> {u'filename', u'mimetype', u'content'}.
  boundary: explicit boundary string; a random 30-character one is generated
    when omitted.
  Returns a (body, headers) tuple ready for an HTTP POST.

  NOTE(review): Content-Length is the character count of the joined body,
  which differs from the byte count for non-ASCII content -- confirm callers
  only send ASCII-safe payloads.
  """
  def _quote(s):
    return s.replace(u'"', u'\\"')

  def _field_lines(name, value):
    return ['--{0}'.format(boundary),
            u'Content-Disposition: form-data; name="{0}"'.format(_quote(name)),
            u'',
            str(value)]

  if boundary is None:
    alphabet = string.digits + string.ascii_letters
    boundary = u''.join(random.choice(alphabet) for _ in range(30))
  lines = []
  for name, value in fields.items():
    if name == u'tags':
      # Each tag becomes its own repeated "tag" form field.
      for tag in value:
        lines.extend(_field_lines(u'tag', tag))
    else:
      lines.extend(_field_lines(name, value))
  for name, value in files.items():
    lines.extend((
      '--{0}'.format(boundary),
      'Content-Disposition: form-data; name="{0}"; filename="{1}"'.format(
        _quote(name), _quote(value[u'filename'])),
      'Content-Type: {0}'.format(value[u'mimetype']),
      '',
      value[u'content'],
    ))
  lines.extend(('--{0}--'.format(boundary), ''))
  body = u'\r\n'.join(lines)
  headers = {
    'Content-Type': 'multipart/form-data; boundary={0}'.format(boundary),
    'Content-Length': str(len(body)),
  }
  return (body, headers)
def doPrintJobFetch():
  """Download Cloud Print jobs to local files and mark them DONE.

  argv[2] is a printer id, or u'any' for all printers; optional filters
  mirror "gam print printjobs" (olderthan/newerthan, query, status,
  ascending/descending, orderby, owner/user).
  """
  cp = buildGAPIObject(u'cloudprint')
  printerid = sys.argv[2]
  if printerid == u'any':
    printerid = None
  owner = None
  status = None
  sortorder = None
  descending = False
  query = None
  age = None
  older_or_newer = None
  i = 4
  while i < len(sys.argv):
    if sys.argv[i].lower().replace(u'_', u'') in [u'olderthan', u'newerthan']:
      if sys.argv[i].lower().replace(u'_', u'') == u'olderthan':
        older_or_newer = u'older'
      else:
        older_or_newer = u'newer'
      # Age is <number><unit>; convert to an absolute epoch-seconds cutoff.
      age_number = sys.argv[i+1][:-1]
      if not age_number.isdigit():
        print u'ERROR: expected a number, got %s' % age_number
        sys.exit(2)
      age_unit = sys.argv[i+1][-1].lower()
      if age_unit == u'm':
        age = int(time.time()) - (int(age_number) * 60)
      elif age_unit == u'h':
        age = int(time.time()) - (int(age_number) * 60 * 60)
      elif age_unit == u'd':
        age = int(time.time()) - (int(age_number) * 60 * 60 * 24)
      else:
        print u'ERROR: expected m (minutes), h (hours) or d (days), got %s' % age_unit
        sys.exit(2)
      i += 2
    elif sys.argv[i].lower() == u'query':
      query = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'status':
      status = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'ascending':
      descending = False
      i += 1
    elif sys.argv[i].lower() == u'descending':
      descending = True
      i += 1
    elif sys.argv[i].lower() == u'orderby':
      sortorder = sys.argv[i+1].lower().replace(u'_', u'')
      if sortorder not in PRINTJOB_ASCENDINGORDER_MAP:
        print u'ERROR: orderby must be one of %s. Got %s' % (u','.join(PRINTJOB_ASCENDINGORDER_MAP), sortorder)
        sys.exit(2)
      sortorder = PRINTJOB_ASCENDINGORDER_MAP[sortorder]
      i += 2
    elif sys.argv[i].lower() in [u'owner', u'user']:
      owner = sys.argv[i+1]
      i += 2
    else:
      print u'ERROR: %s is not a valid argument for "gam printjobs fetch"' % sys.argv[i]
      sys.exit(2)
  if sortorder and descending:
    sortorder = PRINTJOB_DESCENDINGORDER_MAP[sortorder]
  result = callGAPI(cp.jobs(), u'list', q=query, status=status, sortorder=sortorder, printerid=printerid, owner=owner)
  # NOTE(review): errorCode 413 is treated here as "no jobs to fetch" --
  # confirm against the Cloud Print /jobs interface documentation.
  if u'errorCode' in result and result[u'errorCode'] == 413:
    print u'No print jobs.'
    sys.exit(0)
  checkCloudPrintResult(result)
  # Characters allowed in the local file name; anything else becomes u'_'.
  valid_chars = u'-_.() abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
  ssd = u'''{
  "state": {"type": "DONE"}
}'''
  for job in result[u'jobs']:
    # Cloud Print timestamps are epoch milliseconds.
    createTime = int(job[u'createTime'])/1000
    if older_or_newer:
      if older_or_newer == u'older' and createTime > age:
        continue
      elif older_or_newer == u'newer' and createTime < age:
        continue
    fileUrl = job[u'fileUrl']
    jobid = job[u'id']
    fileName = job[u'title']
    fileName = u''.join(c if c in valid_chars else u'_' for c in fileName)
    fileName = u'%s-%s' % (fileName, jobid)
    _, content = cp._http.request(uri=fileUrl, method=u'GET')
    if writeFile(fileName, content, continueOnError=True):
      # Mark the job DONE only if the local write succeeded.
      # ticket = callGAPI(cp.jobs(), u'getticket', jobid=jobid, use_cjt=True)
      result = callGAPI(cp.jobs(), u'update', jobid=jobid, semantic_state_diff=ssd)
      checkCloudPrintResult(result)
      print u'Printed job %s to %s' % (jobid, fileName)
def doDelPrinter():
  """Delete the Cloud Print printer named in argv[3]."""
  cp = buildGAPIObject(u'cloudprint')
  del_printerid = sys.argv[3]
  delete_result = callGAPI(cp.printers(), u'delete', printerid=del_printerid)
  checkCloudPrintResult(delete_result)
def doGetPrinterInfo():
  """Print a Cloud Print printer's metadata as JSON (gam info printer).

  The optional 'everything' argument also includes the bulky capabilities
  and access (ACL) sections.
  """
  cp = buildGAPIObject(u'cloudprint')
  printerid = sys.argv[3]
  everything = False
  i = 4
  while i < len(sys.argv):
    if sys.argv[i] == u'everything':
      everything = True
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam info printer"' % sys.argv[i]
      sys.exit(2)
  result = callGAPI(cp.printers(), u'get', printerid=printerid)
  checkCloudPrintResult(result)
  printer_info = result[u'printers'][0]
  # Cloud Print timestamps are epoch milliseconds; render as local time.
  createTime = int(printer_info[u'createTime'])/1000
  accessTime = int(printer_info[u'accessTime'])/1000
  updateTime = int(printer_info[u'updateTime'])/1000
  printer_info[u'createTime'] = datetime.datetime.fromtimestamp(createTime).strftime(u'%Y-%m-%d %H:%M:%S')
  printer_info[u'accessTime'] = datetime.datetime.fromtimestamp(accessTime).strftime(u'%Y-%m-%d %H:%M:%S')
  printer_info[u'updateTime'] = datetime.datetime.fromtimestamp(updateTime).strftime(u'%Y-%m-%d %H:%M:%S')
  printer_info[u'tags'] = u' '.join(printer_info[u'tags'])
  if not everything:
    del printer_info[u'capabilities']
    del printer_info[u'access']
  print_json(None, printer_info)
def doUpdatePrinter():
  """Update arbitrary Cloud Print printer fields from key/value argv pairs."""
  cp = buildGAPIObject(u'cloudprint')
  printerid = sys.argv[3]
  kwargs = {}
  i = 4
  # Recognized printer fields; argv keys are matched case-insensitively and
  # passed through to the API with their canonical casing.
  update_items = [u'isTosAccepted', u'gcpVersion', u'setupUrl',
                  u'quotaEnabled', u'id', u'supportUrl', u'firmware',
                  u'currentQuota', u'type', u'public', u'status', u'description',
                  u'defaultDisplayName', u'proxy', u'dailyQuota', u'manufacturer',
                  u'displayName', u'name', u'uuid', u'updateUrl', u'ownerId', u'model']
  while i < len(sys.argv):
    arg_in_item = False
    for item in update_items:
      if item.lower() == sys.argv[i].lower():
        kwargs[item] = sys.argv[i+1]
        i += 2
        arg_in_item = True
        break
    if not arg_in_item:
      print u'ERROR: %s is not a valid argument for "gam update printer"' % sys.argv[i]
      sys.exit(2)
  result = callGAPI(cp.printers(), u'update', printerid=printerid, **kwargs)
  checkCloudPrintResult(result)
  print u'Updated printer %s' % printerid
def doPrinterRegister():
  """Register GAM itself as a Cloud Print printer for the authorized account."""
  cp = buildGAPIObject(u'cloudprint')
  form_fields = {u'name': u'GAM',
                 u'proxy': u'GAM',
                 # The OAuth token's subject serves as a stable printer UUID.
                 u'uuid': cp._http.request.credentials.id_token[u'sub'],
                 u'manufacturer': __author__,
                 u'model': u'cp1',
                 u'gcp_version': u'2.0',
                 u'setup_url': GAM_URL,
                 u'support_url': u'https://groups.google.com/forum/#!forum/google-apps-manager',
                 u'update_url': GAM_RELEASES,
                 u'firmware': __version__,
                 u'semantic_state': {"version": "1.0", "printer": {"state": "IDLE",}},
                 u'use_cdd': True,
                 # Minimal CDD capability description: PDF/JPEG/plain-text
                 # content, 1-100 copies, A4/Legal/Letter media.
                 u'capabilities': {"version": "1.0",
                                   "printer": {"supported_content_type": [{"content_type": "application/pdf", "min_version": "1.5"},
                                                                          {"content_type": "image/jpeg"},
                                                                          {"content_type": "text/plain"}
                                                                         ],
                                               "copies": {"default": 1, "max": 100},
                                               "media_size": {"option": [{"name": "ISO_A4", "width_microns": 210000, "height_microns": 297000},
                                                                         {"name": "NA_LEGAL", "width_microns": 215900, "height_microns": 355600},
                                                                         {"name": "NA_LETTER", "width_microns": 215900, "height_microns": 279400, "is_default": True}
                                                                        ],
                                                             },
                                              },
                                  },
                 u'tags': [u'GAM', GAM_URL],
                }
  form_files = {}
  body, headers = encode_multipart(form_fields, form_files)
  # Get the printer list first to make sure our OAuth access token is fresh.
  callGAPI(cp.printers(), u'list')
  _, result = cp._http.request(uri=u'https://www.google.com/cloudprint/register', method=u'POST', body=body, headers=headers)
  result = json.loads(result)
  checkCloudPrintResult(result)
  print u'Created printer %s' % result[u'printers'][0][u'id']
def doPrintJobResubmit():
cp = buildGAPIObject(u'cloudprint')
jobid = sys.argv[2]
printerid = sys.argv[4]
ssd = u'''{
"state": {"type": "HELD"}
}'''
result = callGAPI(cp.jobs(), u'update', jobid=jobid, semantic_state_diff=ssd)
checkCloudPrintResult(result)
ticket = callGAPI(cp.jobs(), u'getticket', jobid=jobid, use_cjt=True)
result = callGAPI(cp.jobs(), u'resubmit', printerid=printerid, jobid=jobid, ticket=ticket)
checkCloudPrintResult(result)
print u'Success resubmitting %s as job %s to printer %s' % (jobid, result[u'job'][u'id'], printerid)
def doPrintJobSubmit():
  """Submit a new Cloud Print job (gam printer <id> print <file|url> ...).

  sys.argv[2] is the printer id, sys.argv[4] the content: a URL (submitted
  by reference) or a local file path (uploaded inline as multipart data).
  Optional trailing arguments add tags or override the job title.
  """
  cp = buildGAPIObject(u'cloudprint')
  printer = sys.argv[2]
  content = sys.argv[4]
  form_fields = {u'printerid': printer,
                 u'title': content,
                 u'ticket': u'{"version": "1.0"}',
                 u'tags': [u'GAM', GAM_URL]}
  i = 5
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'tag':
      form_fields[u'tags'].append(sys.argv[i+1])
      i += 2
    elif sys.argv[i].lower() in [u'name', u'title']:
      form_fields[u'title'] = sys.argv[i+1]
      i += 2
    else:
      print u'ERROR: %s is not a valid argument for "gam printer ... print"' % sys.argv[i]
      sys.exit(2)
  form_files = {}
  if content[:4] == u'http':
    # URL content is passed by reference; Cloud Print fetches it itself.
    form_fields[u'content'] = content
    form_fields[u'contentType'] = u'url'
  else:
    filepath = content
    content = ntpath.basename(content)
    mimetype = mimetypes.guess_type(filepath)[0]
    if mimetype == None:
      mimetype = u'application/octet-stream'
    filecontent = readFile(filepath)
    form_files[u'content'] = {u'filename': content, u'content': filecontent, u'mimetype': mimetype}
  #result = callGAPI(cp.printers(), u'submit', body=body)
  body, headers = encode_multipart(form_fields, form_files)
  #Get the printer first to make sure our OAuth access token is fresh
  callGAPI(cp.printers(), u'get', printerid=printer)
  _, result = cp._http.request(uri=u'https://www.google.com/cloudprint/submit', method=u'POST', body=body, headers=headers)
  checkCloudPrintResult(result)
  if type(result) is str:
    result = json.loads(result)
  print u'Submitted print job %s' % result[u'job'][u'id']
def doDeletePrintJob():
cp = buildGAPIObject(u'cloudprint')
job = sys.argv[2]
result = callGAPI(cp.jobs(), u'delete', jobid=job)
checkCloudPrintResult(result)
print u'Print Job %s deleted' % job
def doCancelPrintJob():
cp = buildGAPIObject(u'cloudprint')
job = sys.argv[2]
ssd = u'{"state": {"type": "ABORTED", "user_action_cause": {"action_code": "CANCELLED"}}}'
result = callGAPI(cp.jobs(), u'update', jobid=job, semantic_state_diff=ssd)
checkCloudPrintResult(result)
print u'Print Job %s cancelled' % job
def checkCloudPrintResult(result):
if type(result) is str:
try:
result = json.loads(result)
except ValueError:
print u'ERROR: unexpected response: %s' % result
sys.exit(3)
if not result[u'success']:
print u'ERROR %s: %s' % (result[u'errorCode'], result[u'message'])
sys.exit(result[u'errorCode'])
def doCalendarShowACL():
cal = buildGAPIObject(u'calendar')
show_cal = sys.argv[2]
if show_cal.find(u'@') == -1:
show_cal = u'%s@%s' % (show_cal, GC_Values[GC_DOMAIN])
acls = callGAPI(cal.acl(), u'list', calendarId=show_cal)
try:
for rule in acls[u'items']:
print u' Scope %s - %s' % (rule[u'scope'][u'type'], rule[u'scope'][u'value'])
print u' Role: %s' % (rule[u'role'])
print u''
except IndexError:
pass
def doCalendarAddACL(calendarId=None, act_as=None, role=None, scope=None, entity=None):
  """Insert an ACL rule on a calendar.

  Any parameter not supplied by the caller is read from sys.argv:
  calendarId at 2, role at 4, scope at 5, and entity at 5 or 6 depending
  on whether argv[5] was a recognized scope keyword.  Friendly role names
  (freebusy/read/editor) are mapped to the API's freeBusyReader/reader/
  writer before the acl.insert call.
  """
  if act_as != None:
    cal = buildGAPIObject(u'calendar', act_as)
  else:
    cal = buildGAPIObject(u'calendar')
  body = dict()
  body[u'scope'] = dict()
  if calendarId == None:
    calendarId = sys.argv[2]
  if calendarId.find(u'@') == -1:
    calendarId = u'%s@%s' % (calendarId, GC_Values[GC_DOMAIN])
  if role != None:
    body[u'role'] = role
  else:
    body[u'role'] = sys.argv[4].lower()
  if body[u'role'] not in [u'freebusy', u'read', u'reader', u'editor', u'owner', u'none']:
    print u'ERROR: Role must be freebusy, read, editor, owner or none. Not %s' % body[u'role']
    sys.exit(2)
  # Map GAM's friendly role names onto the Calendar API role values.
  if body[u'role'] == u'freebusy':
    body[u'role'] = u'freeBusyReader'
  elif body[u'role'] in [u'read', u'reader']:
    body[u'role'] = u'reader'
  elif body[u'role'] == u'editor':
    body[u'role'] = u'writer'
  if scope != None:
    body[u'scope'][u'type'] = scope
  else:
    body[u'scope'][u'type'] = sys.argv[5].lower()
  i = 6
  if body[u'scope'][u'type'] not in [u'default', u'user', u'group', u'domain']:
    # argv[5] wasn't a scope keyword: treat it as the entity itself and
    # default the scope type to u'user' (hence the index rewind to 5).
    body[u'scope'][u'type'] = u'user'
    i = 5
  try:
    if entity != None and body[u'scope'][u'type'] != u'default':
      body[u'scope'][u'value'] = entity
    else:
      body[u'scope'][u'value'] = sys.argv[i].lower()
    if (body[u'scope'][u'type'] in [u'user', u'group']) and body[u'scope'][u'value'].find(u'@') == -1:
      body[u'scope'][u'value'] = u'%s@%s' % (body[u'scope'][u'value'], GC_Values[GC_DOMAIN])
  except IndexError:
    # No entity argument given; the domain fallback below may still fill it.
    pass
  if body[u'scope'][u'type'] == u'domain':
    try:
      body[u'scope'][u'value'] = sys.argv[6].lower()
    except IndexError:
      body[u'scope'][u'value'] = GC_Values[GC_DOMAIN]
  callGAPI(cal.acl(), u'insert', calendarId=calendarId, body=body)
def doCalendarUpdateACL():
  """Update a calendar ACL entry; thin wrapper over doCalendarAddACL."""
  calendarId = sys.argv[2]
  role = sys.argv[4].lower()
  scope = sys.argv[5].lower()
  entity = sys.argv[6].lower() if len(sys.argv) > 6 else None
  doCalendarAddACL(calendarId=calendarId, role=role, scope=scope, entity=entity)
def doCalendarDelACL():
  """Delete a calendar ACL entry by granting the u'none' role."""
  calendarId = sys.argv[2]
  entity = sys.argv[5].lower()
  if entity == u'domain':
    scope = u'domain'
  elif entity == u'default':
    scope = u'default'
    entity = u''
  else:
    scope = u'user'
  doCalendarAddACL(calendarId=calendarId, role=u'none', scope=scope, entity=entity)
def doCalendarWipeData():
  """Clear all events from the calendar whose id is given in sys.argv[2]."""
  calendarId = sys.argv[2]
  cal = buildGAPIObject(u'calendar', calendarId)
  if u'@' not in calendarId:
    calendarId = u'%s@%s' % (calendarId, GC_Values[GC_DOMAIN])
  callGAPI(cal.calendars(), u'clear', calendarId=calendarId)
def doCalendarAddEvent():
calendarId = sys.argv[2]
cal = buildGAPIObject(u'calendar', calendarId)
sendNotifications = timeZone = None
i = 4
body = {}
while i < len(sys.argv):
if sys.argv[i].lower() == u'notifyattendees':
sendNotifications = True
i += 1
elif sys.argv[i].lower() == u'attendee':
try:
body[u'attendees'].append({u'email': sys.argv[i+1]})
except KeyError:
body[u'attendees'] = [{u'email': sys.argv[i+1]},]
i += 2
elif sys.argv[i].lower() == u'optionalattendee':
try:
body[u'attendees'].append({u'email': sys.argv[i+1], u'optional': True})
except TypeError:
body[u'attendees'] = [{u'email': sys.argv[i+1], u'optional': True},]
i += 2
elif sys.argv[i].lower() == u'anyonecanaddself':
body[u'anyoneCanAddSelf'] = True
i += 1
elif sys.argv[i].lower() == u'description':
body[u'description'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'start':
if sys.argv[i+1].lower() == u'allday':
body[u'start'] = {u'date': sys.argv[i+2]}
i += 3
else:
body[u'start'] = {u'dateTime': sys.argv[i+1]}
i += 2
elif sys.argv[i].lower() == u'end':
if sys.argv[i+1].lower() == u'allday':
body[u'end'] = {u'date': sys.argv[i+2]}
i += 3
else:
body[u'end'] = {u'dateTime': sys.argv[i+1]}
i += 2
elif sys.argv[i].lower() == u'guestscantinviteothers':
body[u'guestsCanInviteOthers'] = False
i += 1
elif sys.argv[i].lower() == u'guestscantseeothers':
body[u'guestsCanSeeOtherGuests'] = False
i += 1
elif sys.argv[i].lower() == u'id':
body[u'id'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'summary':
body[u'summary'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'location':
body[u'location'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'available':
body[u'transparency'] = u'transparent'
i += 1
elif sys.argv[i].lower() == u'visibility':
if sys.argv[i+1].lower() in [u'default', u'public', u'private']:
body[u'visibility'] = sys.argv[i+1].lower()
else:
print u'ERROR: visibility must be one of default, public or private, got %s' % sys.argv[i+1]
sys.exit(2)
i += 2
elif sys.argv[i].lower() == u'tentative':
body[u'status'] = u'tentative'
i += 1
elif sys.argv[i].lower() == u'source':
body[u'source'] = {u'title': sys.argv[i+1], u'url': sys.argv[i+2]}
i += 3
elif sys.argv[i].lower() == u'noreminders':
body[u'reminders'] = {u'useDefault': False}
i += 1
elif sys.argv[i].lower() == u'reminder':
try:
body[u'reminders'][u'overrides'].append({u'minutes': sys.argv[i+1], u'method': sys.argv[i+2]})
body[u'reminders'][u'useDefault'] = False
except KeyError:
body[u'reminders'] = {u'useDefault': False, u'overrides': [{u'minutes': sys.argv[i+1], u'method': sys.argv[i+2]},]}
i += 3
elif sys.argv[i].lower() == u'recurrence':
try:
body[u'recurrence'].append(sys.argv[i+1])
except KeyError:
body[u'recurrence'] = [sys.argv[i+1],]
i += 2
elif sys.argv[i].lower() == u'timezone':
timeZone = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'privateproperty':
if u'extendedProperties' not in body:
body[u'extendedProperties'] = {u'private': {}, u'shared': {}}
body[u'extendedProperties'][u'private'][sys.argv[i+1]] = sys.argv[i+2]
i += 3
elif sys.argv[i].lower() == u'sharedproperty':
if u'extendedProperties' not in body:
body[u'extendedProperties'] = {u'private': {}, u'shared': {}}
body[u'extendedProperties'][u'shared'][sys.argv[i+1]] = sys.argv[i+2]
i += 3
elif sys.argv[i].lower() == u'colorindex':
body[u'colorId'] = str(sys.argv[i+1])
i += 2
else:
print u'ERROR: %s is not a valid argument for "gam calendar"' % sys.argv[i]
sys.exit(2)
if not timeZone and u'recurrence' in body:
timeZone = callGAPI(cal.calendars(), u'get', calendarId=calendarId, fields=u'timeZone')[u'timeZone']
if u'recurrence' in body:
for a_time in [u'start', u'end']:
try:
body[a_time][u'timeZone'] = timeZone
except KeyError:
pass
callGAPI(cal.events(), u'insert', calendarId=calendarId, sendNotifications=sendNotifications, body=body)
def doProfile(users):
cd = buildGAPIObject(u'directory')
if sys.argv[4].lower() == u'share' or sys.argv[4].lower() == u'shared':
body = {u'includeInGlobalAddressList': True}
elif sys.argv[4].lower() == u'unshare' or sys.argv[4].lower() == u'unshared':
body = {u'includeInGlobalAddressList': False}
else:
print u'ERROR: value for "gam <users> profile" must be true or false, got %s' % sys.argv[4]
sys.exit(2)
count = len(users)
i = 1
for user in users:
if user[:4].lower() == u'uid:':
user = user[4:]
elif user.find(u'@') == -1:
user = u'%s@%s' % (user, GC_Values[GC_DOMAIN])
print u'Setting Profile Sharing to %s for %s (%s of %s)' % (body[u'includeInGlobalAddressList'], user, i, count)
callGAPI(cd.users(), u'patch', soft_errors=True, userKey=user, body=body)
i += 1
def showProfile(users):
cd = buildGAPIObject(u'directory')
i = 1
count = len(users)
for user in users:
if user[:4].lower() == u'uid:':
user = user[4:]
elif user.find(u'@') == -1:
user = u'%s@%s' % (user, GC_Values[GC_DOMAIN])
result = callGAPI(cd.users(), u'get', userKey=user, fields=u'includeInGlobalAddressList')
try:
print u'User: %s Profile Shared: %s (%s/%s)' % (user, result[u'includeInGlobalAddressList'], i, count)
except IndexError:
pass
i += 1
def doPhoto(users):
  """Update each user's directory photo from a URL or local file.

  sys.argv[5] is a filename/URL template; #user#/#email#/#username# are
  replaced per user.  Image bytes are urlsafe-base64 encoded as the
  Directory API photos.update call requires.  (Python 2 only: uses
  urllib2 and `except ..., e` syntax.)
  """
  cd = buildGAPIObject(u'directory')
  i = 1
  count = len(users)
  for user in users:
    if user[:4].lower() == u'uid:':
      user = user[4:]
    elif user.find(u'@') == -1:
      user = u'%s@%s' % (user, GC_Values[GC_DOMAIN])
    filename = sys.argv[5].replace(u'#user#', user)
    filename = filename.replace(u'#email#', user)
    filename = filename.replace(u'#username#', user[:user.find(u'@')])
    print u"Updating photo for %s with %s (%s of %s)" % (user, filename, i, count)
    i += 1
    if re.match(u'^(ht|f)tps?://.*$', filename):
      # Remote image: fetch over HTTP/FTP; skip this user on HTTP errors.
      import urllib2
      try:
        f = urllib2.urlopen(filename)
        image_data = str(f.read())
      except urllib2.HTTPError, e:
        print e
        continue
    else:
      try:
        with open(filename, u'rb') as f:
          image_data = f.read()
      except IOError, e:
        print u' couldn\'t open %s: %s' % (filename, e.strerror)
        continue
    image_data = base64.urlsafe_b64encode(image_data)
    body = {u'photoData': image_data}
    callGAPI(cd.users().photos(), u'update', soft_errors=True, userKey=user, body=body)
def getPhoto(users):
cd = buildGAPIObject(u'directory')
i = 1
count = len(users)
for user in users:
if user[:4].lower() == u'uid:':
user = user[4:]
elif user.find(u'@') == -1:
user = u'%s@%s' % (user, GC_Values[GC_DOMAIN])
filename = u'%s.jpg' % user
print u"Saving photo to %s (%s/%s)" % (filename, i, count)
i += 1
try:
photo = callGAPI(cd.users().photos(), u'get', throw_reasons=[u'notFound'], userKey=user)
except googleapiclient.errors.HttpError:
print u' no photo for %s' % user
continue
try:
photo_data = str(photo[u'photoData'])
print photo_data
photo_data = base64.urlsafe_b64decode(photo_data)
except KeyError:
print u' no photo for %s' % user
continue
writeFile(filename, photo_data, continueOnError=True)
def deletePhoto(users):
cd = buildGAPIObject(u'directory')
i = 1
count = len(users)
for user in users:
if user[:4].lower() == u'uid:':
user = user[4:]
elif user.find(u'@') == -1:
user = u'%s@%s' % (user, GC_Values[GC_DOMAIN])
print u"Deleting photo for %s (%s of %s)" % (user, i, count)
callGAPI(cd.users().photos(), u'delete', userKey=user)
i += 1
def showCalendars(users):
  """Print each user's calendar list with summary, access level, and flags.

  Optional fields (description, location, hidden, selected, default
  reminders) are absent from the API response when unset; the KeyError
  handlers supply the defaults shown.
  """
  for user in users:
    cal = buildGAPIObject(u'calendar', user)
    feed = callGAPI(cal.calendarList(), u'list')
    for usercal in feed[u'items']:
      print u'  Name: %s' % usercal[u'id']
      print convertUTF8(u'  Summary: %s' % usercal[u'summary'])
      try:
        print convertUTF8(u'  Description: %s' % usercal[u'description'])
      except KeyError:
        print u'  Description: '
      print u'  Access Level: %s' % usercal[u'accessRole']
      print u'  Timezone: %s' % usercal[u'timeZone']
      try:
        print convertUTF8(u'  Location: %s' % usercal[u'location'])
      except KeyError:
        pass
      try:
        print u'  Hidden: %s' % usercal[u'hidden']
      except KeyError:
        print u'  Hidden: False'
      try:
        print u'  Selected: %s' % usercal[u'selected']
      except KeyError:
        print u'  Selected: False'
      print u'  Default Reminders:'
      try:
        for reminder in usercal[u'defaultReminders']:
          print u'    Type: %s  Minutes: %s' % (reminder[u'method'], reminder[u'minutes'])
      except KeyError:
        pass
      print u''
def showCalSettings(users):
for user in users:
cal = buildGAPIObject(u'calendar', user)
feed = callGAPI(cal.settings(), u'list')
for setting in feed[u'items']:
print u'%s: %s' % (setting[u'id'], setting[u'value'])
def showDriveSettings(users):
  """Output a CSV of each user's Drive about/settings values.

  One row per user; per-service quota usage is flattened into columns
  named after each service, reported in megabytes.  The u'todrive'
  argument uploads the CSV to Drive instead of printing it.
  """
  todrive = False
  i = 5
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'todrive':
      todrive = True
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam <users> show drivesettings"' % sys.argv[i]
      sys.exit(2)
  # Response keys that are large, redundant, or not useful in a report.
  dont_show = [u'kind', u'selfLink', u'exportFormats', u'importFormats', u'maxUploadSizes', u'additionalRoleInfo', u'etag', u'features', u'user', u'isCurrentAppInstalled']
  count = 1
  drive_attr = []
  titles = [u'email',]
  for user in users:
    sys.stderr.write(u'Getting Drive settings for %s (%s of %s)\n' % (user, count, len(users)))
    count += 1
    drive = buildGAPIObject(u'drive', user)
    feed = callGAPI(drive.about(), u'get', soft_errors=True)
    if feed == None:
      continue
    row = {u'email': user}
    for setting in feed:
      if setting in dont_show:
        continue
      if setting == u'quotaBytesByService':
        # Flatten per-service quota entries into one MB column per service.
        for subsetting in feed[setting]:
          my_name = subsetting[u'serviceName']
          my_bytes = int(subsetting[u'bytesUsed'])
          row[my_name] = u'%smb' % (my_bytes / 1024 / 1024)
          if my_name not in titles:
            titles.append(my_name)
        continue
      row[setting] = feed[setting]
      if setting not in titles:
        titles.append(setting)
    drive_attr.append(row)
  # Header row: column name mapped to itself, inserted ahead of the data.
  headers = {}
  for title in titles:
    headers[title] = title
  drive_attr.insert(0, headers)
  output_csv(drive_attr, titles, u'User Drive Settings', todrive)
def doDriveActivity(users):
  """Output a CSV of Drive activity events for each user.

  Scope is either a single file (u'fileid') or a folder subtree
  (u'folderid', defaulting to the Drive root).  Each activity's
  combinedEvent is flattened into CSV columns discovered on the fly.
  """
  drive_ancestorId = u'root'
  drive_fileId = None
  todrive = False
  i = 5
  while i < len(sys.argv):
    activity_object = sys.argv[i].lower().replace(u'_', u'')
    if activity_object == u'fileid':
      drive_fileId = sys.argv[i+1]
      drive_ancestorId = None
      i += 2
    elif activity_object == u'folderid':
      drive_ancestorId = sys.argv[i+1]
      i += 2
    elif activity_object == u'todrive':
      todrive = True
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam <users> show driveactivity"' % sys.argv[i]
      sys.exit(2)
  # Element 0 doubles as the CSV header row and is grown as keys appear.
  activity_attributes = [{},]
  for user in users:
    activity = buildGAPIObject(u'appsactivity', user)
    page_message = u'Retrieved %%%%total_items%%%% activities for %s' % user
    feed = callGAPIpages(activity.activities(), u'list', u'activities',
                         page_message=page_message, source=u'drive.google.com', userId=u'me',
                         drive_ancestorId=drive_ancestorId, groupingStrategy=u'none',
                         drive_fileId=drive_fileId, pageSize=GC_Values[GC_ACTIVITY_MAX_RESULTS])
    for item in feed:
      activity_attributes.append(flatten_json(item[u'combinedEvent']))
      for an_item in activity_attributes[-1]:
        if an_item not in activity_attributes[0]:
          activity_attributes[0][an_item] = an_item
  output_csv(activity_attributes, activity_attributes[0], u'Drive Activity', todrive)
def showDriveFileACL(users):
fileId = sys.argv[5]
for user in users:
drive = buildGAPIObject(u'drive', user)
feed = callGAPI(drive.permissions(), u'list', fileId=fileId)
for permission in feed[u'items']:
try:
print permission[u'name']
except KeyError:
pass
for key in permission:
if key in [u'name', u'kind', u'etag', u'selfLink',]:
continue
print u' %s: %s' % (key, permission[key])
print u''
def delDriveFileACL(users):
fileId = sys.argv[5]
permissionId = unicode(sys.argv[6])
for user in users:
drive = buildGAPIObject(u'drive', user)
if permissionId[:3].lower() == u'id:':
permissionId = permissionId[3:]
elif permissionId.lower() in [u'anyone']:
pass
else:
permissionId = callGAPI(drive.permissions(), u'getIdForEmail', email=permissionId, fields=u'id')[u'id']
print u'Removing permission for %s from %s' % (permissionId, fileId)
callGAPI(drive.permissions(), u'delete', fileId=fileId, permissionId=permissionId)
def addDriveFileACL(users):
fileId = sys.argv[5]
body = {u'type': sys.argv[6].lower()}
sendNotificationEmails = False
emailMessage = None
if body[u'type'] not in [u'user', u'group', u'domain', u'anyone']:
print u'ERROR: permission type must be user, group domain or anyone. Got %s' % body[u'type']
if body[u'type'] == u'anyone':
i = 7
else:
body[u'value'] = sys.argv[7]
i = 8
while i < len(sys.argv):
if sys.argv[i].lower().replace(u'_', u'') == u'withlink':
body[u'withLink'] = True
i += 1
elif sys.argv[i].lower() == u'role':
body[u'role'] = sys.argv[i+1]
if body[u'role'] not in [u'reader', u'commenter', u'writer', u'owner', u'editor']:
print u'ERROR: role must be reader, commenter, writer or owner, got %s' % body[u'role']
sys.exit(2)
if body[u'role'] == u'commenter':
body[u'role'] = u'reader'
body[u'additionalRoles'] = [u'commenter']
elif body[u'role'] == u'editor':
body[u'role'] = u'writer'
i += 2
elif sys.argv[i].lower().replace(u'_', u'') == u'sendemail':
sendNotificationEmails = True
i += 1
elif sys.argv[i].lower().replace(u'_', u'') == u'emailmessage':
sendNotificationEmails = True
emailMessage = sys.argv[i+1]
i += 2
else:
print u'ERROR: %s is not a valid argument for "gam <users> add drivefileacl"' % sys.argv[i]
sys.exit(2)
for user in users:
drive = buildGAPIObject(u'drive', user)
result = callGAPI(drive.permissions(), u'insert', fileId=fileId, sendNotificationEmails=sendNotificationEmails, emailMessage=emailMessage, body=body)
print result
def updateDriveFileACL(users):
fileId = sys.argv[5]
permissionId = unicode(sys.argv[6])
transferOwnership = None
body = {}
i = 7
while i < len(sys.argv):
if sys.argv[i].lower().replace(u'_', u'') == u'withlink':
body[u'withLink'] = True
i += 1
elif sys.argv[i].lower() == u'role':
body[u'role'] = sys.argv[i+1]
if body[u'role'] not in [u'reader', u'commenter', u'writer', u'owner']:
print u'ERROR: role must be reader, commenter, writer or owner, got %s' % body[u'role']
sys.exit(2)
if body[u'role'] == u'commenter':
body[u'role'] = u'reader'
body[u'additionalRoles'] = [u'commenter']
i += 2
elif sys.argv[i].lower().replace(u'_', u'') == u'transferownership':
if sys.argv[i+1].lower() in true_values:
transferOwnership = True
elif sys.argv[i+1].lower() in false_values:
transferOwnership = False
else:
print u'ERROR: transferownership should be true or false, got %s' % sys.argv[i+1].lower()
i += 2
else:
print u'ERROR: %s is not a valid argument for "gam <users> update drivefileacl"' % sys.argv[i]
sys.exit(2)
for user in users:
drive = buildGAPIObject(u'drive', user)
if permissionId[:3].lower() == u'id:':
permissionId = permissionId[3:]
else:
permissionId = callGAPI(drive.permissions(), u'getIdForEmail', email=permissionId, fields=u'id')[u'id']
print u'updating permissions for %s to file %s' % (permissionId, fileId)
result = callGAPI(drive.permissions(), u'patch', fileId=fileId, permissionId=permissionId, transferOwnership=transferOwnership, body=body)
print result
def showDriveFiles(users):
  """Output a CSV listing of Drive files owned by each user.

  Arguments from position 5 select extra metadata fields, label flags,
  an additional query clause, or u'todrive'.  The partial fields string
  is built up incrementally and closed with u')' unless u'allfields'
  selected u'*'.  files_attr[0] doubles as the CSV header row.
  """
  files_attr = [{u'Owner': u'Owner',}]
  titles = [u'Owner',]
  # Deliberately unterminated; the closing u')' is appended after parsing.
  fields = u'nextPageToken,items(title,alternateLink'
  todrive = False
  query = u'"me" in owners'
  i = 5
  labels = list()
  while i < len(sys.argv):
    my_arg = sys.argv[i].lower().replace(u'_', u'')
    if my_arg == u'todrive':
      todrive = True
      i += 1
    elif my_arg == u'query':
      query += u' and %s' % sys.argv[i+1]
      i += 2
    elif my_arg == u'allfields':
      fields = u'*'
      i += 1
    elif my_arg == u'createddate':
      fields += u',createdDate'
      i += 1
    elif my_arg == u'description':
      fields += u',description'
      i += 1
    elif my_arg == u'fileextension':
      fields += u',fileExtension'
      i += 1
    elif my_arg == u'filesize':
      fields += u',fileSize'
      i += 1
    elif my_arg == u'id':
      fields += u',id'
      i += 1
    elif my_arg in [u'restricted', u'restrict']:
      labels.append(u'restricted')
      i += 1
    elif my_arg in [u'starred', u'star']:
      labels.append(u'starred')
      i += 1
    elif my_arg in [u'trashed', u'trash']:
      labels.append(u'trashed')
      i += 1
    elif my_arg in [u'viewed', u'view']:
      labels.append(u'viewed')
      i += 1
    elif my_arg in [u'lastmodifyinguser', u'lastmodifyingusername']:
      fields += u',lastModifyingUserName'
      i += 1
    elif my_arg in [u'lastviewedbyuser', u'lastviewedbymedate']:
      fields += u',lastViewedByMeDate'
      i += 1
    elif my_arg in [u'md5', u'md5sum', u'md5checksum']:
      fields += u',md5Checksum'
      i += 1
    elif my_arg in [u'mimetype', u'mime']:
      fields += u',mimeType'
      i += 1
    elif my_arg in [u'modifiedbyuser', u'modifiedbymedate']:
      fields += u',modifiedByMeDate'
      i += 1
    elif my_arg in [u'modifieddate']:
      fields += u',modifiedDate'
      i += 1
    elif my_arg in [u'originalfilename']:
      fields += u',originalFilename'
      i += 1
    elif my_arg in [u'quotaused', u'quotabytesused']:
      fields += u',quotaBytesUsed'
      i += 1
    elif my_arg in [u'shared']:
      fields += u',shared'
      i += 1
    elif my_arg in [u'writerscanshare']:
      fields += u',writersCanShare'
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam <users> show filelist"' % my_arg
      sys.exit(2)
  if len(labels) > 0:
    fields += u',labels(%s)' % ','.join(labels)
  if fields != u'*':
    fields += u')'
  for user in users:
    drive = buildGAPIObject(u'drive', user)
    if user.find(u'@') == -1:
      print u'ERROR: got %s, expected a full email address' % user
      sys.exit(2)
    sys.stderr.write(u'Getting files for %s...\n' % user)
    page_message = u' got %%%%total_items%%%% files for %s...\n' % user
    feed = callGAPIpages(drive.files(), u'list', u'items',
                         page_message=page_message, soft_errors=True,
                         q=query, fields=fields, maxResults=GC_Values[GC_DRIVE_MAX_RESULTS])
    for f_file in feed:
      a_file = {u'Owner': user}
      for attrib in f_file:
        if attrib in [u'kind', u'etags', u'etag', u'owners', u'parents', u'permissions']:
          continue
        attrib_type = type(f_file[attrib])
        # New scalar columns are registered in titles and the header row.
        if attrib not in titles and not attrib_type is dict:
          titles.append(attrib)
          files_attr[0][attrib] = attrib
        if attrib_type is list:
          a_file[attrib] = u' '.join(f_file[attrib])
        elif attrib_type is unicode or attrib_type is bool:
          a_file[attrib] = f_file[attrib]
        elif attrib_type is dict:
          if attrib == u'labels':
            # Labels are flattened to one boolean column per label name.
            for dict_attrib in f_file[attrib]:
              if dict_attrib not in titles:
                titles.append(dict_attrib)
                files_attr[0][dict_attrib] = dict_attrib
              a_file[dict_attrib] = f_file[attrib][dict_attrib]
          else:
            # Other dict values become dotted columns, e.g. u'source.title'.
            for dict_attrib in f_file[attrib]:
              if dict_attrib in [u'kind', u'etags', u'etag']:
                continue
              x_attrib = u'{0}.{1}'.format(attrib, dict_attrib)
              if x_attrib not in titles:
                titles.append(x_attrib)
                files_attr[0][x_attrib] = x_attrib
              a_file[x_attrib] = f_file[attrib][dict_attrib]
        else:
          print attrib_type
      files_attr.append(a_file)
  output_csv(files_attr, titles, u'%s %s Drive Files' % (sys.argv[1], sys.argv[2]), todrive)
def doDriveSearch(drive, query=None):
print u'Searching for files with query: "%s"...' % query
page_message = u' got %%total_items%% files...\n'
files = callGAPIpages(drive.files(), u'list', u'items',
page_message=page_message,
q=query, fields=u'nextPageToken,items(id)', maxResults=GC_Values[GC_DRIVE_MAX_RESULTS])
ids = list()
for f_file in files:
ids.append(f_file[u'id'])
return ids
def deleteDriveFile(users):
  """Trash (default) or purge Drive files for each user.

  sys.argv[5] is either u'query:<drive query>' or a file id/URL; the
  optional u'purge' argument permanently deletes instead of trashing.
  """
  fileIds = sys.argv[5]
  function = u'trash'
  i = 6
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'purge':
      function = u'delete'
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam <users> delete drivefile"' % sys.argv[i]
      sys.exit(2)
  for user in users:
    drive = buildGAPIObject(u'drive', user)
    if fileIds[:6].lower() == u'query:':
      file_ids = doDriveSearch(drive, query=fileIds[6:])
    else:
      # Accept a full Drive URL and extract the file id from its /d/ path.
      if fileIds[:8].lower() == u'https://' or fileIds[:7].lower() == u'http://':
        fileIds = fileIds[fileIds.find(u'/d/')+3:]
        if fileIds.find(u'/') != -1:
          fileIds = fileIds[:fileIds.find(u'/')]
      file_ids = [fileIds,]
    if not file_ids:
      print u'No files to delete for %s' % user
    i = 0
    for fileId in file_ids:
      i += 1
      if function == u'trash':
        print u'trashing %s for %s (%s of %s)' % (fileId, user, i, len(file_ids))
      else:
        print u'purging %s for %s (%s of %s)' % (fileId, user, i, len(file_ids))
      callGAPI(drive.files(), function, fileId=fileId)
def printDriveFolderContents(feed, folderId, indent):
for f_file in feed:
for parent in f_file[u'parents']:
if folderId == parent[u'id']:
print u' ' * indent, convertUTF8(f_file[u'title'])
if f_file[u'mimeType'] == u'application/vnd.google-apps.folder':
printDriveFolderContents(feed, f_file[u'id'], indent+1)
def showDriveFileTree(users):
for user in users:
drive = buildGAPIObject(u'drive', user)
if user.find(u'@') == -1:
print u'ERROR: got %s, expected a full email address' % user
sys.exit(2)
root_folder = callGAPI(drive.about(), u'get', fields=u'rootFolderId')[u'rootFolderId']
sys.stderr.write(u'Getting all files for %s...\n' % user)
page_message = u' got %%%%total_items%%%% files for %s...\n' % user
feed = callGAPIpages(drive.files(), u'list', u'items', page_message=page_message,
fields=u'items(id,title,parents(id),mimeType),nextPageToken', maxResults=GC_Values[GC_DRIVE_MAX_RESULTS])
printDriveFolderContents(feed, root_folder, 0)
def deleteEmptyDriveFolders(users):
  """Repeatedly delete childless folders from each user's Drive.

  Loops until a full pass deletes nothing, so parents emptied by an
  earlier pass are removed by the next one.
  """
  query = u'"me" in owners and mimeType = "application/vnd.google-apps.folder"'
  for user in users:
    drive = buildGAPIObject(u'drive', user)
    if user.find(u'@') == -1:
      print u'ERROR: got %s, expected a full email address' % user
      sys.exit(2)
    deleted_empty = True
    while deleted_empty:
      sys.stderr.write(u'Getting folders for %s...\n' % user)
      page_message = u' got %%%%total_items%%%% folders for %s...\n' % user
      feed = callGAPIpages(drive.files(), u'list', u'items', page_message=page_message,
                           q=query, fields=u'items(title,id),nextPageToken', maxResults=GC_Values[GC_DRIVE_MAX_RESULTS])
      deleted_empty = False
      for folder in feed:
        # maxResults=1: only need to know whether at least one child exists.
        children = callGAPI(drive.children(), u'list',
                            folderId=folder[u'id'], fields=u'items(id)', maxResults=1)
        if not u'items' in children or len(children[u'items']) == 0:
          print convertUTF8(u' deleting empty folder %s...' % folder[u'title'])
          callGAPI(drive.files(), u'delete', fileId=folder[u'id'])
          deleted_empty = True
        else:
          print convertUTF8(u' not deleting folder %s because it contains at least 1 item (%s)' % (folder[u'title'], children[u'items'][0][u'id']))
def doUpdateDriveFile(users):
convert = ocr = ocrLanguage = parent_query = local_filepath = media_body = fileIds = drivefilename = None
operation = u'update'
i = 5
body = {}
while i < len(sys.argv):
if sys.argv[i].lower().replace(u'_', u'') == u'localfile':
local_filepath = sys.argv[i+1]
local_filename = ntpath.basename(local_filepath)
mimetype = mimetypes.guess_type(local_filepath)[0]
if mimetype == None:
mimetype = u'application/octet-stream'
body[u'title'] = local_filename
body[u'mimeType'] = mimetype
i += 2
elif sys.argv[i].lower() == u'copy':
operation = u'copy'
i += 1
elif sys.argv[i].lower() == u'id':
fileIds = [sys.argv[i+1],]
i += 2
elif sys.argv[i].lower() == u'drivefilename':
drivefilename = sys.argv[i+1]
i += 2
elif sys.argv[i].lower().replace(u'_', u'') == u'newfilename':
body[u'title'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() in [u'convert']:
convert = True
i += 1
elif sys.argv[i].lower() in [u'ocr',]:
ocr = True
i += 1
elif sys.argv[i].lower() in [u'ocrlanguage',]:
ocrLanguage = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() in [u'restrict', u'restricted']:
if 'labels' not in body:
body[u'labels'] = dict()
if sys.argv[i+1].lower() in true_values:
body[u'labels'][u'restricted'] = True
elif sys.argv[i+1].lower() in false_values:
body[u'labels'][u'restricted'] = False
else:
print u'ERROR: value for restricted must be true or false, got %s' % sys.argv[i+1]
sys.exit(2)
i += 2
elif sys.argv[i].lower() in [u'star', u'starred']:
if u'labels' not in body:
body[u'labels'] = dict()
if sys.argv[i+1].lower() in true_values:
body[u'labels'][u'starred'] = True
elif sys.argv[i+1].lower() in false_values:
body[u'labels'][u'starred'] = False
else:
print u'ERROR: value for starred must be true or false, got %s' % sys.argv[i+1]
sys.exit(2)
i += 2
elif sys.argv[i].lower() in [u'trash', u'trashed']:
if u'labels' not in body:
body[u'labels'] = dict()
if sys.argv[i+1].lower() in true_values:
body[u'labels'][u'trashed'] = True
elif sys.argv[i+1].lower() in false_values:
body[u'labels'][u'trashed'] = False
else:
print u'ERROR: value for trashed must be true or false, got %s' % sys.argv[i+1]
sys.exit(2)
i += 2
elif sys.argv[i].lower() in [u'view', u'viewed']:
if u'labels' not in body:
body[u'labels'] = dict()
if sys.argv[i+1].lower() in true_values:
body[u'labels'][u'viewed'] = True
elif sys.argv[i+1].lower() in false_values:
body[u'labels'][u'viewed'] = False
else:
print u'ERROR: value for viewed must be true or false, got %s' % sys.argv[i+1]
sys.exit(2)
i += 2
elif sys.argv[i].lower() == u'lastviewedbyme':
body[u'lastViewedByMe'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'modifieddate':
body[u'modifiedDate'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() in [u'description',]:
body[u'description'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() in [u'mimetype',]:
body[u'mimeType'] = sys.argv[i+1]
if body[u'mimeType'].lower() in [u'gdoc', u'gdocument']:
body[u'mimeType'] = u'application/vnd.google-apps.document'
elif body[u'mimeType'].lower() == u'gdrawing':
body[u'mimeType'] = u'application/vnd.google-apps.drawing'
elif body[u'mimeType'].lower() in [u'gfolder', u'gdirectory']:
body[u'mimeType'] = u'application/vnd.google-apps.folder'
elif body[u'mimeType'].lower() == u'gform':
body[u'mimeType'] = u'application/vnd.google-apps.form'
elif body[u'mimeType'].lower() == u'gfusion':
body[u'mimeType'] = u'application/vnd.google-apps.fusiontable'
elif body[u'mimeType'].lower() == u'gpresentation':
body[u'mimeType'] = u'application/vnd.google-apps.presentation'
elif body[u'mimeType'].lower() == u'gscript':
body[u'mimeType'] = u'application/vnd.google-apps.script'
elif body[u'mimeType'].lower() == u'gsite':
body[u'mimeType'] = u'application/vnd.google-apps.sites'
elif body[u'mimeType'].lower() in [u'gsheet', u'gspreadsheet']:
body[u'mimeType'] = u'application/vnd.google-apps.spreadsheet'
i += 2
elif sys.argv[i].lower() in [u'parentid']:
if u'parents' not in body:
body[u'parents'] = list()
body[u'parents'].append({u'id': sys.argv[i+1]})
i += 2
elif sys.argv[i].lower().replace(u'_', u'') in [u'parentname']:
parent_query = u'mimeType = "application/vnd.google-apps.folder" and "me" in owners and title = "%s"' % sys.argv[i+1]
i += 2
elif sys.argv[i].lower() in [u'writerscantshare']:
body[u'writersCanShare'] = False
i += 1
else:
print u'ERROR: %s is not a valid argument for "gam <users> update drivefile"' % sys.argv[i]
sys.exit(2)
if not fileIds and not drivefilename:
print u'ERROR: you need to specify either id or query in order to determine the file(s) to update'
sys.exit(2)
elif fileIds and drivefilename:
print u'ERROR: you cannot specify both an id and a query.'
sys.exit(2)
for user in users:
drive = buildGAPIObject(u'drive', user)
if parent_query:
more_parents = doDriveSearch(drive, query=parent_query)
if u'parents' not in body:
body[u'parents'] = list()
for a_parent in more_parents:
body[u'parents'].append({u'id': a_parent})
if drivefilename:
fileIds = doDriveSearch(drive, query=u'"me" in owners and title = "%s"' % drivefilename)
if local_filepath:
media_body = googleapiclient.http.MediaFileUpload(local_filepath, mimetype=mimetype, resumable=True)
for fileId in fileIds:
if operation == u'update':
if media_body:
result = callGAPI(drive.files(), u'update', fileId=fileId, convert=convert, ocr=ocr, ocrLanguage=ocrLanguage, media_body=media_body, body=body, fields=u'id')
else:
result = callGAPI(drive.files(), u'patch', fileId=fileId, convert=convert, ocr=ocr, ocrLanguage=ocrLanguage, body=body, fields=u'id,labels')
try:
print u'Successfully updated %s drive file with content from %s' % (result[u'id'], local_filename)
except UnboundLocalError:
print u'Successfully updated drive file/folder ID %s' % (result[u'id'])
else:
result = callGAPI(drive.files(), u'copy', fileId=fileId, convert=convert, ocr=ocr, ocrLanguage=ocrLanguage, body=body, fields=u'id,labels')
print u'Successfully copied %s to %s' % (fileId, result[u'id'])
def createDriveFile(users):
  """Create a new Drive file or folder for each user in users.

  Arguments are parsed from sys.argv[5:]: an optional local file to
  upload, title/mimetype overrides, label flags, timestamps and parent
  folders. Exits with status 2 on an unrecognized argument.
  """
  convert = ocr = ocrLanguage = parent_query = local_filepath = media_body = None
  i = 5
  body = {}
  # Build the Drive API 'insert' request body from the command line.
  while i < len(sys.argv):
    if sys.argv[i].lower().replace(u'_', u'') == u'localfile':
      local_filepath = sys.argv[i+1]
      local_filename = ntpath.basename(local_filepath)
      mimetype = mimetypes.guess_type(local_filepath)[0]
      if mimetype == None:
        # Unguessable extension: fall back to generic binary.
        mimetype = u'application/octet-stream'
      body[u'title'] = local_filename
      body[u'mimeType'] = mimetype
      i += 2
    elif sys.argv[i].lower().replace(u'_', u'') == u'drivefilename':
      body[u'title'] = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() in [u'convert']:
      convert = True
      i += 1
    elif sys.argv[i].lower() in [u'ocr',]:
      ocr = True
      i += 1
    elif sys.argv[i].lower() in [u'ocrlanguage',]:
      ocrLanguage = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() in [u'restrict', u'restricted']:
      if u'labels' not in body:
        body[u'labels'] = dict()
      body[u'labels'][u'restricted'] = True
      i += 1
    elif sys.argv[i].lower() in [u'star', u'starred']:
      if u'labels' not in body:
        body[u'labels'] = dict()
      body[u'labels'][u'starred'] = True
      i += 1
    elif sys.argv[i].lower() in [u'trash', u'trashed']:
      if u'labels' not in body:
        body[u'labels'] = dict()
      body[u'labels'][u'trashed'] = True
      i += 1
    elif sys.argv[i].lower() in [u'view', u'viewed']:
      if u'labels' not in body:
        body[u'labels'] = dict()
      body[u'labels'][u'viewed'] = True
      i += 1
    elif sys.argv[i].lower() == u'lastviewedbyme':
      body[u'lastViewedByMe'] = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'modifieddate':
      body[u'modifiedDate'] = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() in [u'description',]:
      body[u'description'] = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() in [u'mimetype',]:
      # Map GAM shorthand (gdoc, gsheet, gfolder, ...) onto the
      # corresponding Google Apps MIME types.
      body[u'mimeType'] = sys.argv[i+1]
      if body[u'mimeType'].lower() in [u'gdoc', u'gdocument']:
        body[u'mimeType'] = u'application/vnd.google-apps.document'
      elif body[u'mimeType'].lower() == u'gdrawing':
        body[u'mimeType'] = u'application/vnd.google-apps.drawing'
      elif body[u'mimeType'].lower() in [u'gfolder', u'gdirectory']:
        body[u'mimeType'] = u'application/vnd.google-apps.folder'
      elif body[u'mimeType'].lower() == u'gform':
        body[u'mimeType'] = u'application/vnd.google-apps.form'
      elif body[u'mimeType'].lower() == u'gfusion':
        body[u'mimeType'] = u'application/vnd.google-apps.fusiontable'
      elif body[u'mimeType'].lower() == u'gpresentation':
        body[u'mimeType'] = u'application/vnd.google-apps.presentation'
      elif body[u'mimeType'].lower() == u'gscript':
        body[u'mimeType'] = u'application/vnd.google-apps.script'
      elif body[u'mimeType'].lower() == u'gsite':
        body[u'mimeType'] = u'application/vnd.google-apps.sites'
      elif body[u'mimeType'].lower() in [u'gsheet', u'gspreadsheet']:
        body[u'mimeType'] = u'application/vnd.google-apps.spreadsheet'
      i += 2
    elif sys.argv[i].lower() in [u'parentid']:
      if u'parents' not in body:
        body[u'parents'] = list()
      body[u'parents'].append({u'id': sys.argv[i+1]})
      i += 2
    elif sys.argv[i].lower().replace(u'_', u'') in [u'parentname']:
      # Deferred: resolved per-user below, since folder IDs differ per user.
      parent_query = u'mimeType = "application/vnd.google-apps.folder" and "me" in owners and title = "%s"' % sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() in [u'writerscantshare']:
      body[u'writersCanShare'] = False
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam <users> create drivefile"' % sys.argv[i]
      sys.exit(2)
  for user in users:
    drive = buildGAPIObject(u'drive', user)
    if parent_query:
      # Resolve the parent-name query against this user's own Drive.
      more_parents = doDriveSearch(drive, query=parent_query)
      if u'parents' not in body:
        body[u'parents'] = list()
      for a_parent in more_parents:
        body[u'parents'].append({u'id': a_parent})
    if local_filepath:
      media_body = googleapiclient.http.MediaFileUpload(local_filepath, mimetype=mimetype, resumable=True)
    result = callGAPI(drive.files(), u'insert', convert=convert, ocr=ocr, ocrLanguage=ocrLanguage, media_body=media_body, body=body, fields=u'id')
    try:
      # local_filename is only bound when a local file was given; the
      # UnboundLocalError distinguishes upload vs. metadata-only create.
      print u'Successfully uploaded %s to Drive file ID %s' % (local_filename, result[u'id'])
    except UnboundLocalError:
      print u'Successfully created drive file/folder ID %s' % (result[u'id'])
def downloadDriveFile(users):
  """Download Drive files for each user, selected by id or search query.

  Google-native documents are exported in the requested format
  (openoffice, microsoft or pdf); regular files use their downloadUrl.
  Filenames are sanitized and de-duplicated inside target_folder.
  """
  i = 5
  query = fileIds = None
  gdownload_format = u'openoffice'
  target_folder = GC_Values[GC_DRIVE_DIR]
  # Characters permitted in the on-disk filename; everything else is dropped.
  safe_filename_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'id':
      fileIds = [sys.argv[i+1],]
      i += 2
    elif sys.argv[i].lower() == u'query':
      query = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'format':
      gdownload_format = sys.argv[i+1].lower()
      if gdownload_format not in [u'openoffice', u'ms', u'microsoft', u'micro$oft', u'pdf']:
        print u'ERROR: format must be one of openoffice, microsoft or pdf. Got %s' % gdownload_format
        sys.exit(2)
      i += 2
    elif sys.argv[i].lower().replace(u'_', u'') == u'targetfolder':
      target_folder = sys.argv[i+1]
      if not os.path.isdir(target_folder):
        os.makedirs(target_folder)
      i += 2
    else:
      print u'ERROR: %s is not a valid argument for "gam <users> get drivefile"' % sys.argv[i]
      sys.exit(2)
  # File extension to append for each export MIME type.
  export_extensions = {u'application/pdf': '.pdf',
                       u'application/vnd.openxmlformats-officedocument.wordprocessingml.document': u'.docx',
                       u'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet': u'.xlsx',
                       u'application/vnd.openxmlformats-officedocument.presentationml.presentation': u'.pptx',
                       u'application/vnd.oasis.opendocument.text': u'.odt',
                       u'application/x-vnd.oasis.opendocument.spreadsheet': u'.ods'}
  # Acceptable export MIME types for the chosen download format.
  if gdownload_format == u'openoffice':
    export_formats = [u'application/vnd.oasis.opendocument.text',
                      u'application/x-vnd.oasis.opendocument.spreadsheet']
  elif gdownload_format in [u'ms', u'microsoft', u'micro$oft']:
    export_formats = [u'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
                      u'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
                      u'application/vnd.openxmlformats-officedocument.presentationml.presentation']
  elif gdownload_format == u'pdf':
    export_formats = [u'application/pdf',]
  if not query and not fileIds:
    print u'ERROR: need to specify a file ID with id parameter or a search query with the query parameter.'
    sys.exit(2)
  elif query and fileIds:
    print u'ERROR: you cannot specify both the id and query parameters at the same time.'
    sys.exit(2)
  for user in users:
    drive = buildGAPIObject(u'drive', user)
    if query:
      fileIds = doDriveSearch(drive, query=query)
    else:
      # Accept a full Drive URL as the id: extract the segment after /d/.
      if fileIds[0][:8].lower() == u'https://' or fileIds[0][:7].lower() == u'http://':
        fileIds[0] = fileIds[0][fileIds[0].find(u'/d/')+3:]
        if fileIds[0].find(u'/') != -1:
          fileIds[0] = fileIds[0][:fileIds[0].find(u'/')]
    if not fileIds:
      print u'No files to download for %s' % user
    i = 0
    for fileId in fileIds:
      extension = None
      result = callGAPI(drive.files(), u'get', fileId=fileId, fields=u'fileSize,title,mimeType,downloadUrl,exportLinks')
      if result[u'mimeType'] == u'application/vnd.google-apps.folder':
        print convertUTF8(u'Skipping download of folder %s' % result[u'title'])
        continue
      try:
        # Google-native docs have no fileSize; KeyError selects the
        # "Downloading Google Doc" message instead.
        result[u'fileSize'] = int(result[u'fileSize'])
        if result[u'fileSize'] < 1024:
          filesize = u'1kb'
        elif result[u'fileSize'] < (1024 * 1024):
          filesize = u'%skb' % (result[u'fileSize'] / 1024)
        elif result[u'fileSize'] < (1024 * 1024 * 1024):
          filesize = u'%smb' % (result[u'fileSize'] / 1024 / 1024)
        else:
          filesize = u'%sgb' % (result[u'fileSize'] / 1024 / 1024 / 1024)
        my_line = u'Downloading: %%s of %s bytes' % filesize
      except KeyError:
        my_line = u'Downloading Google Doc: %s'
      if u'downloadUrl' in result:
        download_url = result[u'downloadUrl']
      elif u'exportLinks' in result:
        # Pick the first available export link matching the chosen format.
        for avail_export_format in result[u'exportLinks']:
          if avail_export_format in export_formats:
            download_url = result[u'exportLinks'][avail_export_format]
            try:
              extension = export_extensions[avail_export_format]
            except KeyError:
              pass
            break
        else:
          print convertUTF8(u'Skipping download of file {0}, Format {1} not available'.format(result[u'title'], u','.join(export_formats)))
          continue
      else:
        print convertUTF8(u'Skipping download of file {0}, Format not downloadable')
        continue
      file_title = result[u'title']
      safe_file_title = u''.join(c for c in file_title if c in safe_filename_chars)
      filename = os.path.join(target_folder, safe_file_title)
      if extension and filename.lower()[:len(extension)] != extension:
        filename = u'%s%s' % (filename, extension)
      # Avoid clobbering existing files: prefix (1)-, (2)-, ... until free.
      y = 0
      if os.path.isfile(filename):
        while True:
          y += 1
          new_filename = os.path.join(target_folder, u'(%s)-%s' % (y, safe_file_title))
          if extension and new_filename.lower()[:len(extension)] != extension:
            new_filename = u'%s%s' % (new_filename, extension)
          if not os.path.isfile(new_filename):
            break
        filename = new_filename
      print convertUTF8(my_line % filename)
      # NOTE(review): uses the service's private authorized http transport.
      _, content = drive._http.request(download_url)
      writeFile(filename, content, continueOnError=True)
def showDriveFileInfo(users):
  """Print all metadata for the Drive file sys.argv[5] of each user.

  Lists, dicts and scalars are formatted differently; 'kind' entries
  are suppressed at every level.
  """
  for user in users:
    fileId = sys.argv[5]
    drive = buildGAPIObject(u'drive', user)
    feed = callGAPI(drive.files(), u'get', fileId=fileId)
    for setting in feed:
      if setting == u'kind':
        continue
      # NOTE: Python 2-specific — compares str(type(...)) against the
      # classic "<type 'list'>" / "<type 'dict'>" reprs.
      setting_type = str(type(feed[setting]))
      if setting_type == u"<type 'list'>":
        print u'%s:' % setting
        for settin in feed[setting]:
          if settin == u'kind':
            continue
          settin_type = str(type(settin))
          if settin_type == u"<type 'dict'>":
            for setti in settin:
              if setti == u'kind':
                continue
              print convertUTF8(u' %s: %s' % (setti, settin[setti]))
            print u''
      elif setting_type == u"<type 'dict'>":
        print u'%s:' % setting
        for settin in feed[setting]:
          if settin == u'kind':
            continue
          print convertUTF8(u' %s: %s' % (settin, feed[setting][settin]))
      else:
        print convertUTF8(u'%s: %s' % (setting, feed[setting]))
def transferSecCals(users):
target_user = sys.argv[5]
remove_source_user = True
i = 6
while i < len(sys.argv):
if sys.argv[i].lower() == u'keepuser':
remove_source_user = False
i += 1
else:
print u'ERROR: %s is not a valid argument for "gam <users> transfer seccals"' % sys.argv[i]
sys.exit(2)
for user in users:
source_cal = buildGAPIObject(u'calendar', user)
source_calendars = callGAPIpages(source_cal.calendarList(), u'list', u'items', minAccessRole=u'owner', showHidden=True, fields=u'items(id),nextPageToken')
for source_cal in source_calendars:
if source_cal[u'id'].find(u'@group.calendar.google.com') != -1:
doCalendarAddACL(calendarId=source_cal[u'id'], act_as=user, role=u'owner', scope=u'user', entity=target_user)
if remove_source_user:
doCalendarAddACL(calendarId=source_cal[u'id'], act_as=target_user, role=u'none', scope=u'user', entity=user)
def transferDriveFiles(users):
  """Transfer every Drive file each user owns to a target user.

  The target is sys.argv[5]; ``keepuser`` keeps the source user's access.
  Files land under a "<user> old files" folder in the target's Drive,
  preserving the source folder hierarchy where possible.
  """
  target_user = sys.argv[5]
  remove_source_user = True
  i = 6
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'keepuser':
      remove_source_user = False
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam <users> transfer drive"' % sys.argv[i]
      sys.exit(2)
  target_drive = buildGAPIObject(u'drive', target_user)
  target_about = callGAPI(target_drive.about(), u'get', fields=u'quotaBytesTotal,quotaBytesUsed,rootFolderId')
  target_drive_free = int(target_about[u'quotaBytesTotal']) - int(target_about[u'quotaBytesUsed'])
  for user in users:
    counter = 0
    source_drive = buildGAPIObject(u'drive', user)
    source_about = callGAPI(source_drive.about(), u'get', fields=u'quotaBytesTotal,quotaBytesUsed,rootFolderId, permissionId')
    source_drive_size = int(source_about[u'quotaBytesUsed'])
    # Refuse the transfer if the target lacks quota for this user's data.
    if target_drive_free < source_drive_size:
      systemErrorExit(4, MESSAGE_NO_TRANSFER_LACK_OF_DISK_SPACE.format(source_drive_size / 1024 / 1024, target_drive_free / 1024 / 1024))
    print u'Source drive size: %smb  Target drive free: %smb' % (source_drive_size / 1024 / 1024, target_drive_free / 1024 / 1024)
    target_drive_free = target_drive_free - source_drive_size # prep target_drive_free for next user
    source_root = source_about[u'rootFolderId']
    source_permissionid = source_about[u'permissionId']
    print u"Getting file list for source user: %s..." % user
    page_message = u'  got %%total_items%% files\n'
    source_drive_files = callGAPIpages(source_drive.files(), u'list', u'items', page_message=page_message,
                                       q=u"'me' in owners and trashed = false", fields=u'items(id,parents,mimeType),nextPageToken')
    all_source_file_ids = []
    for source_drive_file in source_drive_files:
      all_source_file_ids.append(source_drive_file[u'id'])
    total_count = len(source_drive_files)
    print u"Getting folder list for target user: %s..." % target_user
    page_message = u'  got %%total_items%% folders\n'
    target_folders = callGAPIpages(target_drive.files(), u'list', u'items', page_message=page_message,
                                   q=u"'me' in owners and mimeType = 'application/vnd.google-apps.folder'", fields=u'items(id,title),nextPageToken')
    got_top_folder = False
    all_target_folder_ids = []
    for target_folder in target_folders:
      all_target_folder_ids.append(target_folder[u'id'])
      if (not got_top_folder) and target_folder[u'title'] == u'%s old files' % user:
        target_top_folder = target_folder[u'id']
        got_top_folder = True
    if not got_top_folder:
      # Create the "<user> old files" destination folder if absent.
      create_folder = callGAPI(target_drive.files(), u'insert', body={u'title': u'%s old files' % user, u'mimeType': u'application/vnd.google-apps.folder'}, fields=u'id')
      target_top_folder = create_folder[u'id']
    transferred_files = []
    while True:  # we loop thru, skipping files until all of their parents are done
      skipped_files = False
      for drive_file in source_drive_files:
        file_id = drive_file[u'id']
        if file_id in transferred_files:
          continue
        source_parents = drive_file[u'parents']
        skip_file_for_now = False
        for source_parent in source_parents:
          if source_parent[u'id'] not in all_source_file_ids and source_parent[u'id'] not in all_target_folder_ids:
            continue  # means this parent isn't owned by source or target, shouldn't matter
          if source_parent[u'id'] not in transferred_files and source_parent[u'id'] != source_root:
            #print u'skipping %s' % file_id
            skipped_files = skip_file_for_now = True
            break
        if skip_file_for_now:
          continue
        else:
          transferred_files.append(drive_file[u'id'])
        counter += 1
        print u'Changing owner for file %s (%s/%s)' % (drive_file[u'id'], counter, total_count)
        # Granting the target the 'owner' role performs the transfer itself.
        body = {u'role': u'owner', u'type': u'user', u'value': target_user}
        callGAPI(source_drive.permissions(), u'insert', soft_errors=True, fileId=file_id, sendNotificationEmails=False, body=body)
        target_parents = []
        for parent in source_parents:
          try:
            # Files directly under the source root move to the top folder.
            if parent[u'isRoot']:
              target_parents.append({u'id': target_top_folder})
            else:
              target_parents.append({u'id': parent[u'id']})
          except TypeError:
            pass
        callGAPI(target_drive.files(), u'patch', soft_errors=True, retry_reasons=[u'notFound'], fileId=file_id, body={u'parents': target_parents})
        if remove_source_user:
          callGAPI(target_drive.permissions(), u'delete', soft_errors=True, fileId=file_id, permissionId=source_permissionid)
      if not skipped_files:
        break
def doImap(users):
if sys.argv[4].lower() in true_values:
enable = True
elif sys.argv[4].lower() in false_values:
enable = False
else:
print u'ERROR: value for "gam <users> imap" must be true or false, got %s' % sys.argv[4]
sys.exit(2)
emailsettings = getEmailSettingsObject()
count = len(users)
i = 1
for user in users:
if user.find(u'@') > 0:
emailsettings.domain = user[user.find(u'@')+1:]
user = user[:user.find(u'@')]
else:
emailsettings.domain = GC_Values[GC_DOMAIN] #make sure it's back at default domain
print u"Setting IMAP Access to %s for %s (%s of %s)" % (str(enable), user+u'@'+emailsettings.domain, i, count)
i += 1
callGData(emailsettings, u'UpdateImap', soft_errors=True, username=user, enable=enable)
def getImap(users):
emailsettings = getEmailSettingsObject()
i = 1
count = len(users)
for user in users:
if user.find(u'@') > 0:
emailsettings.domain = user[user.find(u'@')+1:]
user = user[:user.find(u'@')]
else:
emailsettings.domain = GC_Values[GC_DOMAIN]
imapsettings = callGData(emailsettings, u'GetImap', soft_errors=True, username=user)
try:
print u'User %s IMAP Enabled:%s (%s of %s)' % (user+u'@'+emailsettings.domain, imapsettings[u'enable'], i, count)
except TypeError:
pass
i += 1
def getProductAndSKU(sku):
  """Translate a user-friendly SKU alias into a (productId, skuId) pair.

  Accepts shorthand such as u'apps', u'unlimited' or u'drive-20gb' and
  returns the canonical Google licensing SKU together with the product it
  belongs to. Unrecognized values pass through unchanged, with the product
  guessed from the SKU's leading "Word-Word" prefix.
  """
  sku_lower = sku.lower()  # hoisted: the original lower()ed on every branch
  if sku_lower in [u'apps', u'gafb', u'gafw']:
    sku = u'Google-Apps-For-Business'
  elif sku_lower in [u'gams',]:
    sku = u'Google-Apps-For-Postini'
  elif sku_lower in [u'gau', u'unlimited', u'd4w', u'dfw']:
    sku = u'Google-Apps-Unlimited'
  elif sku_lower == u'coordinate':
    sku = u'Google-Coordinate'
  elif sku_lower == u'vault':
    sku = u'Google-Vault'
  elif sku_lower in [u'vfe',]:
    sku = u'Google-Vault-Former-Employee'
  elif sku_lower in [u'drive-20gb', u'drive20gb', u'20gb']:
    sku = u'Google-Drive-storage-20GB'
  elif sku_lower in [u'drive-50gb', u'drive50gb', u'50gb']:
    sku = u'Google-Drive-storage-50GB'
  elif sku_lower in [u'drive-200gb', u'drive200gb', u'200gb']:
    sku = u'Google-Drive-storage-200GB'
  elif sku_lower in [u'drive-400gb', u'drive400gb', u'400gb']:
    sku = u'Google-Drive-storage-400GB'
  elif sku_lower in [u'drive-1tb', u'drive1tb', u'1tb']:
    sku = u'Google-Drive-storage-1TB'
  elif sku_lower in [u'drive-2tb', u'drive2tb', u'2tb']:
    sku = u'Google-Drive-storage-2TB'
  elif sku_lower in [u'drive-4tb', u'drive4tb', u'4tb']:
    sku = u'Google-Drive-storage-4TB'
  elif sku_lower in [u'drive-8tb', u'drive8tb', u'8tb']:
    # bug fix: this branch previously listed u'drive-4tb' instead of
    # u'drive-8tb', so the u'drive-8tb' alias was never recognized.
    sku = u'Google-Drive-storage-8TB'
  elif sku_lower in [u'drive-16tb', u'drive16tb', u'16tb']:
    sku = u'Google-Drive-storage-16TB'
  if sku[:20] == u'Google-Drive-storage':
    product = u'Google-Drive-storage'
  else:
    try:
      # NOTE: the character class contains a literal comma (historical
      # quirk); kept as-is so existing behavior is unchanged.
      product = re.search(u'^([A-Z,a-z]*-[A-Z,a-z]*)', sku).group(1)
    except AttributeError:
      # No "Word-Word" prefix: fall back to the SKU itself.
      product = sku
  return (product, sku)
def doLicense(users, operation):
lic = buildGAPIObject(u'licensing')
sku = sys.argv[5]
productId, skuId = getProductAndSKU(sku)
for user in users:
if user.find(u'@') == -1:
user = u'%s@%s' % (user, GC_Values[GC_DOMAIN])
if operation == u'delete':
callGAPI(lic.licenseAssignments(), operation, soft_errors=True, productId=productId, skuId=skuId, userId=user)
elif operation == u'insert':
callGAPI(lic.licenseAssignments(), operation, soft_errors=True, productId=productId, skuId=skuId, body={u'userId': user})
elif operation == u'patch':
try:
old_sku = sys.argv[6]
if old_sku.lower() == u'from':
old_sku = sys.argv[7]
except KeyError:
print u'ERROR: You need to specify the user\'s old SKU as the last argument'
sys.exit(2)
_, old_sku = getProductAndSKU(old_sku)
callGAPI(lic.licenseAssignments(), operation, soft_errors=True, productId=productId, skuId=old_sku, userId=user, body={u'skuId': skuId})
def doPop(users):
  """Enable or disable POP access for each user.

  sys.argv[4] is the true/false toggle; optional u'for' and u'action'
  arguments choose which mail is exposed over POP and what happens to
  messages once fetched. Exits 2 on invalid arguments.
  """
  if sys.argv[4].lower() in true_values:
    enable = True
  elif sys.argv[4].lower() in false_values:
    enable = False
  else:
    print u'ERROR: value for "gam <users> pop" must be true or false, got %s' % sys.argv[4]
    sys.exit(2)
  # Defaults: expose all mail, keep fetched messages in the mailbox.
  enable_for = u'ALL_MAIL'
  action = u'KEEP'
  i = 5
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'for':
      if sys.argv[i+1].lower() == u'allmail':
        enable_for = u'ALL_MAIL'
        i += 2
      elif sys.argv[i+1].lower() == u'newmail':
        enable_for = u'MAIL_FROM_NOW_ON'
        i += 2
      else:
        print u'ERROR: %s is not a valid argument for "gam <users> pop for"' % sys.argv[i]
        sys.exit(2)
    elif sys.argv[i].lower() == u'action':
      if sys.argv[i+1].lower() == u'keep':
        action = u'KEEP'
        i += 2
      elif sys.argv[i+1].lower() == u'archive':
        action = u'ARCHIVE'
        i += 2
      elif sys.argv[i+1].lower() == u'delete':
        action = u'DELETE'
        i += 2
      else:
        print u'ERROR: %s is not a valid argument for "gam <users> pop action"' % sys.argv[i]
        sys.exit(2)
    elif sys.argv[i].lower() == u'confirm':
      # Accepted for compatibility; has no effect.
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam <users> pop"' % sys.argv[i]
      sys.exit(2)
  emailsettings = getEmailSettingsObject()
  count = len(users)
  i = 1
  for user in users:
    if user.find(u'@') > 0:
      emailsettings.domain = user[user.find(u'@')+1:]
      user = user[:user.find(u'@')]
    else:
      emailsettings.domain = GC_Values[GC_DOMAIN] #make sure it's back at default domain
    print u"Setting POP Access to %s for %s (%s of %s)" % (str(enable), user+u'@'+emailsettings.domain, i, count)
    i += 1
    callGData(emailsettings, u'UpdatePop', soft_errors=True, username=user, enable=enable, enable_for=enable_for, action=action)
def getPop(users):
emailsettings = getEmailSettingsObject()
for user in users:
if user.find(u'@') > 0:
emailsettings.domain = user[user.find(u'@')+1:]
user = user[:user.find(u'@')]
else:
emailsettings.domain = GC_Values[GC_DOMAIN]
popsettings = callGData(emailsettings, u'GetPop', soft_errors=True, username=user)
try:
print u'User %s POP Enabled:%s Action:%s' % (user+u'@'+emailsettings.domain, popsettings[u'enable'], popsettings[u'action'])
except TypeError:
pass
def doSendAs(users):
  """Create a send-as alias for each user.

  sys.argv[4] is the alias address, sys.argv[5] its display name;
  optional u'default' and u'replyto' arguments follow.
  """
  sendas = sys.argv[4]
  sendasName = sys.argv[5]
  make_default = reply_to = None
  i = 6
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'default':
      make_default = True
      i += 1
    elif sys.argv[i].lower() == u'replyto':
      reply_to = sys.argv[i+1]
      i += 2
    else:
      print u'ERROR: %s is not a valid argument for "gam <users> sendas"' % sys.argv[i]
      sys.exit(2)
  emailsettings = getEmailSettingsObject()
  if sendas.find(u'@') < 0:
    # Bare alias: qualify with the default domain.
    sendas = sendas+u'@'+GC_Values[GC_DOMAIN]
  count = len(users)
  i = 1
  for user in users:
    if user.find(u'@') > 0:
      emailsettings.domain = user[user.find(u'@')+1:]
      user = user[:user.find(u'@')]
    else:
      emailsettings.domain = GC_Values[GC_DOMAIN] #make sure it's back at default domain
    print u"Allowing %s to send as %s (%s of %s)" % (user+u'@'+emailsettings.domain, sendas, i, count)
    i += 1
    callGData(emailsettings, u'CreateSendAsAlias', soft_errors=True, username=user, name=sendasName, address=sendas, make_default=make_default, reply_to=reply_to)
def showSendAs(users):
  """Print every send-as alias of each user with its default/verified flags."""
  emailsettings = getEmailSettingsObject()
  for user in users:
    if user.find(u'@') > 0:
      emailsettings.domain = user[user.find(u'@')+1:]
      user = user[:user.find(u'@')]
    else:
      emailsettings.domain = GC_Values[GC_DOMAIN]
    print u'%s has the following send as aliases:' %  (user+u'@'+emailsettings.domain)
    sendases = callGData(emailsettings, u'GetSendAsAlias', soft_errors=True, username=user)
    try:
      # sendases is None on soft errors; TypeError covers that case.
      for sendas in sendases:
        if sendas[u'isDefault'] == u'true':
          default = u'yes'
        else:
          default = u'no'
        if sendas[u'replyTo']:
          replyto = u' Reply To:<'+sendas[u'replyTo']+'>'
        else:
          replyto = u''
        if sendas[u'verified'] == u'true':
          verified = u'yes'
        else:
          verified = u'no'
        print u' "%s" <%s>%s Default:%s Verified:%s' % (sendas[u'name'], sendas[u'address'], replyto, default, verified)
    except TypeError:
      pass
    print u''
def doLanguage(users):
language = sys.argv[4]
emailsettings = getEmailSettingsObject()
count = len(users)
i = 1
for user in users:
if user.find(u'@') > 0:
emailsettings.domain = user[user.find(u'@')+1:]
user = user[:user.find(u'@')]
else:
emailsettings.domain = GC_Values[GC_DOMAIN] #make sure it's back at default domain
print u"Setting the language for %s to %s (%s of %s)" % (user+u'@'+emailsettings.domain, language, i, count)
i += 1
callGData(emailsettings, u'UpdateLanguage', soft_errors=True, username=user, language=language)
def doUTF(users):
if sys.argv[4].lower() in true_values:
SetUTF = True
elif sys.argv[4].lower() in false_values:
SetUTF = False
else:
print u'ERROR: value for "gam <users> utf" must be true or false, got %s' % sys.argv[4]
sys.exit(2)
emailsettings = getEmailSettingsObject()
count = len(users)
i = 1
for user in users:
if user.find(u'@') > 0:
emailsettings.domain = user[user.find(u'@')+1:]
user = user[:user.find(u'@')]
else:
emailsettings.domain = GC_Values[GC_DOMAIN] #make sure it's back at default domain
print u"Setting UTF-8 to %s for %s (%s of %s)" % (str(SetUTF), user+u'@'+emailsettings.domain, i, count)
i += 1
callGData(emailsettings, u'UpdateGeneral', soft_errors=True, username=user, unicode=SetUTF)
def doPageSize(users):
if sys.argv[4] == u'25' or sys.argv[4] == u'50' or sys.argv[4] == u'100':
PageSize = sys.argv[4]
else:
print u'ERROR: %s is not a valid argument for "gam <users> pagesize"' % sys.argv[4]
sys.exit(2)
emailsettings = getEmailSettingsObject()
count = len(users)
i = 1
for user in users:
if user.find(u'@') > 0:
emailsettings.domain = user[user.find(u'@')+1:]
user = user[:user.find(u'@')]
else:
emailsettings.domain = GC_Values[GC_DOMAIN] #make sure it's back at default domain
print u"Setting Page Size to %s for %s (%s of %s)" % (PageSize, user+u'@'+emailsettings.domain, i, count)
i += 1
callGData(emailsettings, u'UpdateGeneral', soft_errors=True, username=user, page_size=PageSize)
def doShortCuts(users):
if sys.argv[4].lower() in true_values:
SetShortCuts = True
elif sys.argv[4].lower() in false_values:
SetShortCuts = False
else:
print u'ERROR: value for "gam <users> shortcuts" must be true or false, got %s' % sys.argv[4]
sys.exit(2)
emailsettings = getEmailSettingsObject()
count = len(users)
i = 1
for user in users:
if user.find(u'@') > 0:
emailsettings.domain = user[user.find(u'@')+1:]
user = user[:user.find(u'@')]
else:
emailsettings.domain = GC_Values[GC_DOMAIN] #make sure it's back at default domain
print u"Setting Keyboard Short Cuts to %s for %s (%s of %s)" % (str(SetShortCuts), user+u'@'+emailsettings.domain, i, count)
i += 1
callGData(emailsettings, u'UpdateGeneral', soft_errors=True, username=user, shortcuts=SetShortCuts)
def doArrows(users):
if sys.argv[4].lower() in true_values:
SetArrows = True
elif sys.argv[4].lower() in false_values:
SetArrows = False
else:
print u'ERROR: value for "gam <users> arrows" must be true or false, got %s' % sys.argv[4]
sys.exit(2)
emailsettings = getEmailSettingsObject()
count = len(users)
i = 1
for user in users:
if user.find(u'@') > 0:
emailsettings.domain = user[user.find(u'@')+1:]
user = user[:user.find(u'@')]
else:
emailsettings.domain = GC_Values[GC_DOMAIN] #make sure it's back at default domain
print u"Setting Personal Indicator Arrows to %s for %s (%s of %s)" % (str(SetArrows), user+u'@'+emailsettings.domain, i, count)
i += 1
callGData(emailsettings, u'UpdateGeneral', soft_errors=True, username=user, arrows=SetArrows)
def doSnippets(users):
if sys.argv[4].lower() in true_values:
SetSnippets = True
elif sys.argv[4].lower() in false_values:
SetSnippets = False
else:
print u'ERROR: value for "gam <users> snippets" must be true or false, got %s' % sys.argv[4]
sys.exit(2)
emailsettings = getEmailSettingsObject()
count = len(users)
i = 1
for user in users:
if user.find(u'@') > 0:
emailsettings.domain = user[user.find(u'@')+1:]
user = user[:user.find(u'@')]
else:
emailsettings.domain = GC_Values[GC_DOMAIN] #make sure it's back at default domain
print u"Setting Preview Snippets to %s for %s (%s of %s)" % (str(SetSnippets), user+u'@'+emailsettings.domain, i, count)
i += 1
callGData(emailsettings, u'UpdateGeneral', soft_errors=True, username=user, snippets=SetSnippets)
def doLabel(users, i):
  """Create a Gmail label for each user.

  i is the sys.argv index of the label name; the following arguments set
  the label's list/message visibility. NOTE: i is reused as the progress
  counter after argument parsing.
  """
  label = sys.argv[i]
  i += 1
  body = {u'name': label}
  while i < len(sys.argv):
    if sys.argv[i].lower().replace(u'_', u'') == u'labellistvisibility':
      if sys.argv[i+1].lower().replace(u'_', u'') == u'hide':
        body[u'labelListVisibility'] = u'labelHide'
      elif sys.argv[i+1].lower().replace(u'_', u'') == u'show':
        body[u'labelListVisibility'] = u'labelShow'
      elif sys.argv[i+1].lower().replace(u'_', u'') == u'showifunread':
        body[u'labelListVisibility'] = u'labelShowIfUnread'
      else:
        print u'ERROR: label_list_visibility must be one of hide, show or show_if_unread, got %s' % sys.argv[i+1]
        sys.exit(2)
      i += 2
    elif sys.argv[i].lower().replace(u'_', u'') == u'messagelistvisibility':
      if sys.argv[i+1].lower().replace(u'_', u'') == u'hide':
        body[u'messageListVisibility'] = u'hide'
      elif sys.argv[i+1].lower().replace(u'_', u'') == u'show':
        body[u'messageListVisibility'] = u'show'
      else:
        print u'ERROR: message_list_visibility must be one of hide or show, got %s' % sys.argv[i+1]
        sys.exit(2)
      i += 2
    else:
      print u'ERROR: %s is not a valid argument for this command.' % sys.argv[i]
      sys.exit(2)
  count = len(users)
  i = 1
  for user in users:
    gmail = buildGAPIObject(u'gmail', user)
    print u"Creating label %s for %s (%s of %s)" % (label, user, i, count)
    i += 1
    callGAPI(gmail.users().labels(), u'create', soft_errors=True, userId=user, body=body)
def doDeleteMessages(trashOrDelete, users):
  """Trash or delete each user's messages matching a required Gmail query.

  trashOrDelete is the Gmail API method name (u'trash' or u'delete').
  Without the u'doit' flag this is a dry run; max_to_delete (default 1)
  is a safety cap that aborts the whole user when exceeded.
  """
  query = None
  doIt = False
  maxToDelete = 1
  i = 5
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'query':
      query = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'doit':
      doIt = True
      i += 1
    elif sys.argv[i].lower().replace(u'_', u'') == u'maxtodelete':
      maxToDelete = int(sys.argv[i+1])
      i += 2
    else:
      print u'ERROR: %s is not a valid argument for "gam <users> delete messages"' % sys.argv[i]
      sys.exit(2)
  if not query:
    print u'ERROR: No query specified. You must specify some query!'
    sys.exit(2)
  for user in users:
    print u'Searching messages for %s' % user
    gmail = buildGAPIObject(u'gmail', user)
    page_message = u'Got %%%%total_items%%%% messages for user %s' % user
    listResult = callGAPIpages(gmail.users().messages(), u'list', u'messages', page_message=page_message,
                               userId=u'me', q=query, includeSpamTrash=True, soft_errors=True)
    del_count = len(listResult)
    if not doIt:
      # Dry run: report what would happen, touch nothing.
      print u'would try to delete %s messages for user %s (max %s)\n' % (del_count, user, maxToDelete)
      continue
    elif del_count > maxToDelete:
      print u'WARNING: refusing to delete ANY messages for %s since max_to_delete is %s and messages to be deleted is %s\n' % (user, maxToDelete, del_count)
      continue
    i = 0
    for del_me in listResult:
      i += 1
      print u' %s message %s for user %s (%s/%s)' % (trashOrDelete, del_me[u'id'], user, i, del_count)
      callGAPI(gmail.users().messages(), trashOrDelete,
               id=del_me[u'id'], userId=u'me')
def doDeleteLabel(users):
  """Delete Gmail labels for each user.

  sys.argv[5] selects labels: the literal u'--ALL_LABELS--', a
  u'regex:'-prefixed pattern, or an exact (case-insensitive) name.
  System labels are never deleted. Deletions are batched 10 at a time.
  """
  label = sys.argv[5]
  for user in users:
    gmail = buildGAPIObject(u'gmail', user)
    print u'Getting all labels for %s...' % user
    labels = callGAPI(gmail.users().labels(), u'list', userId=user, fields=u'labels(name,id,type)')
    del_labels = []
    if label == u'--ALL_LABELS--':
      for del_label in labels[u'labels']:
        if del_label[u'type'] == u'system':
          continue
        del_labels.append(del_label)
    elif label[:6].lower() == u'regex:':
      regex = label[6:]
      p = re.compile(regex)
      for del_label in labels[u'labels']:
        if del_label[u'type'] == u'system':
          continue
        elif p.match(del_label[u'name']):
          del_labels.append(del_label)
    else:
      got_label = False
      for del_label in labels[u'labels']:
        if label.lower() == del_label[u'name'].lower():
          del_labels.append(del_label)
          got_label = True
          break
      if not got_label:
        print u' Error: no such label for %s' % user
        continue
    del_me_count = len(del_labels)
    i = 1
    dbatch = googleapiclient.http.BatchHttpRequest()
    for del_me in del_labels:
      print u' deleting label %s (%s/%s)' % (del_me[u'name'], i, del_me_count)
      i += 1
      dbatch.add(gmail.users().labels().delete(userId=user, id=del_me[u'id']), callback=gmail_del_result)
      # NOTE(review): _order is a private BatchHttpRequest attribute used
      # here to flush every 10 queued deletions — fragile across upgrades.
      if len(dbatch._order) == 10:
        dbatch.execute()
        dbatch = googleapiclient.http.BatchHttpRequest()
    if len(dbatch._order) > 0:
      dbatch.execute()
def gmail_del_result(request_id, response, exception):
if exception is not None:
print exception
def showLabels(users):
  """Print each user's Gmail labels and their attributes.

  The u'onlyuser' flag hides system labels (INBOX, SPAM, ...).
  """
  i = 5
  show_system = True
  while i < len(sys.argv):
    if sys.argv[i].lower().replace(u'_', u'') == u'onlyuser':
      show_system = False
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam <users> show labels"' % sys.argv[i]
      sys.exit(2)
  for user in users:
    gmail = buildGAPIObject(u'gmail', user)
    labels = callGAPI(gmail.users().labels(), u'list', userId=user, soft_errors=True)
    if labels:
      for label in labels[u'labels']:
        if label[u'type'] == u'system' and not show_system:
          continue
        print convertUTF8(label[u'name'])
        for a_key in label:
          if a_key == u'name':
            continue
          print u' %s: %s' % (a_key, label[a_key])
        print u''
def showGmailProfile(users):
  """Collect and output each user's Gmail profile as CSV.

  profiles[0] is the header row (field name -> field name) expected by
  output_csv; the optional u'todrive' flag uploads the CSV to Drive.
  """
  todrive = False
  i = 6
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'todrive':
      todrive = True
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for gam <users> show gmailprofiles' % sys.argv[i]
      sys.exit(2)
  profiles = [{}]
  for user in users:
    print u'Getting Gmail profile for %s' % user
    gmail = buildGAPIObject(u'gmail', user, soft_errors=True)
    if not gmail:
      continue
    results = callGAPI(gmail.users(), u'getProfile', userId=u'me', soft_errors=True)
    if results:
      # Grow the header row as new fields appear across users.
      for item in results:
        if item not in profiles[0]:
          profiles[0][item] = item
      profiles.append(results)
  output_csv(csv_list=profiles, titles=profiles[0], list_type=u'Gmail Profiles', todrive=todrive)
def updateLabels(users):
label_name = sys.argv[5]
body = {}
i = 6
while i < len(sys.argv):
if sys.argv[i].lower() == u'name':
body[u'name'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower().replace(u'_', u'') == u'messagelistvisibility':
body[u'messageListVisibility'] = sys.argv[i+1].lower()
if body[u'messageListVisibility'] not in [u'hide', u'show']:
print u'ERROR: message_list_visibility should be show or hide, got %s' % sys.argv[i+1]
sys.exit(2)
i += 2
elif sys.argv[i].lower().replace(u' ', u'') == u'labellistvisibility':
if sys.argv[i+1].lower().replace(u'_', u'') == u'showifunread':
body[u'labelListVisibility'] = u'labelShowIfUnread'
elif sys.argv[i+1].lower().replace(u'_', u'') == u'show':
body[u'labelListVisibility'] = u'labelShow'
elif sys.argv[i+1].lower().replace(u'_', u'') == u'hide':
body[u'labelListVisibility'] = u'labelHide'
else:
print u'ERROR: label_list_visibility should be hide, show or show_if_unread, got %s' % sys.argv[i+1]
sys.exit(2)
i += 2
else:
print u'ERROR: %s is not a valid argument for "gam <users> update labels"' % sys.argv[i]
sys.exit(2)
for user in users:
gmail = buildGAPIObject(u'gmail', user)
labels = callGAPI(gmail.users().labels(), u'list', userId=user, fields=u'labels(id,name)')
label_id = None
for label in labels[u'labels']:
if label[u'name'].lower() == label_name.lower():
label_id = label[u'id']
break
if not label_id:
print u'Error: user does not have a label named %s' % label_name
callGAPI(gmail.users().labels(), u'patch', soft_errors=True, userId=user, id=label_id, body=body)
def renameLabels(users):
  """Rename (or merge) each user's Gmail labels whose names match a regex.

  Arguments: search <regex> (default matches "Inbox/<anything>"),
  replace <pattern> (% formatting is applied with the regex groups) and
  merge (when the target name already exists, relabel that label's messages
  into the existing label and delete the old one). System labels are skipped.
  """
  search = u'^Inbox/(.*)$'
  replace = u'%s'
  merge = False
  i = 5
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'search':
      search = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'replace':
      replace = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'merge':
      merge = True
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam <users> rename label"' % sys.argv[i]
      sys.exit(2)
  pattern = re.compile(search, re.IGNORECASE)
  for user in users:
    gmail = buildGAPIObject(u'gmail', user)
    labels = callGAPI(gmail.users().labels(), u'list', userId=user)
    for label in labels[u'labels']:
      if label[u'type'] == u'system':
        continue
      match_result = re.search(pattern, label[u'name'])
      if match_result != None:
        new_label_name = replace % match_result.groups()
        print u' Renaming "%s" to "%s"' % (label[u'name'], new_label_name)
        # patch aborts (HttpError) when a label with the target name exists
        try:
          callGAPI(gmail.users().labels(), u'patch', soft_errors=True, throw_reasons=[u'aborted'], id=label[u'id'], userId=user, body={u'name': new_label_name})
        except googleapiclient.errors.HttpError:
          if merge:
            print u' Merging %s label to existing %s label' % (label[u'name'], new_label_name)
            q = u'label:"%s"' % label[u'name']
            messages_to_relabel = callGAPIpages(gmail.users().messages(), u'list', u'messages', userId=user, q=q)
            if len(messages_to_relabel) > 0:
              # find the id of the existing target label so its id can be
              # added to each message before the old label is deleted
              for new_label in labels[u'labels']:
                if new_label[u'name'].lower() == new_label_name.lower():
                  new_label_id = new_label[u'id']
                  body = {u'addLabelIds': [new_label_id]}
                  break
              # NOTE: reuses i as the per-message progress counter; the
              # argument-parsing loop above has already finished with it.
              i = 1
              for message_to_relabel in messages_to_relabel:
                print u' relabeling message %s (%s/%s)' % (message_to_relabel[u'id'], i, len(messages_to_relabel))
                callGAPI(gmail.users().messages(), u'modify', userId=user, id=message_to_relabel[u'id'], body=body)
                i += 1
            else:
              print u' no messages with %s label' % label[u'name']
            print u' Deleting label %s' % label[u'name']
            callGAPI(gmail.users().labels(), u'delete', id=label[u'id'], userId=user)
          else:
            print u' Error: looks like %s already exists, not renaming. Use the "merge" argument to merge the labels' % new_label_name
def doFilter(users):
  """Create a Gmail filter for each user via the Email Settings API.

  Arguments from sys.argv[4] are parsed in two phases: conditions first
  (from/to/subject/haswords/nowords/musthaveattachment), then actions
  (label/markread/archive/star/forward/trash/neverspam). At least one
  condition and one action are required.
  """
  i = 4 # filter arguments start here
  from_ = to = subject = has_the_word = does_not_have_the_word = has_attachment = label = should_mark_as_read = should_archive = should_star = forward_to = should_trash = should_not_spam = None
  haveCondition = False
  # phase 1: consume condition keywords until a non-condition argument appears
  while sys.argv[i].lower() in [u'from', u'to', u'subject', u'haswords', u'nowords', u'musthaveattachment']:
    if sys.argv[i].lower() == u'from':
      from_ = sys.argv[i+1]
      i += 2
      haveCondition = True
    elif sys.argv[i].lower() == u'to':
      to = sys.argv[i+1]
      i += 2
      haveCondition = True
    elif sys.argv[i].lower() == u'subject':
      subject = sys.argv[i+1]
      i += 2
      haveCondition = True
    elif sys.argv[i].lower() == u'haswords':
      has_the_word = sys.argv[i+1]
      i += 2
      haveCondition = True
    elif sys.argv[i].lower() == u'nowords':
      does_not_have_the_word = sys.argv[i+1]
      i += 2
      haveCondition = True
    elif sys.argv[i].lower() == u'musthaveattachment':
      has_attachment = True
      i += 1
      haveCondition = True
  if not haveCondition:
    print u'ERROR: you must specifiy a condition for "gam <users> filter"'
    sys.exit(2)
  haveAction = False
  # phase 2: all remaining arguments must be actions
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'label':
      label = sys.argv[i+1]
      i += 2
      haveAction = True
    elif sys.argv[i].lower() == u'markread':
      should_mark_as_read = True
      i += 1
      haveAction = True
    elif sys.argv[i].lower() == u'archive':
      should_archive = True
      i += 1
      haveAction = True
    elif sys.argv[i].lower() == u'star':
      should_star = True
      i += 1
      haveAction = True
    elif sys.argv[i].lower() == u'forward':
      forward_to = sys.argv[i+1]
      i += 2
      haveAction = True
    elif sys.argv[i].lower() == u'trash':
      should_trash = True
      i += 1
      haveAction = True
    elif sys.argv[i].lower() == u'neverspam':
      should_not_spam = True
      i += 1
      haveAction = True
    else:
      print u'ERROR: %s is not a valid argument for "gam <users> filter"' % sys.argv[i]
      sys.exit(2)
  if not haveAction:
    print u'ERROR: you must specifiy an action for "gam <users> filter"'
    sys.exit(2)
  emailsettings = getEmailSettingsObject()
  count = len(users)
  i = 1
  for user in users:
    if user.find(u'@') > 0:
      # full address given: operate on that address's domain
      emailsettings.domain = user[user.find(u'@')+1:]
      user = user[:user.find(u'@')]
    else:
      emailsettings.domain = GC_Values[GC_DOMAIN] #make sure it's back at default domain
    print u"Creating filter for %s (%s of %s)" % (user+'@'+emailsettings.domain, i, count)
    i += 1
    callGData(emailsettings, u'CreateFilter', soft_errors=True,
              username=user, from_=from_, to=to, subject=subject, has_the_word=has_the_word, does_not_have_the_word=does_not_have_the_word,
              has_attachment=has_attachment, label=label, should_mark_as_read=should_mark_as_read, should_archive=should_archive,
              should_star=should_star, forward_to=forward_to, should_trash=should_trash, should_not_spam=should_not_spam)
def doForward(users):
action = forward_to = None
gotAction = gotForward = False
if sys.argv[4].lower() in true_values:
enable = True
elif sys.argv[4].lower() in false_values:
enable = False
else:
print u'ERROR: value for "gam <users> forward" must be true or false, got %s' % sys.argv[4]
sys.exit(2)
i = 5
while i < len(sys.argv):
if sys.argv[i].lower() in [u'keep', u'archive', u'delete']:
action = sys.argv[i].upper()
i += 1
gotAction = True
elif sys.argv[i].lower() == u'confirm':
i += 1
elif sys.argv[i].find(u'@') != -1:
forward_to = sys.argv[i]
gotForward = True
i += 1
else:
print u'ERROR: %s is not a valid argument for "gam <users> forward"' % sys.argv[i]
sys.exit(2)
if enable and (not gotAction or not gotForward):
print u'ERROR: you must specify an action and a forwarding address for "gam <users> forward'
sys.exit(2)
emailsettings = getEmailSettingsObject()
count = len(users)
i = 1
for user in users:
if user.find(u'@') > 0:
emailsettings.domain = user[user.find(u'@')+1:]
user = user[:user.find(u'@')]
else:
emailsettings.domain = GC_Values[GC_DOMAIN] #make sure it's back at default domain
print u"Turning forward %s for %s, emails will be %s (%s of %s)" % (sys.argv[4], user+'@'+emailsettings.domain, action, i, count)
i += 1
callGData(emailsettings, u'UpdateForwarding', soft_errors=True, username=user, enable=enable, action=action, forward_to=forward_to)
def getForward(users):
emailsettings = getEmailSettingsObject()
for user in users:
if user.find(u'@') > 0:
emailsettings.domain = user[user.find(u'@')+1:]
user = user[:user.find(u'@')]
else:
emailsettings.domain = GC_Values[GC_DOMAIN]
forward = callGData(emailsettings, u'GetForward', soft_errors=True, username=user)
try:
print u"User %s: Forward To:%s Enabled:%s Action:%s" % (user+u'@'+emailsettings.domain, forward[u'forwardTo'], forward[u'enable'], forward[u'action'])
except TypeError:
pass
def doSignature(users):
  """Set each user's email signature via the Email Settings API.

  The signature comes from sys.argv[4], or from a file when the "file"
  keyword is used. Literal \\n sequences become real newlines, double
  quotes become single quotes, and the text is HTML-escaped before being
  embedded in the Atom XML payload PUT to the signature endpoint.
  """
  import cgi
  if sys.argv[4].lower() == u'file':
    signature = cgi.escape(readFile(sys.argv[5]).replace(u'\\n', u'
').replace(u'"', u"'"))
  else:
    signature = cgi.escape(sys.argv[4]).replace(u'\\n', u'
').replace(u'"', u"'")
  xmlsig = u'''<?xml version="1.0" encoding="utf-8"?>
<atom:entry xmlns:atom="http://www.w3.org/2005/Atom" xmlns:apps="http://schemas.google.com/apps/2006">
<apps:property name="signature" value="%s" />
</atom:entry>''' % signature
  emailsettings = getEmailSettingsObject()
  count = len(users)
  i = 1
  for user in users:
    if user.find(u'@') > 0:
      emailsettings.domain = user[user.find(u'@')+1:]
      user = user[:user.find(u'@')]
    else:
      emailsettings.domain = GC_Values[GC_DOMAIN] #make sure it's back at default domain
    print u"Setting Signature for %s (%s of %s)" % (user+u'@'+emailsettings.domain, i, count)
    uri = u'https://apps-apis.google.com/a/feeds/emailsettings/2.0/%s/%s/signature' % (emailsettings.domain, user)
    i += 1
    callGData(emailsettings, u'Put', soft_errors=True, data=xmlsig, uri=uri)
def getSignature(users):
emailsettings = getEmailSettingsObject()
for user in users:
if user.find(u'@') > 0:
emailsettings.domain = user[user.find(u'@')+1:]
user = user[:user.find(u'@')]
else:
emailsettings.domain = GC_Values[GC_DOMAIN]
result = callGData(emailsettings, u'GetSignature', soft_errors=True, username=user)
signature = result.get(u'signature', u'None') if result else u'None'
if not signature:
signature = u'None'
sys.stdout.write(u"User %s signature:\n " % (user+u'@'+emailsettings.domain))
print convertUTF8(u" %s" % signature)
def doWebClips(users):
if sys.argv[4].lower() in true_values:
enable = True
elif sys.argv[4].lower() in false_values:
enable = False
else:
print u'ERROR: value for "gam <users> webclips" must be true or false, got %s' % sys.argv[4]
sys.exit(2)
emailsettings = getEmailSettingsObject()
count = len(users)
i = 1
for user in users:
if user.find(u'@') > 0:
emailsettings.domain = user[user.find(u'@')+1:]
user = user[:user.find(u'@')]
else:
emailsettings.domain = GC_Values[GC_DOMAIN] #make sure it's back at default domain
print u"Turning Web Clips %s for %s (%s of %s)" % (sys.argv[4], user+u'@'+emailsettings.domain, i, count)
i += 1
callGData(emailsettings, u'UpdateWebClipSettings', soft_errors=True, username=user, enable=enable)
def doVacation(users):
  """Enable or disable each user's vacation auto-responder.

  sys.argv[4] turns the responder on/off; optional arguments set the
  subject, message (inline or from a file), start/end dates, and whether
  replies go only to contacts and/or only within the domain.
  """
  subject = message = u''
  if sys.argv[4].lower() in true_values:
    enable = True
  elif sys.argv[4].lower() in false_values:
    enable = False
  else:
    print u'ERROR: value for "gam <users> vacation" must be true or false, got %s' % sys.argv[4]
    sys.exit(2)
  contacts_only = domain_only = False
  start_date = end_date = None
  i = 5
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'subject':
      subject = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'message':
      message = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'contactsonly':
      contacts_only = True
      i += 1
    elif sys.argv[i].lower() == u'domainonly':
      domain_only = True
      i += 1
    elif sys.argv[i].lower() == u'startdate':
      start_date = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'enddate':
      end_date = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'file':
      message = readFile(sys.argv[i+1])
      i += 2
    else:
      print u'ERROR: %s is not a valid argument for "gam <users> vacation"' % sys.argv[i]
      sys.exit(2)
  i = 1
  count = len(users)
  emailsettings = getEmailSettingsObject()
  # turn literal \n escapes in the message into real newlines
  message = message.replace(u'\\n', u'\n')
  for user in users:
    if user.find(u'@') > 0:
      emailsettings.domain = user[user.find(u'@')+1:]
      user = user[:user.find(u'@')]
    else:
      emailsettings.domain = GC_Values[GC_DOMAIN] #make sure it's back at default domain
    print u"Setting Vacation for %s (%s of %s)" % (user+'@'+emailsettings.domain, i, count)
    i += 1
    callGData(emailsettings, u'UpdateVacation',
              soft_errors=True,
              username=user, enable=enable, subject=subject, message=message,
              contacts_only=contacts_only, domain_only=domain_only, start_date=start_date, end_date=end_date)
def getVacation(users):
  """Print each user's vacation (auto-reply) settings."""
  emailsettings = getEmailSettingsObject()
  for user in users:
    if user.find(u'@') > 0:
      emailsettings.domain = user[user.find(u'@')+1:]
      user = user[:user.find(u'@')]
    else:
      emailsettings.domain = GC_Values[GC_DOMAIN]
    vacationsettings = callGData(emailsettings, u'GetVacation', soft_errors=True, username=user)
    # callGData returns None on a soft error; subscripting None raises
    # TypeError, which is deliberately swallowed to skip the user.
    try:
      print convertUTF8(u'''User %s
 Enabled: %s
 Contacts Only: %s
 Domain Only: %s
 Subject: %s
 Message: %s
 Start Date: %s
 End Date: %s
''' % (user+u'@'+emailsettings.domain, vacationsettings[u'enable'], vacationsettings[u'contactsOnly'], vacationsettings[u'domainOnly'], vacationsettings[u'subject'],
       vacationsettings[u'message'], vacationsettings[u'startDate'], vacationsettings[u'endDate']))
    except TypeError:
      pass
def doDelSchema():
cd = buildGAPIObject(u'directory')
schemaKey = sys.argv[3]
callGAPI(cd.schemas(), u'delete', customerId=GC_Values[GC_CUSTOMER_ID], schemaKey=schemaKey)
print u'Deleted schema %s' % schemaKey
def doCreateOrUpdateUserSchema():
  """Create or update a custom user schema named by sys.argv[3].

  Fields are declared as: field <name> type <type> [multivalued] [indexed]
  [restricted] [range <min> <max>] endfield. Whether schemas.insert or
  schemas.update is called depends on whether the top-level command
  (sys.argv[1]) was "create" or "update".
  """
  cd = buildGAPIObject(u'directory')
  schemaName = sys.argv[3]
  body = {u'schemaName': schemaName, u'fields': []}
  i = 4
  while i < len(sys.argv):
    if sys.argv[i] in [u'field']:
      a_field = {u'fieldName': sys.argv[i+1]}
      i += 2
      # inner loop consumes this field's attributes until "endfield"
      while True:
        if sys.argv[i].lower() in [u'type']:
          a_field[u'fieldType'] = sys.argv[i+1].upper()
          if a_field[u'fieldType'] not in [u'BOOL', u'DOUBLE', u'EMAIL', u'INT64', u'PHONE', u'STRING']:
            print u'ERROR: type must be bool, double, email, int64, phone or string. Got %s' % a_field[u'fieldType']
            sys.exit(2)
          i += 2
        elif sys.argv[i].lower() in [u'multivalued']:
          a_field[u'multiValued'] = True
          i += 1
        elif sys.argv[i].lower() in [u'indexed']:
          a_field[u'indexed'] = True
          i += 1
        elif sys.argv[i].lower() in [u'restricted']:
          a_field[u'readAccessType'] = u'ADMINS_AND_SELF'
          i += 1
        elif sys.argv[i].lower() in [u'range']:
          a_field[u'numericIndexingSpec'] = {u'minValue': sys.argv[i+1], u'maxValue': sys.argv[i+2]}
          i += 3
        elif sys.argv[i].lower() in [u'endfield']:
          body[u'fields'].append(a_field)
          i += 1
          break
        else:
          print u'ERROR: %s is not a valid argument for "gam create schema"' % sys.argv[i]
          sys.exit(2)
    else:
      print u'ERROR: %s is not a valid argument for "gam create schema"' % sys.argv[i]
      sys.exit(2)
  if sys.argv[1].lower() == u'create':
    result = callGAPI(cd.schemas(), u'insert', customerId=GC_Values[GC_CUSTOMER_ID], body=body)
    print u'Created user schema %s' % result[u'schemaName']
  elif sys.argv[1].lower() == u'update':
    result = callGAPI(cd.schemas(), u'update', customerId=GC_Values[GC_CUSTOMER_ID], body=body, schemaKey=schemaName)
    print u'Updated user schema %s' % result[u'schemaName']
def doPrintUserSchemas():
cd = buildGAPIObject(u'directory')
schemas = callGAPI(cd.schemas(), u'list', customerId=GC_Values[GC_CUSTOMER_ID])
if not schemas or u'schemas' not in schemas:
return
for schema in schemas[u'schemas']:
print u'Schema: %s' % schema[u'schemaName']
for a_key in schema:
if a_key not in [u'schemaName', u'fields', u'etag', u'kind']:
print u'%s: %s' % (a_key, schema[a_key])
print
for field in schema[u'fields']:
print u' Field: %s' % field[u'fieldName']
for a_key in field:
if a_key not in [u'fieldName', u'kind', u'etag']:
print u' %s: %s' % (a_key, field[a_key])
print
print
def doGetUserSchema():
cd = buildGAPIObject(u'directory')
schemaKey = sys.argv[3]
schema = callGAPI(cd.schemas(), u'get', customerId=GC_Values[GC_CUSTOMER_ID], schemaKey=schemaKey)
print u'Schema: %s' % schema[u'schemaName']
for a_key in schema:
if a_key not in [u'schemaName', u'fields', u'etag', u'kind']:
print u'%s: %s' % (a_key, schema[a_key])
print
for field in schema[u'fields']:
print u' Field: %s' % field[u'fieldName']
for a_key in field:
if a_key not in [u'fieldName', u'kind', u'etag']:
print u' %s: %s' % (a_key, field[a_key])
print
def doCreateUser():
cd = buildGAPIObject(u'directory')
body = dict()
body[u'name'] = dict()
body[u'primaryEmail'] = sys.argv[3]
if body[u'primaryEmail'].find(u'@') == -1:
body[u'primaryEmail'] = u'%s@%s' % (body[u'primaryEmail'], GC_Values[GC_DOMAIN])
gotFirstName = gotLastName = do_admin = False
need_to_hash_password = need_password = True
i = 4
while i < len(sys.argv):
if sys.argv[i].lower() == u'firstname':
body[u'name'][u'givenName'] = sys.argv[i+1]
gotFirstName = True
i += 2
elif sys.argv[i].lower() == u'lastname':
body[u'name'][u'familyName'] = sys.argv[i+1]
gotLastName = True
i += 2
elif sys.argv[i].lower() == u'password':
body[u'password'] = sys.argv[i+1]
need_password = False
i += 2
elif sys.argv[i].lower() == u'suspended':
if sys.argv[i+1].lower() in true_values:
body[u'suspended'] = True
elif sys.argv[i+1].lower() in false_values:
body[u'suspended'] = False
else:
print u'ERROR: suspended should be on or off, not %s' % sys.argv[i+1]
sys.exit(2)
i += 2
elif sys.argv[i].lower() == u'gal':
if sys.argv[i+1].lower() in true_values:
body[u'includeInGlobalAddressList'] = True
elif sys.argv[i+1].lower() in false_values:
body[u'includeInGlobalAddressList'] = False
else:
print u'ERROR: gal should be on or off, not %s' % sys.argv[i+1]
sys.exit(52)
i += 2
elif sys.argv[i].lower() in [u'sha', u'sha1', u'sha-1']:
body[u'hashFunction'] = u'SHA-1'
need_to_hash_password = False
i += 1
elif sys.argv[i].lower() == u'md5':
body[u'hashFunction'] = u'MD5'
need_to_hash_password = False
i += 1
elif sys.argv[i].lower() == u'crypt':
body[u'hashFunction'] = u'crypt'
need_to_hash_password = False
i += 1
elif sys.argv[i].lower() == u'nohash':
need_to_hash_password = False
i += 1
elif sys.argv[i].lower() == u'changepassword':
if sys.argv[i+1].lower() in true_values:
body[u'changePasswordAtNextLogin'] = True
elif sys.argv[i+1].lower() in false_values:
body[u'changePasswordAtNextLogin'] = False
else:
print u'ERROR: changepassword should be on or off, not %s' % sys.argv[i+1]
sys.exit(2)
i += 2
elif sys.argv[i].lower() == u'ipwhitelisted':
if sys.argv[i+1].lower() in true_values:
body[u'ipWhitelisted'] = True
elif sys.argv[i+1].lower() in false_values:
body[u'ipWhitelisted'] = False
else:
print u'ERROR: ipwhitelisted should be on or off, not %s' % sys.argv[i+1]
sys.exit(2)
i += 2
elif sys.argv[i].lower() == u'admin':
do_admin = True
if sys.argv[i+1].lower() in true_values:
admin_body = {u'status': True}
elif sys.argv[i+1].lower() in false_values:
admin_body = {u'status': False}
else:
print u'ERROR: admin should be on or off, not %s' % sys.argv[i+1]
sys.exit(2)
i += 2
elif sys.argv[i].lower() == u'agreedtoterms':
if sys.argv[i+1].lower() in true_values:
body[u'agreedToTerms'] = True
elif sys.argv[i+1].lower() in false_values:
body[u'agreedToTerms'] = False
else:
print u'ERROR: agreedtoterms should be on or off, not %s' % sys.argv[i+1]
sys.exit(2)
i += 2
elif sys.argv[i].lower() in [u'org', u'ou']:
org = sys.argv[i+1]
if org[0] != u'/':
org = u'/%s' % org
body[u'orgUnitPath'] = org
i += 2
elif sys.argv[i].lower() == u'im':
im = dict()
i += 1
if sys.argv[i].lower() != u'type':
print u'ERROR: wrong format for account im details. Expected type got %s' % sys.argv[i]
sys.exit(2)
i += 1
im[u'type'] = sys.argv[i].lower()
if im[u'type'] not in [u'custom', u'home', u'other', u'work']:
print u'ERROR: type should be custom, home, other or work. Got %s' % im[u'type']
sys.exit(2)
if im[u'type'] == u'custom':
i += 1
im[u'customType'] = sys.argv[i]
i += 1
if sys.argv[i].lower() != u'protocol':
print u'ERROR: wrong format for account details. Expected protocol got %s' % sys.argv[i]
sys.exit(2)
i += 1
im[u'protocol'] = sys.argv[i].lower()
if im[u'protocol'] not in [u'custom_protocol', u'aim', u'gtalk', u'icq', u'jabber', u'msn', u'net_meeting', u'qq', u'skype', u'yahoo']:
print u'ERROR: protocol should be custom_protocol, aim, gtalk, icq, jabber, msn, net_meeting, qq, skype or yahoo. Got %s' % im[u'protocol']
sys.exit(2)
if im[u'protocol'] == u'custom_protocol':
i += 1
im[u'customProtocol'] = sys.argv[i]
i += 1
if sys.argv[i].lower() == u'primary':
im[u'primary'] = True
i += 1
im[u'im'] = sys.argv[i]
try:
body[u'ims'].append(im)
except KeyError:
body[u'ims'] = [im,]
i += 1
elif sys.argv[i].lower() == u'address':
address = dict()
i += 1
if sys.argv[i].lower() != u'type':
print u'ERROR: wrong format for account address details. Expected type got %s' % sys.argv[i]
sys.exit(2)
i += 1
address[u'type'] = sys.argv[i].lower()
if address[u'type'] not in [u'custom', u'home', u'other', u'work']:
print u'ERROR: wrong type should be custom, home, other or work. Got %s' % address[u'type']
sys.exit(2)
if address[u'type'] == u'custom':
i += 1
address[u'customType'] = sys.argv[i]
i += 1
if sys.argv[i].lower() == u'unstructured':
i += 1
address[u'sourceIsStructured'] = False
address[u'formatted'] = sys.argv[i]
i += 1
while True:
argument = sys.argv[i].lower()
if argument == u'pobox':
address[u'poBox'] = sys.argv[i+1]
i += 2
elif argument == u'extendedaddress':
address[u'extendedAddress'] = sys.argv[i+1]
i += 2
elif argument == u'streetaddress':
address[u'streetAddress'] = sys.argv[i+1]
i += 2
elif argument == u'locality':
address[u'locality'] = sys.argv[i+1]
i += 2
elif argument == u'region':
address[u'region'] = sys.argv[i+1]
i += 2
elif argument == u'postalcode':
address[u'postalCode'] = sys.argv[i+1]
i += 2
elif argument == u'country':
address[u'country'] = sys.argv[i+1]
i += 2
elif argument == u'countrycode':
address[u'countryCode'] = sys.argv[i+1]
i += 2
elif argument == u'notprimary':
i += 1
break
elif argument == u'primary':
address[u'primary'] = True
i += 1
break
else:
print u'ERROR: invalid argument (%s) for account address details' % sys.argv[i]
sys.exit(2)
try:
body[u'addresses'].append(address)
except KeyError:
body[u'addresses'] = [address,]
elif sys.argv[i].lower() == u'organization':
organization = dict()
i += 1
while True:
argument = sys.argv[i].lower()
if argument == u'name':
organization[u'name'] = sys.argv[i+1]
i += 2
elif argument == u'title':
organization[u'title'] = sys.argv[i+1]
i += 2
elif argument == u'customtype':
organization[u'customType'] = sys.argv[i+1]
i += 2
elif argument == u'type':
organization[u'type'] = sys.argv[i+1].lower()
if organization[u'type'] not in [u'domain_only', u'school', u'unknown', u'work']:
print u'ERROR: organization type must be domain_only, school, unknown or work. Got %s' % organization[u'type']
sys.exit(2)
i += 2
elif argument == u'department':
organization[u'department'] = sys.argv[i+1]
i += 2
elif argument == u'symbol':
organization[u'symbol'] = sys.argv[i+1]
i += 2
elif argument == u'costcenter':
organization[u'costCenter'] = sys.argv[i+1]
i += 2
elif argument == u'location':
organization[u'location'] = sys.argv[i+1]
i += 2
elif argument == u'description':
organization[u'description'] = sys.argv[i+1]
i += 2
elif argument == u'domain':
organization[u'domain'] = sys.argv[i+1]
i += 2
elif argument == u'notprimary':
i += 1
break
elif argument == u'primary':
organization[u'primary'] = True
i += 1
break
else:
print u'ERROR: invalid argument (%s) for account organization details' % sys.argv[i]
sys.exit(2)
try:
body[u'organizations'].append(organization)
except KeyError:
body[u'organizations'] = [organization,]
elif sys.argv[i].lower() == u'phone':
phone = dict()
i += 1
while True:
argument = sys.argv[i].lower()
if argument == u'value':
phone[u'value'] = sys.argv[i+1]
i += 2
elif argument == u'type':
phone[u'type'] = sys.argv[i+1].lower()
if phone[u'type'] not in [u'assistant', u'callback', u'car', u'company_main', u'custom', u'grand_central', u'home', u'home_fax', u'isdn', u'main', u'mobile', u'other', u'other_fax', u'pager', u'radio', u'telex', u'tty_tdd', u'work', u'work_fax', u'work_mobile', u'work_pager']:
print u'ERROR: phone type must be assistant, callback, car, company_main, custom, grand_central, home, home_fax, isdn, main, mobile, other, other_fax, pager, radio, telex, tty_tdd, work, work_fax, work_mobile, work_pager. Got %s' % phone[u'type']
sys.exit(2)
i += 2
if phone[u'type'] == u'custom':
phone[u'customType'] = sys.argv[i]
i += 1
elif argument == u'notprimary':
i += 1
break
elif argument == u'primary':
phone[u'primary'] = True
i += 1
break
else:
print u'ERROR: invalid argument (%s) for account phone details' % sys.argv[i]
sys.exit(2)
try:
body[u'phones'].append(phone)
except KeyError:
body[u'phones'] = [phone,]
elif sys.argv[i].lower() == u'relation':
relation = dict()
i += 1
relation[u'type'] = sys.argv[i]
if relation[u'type'].lower() not in [u'mother', u'father', u'sister', u'brother', u'manager', u'assistant', u'partner']:
relation[u'type'] = u'custom'
relation[u'customType'] = sys.argv[i]
i += 1
relation[u'value'] = sys.argv[i]
try:
body[u'relations'].append(relation)
except KeyError:
body[u'relations'] = [relation,]
i += 1
elif sys.argv[i].lower() == u'externalid':
externalid = dict()
i += 1
externalid[u'type'] = sys.argv[i]
if externalid[u'type'].lower() not in [u'account', u'customer', u'network', u'organization']:
externalid[u'type'] = u'custom'
externalid[u'customType'] = sys.argv[i]
i += 1
externalid[u'value'] = sys.argv[i]
try:
body[u'externalIds'].append(externalid)
except KeyError:
body[u'externalIds'] = [externalid,]
i += 1
else:
if u'customSchemas' not in body:
body[u'customSchemas'] = {}
try:
(schemaName, fieldName) = sys.argv[i].split(u'.')
except ValueError:
print u'ERROR: %s is not a valid create user argument or custom schema name.' % sys.argv[i]
sys.exit(2)
field_value = sys.argv[i+1]
is_multivalue = False
if field_value.lower() in [u'multivalue', u'multivalued', u'value']:
is_multivalue = True
field_value = sys.argv[i+2]
if schemaName not in body[u'customSchemas']:
body[u'customSchemas'][schemaName] = {}
if is_multivalue:
if fieldName not in body[u'customSchemas'][schemaName]:
body[u'customSchemas'][schemaName][fieldName] = []
body[u'customSchemas'][schemaName][fieldName].append({u'value': field_value})
else:
body[u'customSchemas'][schemaName][fieldName] = field_value
i += 2
if is_multivalue:
i += 1
if not gotFirstName:
body[u'name'][u'givenName'] = u'Unknown'
if not gotLastName:
body[u'name'][u'familyName'] = u'Unknown'
if need_password:
body[u'password'] = u''.join(random.sample(u'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789~`!@#$%^&*()-=_+:;"\'{}[]\\|', 25))
if need_to_hash_password:
body[u'password'] = gen_sha512_hash(body[u'password'])
body[u'hashFunction'] = u'crypt'
print u"Creating account for %s" % body[u'primaryEmail']
callGAPI(cd.users(), u'insert', body=body, fields=u'primaryEmail')
if do_admin:
print u' Changing admin status for %s to %s' % (body[u'primaryEmail'], admin_body[u'status'])
callGAPI(cd.users(), u'makeAdmin', userKey=body[u'primaryEmail'], body=admin_body)
def doCreateGroup():
cd = buildGAPIObject(u'directory')
body = dict()
body[u'email'] = sys.argv[3]
if body[u'email'].find(u'@') == -1:
body[u'email'] = u'%s@%s' % (body[u'email'], GC_Values[GC_DOMAIN])
got_name = False
i = 4
gs_body = dict()
gs = None
while i < len(sys.argv):
if sys.argv[i].lower() == u'name':
body[u'name'] = sys.argv[i+1]
got_name = True
i += 2
elif sys.argv[i].lower() == u'description':
body[u'description'] = sys.argv[i+1]
i += 2
else:
value = sys.argv[i+1]
gs = buildGAPIObject(u'groupssettings')
gs_object = gs._rootDesc
matches_gs_setting = False
for (attrib, params) in gs_object[u'schemas'][u'Groups'][u'properties'].items():
if attrib in [u'kind', u'etag', u'email', u'name', u'description']:
continue
if sys.argv[i].lower().replace(u'_', u'') == attrib.lower():
matches_gs_setting = True
if params[u'type'] == u'integer':
try:
if value[-1:].upper() == u'M':
value = int(value[:-1]) * 1024 * 1024
elif value[-1:].upper() == u'K':
value = int(value[:-1]) * 1024
elif value[-1].upper() == u'B':
value = int(value[:-1])
else:
value = int(value)
except ValueError:
print u'ERROR: %s must be a number ending with M (megabytes), K (kilobytes) or nothing (bytes). Got %s' % value
sys.exit(2)
elif params[u'type'] == u'string':
if params[u'description'].find(value.upper()) != -1: # ugly hack because API wants some values uppercased.
value = value.upper()
elif value.lower() in true_values:
value = u'true'
elif value.lower() in false_values:
value = u'false'
break
if not matches_gs_setting:
print u'ERROR: %s is not a valid argument for "gam create group"' % sys.argv[i]
sys.exit(2)
gs_body[attrib] = value
i += 2
if not got_name:
body[u'name'] = body[u'email']
print u"Creating group %s" % body[u'email']
callGAPI(cd.groups(), u'insert', body=body, fields=u'email')
if gs:
callGAPI(gs.groups(), u'patch', retry_reasons=[u'serviceLimit'], groupUniqueId=body[u'email'], body=gs_body)
def doCreateAlias():
cd = buildGAPIObject(u'directory')
body = dict()
body[u'alias'] = sys.argv[3]
if body[u'alias'].find(u'@') == -1:
body[u'alias'] = u'%s@%s' % (body[u'alias'], GC_Values[GC_DOMAIN])
target_type = sys.argv[4].lower()
if target_type not in [u'user', u'group', u'target']:
print u'ERROR: type of target should be user or group. Got %s' % target_type
sys.exit(2)
targetKey = sys.argv[5]
if targetKey.find(u'@') == -1:
targetKey = u'%s@%s' % (targetKey, GC_Values[GC_DOMAIN])
print u'Creating alias %s for %s %s' % (body[u'alias'], target_type, targetKey)
if target_type == u'user':
callGAPI(cd.users().aliases(), u'insert', userKey=targetKey, body=body)
elif target_type == u'group':
callGAPI(cd.groups().aliases(), u'insert', groupKey=targetKey, body=body)
elif target_type == u'target':
try:
callGAPI(cd.users().aliases(), u'insert', throw_reasons=[u'invalid'], userKey=targetKey, body=body)
except googleapiclient.errors.HttpError:
callGAPI(cd.groups().aliases(), u'insert', groupKey=targetKey, body=body)
def doCreateOrg():
cd = buildGAPIObject(u'directory')
body = dict()
body[u'name'] = sys.argv[3]
if body[u'name'][0] == u'/':
body[u'name'] = body[u'name'][1:]
i = 4
body[u'parentOrgUnitPath'] = u'/'
while i < len(sys.argv):
if sys.argv[i].lower() == u'description':
body[u'description'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'parent':
body[u'parentOrgUnitPath'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'noinherit':
body[u'blockInheritance'] = True
i += 1
else:
print u'ERROR: %s is not a valid argument for "gam create org"' % sys.argv[i]
sys.exit(2)
callGAPI(cd.orgunits(), u'insert', customerId=GC_Values[GC_CUSTOMER_ID], body=body)
def doCreateResourceCalendar():
cd = buildGAPIObject(u'directory')
body = {u'resourceId': sys.argv[3],
u'resourceName': sys.argv[4]}
i = 5
while i < len(sys.argv):
if sys.argv[i].lower() == u'description':
body[u'resourceDescription'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'type':
body[u'resourceType'] = sys.argv[i+1]
i += 2
else:
print u'ERROR: %s is not a valid argument for "gam create resource"' % sys.argv[i]
sys.exit(2)
print u'Creating resource %s...' % body[u'resourceId']
callGAPI(cd.resources().calendars(), u'insert',
customer=GC_Values[GC_CUSTOMER_ID], body=body)
def doUpdateUser(users, i):
  """Update the given users from the remaining command-line arguments.

  Args:
    users: list of user email addresses (or uid:<id> strings) to update.
    i: index into sys.argv where the update keywords begin.

  Builds a Directory API user patch body from keyword/value pairs and applies
  it to every user; 'admin' is applied separately via users.makeAdmin.
  """
  cd = buildGAPIObject(u'directory')
  body = dict()
  gotPassword = isMD5 = isSHA1 = isCrypt = False
  is_admin = nohash = None
  do_update_user = False
  do_admin_user = False
  # Walk the argument list; each branch advances i past the tokens it consumed.
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'firstname':
      do_update_user = True
      if u'name' not in body:
        body[u'name'] = dict()
      body[u'name'][u'givenName'] = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'lastname':
      do_update_user = True
      if u'name' not in body:
        body[u'name'] = dict()
      body[u'name'][u'familyName'] = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() in [u'username', u'email']:
      do_update_user = True
      body[u'primaryEmail'] = sys.argv[i+1]
      if body[u'primaryEmail'].find(u'@') == -1:
        body[u'primaryEmail'] = u'%s@%s' % (body[u'primaryEmail'], GC_Values[GC_DOMAIN])
      i += 2
    elif sys.argv[i].lower() == u'password':
      do_update_user = True
      body[u'password'] = sys.argv[i+1]
      # NOTE(review): random.sample is not cryptographically secure; the
      # secrets module would be preferable for password generation — confirm.
      if body[u'password'].lower() == u'random':
        body[u'password'] = u''.join(random.sample(u'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789~`!@#$%^&*()-=_+:;"\'{}[]\\|', 50))
      i += 2
      gotPassword = True
    elif sys.argv[i].lower() == u'admin':
      do_admin_user = True
      if sys.argv[i+1].lower() in true_values:
        is_admin = True
      elif sys.argv[i+1].lower() in false_values:
        is_admin = False
      i += 2
    elif sys.argv[i].lower() == u'suspended':
      do_update_user = True
      if sys.argv[i+1].lower() in true_values:
        body[u'suspended'] = True
      elif sys.argv[i+1].lower() in false_values:
        body[u'suspended'] = False
      i += 2
    elif sys.argv[i].lower() == u'gal':
      do_update_user = True
      if sys.argv[i+1].lower() in true_values:
        body[u'includeInGlobalAddressList'] = True
      elif sys.argv[i+1].lower() in false_values:
        body[u'includeInGlobalAddressList'] = False
      else:
        print u'ERROR: gal should be on or off, not %s' % sys.argv[i+1]
        sys.exit(2)
      i += 2
    elif sys.argv[i].lower() == u'ipwhitelisted':
      do_update_user = True
      if sys.argv[i+1].lower() in true_values:
        body[u'ipWhitelisted'] = True
      elif sys.argv[i+1].lower() in false_values:
        body[u'ipWhitelisted'] = False
      i += 2
    elif sys.argv[i].lower() in [u'sha', u'sha1', u'sha-1']:
      do_update_user = True
      body[u'hashFunction'] = u'SHA-1'
      i += 1
      isSHA1 = True
    elif sys.argv[i].lower() == u'md5':
      do_update_user = True
      body[u'hashFunction'] = u'MD5'
      i += 1
      isMD5 = True
    elif sys.argv[i].lower() == u'crypt':
      do_update_user = True
      body[u'hashFunction'] = u'crypt'
      i += 1
      isCrypt = True
    elif sys.argv[i].lower() == u'nohash':
      nohash = True
      i += 1
    elif sys.argv[i].lower() == u'changepassword':
      do_update_user = True
      if sys.argv[i+1].lower() in true_values:
        body[u'changePasswordAtNextLogin'] = True
      elif sys.argv[i+1].lower() in false_values:
        body[u'changePasswordAtNextLogin'] = False
      i += 2
    elif sys.argv[i].lower() in [u'org', u'ou']:
      do_update_user = True
      body[u'orgUnitPath'] = sys.argv[i+1]
      if body[u'orgUnitPath'][0] != u'/':
        body[u'orgUnitPath'] = u'/'+body[u'orgUnitPath']
      i += 2
    elif sys.argv[i].lower() == u'agreedtoterms':
      do_update_user = True
      if sys.argv[i+1].lower() in true_values:
        body[u'agreedToTerms'] = True
      elif sys.argv[i+1].lower() in false_values:
        body[u'agreedToTerms'] = False
      i += 2
    elif sys.argv[i].lower() == u'customerid':
      do_update_user = True
      body[u'customerId'] = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'im':
      # im requires the fixed token order: type <t> [custom] protocol <p> [primary] <address>
      do_update_user = True
      im = dict()
      i += 1
      if sys.argv[i].lower() != u'type':
        print u'ERROR: wrong format for account im details. Expected type got %s' % sys.argv[i]
        sys.exit(2)
      i += 1
      im[u'type'] = sys.argv[i].lower()
      if im[u'type'] not in [u'custom', u'home', u'other', u'work']:
        print u'ERROR: type should be custom, home, other or work. Got %s' % im[u'type']
        sys.exit(2)
      if im[u'type'] == u'custom':
        i += 1
        im[u'customType'] = sys.argv[i]
      i += 1
      if sys.argv[i].lower() != u'protocol':
        print u'ERROR: wrong format for account details. Expected protocol got %s' % sys.argv[i]
        sys.exit(2)
      i += 1
      im[u'protocol'] = sys.argv[i].lower()
      if im[u'protocol'] not in [u'custom_protocol', u'aim', u'gtalk', u'icq', u'jabber', u'msn', u'net_meeting', u'qq', u'skype', u'yahoo']:
        print u'ERROR: protocol should be custom_protocol, aim, gtalk, icq, jabber, msn, net_meeting, qq, skype or yahoo. Got %s' % im[u'protocol']
        sys.exit(2)
      if im[u'protocol'] == u'custom_protocol':
        i += 1
        im[u'customProtocol'] = sys.argv[i]
      i += 1
      if sys.argv[i].lower() == u'primary':
        im[u'primary'] = True
        i += 1
      im[u'im'] = sys.argv[i]
      i += 1
      try:
        body[u'ims'].append(im)
      except KeyError:
        body[u'ims'] = [im,]
    elif sys.argv[i].lower() == u'address':
      do_update_user = True
      address = dict()
      i += 1
      if sys.argv[i].lower() != u'type':
        print u'ERROR: wrong format for account address details. Expected type got %s' % sys.argv[i]
        sys.exit(2)
      i += 1
      address[u'type'] = sys.argv[i].lower()
      if address[u'type'] not in [u'custom', u'home', u'other', u'work']:
        print u'ERROR: wrong type should be custom, home, other or work. Got %s' % address[u'type']
        sys.exit(2)
      if address[u'type'] == u'custom':
        i += 1
        address[u'customType'] = sys.argv[i]
      i += 1
      if sys.argv[i].lower() == u'unstructured':
        i += 1
        address[u'sourceIsStructured'] = False
        address[u'formatted'] = sys.argv[i]
        i += 1
      # Structured address fields until a terminating primary/notprimary token.
      while True:
        argument = sys.argv[i].lower()
        if argument == u'pobox':
          address[u'poBox'] = sys.argv[i+1]
          i += 2
        elif argument == u'extendedaddress':
          address[u'extendedAddress'] = sys.argv[i+1]
          i += 2
        elif argument == u'streetaddress':
          address[u'streetAddress'] = sys.argv[i+1]
          i += 2
        elif argument == u'locality':
          address[u'locality'] = sys.argv[i+1]
          i += 2
        elif argument == u'region':
          address[u'region'] = sys.argv[i+1]
          i += 2
        elif argument == u'postalcode':
          address[u'postalCode'] = sys.argv[i+1]
          i += 2
        elif argument == u'country':
          address[u'country'] = sys.argv[i+1]
          i += 2
        elif argument == u'countrycode':
          address[u'countryCode'] = sys.argv[i+1]
          i += 2
        elif argument == u'notprimary':
          i += 1
          break
        elif argument == u'primary':
          address[u'primary'] = True
          i += 1
          break
        else:
          print u'ERROR: invalid argument (%s) for account address details' % sys.argv[i]
          sys.exit(2)
      try:
        body[u'addresses'].append(address)
      except KeyError:
        body[u'addresses'] = [address,]
    elif sys.argv[i].lower() == u'organization':
      do_update_user = True
      organization = dict()
      i += 1
      while True:
        argument = sys.argv[i].lower()
        if argument == u'name':
          organization[u'name'] = sys.argv[i+1]
          i += 2
        elif argument == u'title':
          organization[u'title'] = sys.argv[i+1]
          i += 2
        elif argument == u'customtype':
          organization[u'customType'] = sys.argv[i+1]
          i += 2
        elif argument == u'type':
          organization[u'type'] = sys.argv[i+1].lower()
          if organization[u'type'] not in [u'domain_only', u'school', u'unknown', u'work']:
            print u'ERROR: organization type must be domain_only, school, unknown or work. Got %s' % organization[u'type']
            sys.exit(2)
          i += 2
        elif argument == u'department':
          organization[u'department'] = sys.argv[i+1]
          i += 2
        elif argument == u'symbol':
          organization[u'symbol'] = sys.argv[i+1]
          i += 2
        elif argument == u'costcenter':
          organization[u'costCenter'] = sys.argv[i+1]
          i += 2
        elif argument == u'location':
          organization[u'location'] = sys.argv[i+1]
          i += 2
        elif argument == u'description':
          organization[u'description'] = sys.argv[i+1]
          i += 2
        elif argument == u'domain':
          organization[u'domain'] = sys.argv[i+1]
          i += 2
        elif argument == u'notprimary':
          i += 1
          break
        elif argument == u'primary':
          organization[u'primary'] = True
          i += 1
          break
        else:
          print u'ERROR: invalid argument (%s) for account organization details' % sys.argv[i]
          sys.exit(2)
      try:
        body[u'organizations'].append(organization)
      except KeyError:
        body[u'organizations'] = [organization,]
    elif sys.argv[i].lower() == u'phone':
      do_update_user = True
      phone = dict()
      i += 1
      while True:
        argument = sys.argv[i].lower()
        if argument == u'value':
          phone[u'value'] = sys.argv[i+1]
          i += 2
        elif argument == u'type':
          phone[u'type'] = sys.argv[i+1].lower()
          if phone[u'type'] not in [u'assistant', u'callback', u'car', u'company_main', u'custom', u'grand_central', u'home', u'home_fax', u'isdn', u'main', u'mobile', u'other', u'other_fax', u'pager', u'radio', u'telex', u'tty_tdd', u'work', u'work_fax', u'work_mobile', u'work_pager']:
            print u'ERROR: phone type must be assistant, callback, car, company_main, custom, grand_central, home, home_fax, isdn, main, mobile, other, other_fax, pager, radio, telex, tty_tdd, work, work_fax, work_mobile, work_pager. Got %s' % phone[u'type']
            sys.exit(2)
          i += 2
          if phone[u'type'] == u'custom':
            phone[u'customType'] = sys.argv[i]
            i += 1
        elif argument == u'notprimary':
          i += 1
          break
        elif argument == u'primary':
          phone[u'primary'] = True
          i += 1
          break
        else:
          print u'ERROR: invalid argument (%s) for account phone details' % sys.argv[i]
          sys.exit(2)
      try:
        body[u'phones'].append(phone)
      except KeyError:
        body[u'phones'] = [phone,]
    elif sys.argv[i].lower() == u'relation':
      do_update_user = True
      relation = dict()
      i += 1
      relation[u'type'] = sys.argv[i]
      # Unknown relation types become custom with the given text as customType.
      if relation[u'type'].lower() not in [u'mother', u'father', u'sister', u'brother', u'manager', u'assistant', u'partner']:
        relation[u'type'] = u'custom'
        relation[u'customType'] = sys.argv[i]
      i += 1
      relation[u'value'] = sys.argv[i]
      try:
        body[u'relations'].append(relation)
      except KeyError:
        body[u'relations'] = [relation,]
      i += 1
    elif sys.argv[i].lower() == u'otheremail':
      do_update_user = True
      an_email = dict()
      i += 1
      an_email[u'type'] = sys.argv[i]
      if an_email[u'type'].lower() not in [u'custom', u'home', u'other', u'work']:
        an_email[u'type'] = u'custom'
        an_email[u'customType'] = sys.argv[i]
      i += 1
      an_email[u'address'] = sys.argv[i]
      if u'emails' not in body:
        body[u'emails'] = list()
      body[u'emails'].append(an_email)
      i += 1
    elif sys.argv[i].lower() == u'externalid':
      do_update_user = True
      externalid = dict()
      i += 1
      externalid[u'type'] = sys.argv[i]
      if externalid[u'type'].lower() not in [u'account', u'customer', u'network', u'organization']:
        externalid[u'type'] = u'custom'
        externalid[u'customType'] = sys.argv[i]
      i += 1
      externalid[u'value'] = sys.argv[i]
      try:
        body[u'externalIds'].append(externalid)
      except KeyError:
        body[u'externalIds'] = [externalid,]
      i += 1
    # else:
    #  showUsage()
    #  print u''
    #  print u'ERROR: didn\'t expect %s command at position %s' % (sys.argv[i], i)
    #  sys.exit(2)
    else:
      # Anything unrecognized is treated as SchemaName.FieldName for a
      # custom-schema update; 'multivalue'/'value' marks a multi-valued field.
      do_update_user = True
      if u'customSchemas' not in body:
        body[u'customSchemas'] = {}
      try:
        (schemaName, fieldName) = sys.argv[i].split(u'.')
      except ValueError:
        print u'ERROR: %s is not a valid user update argument or custom schema name' % sys.argv[i]
        sys.exit(2)
      field_value = sys.argv[i+1]
      is_multivalue = False
      if field_value.lower() in [u'multivalue', u'multivalued', u'value']:
        is_multivalue = True
        field_value = sys.argv[i+2]
      if schemaName not in body[u'customSchemas']:
        body[u'customSchemas'][schemaName] = {}
      if is_multivalue:
        if fieldName not in body[u'customSchemas'][schemaName]:
          body[u'customSchemas'][schemaName][fieldName] = []
        body[u'customSchemas'][schemaName][fieldName].append({u'value': field_value})
      else:
        body[u'customSchemas'][schemaName][fieldName] = field_value
      i += 2
      if is_multivalue:
        i += 1
  # Hash plaintext passwords client-side unless a hash type or nohash was given.
  if gotPassword and not (isSHA1 or isMD5 or isCrypt or nohash):
    body[u'password'] = gen_sha512_hash(body[u'password'])
    body[u'hashFunction'] = u'crypt'
  for user in users:
    if user[:4].lower() == u'uid:':
      user = user[4:]
    elif user.find(u'@') == -1:
      user = u'%s@%s' % (user, GC_Values[GC_DOMAIN])
    # 'vfe@' renames the user to vfe.<name>.<rand>@<domain> and records the old
    # primary address as a custom 'former_employee' secondary email.
    if u'primaryEmail' in body and body[u'primaryEmail'][:4].lower() == u'vfe@':
      user_primary = callGAPI(cd.users(), u'get', userKey=user, fields=u'primaryEmail,id')
      user = user_primary[u'id']
      user_primary = user_primary[u'primaryEmail']
      user_name = user_primary[:user_primary.find(u'@')]
      user_domain = user_primary[user_primary.find(u'@')+1:]
      body[u'primaryEmail'] = u'vfe.%s.%05d@%s' % (user_name, random.randint(1, 99999), user_domain)
      body[u'emails'] = [{u'type': u'custom', u'customType': u'former_employee', u'primary': False, u'address': user_primary}]
    sys.stdout.write(u'updating user %s...\n' % user)
    if do_update_user:
      callGAPI(cd.users(), u'patch', userKey=user, body=body)
    if do_admin_user:
      callGAPI(cd.users(), u'makeAdmin', userKey=user, body={u'status': is_admin})
def doRemoveUsersAliases(users):
cd = buildGAPIObject(u'directory')
for user in users:
user_aliases = callGAPI(cd.users(), u'get', userKey=user, fields=u'aliases,id,primaryEmail')
user_id = user_aliases[u'id']
user_primary = user_aliases[u'primaryEmail']
if u'aliases' in user_aliases:
print u'%s has %s aliases' % (user_primary, len(user_aliases[u'aliases']))
for an_alias in user_aliases[u'aliases']:
print u' removing alias %s for %s...' % (an_alias, user_primary)
callGAPI(cd.users().aliases(), u'delete', userKey=user_id, alias=an_alias)
else:
print u'%s has no aliases' % user_primary
def doRemoveUsersGroups(users):
cd = buildGAPIObject(u'directory')
for user in users:
user_groups = callGAPIpages(cd.groups(), u'list', u'groups', userKey=user, fields=u'groups(id,email)')
num_groups = len(user_groups)
print u'%s is in %s groups' % (user, num_groups)
i = 1
for user_group in user_groups:
print u' removing %s from %s (%s/%s)' % (user, user_group[u'email'], i, num_groups)
callGAPI(cd.members(), u'delete', soft_errors=True, groupKey=user_group[u'id'], memberKey=user)
i += 1
print u''
def doUpdateGroup():
cd = buildGAPIObject(u'directory')
group = sys.argv[3]
if sys.argv[4].lower() in [u'add', u'update', u'sync', u'remove']:
if group[0:3].lower() == u'uid:':
group = group[4:]
elif group.find(u'@') == -1:
group = u'%s@%s' % (group, GC_Values[GC_DOMAIN])
if sys.argv[4].lower() in [u'add', u'update']:
role = sys.argv[5].upper()
i = 6
if role not in [u'OWNER', u'MANAGER', u'MEMBER']:
role = u'MEMBER'
i = 5
if sys.argv[i].lower() in usergroup_types:
users_email = getUsersToModify(entity_type=sys.argv[i], entity=sys.argv[i+1])
else:
users_email = [sys.argv[i],]
body = {u'role': role}
for user_email in users_email:
if user_email[:4].lower() == u'uid:':
user_email = user_email[4:]
body[u'id'] = user_email
else:
if user_email.find(u'@') == -1:
user_email = u'%s@%s' % (user_email, GC_Values[GC_DOMAIN])
body[u'email'] = user_email
sys.stderr.write(u' %sing %s %s...' % (sys.argv[4].lower(), role.lower(), user_email))
try:
if sys.argv[4].lower() == u'add':
body = {u'role': role}
body[u'email'] = user_email
result = callGAPI(cd.members(), u'insert', soft_errors=True, groupKey=group, body=body)
elif sys.argv[4].lower() == u'update':
result = callGAPI(cd.members(), u'update', soft_errors=True, groupKey=group, memberKey=user_email, body=body)
if result:
addr = result.get(u'email', None)
if addr:
addr = addr.lower()
if addr != user_email.lower():
print u'added %s (primary address) to group' % addr
else:
print u'added %s to group' % addr
else:
print u'added %s to group' % result[u'id']
except googleapiclient.errors.HttpError:
pass
elif sys.argv[4].lower() == u'sync':
role = sys.argv[5].upper()
i = 6
if role not in [u'OWNER', u'MANAGER', u'MEMBER']:
role = u'MEMBER'
i = 5
users_email = getUsersToModify(entity_type=sys.argv[i], entity=sys.argv[i+1])
users_email = [x.lower() for x in users_email]
current_emails = getUsersToModify(entity_type=u'group', entity=group, member_type=role)
current_emails = [x.lower() for x in current_emails]
to_add = list(set(users_email) - set(current_emails))
to_remove = list(set(current_emails) - set(users_email))
for user_email in to_add:
sys.stderr.write(u' adding %s %s\n' % (role, user_email))
try:
result = callGAPI(cd.members(), u'insert', soft_errors=True, throw_reasons=[u'duplicate'], groupKey=group, body={u'email': user_email, u'role': role})
except googleapiclient.errors.HttpError:
result = callGAPI(cd.members(), u'update', soft_errors=True, groupKey=group, memberKey=user_email, body={u'email': user_email, u'role': role})
for user_email in to_remove:
sys.stderr.write(u' removing %s\n' % user_email)
result = callGAPI(cd.members(), u'delete', soft_errors=True, groupKey=group, memberKey=user_email)
elif sys.argv[4].lower() == u'remove':
i = 5
if sys.argv[i].lower() in [u'member', u'manager', u'owner']:
i += 1
if sys.argv[i].lower() in usergroup_types:
user_emails = getUsersToModify(entity_type=sys.argv[i], entity=sys.argv[i+1])
else:
user_emails = [sys.argv[i],]
for user_email in user_emails:
if user_email[:4].lower() == u'uid:':
user_email = user_email[4:]
elif user_email.find(u'@') == -1:
user_email = u'%s@%s' % (user_email, GC_Values[GC_DOMAIN])
sys.stderr.write(u' removing %s\n' % user_email)
result = callGAPI(cd.members(), u'delete', soft_errors=True, groupKey=group, memberKey=user_email)
else:
i = 4
use_cd_api = False
gs = None
gs_body = dict()
cd_body = dict()
while i < len(sys.argv):
if sys.argv[i].lower() == u'email':
use_cd_api = True
cd_body[u'email'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'admincreated':
use_cd_api = True
cd_body[u'adminCreated'] = sys.argv[i+1].lower()
if cd_body[u'adminCreated'] not in [u'true', u'false']:
print u'ERROR: Value for admincreated must be true or false. Got %s' % cd_body[u'adminCreated']
sys.exit(2)
i += 2
else:
value = sys.argv[i+1]
gs = buildGAPIObject(u'groupssettings')
gs_object = gs._rootDesc
matches_gs_setting = False
for (attrib, params) in gs_object[u'schemas'][u'Groups'][u'properties'].items():
if attrib in [u'kind', u'etag', u'email']:
continue
if sys.argv[i].lower().replace(u'_', u'') == attrib.lower():
matches_gs_setting = True
if params[u'type'] == u'integer':
try:
if value[-1:].upper() == u'M':
value = int(value[:-1]) * 1024 * 1024
elif value[-1:].upper() == u'K':
value = int(value[:-1]) * 1024
elif value[-1].upper() == u'B':
value = int(value[:-1])
else:
value = int(value)
except ValueError:
print u'ERROR: %s must be a number ending with M (megabytes), K (kilobytes) or nothing (bytes). Got %s' % value
sys.exit(2)
elif params[u'type'] == u'string':
if params[u'description'].find(value.upper()) != -1: # ugly hack because API wants some values uppercased.
value = value.upper()
elif value.lower() in true_values:
value = u'true'
elif value.lower() in false_values:
value = u'false'
break
if not matches_gs_setting:
print u'ERROR: %s is not a valid argument for "gam update group"' % sys.argv[i]
sys.exit(2)
gs_body[attrib] = value
i += 2
if group[:4].lower() == u'uid:': # group settings API won't take uid so we make sure cd API is used so that we can grab real email.
use_cd_api = True
group = group[4:]
elif group.find(u'@') == -1:
group = u'%s@%s' % (group, GC_Values[GC_DOMAIN])
if use_cd_api:
try:
if cd_body[u'email'].find(u'@') == -1:
cd_body[u'email'] = u'%s@%s' % (cd_body[u'email'], GC_Values[GC_DOMAIN])
except KeyError:
pass
cd_result = callGAPI(cd.groups(), u'patch', groupKey=group, body=cd_body)
if gs:
if use_cd_api:
group = cd_result[u'email']
callGAPI(gs.groups(), u'patch', retry_reasons=[u'serviceLimit'], groupUniqueId=group, body=gs_body)
print u'updated group %s' % group
def doUpdateAlias():
cd = buildGAPIObject(u'directory')
alias = sys.argv[3]
target_type = sys.argv[4].lower()
if target_type not in [u'user', u'group', u'target']:
print u'ERROR: target type should be "user", "group" or "target", got %s' % target_type
sys.exit(2)
target_email = sys.argv[5]
if alias.find(u'@') == -1:
alias = u'%s@%s' % (alias, GC_Values[GC_DOMAIN])
if target_email.find(u'@') == -1:
target_email = u'%s@%s' % (target_email, GC_Values[GC_DOMAIN])
try:
callGAPI(cd.users().aliases(), u'delete', throw_reasons=[u'invalid'], userKey=alias, alias=alias)
except googleapiclient.errors.HttpError:
callGAPI(cd.groups().aliases(), u'delete', groupKey=alias, alias=alias)
if target_type == u'user':
callGAPI(cd.users().aliases(), u'insert', userKey=target_email, body={u'alias': alias})
elif target_type == u'group':
callGAPI(cd.groups().aliases(), u'insert', groupKey=target_email, body={u'alias': alias})
elif target_type == u'target':
try:
callGAPI(cd.users().aliases(), u'insert', throw_reasons=[u'invalid'], userKey=target_email, body={u'alias': alias})
except googleapiclient.errors.HttpError:
callGAPI(cd.groups().aliases(), u'insert', groupKey=target_email, body={u'alias': alias})
print u'updated alias %s' % alias
def doUpdateResourceCalendar():
cd = buildGAPIObject(u'directory')
resId = sys.argv[3]
body = {}
i = 4
while i < len(sys.argv):
if sys.argv[i].lower() == u'name':
body[u'resourceName'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'description':
body[u'resourceDescription'] = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'type':
body[u'resourceType'] = sys.argv[i+1]
i += 2
else:
print u'ERROR: %s is not a valid argument for "gam update resource"' % sys.argv[i]
sys.exit(2)
# Use patch since it seems to work better.
# update requires name to be set.
callGAPI(cd.resources().calendars(), u'patch',
customer=GC_Values[GC_CUSTOMER_ID], calendarResourceId=resId, body=body,
fields=u'')
print u'updated resource %s' % resId
def doUpdateCros():
cd = buildGAPIObject(u'directory')
deviceId = sys.argv[3]
if deviceId[:6].lower() == u'query:':
query = deviceId[6:]
devices_result = callGAPIpages(cd.chromeosdevices(), u'list', u'chromeosdevices',
query=query, customerId=GC_Values[GC_CUSTOMER_ID], fields=u'chromeosdevices/deviceId,nextPageToken')
devices = list()
for a_device in devices_result:
devices.append(a_device[u'deviceId'])
else:
devices = [deviceId,]
i = 4
body = dict()
while i < len(sys.argv):
if sys.argv[i].lower() == u'user':
body[u'annotatedUser'] = sys.argv[i + 1]
i += 2
elif sys.argv[i].lower() == u'location':
body[u'annotatedLocation'] = sys.argv[i + 1]
i += 2
elif sys.argv[i].lower() == u'notes':
body[u'notes'] = sys.argv[i + 1]
i += 2
elif sys.argv[i].lower() == u'status':
body[u'status'] = sys.argv[i + 1].upper()
#if body[u'status'] not in [u'ACTIVE', u'DEPROVISIONED']:
# print u'ERROR: status must be active or deprovisioned, got %s' % body[u'status']
# sys.exit(2)
i += 2
elif sys.argv[i].lower() in [u'tag', u'asset', u'assetid']:
body[u'annotatedAssetId'] = sys.argv[i + 1]
#annotatedAssetId - Handle Asset Tag Field 2015-04-13
i += 2
elif sys.argv[i].lower() in [u'ou', u'org']:
body[u'orgUnitPath'] = sys.argv[i + 1]
if body[u'orgUnitPath'][0] != u'/':
body[u'orgUnitPath'] = u'/%s' % body[u'orgUnitPath']
i += 2
else:
print u'ERROR: %s is not a valid argument for "gam update cros"' % sys.argv[i]
sys.exit(2)
device_count = len(devices)
i = 1
for this_device in devices:
print u' updating %s (%s of %s)' % (this_device, i, device_count)
callGAPI(cd.chromeosdevices(), u'patch', deviceId=this_device, body=body, customerId=GC_Values[GC_CUSTOMER_ID])
i += 1
def doUpdateMobile():
cd = buildGAPIObject(u'directory')
resourceId = sys.argv[3]
i = 4
action_body = patch_body = dict()
doPatch = doAction = False
while i < len(sys.argv):
if sys.argv[i].lower() == u'action':
action_body[u'action'] = sys.argv[i+1].lower()
if action_body[u'action'] == u'wipe':
action_body[u'action'] = u'admin_remote_wipe'
elif action_body[u'action'].replace(u'_', u'') in [u'accountwipe', u'wipeaccount']:
action_body[u'action'] = u'admin_account_wipe'
if action_body[u'action'] not in [u'admin_remote_wipe', u'admin_account_wipe', u'approve', u'block', u'cancel_remote_wipe_then_activate', u'cancel_remote_wipe_then_block']:
print u'ERROR: action must be wipe, wipeaccount, approve, block, cancel_remote_wipe_then_activate or cancel_remote_wipe_then_block. Got %s' % action_body[u'action']
sys.exit(2)
doAction = True
i += 2
elif sys.argv[i].lower() == u'model':
patch_body[u'model'] = sys.argv[i+1]
i += 2
doPatch = True
elif sys.argv[i].lower() == u'os':
patch_body[u'os'] = sys.argv[i+1]
i += 2
doPatch = True
elif sys.argv[i].lower() == u'useragent':
patch_body[u'userAgent'] = sys.argv[i+1]
i += 2
doPatch = True
else:
print u'ERROR: %s is not a valid argument for "gam update mobile"' % sys.argv[i]
sys.exit(2)
if doPatch:
callGAPI(cd.mobiledevices(), u'patch', resourceId=resourceId, body=patch_body, customerId=GC_Values[GC_CUSTOMER_ID])
if doAction:
callGAPI(cd.mobiledevices(), u'action', resourceId=resourceId, body=action_body, customerId=GC_Values[GC_CUSTOMER_ID])
def doDeleteMobile():
  """Delete the mobile device named on the command line."""
  cd = buildGAPIObject(u'directory')
  callGAPI(cd.mobiledevices(), u'delete',
           resourceId=sys.argv[3], customerId=GC_Values[GC_CUSTOMER_ID])
def doUpdateOrg():
  """Update an org unit: move users/devices into it, or edit its attributes.

  'gam update org <path> move|add <entity>' relocates users or CrOS devices;
  any other form updates the OU's name/description/parent/inheritance.
  """
  cd = buildGAPIObject(u'directory')
  orgUnitPath = sys.argv[3]
  if sys.argv[4].lower() in [u'move', u'add']:
    if sys.argv[5].lower() in usergroup_types:
      users = getUsersToModify(entity_type=sys.argv[5].lower(), entity=sys.argv[6])
    else:
      users = getUsersToModify(entity_type=u'user', entity=sys.argv[5])
    if (sys.argv[5].lower() == u'cros') or ((sys.argv[5].lower() == u'all') and (sys.argv[6].lower() == u'cros')):
      cros_count = len(users)
      current_cros = 1
      for cros in users:
        sys.stderr.write(u' moving %s to %s (%s/%s)\n' % (cros, orgUnitPath, current_cros, cros_count))
        # NOTE(review): '//%s' yields a double leading slash for CrOS moves —
        # presumably a chromeosdevices API quirk; confirm before changing.
        callGAPI(cd.chromeosdevices(), u'patch', soft_errors=True,
                 customerId=GC_Values[GC_CUSTOMER_ID], deviceId=cros, body={u'orgUnitPath': '//%s' % orgUnitPath})
        current_cros += 1
    else:
      user_count = len(users)
      current_user = 1
      if orgUnitPath != u'/' and orgUnitPath[0] != u'/': # we do want a / at the beginning for user updates
        orgUnitPath = u'/%s' % orgUnitPath
      for user in users:
        sys.stderr.write(u' moving %s to %s (%s/%s)\n' % (user, orgUnitPath, current_user, user_count))
        try:
          callGAPI(cd.users(), u'patch', throw_reasons=[u'conditionNotMet'], userKey=user, body={u'orgUnitPath': orgUnitPath})
        except googleapiclient.errors.HttpError:
          pass
        current_user += 1
  else:
    # Attribute update: parse keyword/value pairs into the OU patch body.
    body = dict()
    i = 4
    while i < len(sys.argv):
      if sys.argv[i].lower() == u'name':
        body[u'name'] = sys.argv[i+1]
        i += 2
      elif sys.argv[i].lower() == u'description':
        body[u'description'] = sys.argv[i+1]
        i += 2
      elif sys.argv[i].lower() == u'parent':
        body[u'parentOrgUnitPath'] = sys.argv[i+1]
        if body[u'parentOrgUnitPath'][0] != u'/':
          body[u'parentOrgUnitPath'] = u'/'+body[u'parentOrgUnitPath']
        i += 2
      elif sys.argv[i].lower() == u'noinherit':
        body[u'blockInheritance'] = True
        i += 1
      elif sys.argv[i].lower() == u'inherit':
        body[u'blockInheritance'] = False
        i += 1
      else:
        print u'ERROR: %s is not a valid argument for "gam update org"' % sys.argv[i]
        sys.exit(2)
    if orgUnitPath[0] == u'/': # we don't want a / at the beginning for OU updates
      orgUnitPath = orgUnitPath[1:]
    callGAPI(cd.orgunits(), u'update', customerId=GC_Values[GC_CUSTOMER_ID], orgUnitPath=orgUnitPath, body=body)
def doWhatIs():
  """Report whether an email address is a user, user alias, group or group alias."""
  cd = buildGAPIObject(u'directory')
  email = sys.argv[2]
  if u'@' not in email:
    email = u'%s@%s' % (email, GC_Values[GC_DOMAIN])
  # First try the users collection; a mismatched primaryEmail means alias.
  try:
    user_or_alias = callGAPI(cd.users(), u'get', throw_reasons=[u'notFound', u'badRequest', u'invalid'], userKey=email, fields=u'primaryEmail')
    if user_or_alias[u'primaryEmail'].lower() == email.lower():
      sys.stderr.write(u'%s is a user\n\n' % email)
      doGetUserInfo(user_email=email)
    else:
      sys.stderr.write(u'%s is a user alias\n\n' % email)
      doGetAliasInfo(alias_email=email)
    return
  except googleapiclient.errors.HttpError:
    sys.stderr.write(u'%s is not a user...\n' % email)
    sys.stderr.write(u'%s is not a user alias...\n' % email)
  # Not a user; fall back to the groups collection.
  try:
    group = callGAPI(cd.groups(), u'get', throw_reasons=[u'notFound', u'badRequest'], groupKey=email, fields=u'email')
  except googleapiclient.errors.HttpError:
    sys.stderr.write(u'%s is not a group either!\n\nDoesn\'t seem to exist!\n\n' % email)
    sys.exit(1)
  if group[u'email'].lower() == email.lower():
    sys.stderr.write(u'%s is a group\n\n' % email)
    doGetGroupInfo(group_name=email)
  else:
    sys.stderr.write(u'%s is a group alias\n\n' % email)
    doGetAliasInfo(alias_email=email)
def doGetUserInfo(user_email=None):
  """Print a detailed report for one user.

  Args:
    user_email: address/uid of the user; when None it is taken from
      sys.argv[3], falling back to the authenticated admin.
  """
  cd = buildGAPIObject(u'directory')
  i = 3
  if user_email == None:
    if len(sys.argv) > 3:
      user_email = sys.argv[3]
      i = 4
    else:
      user_email = GM_Globals[GM_ADMIN]
  if user_email[:4].lower() == u'uid:':
    user_email = user_email[4:]
  elif user_email.find(u'@') == -1:
    user_email = u'%s@%s' % (user_email, GC_Values[GC_DOMAIN])
  # Optional flags restrict which sections of the report are produced.
  getSchemas = getAliases = getGroups = getLicenses = True
  projection = u'full'
  customFieldMask = viewType = None
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'noaliases':
      getAliases = False
      i += 1
    elif sys.argv[i].lower() == u'nogroups':
      getGroups = False
      i += 1
    elif sys.argv[i].lower() in [u'nolicenses', u'nolicences']:
      getLicenses = False
      i += 1
    elif sys.argv[i].lower() == u'noschemas':
      getSchemas = False
      projection = u'basic'
      i += 1
    elif sys.argv[i].lower() == u'schemas':
      getSchemas = True
      projection = u'custom'
      customFieldMask = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'userview':
      viewType = u'domain_public'
      getGroups = getLicenses = False
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam info user"' % sys.argv[i]
      sys.exit(2)
  user = callGAPI(cd.users(), u'get', userKey=user_email, projection=projection, customFieldMask=customFieldMask, viewType=viewType)
  print u'User: %s' % user[u'primaryEmail']
  if u'name' in user and u'givenName' in user[u'name']:
    print convertUTF8(u'First Name: %s' % user[u'name'][u'givenName'])
  if u'name' in user and u'familyName' in user[u'name']:
    print convertUTF8(u'Last Name: %s' % user[u'name'][u'familyName'])
  if u'isAdmin' in user:
    print u'Is a Super Admin: %s' % user[u'isAdmin']
  if u'isDelegatedAdmin' in user:
    print u'Is Delegated Admin: %s' % user[u'isDelegatedAdmin']
  if u'agreedToTerms' in user:
    print u'Has Agreed to Terms: %s' % user[u'agreedToTerms']
  if u'ipWhitelisted' in user:
    print u'IP Whitelisted: %s' % user[u'ipWhitelisted']
  if u'suspended' in user:
    print u'Account Suspended: %s' % user[u'suspended']
  if u'suspensionReason' in user:
    print u'Suspension Reason: %s' % user[u'suspensionReason']
  if u'changePasswordAtNextLogin' in user:
    print u'Must Change Password: %s' % user[u'changePasswordAtNextLogin']
  if u'id' in user:
    print u'Google Unique ID: %s' % user[u'id']
  if u'customerId' in user:
    print u'Customer ID: %s' % user[u'customerId']
  if u'isMailboxSetup' in user:
    print u'Mailbox is setup: %s' % user[u'isMailboxSetup']
  if u'includeInGlobalAddressList' in user:
    print u'Included in GAL: %s' % user[u'includeInGlobalAddressList']
  if u'creationTime' in user:
    print u'Creation Time: %s' % user[u'creationTime']
  if u'lastLoginTime' in user:
    # The epoch sentinel means the account has never logged in.
    if user[u'lastLoginTime'] == u'1970-01-01T00:00:00.000Z':
      print u'Last login time: Never'
    else:
      print u'Last login time: %s' % user[u'lastLoginTime']
  if u'orgUnitPath' in user:
    print u'Google Org Unit Path: %s\n' % user[u'orgUnitPath']
  if u'thumbnailPhotoUrl' in user:
    print u'Photo URL: %s\n' % user[u'thumbnailPhotoUrl']
  if u'ims' in user:
    print u'IMs:'
    for im in user[u'ims']:
      for key in im:
        print convertUTF8(u' %s: %s' % (key, im[key]))
      print u''
  if u'addresses' in user:
    print u'Addresses:'
    for address in user[u'addresses']:
      for key in address:
        print convertUTF8(u' %s: %s' % (key, address[key]))
      print u''
  if u'organizations' in user:
    print u'Organizations:'
    for org in user[u'organizations']:
      for key in org:
        if key == u'customType' and not org[key]:
          continue
        print convertUTF8(u' %s: %s' % (key, org[key]))
      print u''
  if u'phones' in user:
    print u'Phones:'
    for phone in user[u'phones']:
      for key in phone:
        print convertUTF8(u' %s: %s' % (key, phone[key]))
      print u''
  if u'emails' in user:
    if len(user[u'emails']) > 1:
      print u'Other Emails:'
      for an_email in user[u'emails']:
        # Skip the primary; it was already printed above.
        if an_email[u'address'].lower() == user[u'primaryEmail'].lower():
          continue
        for key in an_email:
          if key == u'type' and an_email[key] == u'custom':
            continue
          if key == u'customType':
            print convertUTF8(u' type: %s' % an_email[key])
          else:
            print convertUTF8(u' %s: %s' % (key, an_email[key]))
      print u''
  if u'relations' in user:
    print u'Relations:'
    for relation in user[u'relations']:
      for key in relation:
        if key == u'type' and relation[key] == u'custom':
          continue
        elif key == u'customType':
          print convertUTF8(u' %s: %s' % (u'type', relation[key]))
        else:
          print convertUTF8(u' %s: %s' % (key, relation[key]))
      print u''
  if u'externalIds' in user:
    print u'External IDs:'
    for externalId in user[u'externalIds']:
      for key in externalId:
        if key == u'type' and externalId[key] == u'custom':
          continue
        elif key == u'customType':
          print convertUTF8(u' %s: %s' % (u'type', externalId[key]))
        else:
          print convertUTF8(u' %s: %s' % (key, externalId[key]))
      print u''
  if getSchemas:
    if u'customSchemas' in user:
      print u'Custom Schemas:'
      for schema in user[u'customSchemas']:
        print u' Schema: %s' % schema
        for field in user[u'customSchemas'][schema]:
          if type(user[u'customSchemas'][schema][field]) is list:
            print u'  %s:' % field
            for an_item in user[u'customSchemas'][schema][field]:
              print convertUTF8(u'   %s' % an_item[u'value'])
          else:
            print convertUTF8(u'  %s: %s' % (field, user[u'customSchemas'][schema][field]))
        print
  if getAliases:
    if u'aliases' in user:
      print u'Email Aliases:'
      for alias in user[u'aliases']:
        print u'  %s' % alias
    if u'nonEditableAliases' in user:
      print u'Non-Editable Aliases:'
      for alias in user[u'nonEditableAliases']:
        print u'  %s' % alias
  if getGroups:
    groups = callGAPIpages(cd.groups(), u'list', u'groups', userKey=user_email, fields=u'groups(name,email),nextPageToken')
    if len(groups) > 0:
      print u'Groups: (%s)' % len(groups)
      for group in groups:
        print u'   %s <%s>' % (group[u'name'], group[u'email'])
  if getLicenses:
    # Probe each known SKU; notFound simply means the user lacks that license.
    print u'Licenses:'
    lic = buildGAPIObject(u'licensing')
    for sku in [u'Google-Apps', u'Google-Apps-For-Business', u'Google-Apps-Unlimited', u'Google-Apps-For-Postini',
                u'Google-Coordinate', u'Google-Drive-storage-20GB', u'Google-Drive-storage-50GB', u'Google-Drive-storage-200GB',
                u'Google-Drive-storage-400GB', u'Google-Drive-storage-1TB', u'Google-Drive-storage-2TB',
                u'Google-Drive-storage-4TB', u'Google-Drive-storage-8TB', u'Google-Drive-storage-16TB', u'Google-Vault',
                u'Google-Vault-Former-Employee']:
      productId, skuId = getProductAndSKU(sku)
      try:
        result = callGAPI(lic.licenseAssignments(), u'get', throw_reasons=[u'notFound'], userId=user_email, productId=productId, skuId=skuId)
      except googleapiclient.errors.HttpError:
        continue
      print u' %s' % result[u'skuId']
def doGetGroupInfo(group_name=None):
  """Print Directory info, Groups Settings and (optionally) membership for a group.

  Command form: gam info group <group> [nousers]
  group_name may be passed by the caller; otherwise it is read from sys.argv[3].
  """
  cd = buildGAPIObject(u'directory')
  gs = buildGAPIObject(u'groupssettings')
  get_users = True
  if group_name == None:
    group_name = sys.argv[3]
    i = 4
  else:
    i = 3
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'nousers':
      get_users = False
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam info group"' % sys.argv[i]
      sys.exit(2)
  # normalize the key: strip a uid: prefix, or append the default domain
  if group_name[:4].lower() == u'uid:':
    group_name = group_name[4:]
  elif group_name.find(u'@') == -1:
    group_name = group_name+u'@'+GC_Values[GC_DOMAIN]
  basic_info = callGAPI(cd.groups(), u'get', groupKey=group_name)
  try:
    # NOTE(review): throw_reasons is a bare string here while other call sites
    # pass a list — confirm callGAPI accepts both forms
    settings = callGAPI(gs.groups(), u'get', retry_reasons=[u'serviceLimit'], throw_reasons=u'authError',
                        groupUniqueId=basic_info[u'email']) # Use email address retrieved from cd since GS API doesn't support uid
  except googleapiclient.errors.HttpError:
    # settings stays unbound; handled by the UnboundLocalError catch below
    pass
  print u''
  print u'Group Settings:'
  for key, value in basic_info.items():
    if key in [u'kind', u'etag']:
      continue
    elif type(value) == type(list()):
      print u' %s:' % key
      for val in value:
        print u' %s' % val
    else:
      print convertUTF8(u' %s: %s' % (key, value))
  try:
    for key, value in settings.items():
      if key in [u'kind', u'etag', u'description', u'email', u'name']:
        continue
      elif key == u'maxMessageBytes':
        # pretty-print byte limits as K/M
        if value > 1024*1024:
          value = u'%sM' % (value / 1024 / 1024)
        elif value > 1024:
          value = u'%sK' % (value / 1024)
      print u' %s: %s' % (key, value)
  except UnboundLocalError:
    # the Groups Settings fetch above failed, so there is nothing to show
    pass
  if get_users:
    members = callGAPIpages(cd.members(), u'list', u'members', groupKey=group_name)
    print u'Members:'
    for member in members:
      try:
        print u' %s: %s (%s)' % (member[u'role'].lower(), member[u'email'], member[u'type'].lower())
      except KeyError:
        # member entries may lack a role, or even an email (id-only entries)
        try:
          print u' member: %s (%s)' % (member[u'email'], member[u'type'].lower())
        except KeyError:
          print u' member: %s (%s)' % (member[u'id'], member[u'type'].lower())
    print u'Total %s users in group' % len(members)
def doGetAliasInfo(alias_email=None):
cd = buildGAPIObject(u'directory')
if alias_email == None:
alias_email = sys.argv[3]
if alias_email.find(u'@') == -1:
alias_email = u'%s@%s' % (alias_email, GC_Values[GC_DOMAIN])
try:
result = callGAPI(cd.users(), u'get', throw_reasons=[u'invalid', u'badRequest'], userKey=alias_email)
except googleapiclient.errors.HttpError:
result = callGAPI(cd.groups(), u'get', groupKey=alias_email)
print u' Alias Email: %s' % alias_email
try:
if result[u'primaryEmail'].lower() == alias_email.lower():
print u'Error: %s is a primary user email address, not an alias.' % alias_email
sys.exit(3)
print u' User Email: %s' % result[u'primaryEmail']
except KeyError:
print u' Group Email: %s' % result[u'email']
print u' Unique ID: %s' % result[u'id']
def doGetResourceCalendarInfo():
cd = buildGAPIObject(u'directory')
resId = sys.argv[3]
resource = callGAPI(cd.resources().calendars(), u'get',
customer=GC_Values[GC_CUSTOMER_ID], calendarResourceId=resId)
for key, value in resource.items():
if key in [u'kind', u'etag', u'etags']:
continue
print u'%s: %s' % (key, value)
def doGetCrosInfo():
  """Dump all details for one Chrome OS device (deviceId from sys.argv[3])."""
  cd = buildGAPIObject(u'directory')
  device = callGAPI(cd.chromeosdevices(), u'get',
                    customerId=GC_Values[GC_CUSTOMER_ID], deviceId=sys.argv[3])
  print_json(None, device)
def doGetMobileInfo():
  """Dump all details for one mobile device (resourceId from sys.argv[3])."""
  cd = buildGAPIObject(u'directory')
  device = callGAPI(cd.mobiledevices(), u'get',
                    customerId=GC_Values[GC_CUSTOMER_ID], resourceId=sys.argv[3])
  print_json(None, device)
def print_json(object_name, object_value, spacing=u''):
  """Recursively pretty-print an API JSON structure to stdout.

  Skips bookkeeping keys (kind/etag/etags) and indents one extra space per
  nesting level via the spacing prefix. object_name of None suppresses the
  "name: " label (used for top-level values and list elements).
  """
  if object_name in [u'kind', u'etag', u'etags']:
    return
  if object_name != None:
    sys.stdout.write(u'%s%s: ' % (spacing, object_name))
  if type(object_value) is list:
    # a single scalar list item is printed inline on the same line
    if len(object_value) == 1 and type(object_value[0]) in (str, unicode, int):
      sys.stdout.write(u'%s\n' % object_value[0])
      return
    sys.stdout.write(u'\n')
    for a_value in object_value:
      if type(a_value) in (str, unicode):
        print u' %s%s' % (spacing, a_value)
      else:
        # nested structure inside a list: recurse with extra indent, no label
        print_json(object_name=None, object_value=a_value, spacing=u' %s' % spacing)
  elif type(object_value) is dict:
    for another_object in object_value:
      print_json(object_name=another_object, object_value=object_value[another_object], spacing=spacing)
  else:
    sys.stdout.write(u'%s\n' % (object_value))
def doUpdateNotification():
  """Mark admin console notifications read or unread.

  Command form: gam update notification id <id>|all (read|unread)
  "id all" expands to every notification not already in the target state.
  """
  cd = buildGAPIObject(u'directory')
  ids = list()
  get_all = False
  i = 3
  isUnread = None
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'unread':
      isUnread = True
      mark_as = u'unread'
      i += 1
    elif sys.argv[i].lower() == u'read':
      isUnread = False
      mark_as = u'read'
      i += 1
    elif sys.argv[i].lower() == u'id':
      if sys.argv[i+1].lower() == u'all':
        get_all = True
      else:
        ids.append(sys.argv[i+1])
      i += 2
    else:
      print u'ERROR: %s is not a valid argument for "gam update notification"' % sys.argv[i]
      sys.exit(2)
  # read/unread is mandatory; mark_as is only bound when isUnread was set
  if isUnread == None:
    print u'ERROR: notifications need to be marked as read or unread.'
    sys.exit(2)
  if get_all:
    # only touch notifications whose state actually differs from the target
    notifications = callGAPIpages(cd.notifications(), u'list', u'items', customer=GC_Values[GC_CUSTOMER_ID], fields=u'items(notificationId,isUnread),nextPageToken')
    for noti in notifications:
      if noti[u'isUnread'] != isUnread:
        ids.append(noti[u'notificationId'])
  print u'Marking %s notification(s) as %s...' % (len(ids), mark_as)
  for notificationId in ids:
    result = callGAPI(cd.notifications(), u'patch', customer=GC_Values[GC_CUSTOMER_ID], notificationId=notificationId, body={u'isUnread': isUnread}, fields=u'notificationId,isUnread')
    # echo the state the API reports back, not the state we requested
    if result[u'isUnread']:
      read_result = u'unread'
    else:
      read_result = u'read'
    print u'marked %s as %s' % (result[u'notificationId'], read_result)
def doDeleteNotification():
cd = buildGAPIObject(u'directory')
ids = list()
get_all = False
i = 3
while i < len(sys.argv):
if sys.argv[i].lower() == u'id':
if sys.argv[i+1].lower() == u'all':
get_all = True
else:
ids.append(sys.argv[i+1])
i += 2
else:
print u'ERROR: %s is not a valid argument for "gam delete notification", expected id' % sys.argv[i]
sys.exit(2)
if get_all:
notifications = callGAPIpages(cd.notifications(), u'list', u'items', customer=GC_Values[GC_CUSTOMER_ID], fields=u'items(notificationId),nextPageToken')
for noti in notifications:
ids.append(noti[u'notificationId'])
print u'Deleting %s notification(s)...' % len(ids)
for notificationId in ids:
callGAPI(cd.notifications(), u'delete', customer=GC_Values[GC_CUSTOMER_ID], notificationId=notificationId)
print u'deleted %s' % id
def doSiteVerifyShow():
  """Fetch and display all site-verification tokens for a domain.

  Shows DNS TXT, DNS CNAME, web-server-file and meta-tag verification data,
  and writes the verification file to the current directory as a side effect.
  Domain is taken from sys.argv[3].
  """
  verif = buildGAPIObject(u'siteVerification')
  a_domain = sys.argv[3]
  txt_record = callGAPI(verif.webResource(), u'getToken', body={u'site':{u'type':u'INET_DOMAIN', u'identifier':a_domain}, u'verificationMethod':u'DNS_TXT'})
  print u'TXT Record Name: %s' % a_domain
  print u'TXT Record Value: %s' % txt_record[u'token']
  print
  cname_record = callGAPI(verif.webResource(), u'getToken', body={u'site':{u'type':u'INET_DOMAIN', u'identifier':a_domain}, u'verificationMethod':u'DNS_CNAME'})
  cname_token = cname_record[u'token']
  # the CNAME token comes back as "<subdomain> <target>" in one string
  cname_list = cname_token.split(u' ')
  cname_subdomain = cname_list[0]
  cname_value = cname_list[1]
  print u'CNAME Record Name: %s.%s' % (cname_subdomain, a_domain)
  print u'CNAME Record Value: %s' % cname_value
  print u''
  webserver_file_record = callGAPI(verif.webResource(), u'getToken', body={u'site':{u'type':u'SITE', u'identifier':u'http://%s/' % a_domain}, u'verificationMethod':u'FILE'})
  webserver_file_token = webserver_file_record[u'token']
  print u'Saving web server verification file to: %s' % webserver_file_token
  # the token doubles as both the filename and (part of) the file content
  writeFile(webserver_file_token, u'google-site-verification: {0}'.format(webserver_file_token), continueOnError=True)
  print u'Verification File URL: http://%s/%s' % (a_domain, webserver_file_token)
  print
  webserver_meta_record = callGAPI(verif.webResource(), u'getToken', body={u'site':{u'type':u'SITE', u'identifier':u'http://%s/' % a_domain}, u'verificationMethod':u'META'})
  print u'Meta URL: http://%s/' % a_domain
  print u'Meta HTML Header Data: %s' % webserver_meta_record[u'token']
  print
def doGetSiteVerifications():
verif = buildGAPIObject(u'siteVerification')
sites = callGAPI(verif.webResource(), u'list')
try:
for site in sites[u'items']:
print u'Site: %s' % site[u'site'][u'identifier']
print u'Type: %s' % site[u'site'][u'type']
print u'Owners:'
for owner in site[u'owners']:
print u' %s' % owner
print
except KeyError:
print u'No Sites Verified.'
def doSiteVerifyAttempt():
  """Ask Google to verify ownership of a domain or site.

  Command form: gam update verify <domain> cname|txt|text|<other-method>
  On failure, re-fetches the expected token and — when dnspython is
  installed — queries public DNS (8.8.8.8/8.8.4.4) to help diagnose the
  missing or wrong record, then returns without the success banner.
  """
  verif = buildGAPIObject(u'siteVerification')
  a_domain = sys.argv[3]
  verificationMethod = sys.argv[4].upper()
  if verificationMethod == u'CNAME':
    verificationMethod = u'DNS_CNAME'
  elif verificationMethod in [u'TXT', u'TEXT']:
    verificationMethod = u'DNS_TXT'
  # DNS methods verify the bare domain; other methods verify the site URL
  if verificationMethod in [u'DNS_TXT', u'DNS_CNAME']:
    verify_type = u'INET_DOMAIN'
    identifier = a_domain
  else:
    verify_type = u'SITE'
    identifier = u'http://%s/' % a_domain
  body = {u'site':{u'type':verify_type, u'identifier':identifier}, u'verificationMethod':verificationMethod}
  try:
    verify_result = callGAPI(verif.webResource(), u'insert', throw_reasons=[u'badRequest'], verificationMethod=verificationMethod, body=body)
  except googleapiclient.errors.HttpError, e:
    error = json.loads(e.content)
    message = error[u'error'][u'errors'][0][u'message']
    print u'ERROR: %s' % message
    # verification failed: show what Google expects and probe current DNS
    verify_data = callGAPI(verif.webResource(), u'getToken', body=body)
    print u'Method: %s' % verify_data[u'method']
    print u'Token: %s' % verify_data[u'token']
    if verify_data[u'method'] == u'DNS_CNAME':
      try:
        import dns.resolver
        resolver = dns.resolver.Resolver()
        resolver.nameservers = [u'8.8.8.8', u'8.8.4.4']
        cname_token = verify_data[u'token']
        cname_list = cname_token.split(u' ')
        cname_subdomain = cname_list[0]
        try:
          answers = resolver.query(u'%s.%s' % (cname_subdomain, a_domain), u'A')
          for answer in answers:
            print u'DNS Record: %s' % answer
        except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
          print u'ERROR: No such domain found in DNS!'
      except ImportError:
        # dnspython not installed; skip the DNS diagnostics
        pass
    elif verify_data[u'method'] == u'DNS_TXT':
      try:
        import dns.resolver
        resolver = dns.resolver.Resolver()
        resolver.nameservers = [u'8.8.8.8', u'8.8.4.4']
        try:
          answers = resolver.query(a_domain, u'TXT')
          for answer in answers:
            print u'DNS Record: %s' % str(answer).replace(u'"', u'')
        except dns.resolver.NXDOMAIN:
          print u'ERROR: no such domain found in DNS!'
      except ImportError:
        pass
    return
  print u'SUCCESS!'
  print u'Verified: %s' % verify_result[u'site'][u'identifier']
  print u'ID: %s' % verify_result[u'id']
  print u'Type: %s' % verify_result[u'site'][u'type']
  print u'All Owners:'
  try:
    for owner in verify_result[u'owners']:
      print u' %s' % owner
  except KeyError:
    pass
  print
  print u'You can now add %s or it\'s subdomains as secondary or domain aliases of the %s Google Apps Account.' % (a_domain, GC_Values[GC_DOMAIN])
def doGetNotifications():
cd = buildGAPIObject(u'directory')
i = 3
unread_only = False
while i < len(sys.argv):
if sys.argv[i].lower() == u'unreadonly':
unread_only = True
else:
print u'ERROR: %s is not a valid argument for "gam info notification", expected unreadonly' % sys.argv[i]
sys.exit(2)
i += 1
notifications = callGAPIpages(cd.notifications(), u'list', u'items', customer=GC_Values[GC_CUSTOMER_ID])
for notification in notifications:
if unread_only and not notification[u'isUnread']:
continue
print u'From: %s' % notification[u'fromAddress']
print u'Subject: %s' % notification[u'subject']
print u'Date: %s' % notification[u'sendTime']
print u'ID: %s' % notification[u'notificationId']
if notification[u'isUnread']:
print u'Read Status: UNREAD'
else:
print u'Read Status: READ'
print u''
print convertUTF8(dehtml(notification[u'body']))
print u''
print u'--------------'
print u''
def doGetOrgInfo():
  """Show an org unit's attributes and (optionally) the users inside it.

  Command form: gam info org <orgUnitPath> [nousers] [children|child]
  Without children/child, users sitting in sub-OUs are filtered out.
  """
  cd = buildGAPIObject(u'directory')
  name = sys.argv[3]
  get_users = True
  show_children = False
  i = 4
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'nousers':
      get_users = False
      i += 1
    elif sys.argv[i].lower() in [u'children', u'child']:
      show_children = True
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam info org"' % sys.argv[i]
      sys.exit(2)
  if name == u'/':
    # the root OU has no fetchable path; resolve its id from any child's
    # parentOrgUnitId instead
    orgs = callGAPI(cd.orgunits(), u'list',
                    customerId=GC_Values[GC_CUSTOMER_ID], type=u'children',
                    fields=u'organizationUnits/parentOrgUnitId')
    name = orgs[u'organizationUnits'][0][u'parentOrgUnitId']
  if len(name) > 1 and name[0] == u'/':
    # the API expects the path without a leading slash
    name = name[1:]
  result = callGAPI(cd.orgunits(), u'get', customerId=GC_Values[GC_CUSTOMER_ID], orgUnitPath=name)
  print_json(None, result)
  if get_users:
    # use the canonical path from the API response for the user query
    name = result[u'orgUnitPath']
    print u'Users: '
    page_message = u'Got %%total_items%% users: %%first_item%% - %%last_item%%\n'
    users = callGAPIpages(cd.users(), u'list', u'users', page_message=page_message,
                          message_attribute=u'primaryEmail', customer=GC_Values[GC_CUSTOMER_ID], query=u"orgUnitPath='%s'" % name,
                          fields=u'users(primaryEmail,orgUnitPath),nextPageToken', maxResults=GC_Values[GC_USER_MAX_RESULTS])
    for user in users:
      # the query also matches users in sub-OUs; only show them when requested
      if show_children or (name.lower() == user[u'orgUnitPath'].lower()):
        sys.stdout.write(u' %s' % user[u'primaryEmail'])
        if name.lower() != user[u'orgUnitPath'].lower():
          print u' (child)'
        else:
          print u''
def doGetASPs(users):
  """List application-specific passwords (ASPs) for each user in users.

  API timestamps are epoch milliseconds as strings; u'0' means the time is
  unknown (creation) or the ASP was never used.
  """
  cd = buildGAPIObject(u'directory')
  for user in users:
    asps = callGAPI(cd.asps(), u'list', userKey=user)
    print u'Application-Specific Passwords for %s' % user
    try:
      for asp in asps[u'items']:
        if asp[u'creationTime'] == u'0':
          created_date = u'Unknown'
        else:
          created_date = datetime.datetime.fromtimestamp(int(asp[u'creationTime'])/1000).strftime(u'%Y-%m-%d %H:%M:%S')
        if asp[u'lastTimeUsed'] == u'0':
          used_date = u'Never'
        else:
          used_date = datetime.datetime.fromtimestamp(int(asp[u'lastTimeUsed'])/1000).strftime(u'%Y-%m-%d %H:%M:%S')
        print u' ID: %s\n Name: %s\n Created: %s\n Last Used: %s\n' % (asp[u'codeId'], asp[u'name'], created_date, used_date)
    except KeyError:
      # no u'items' in the response: the user has no ASPs
      print u' no ASPs for %s\n' % user
def doDelASP(users):
cd = buildGAPIObject(u'directory')
codeId = sys.argv[5]
for user in users:
callGAPI(cd.asps(), u'delete', userKey=user, codeId=codeId)
print u'deleted ASP %s for %s' % (codeId, user)
def printBackupCodes(user, codes):
jcount = len(codes[u'items']) if (codes and (u'items' in codes)) else 0
print u'Backup verification codes for {0}'.format(user)
print u''
if jcount > 0:
j = 0
for code in codes[u'items']:
j += 1
print u'{0}. {1}'.format(j, code[u'verificationCode'])
print u''
def doGetBackupCodes(users):
  """Show current 2SV backup codes for each user."""
  cd = buildGAPIObject(u'directory')
  for a_user in users:
    try:
      codes = callGAPI(cd.verificationCodes(), u'list', throw_reasons=[u'invalidArgument', u'invalid'], userKey=a_user)
    except googleapiclient.errors.HttpError:
      # user has no backup codes; print the empty listing
      codes = None
    printBackupCodes(a_user, codes)
def doGenBackupCodes(users):
  """Generate a fresh set of 2SV backup codes for each user, then display them."""
  cd = buildGAPIObject(u'directory')
  for a_user in users:
    callGAPI(cd.verificationCodes(), u'generate', userKey=a_user)
    new_codes = callGAPI(cd.verificationCodes(), u'list', userKey=a_user)
    printBackupCodes(a_user, new_codes)
def doDelBackupCodes(users):
cd = buildGAPIObject(u'directory')
for user in users:
try:
callGAPI(cd.verificationCodes(), u'invalidate', soft_errors=True, throw_reasons=[u'invalid',], userKey=user)
except googleapiclient.errors.HttpError:
print u'No 2SV backup codes for %s' % user
continue
print u'2SV backup codes for %s invalidated' % user
def commonClientIds(clientId):
  """Expand well-known OAuth client aliases to their full client IDs.

  Unrecognized values are returned unchanged.
  """
  known_aliases = {u'gasmo': u'1095133494869.apps.googleusercontent.com'}
  return known_aliases.get(clientId, clientId)
def doGetTokens(users):
  """Show OAuth tokens granted by each user.

  Command form: gam <users> show tokens [clientid <id>]
  With clientid, only reports whether each user has authorized that client
  (and returns early); otherwise dumps every token's attributes.
  """
  cd = buildGAPIObject(u'directory')
  clientId = None
  i = 5
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'clientid':
      clientId = sys.argv[i+1]
      i += 2
    else:
      print u'ERROR: %s is not a valid argument for gam <users> show tokens' % sys.argv[i]
      sys.exit(2)
  if clientId:
    clientId = commonClientIds(clientId)
    for user in users:
      try:
        token = callGAPI(cd.tokens(), u'get', throw_reasons=[u'notFound',], userKey=user, clientId=clientId, fields=u'clientId')
      except googleapiclient.errors.HttpError:
        # notFound: this user never granted the client; stay silent
        continue
      print u'%s has allowed this token' % user
    return
  for user in users:
    tokens = callGAPI(cd.tokens(), u'list', userKey=user)
    print u'Tokens for %s:' % user
    try:
      for token in tokens[u'items']:
        print u' Client ID: %s' % token[u'clientId']
        for item in token:
          if item in [u'etag', u'kind', u'clientId']:
            continue
          if type(token[item]) is list:
            print u' %s:' % item
            for it in token[item]:
              print u' %s' % it
          if type(token[item]) in (unicode, bool):
            try:
              print u' %s: %s' % (item, token[item])
            except UnicodeEncodeError:
              # console can't encode the value; retry with the last char dropped
              print u' %s: %s' % (item, token[item][:-1])
        print u''
    except KeyError:
      # no u'items' in the response: the user has no tokens
      print u' no tokens for %s' % user
  print u''
def doDelTokens(users):
cd = buildGAPIObject(u'directory')
clientId = sys.argv[6]
clientId = commonClientIds(clientId)
for user in users:
callGAPI(cd.tokens(), u'delete', userKey=user, clientId=clientId)
print u'Deleted token for %s' % user
def doDeprovUser(users):
  """Deprovision users: revoke all ASPs, 2SV backup codes and OAuth tokens.

  Leaves the account itself untouched (no suspension or deletion).
  """
  cd = buildGAPIObject(u'directory')
  for user in users:
    # phase 1: application-specific passwords
    print u'Getting Application Specific Passwords for %s' % user
    asps = callGAPI(cd.asps(), u'list', userKey=user, fields=u'items/codeId')
    i = 1
    try:
      for asp in asps[u'items']:
        print u' deleting ASP %s of %s' % (i, len(asps[u'items']))
        callGAPI(cd.asps(), u'delete', userKey=user, codeId=asp[u'codeId'])
        i += 1
    except KeyError:
      # no u'items' in the response: nothing to revoke
      print u'No ASPs'
    # phase 2: 2SV backup codes
    print u'Invalidating 2SV Backup Codes for %s' % user
    try:
      callGAPI(cd.verificationCodes(), u'invalidate', soft_errors=True, throw_reasons=[u'invalid'], userKey=user)
    except googleapiclient.errors.HttpError:
      print u'No 2SV Backup Codes'
    # phase 3: OAuth tokens
    print u'Getting tokens for %s...' % user
    tokens = callGAPI(cd.tokens(), u'list', userKey=user, fields=u'items/clientId')
    i = 1
    try:
      for token in tokens[u'items']:
        print u' deleting token %s of %s' % (i, len(tokens[u'items']))
        callGAPI(cd.tokens(), u'delete', userKey=user, clientId=token[u'clientId'])
        i += 1
    except KeyError:
      print u'No Tokens'
    print u'Done deprovisioning %s' % user
def doUpdateInstance():
  """Update domain-wide settings via the (legacy) Admin Settings GData API.

  Command form: gam update instance <setting> <args...>; dispatches on the
  sub-command in sys.argv[3] (language, name, admin_secondary_email, logo,
  mx_verify, sso_settings, sso_key, user_migrations, outbound_gateway,
  email_route).
  """
  adminObj = getAdminSettingsObject()
  command = sys.argv[3].lower()
  if command == u'language':
    language = sys.argv[4]
    callGData(adminObj, u'UpdateDefaultLanguage', defaultLanguage=language)
  elif command == u'name':
    name = sys.argv[4]
    callGData(adminObj, u'UpdateOrganizationName', organizationName=name)
  elif command == u'admin_secondary_email':
    admin_secondary_email = sys.argv[4]
    callGData(adminObj, u'UpdateAdminSecondaryEmail', adminSecondaryEmail=admin_secondary_email)
  elif command == u'logo':
    # logo is read from a local image file and uploaded
    logoFile = sys.argv[4]
    logoImage = readFile(logoFile)
    callGData(adminObj, u'UpdateDomainLogo', logoImage=logoImage)
  elif command == u'mx_verify':
    result = callGData(adminObj, u'UpdateMXVerificationStatus')
    print u'Verification Method: %s' % result[u'verificationMethod']
    print u'Verified: %s' % result[u'verified']
  elif command == u'sso_settings':
    # collect keyword options; anything left as None is passed through as-is
    enableSSO = samlSignonUri = samlLogoutUri = changePasswordUri = ssoWhitelist = useDomainSpecificIssuer = None
    i = 4
    while i < len(sys.argv):
      if sys.argv[i].lower() == u'enabled':
        if sys.argv[i+1].lower() == u'true':
          enableSSO = True
        elif sys.argv[i+1].lower() == u'false':
          enableSSO = False
        else:
          print u'ERROR: value for enabled must be true or false, got %s' % sys.argv[i+1]
          sys.exit(2)
        i += 2
      elif sys.argv[i].lower() == u'sign_on_uri':
        samlSignonUri = sys.argv[i+1]
        i += 2
      elif sys.argv[i].lower() == u'sign_out_uri':
        samlLogoutUri = sys.argv[i+1]
        i += 2
      elif sys.argv[i].lower() == u'password_uri':
        changePasswordUri = sys.argv[i+1]
        i += 2
      elif sys.argv[i].lower() == u'whitelist':
        ssoWhitelist = sys.argv[i+1]
        i += 2
      elif sys.argv[i].lower() == u'use_domain_specific_issuer':
        if sys.argv[i+1].lower() == u'true':
          useDomainSpecificIssuer = True
        elif sys.argv[i+1].lower() == u'false':
          useDomainSpecificIssuer = False
        else:
          print u'ERROR: value for use_domain_specific_issuer must be true or false, got %s' % sys.argv[i+1]
          sys.exit(2)
        i += 2
      else:
        print u'ERROR: unknown option for "gam update domain sso_settings...": %s' % sys.argv[i]
        sys.exit(2)
    callGData(adminObj, u'UpdateSSOSettings', enableSSO=enableSSO,
              samlSignonUri=samlSignonUri, samlLogoutUri=samlLogoutUri,
              changePasswordUri=changePasswordUri, ssoWhitelist=ssoWhitelist,
              useDomainSpecificIssuer=useDomainSpecificIssuer)
  elif command == u'sso_key':
    keyFile = sys.argv[4]
    keyData = readFile(keyFile)
    callGData(adminObj, u'UpdateSSOKey', signingKey=keyData)
  elif command == u'user_migrations':
    # value is passed on as the lowercase string u'true'/u'false'
    value = sys.argv[4].lower()
    if value not in [u'true', u'false']:
      print u'ERROR: value for user_migrations must be true or false, got %s' % sys.argv[4]
      sys.exit(2)
    result = callGData(adminObj, u'UpdateUserMigrationStatus', enableUserMigration=value)
  elif command == u'outbound_gateway':
    gateway = sys.argv[4]
    mode = sys.argv[6].upper()
    try:
      result = callGData(adminObj, u'UpdateOutboundGatewaySettings', smartHost=gateway, smtpMode=mode)
    except TypeError:
      # NOTE(review): TypeError from the GData call is deliberately swallowed
      # here — confirm why before changing
      pass
  elif command == u'email_route':
    # NOTE(review): destination/rewrite_to/enabled/bounce_notifications/
    # account_handling are only bound if the matching option is supplied;
    # a missing option raises NameError at the AddEmailRoute call below
    i = 4
    while i < len(sys.argv):
      if sys.argv[i].lower() == u'destination':
        destination = sys.argv[i+1]
        i += 2
      elif sys.argv[i].lower() == u'rewrite_to':
        rewrite_to = sys.argv[i+1].lower()
        if rewrite_to == u'true':
          rewrite_to = True
        elif rewrite_to == u'false':
          rewrite_to = False
        else:
          print u'ERROR: value for rewrite_to must be true or false, got %s' % sys.argv[i+1]
          sys.exit(2)
        i += 2
      elif sys.argv[i].lower() == u'enabled':
        enabled = sys.argv[i+1].lower()
        if enabled == u'true':
          enabled = True
        elif enabled == u'false':
          enabled = False
        else:
          print u'ERROR: value for enabled must be true or false, got %s' % sys.argv[i+1]
          sys.exit(2)
        i += 2
      elif sys.argv[i].lower() == u'bounce_notifications':
        bounce_notifications = sys.argv[i+1].lower()
        if bounce_notifications == u'true':
          bounce_notifications = True
        elif bounce_notifications == u'false':
          bounce_notifications = False
        else:
          print u'ERROR: value for bounce_notifications must be true or false, got %s' % sys.argv[i+1]
          sys.exit(2)
        i += 2
      elif sys.argv[i].lower() == u'account_handling':
        account_handling = sys.argv[i+1].lower()
        if account_handling == u'all_accounts':
          account_handling = u'allAccounts'
        elif account_handling == u'provisioned_accounts':
          account_handling = u'provisionedAccounts'
        elif account_handling == u'unknown_accounts':
          account_handling = u'unknownAccounts'
        else:
          print u'ERROR: value for account_handling must be all_accounts, provisioned_accounts or unknown_accounts. Got %s' % sys.argv[i+1]
          sys.exit(2)
        i += 2
      else:
        print u'ERROR: %s is not a valid argument for "gam update instance email_route"' % sys.argv[i]
        sys.exit(2)
    callGData(adminObj, u'AddEmailRoute', routeDestination=destination, routeRewriteTo=rewrite_to, routeEnabled=enabled,
              bounceNotifications=bounce_notifications, accountHandling=account_handling)
  else:
    print u'ERROR: %s is not a valid argument for "gam update instance"' % command
    sys.exit(2)
def doGetInstanceInfo():
  """Print domain-wide settings from the (legacy) Admin Settings API.

  With "gam info instance logo <file>", downloads the domain logo to the
  given file and exits instead of printing settings.
  """
  adm = buildGAPIObject(u'admin-settings')
  if len(sys.argv) > 4 and sys.argv[3].lower() == u'logo':
    # logo download mode: fetch the image and exit immediately
    target_file = sys.argv[4]
    url = u'http://www.google.com/a/cpanel/%s/images/logo.gif' % (GC_Values[GC_DOMAIN])
    geturl(url, target_file)
    sys.exit(0)
  print u'Google Apps Domain: %s' % (GC_Values[GC_DOMAIN])
  if GC_Values[GC_CUSTOMER_ID] != MY_CUSTOMER:
    customerId = GC_Values[GC_CUSTOMER_ID]
  else:
    # resolve the real customer id by peeking at any single user record
    cd = buildGAPIObject(u'directory')
    result = callGAPI(cd.users(), u'list',
                      fields=u'users(customerId)', customer=GC_Values[GC_CUSTOMER_ID], maxResults=1)
    try:
      customerId = result[u'users'][0][u'customerId']
    except KeyError:
      customerId = UNKNOWN
  print u'Customer ID: %s' % customerId
  # Admin Settings properties come back nested as entry/apps$property/[0]/value
  default_language = callGAPI(adm.defaultLanguage(), u'get', domainName=GC_Values[GC_DOMAIN])
  print u'Default Language: %s' % default_language[u'entry'][u'apps$property'][0][u'value']
  org_name = callGAPI(adm.organizationName(), u'get', domainName=GC_Values[GC_DOMAIN])
  print convertUTF8(u'Organization Name: %s' % org_name[u'entry'][u'apps$property'][0][u'value'])
  admin_email = callGAPI(adm.adminSecondaryEmail(), u'get', domainName=GC_Values[GC_DOMAIN])
  print u'Admin Secondary Email: %s' % admin_email[u'entry'][u'apps$property'][0][u'value']
  max_users = callGAPI(adm.maximumNumberOfUsers(), u'get', domainName=GC_Values[GC_DOMAIN])
  print u'Maximum Users: %s' % max_users[u'entry'][u'apps$property'][0][u'value']
  current_users = callGAPI(adm.currentNumberOfUsers(), u'get', domainName=GC_Values[GC_DOMAIN])
  print u'Current Users: %s' % current_users[u'entry'][u'apps$property'][0][u'value']
  is_dom_verified = callGAPI(adm.isVerified(), u'get', domainName=GC_Values[GC_DOMAIN])
  print u'Domain is Verified: %s' % is_dom_verified[u'entry'][u'apps$property'][0][u'value']
  domain_edition = callGAPI(adm.edition(), u'get', domainName=GC_Values[GC_DOMAIN])
  print u'Domain Edition: %s' % domain_edition[u'entry'][u'apps$property'][0][u'value']
  customer_pin = callGAPI(adm.customerPIN(), u'get', domainName=GC_Values[GC_DOMAIN])
  print u'Customer PIN: %s' % customer_pin[u'entry'][u'apps$property'][0][u'value']
  creation_time = callGAPI(adm.creationTime(), u'get', domainName=GC_Values[GC_DOMAIN])
  my_date = creation_time[u'entry'][u'apps$property'][0][u'value']
  my_date = my_date[:15]  # keep only YYYYMMDDTHHMMSS for strptime
  my_offset = creation_time[u'entry'][u'apps$property'][0][u'value'][19:]  # trailing tz offset
  nice_time = datetime.datetime.strptime(my_date, u"%Y%m%dT%H%M%S")
  print u'Domain Creation Time: %s %s' % (nice_time, my_offset)
  country_code = callGAPI(adm.countryCode(), u'get', domainName=GC_Values[GC_DOMAIN])
  print u'Domain Country Code: %s' % country_code[u'entry'][u'apps$property'][0][u'value']
  mxverificationstatus = callGAPI(adm.mxVerification(), u'get', domainName=GC_Values[GC_DOMAIN])
  for entry in mxverificationstatus[u'entry'][u'apps$property']:
    if entry[u'name'] == u'verified':
      print u'MX Verification Verified: %s' % entry[u'value']
    elif entry[u'name'] == u'verificationMethod':
      print u'MX Verification Method: %s' % entry[u'value']
  ssosettings = callGAPI(adm.ssoGeneral(), u'get', domainName=GC_Values[GC_DOMAIN])
  for entry in ssosettings[u'entry'][u'apps$property']:
    if entry[u'name'] == u'enableSSO':
      print u'SSO Enabled: %s' % entry[u'value']
    elif entry[u'name'] == u'samlSignonUri':
      print u'SSO Signon Page: %s' % entry[u'value']
    elif entry[u'name'] == u'samlLogoutUri':
      print u'SSO Logout Page: %s' % entry[u'value']
    elif entry[u'name'] == u'changePasswordUri':
      print u'SSO Password Page: %s' % entry[u'value']
    elif entry[u'name'] == u'ssoWhitelist':
      print u'SSO Whitelist IPs: %s' % entry[u'value']
    elif entry[u'name'] == u'useDomainSpecificIssuer':
      print u'SSO Use Domain Specific Issuer: %s' % entry[u'value']
  ssokey = callGAPI(adm.ssoSigningKey(), u'get', silent_errors=True, soft_errors=True, domainName=GC_Values[GC_DOMAIN])
  try:
    for entry in ssokey[u'entry'][u'apps$property']:
      if entry[u'name'] == u'algorithm':
        print u'SSO Key Algorithm: %s' % entry[u'value']
      elif entry[u'name'] == u'format':
        print u'SSO Key Format: %s' % entry[u'value']
      elif entry[u'name'] == u'modulus':
        print u'SSO Key Modulus: %s' % entry[u'value']
      elif entry[u'name'] == u'exponent':
        print u'SSO Key Exponent: %s' % entry[u'value']
      elif entry[u'name'] == u'yValue':
        print u'SSO Key yValue: %s' % entry[u'value']
      elif entry[u'name'] == u'signingKey':
        print u'Full SSO Key: %s' % entry[u'value']
  except TypeError:
    # soft_errors mode can yield a non-subscriptable result when no key exists
    pass
  migration_status = callGAPI(adm.userEmailMigrationEnabled(), u'get', domainName=GC_Values[GC_DOMAIN])
  print u'User Migration Enabled: %s' % migration_status[u'entry'][u'apps$property'][0][u'value']
  outbound_gateway_settings = {u'smartHost': u'', u'smtpMode': u''} # Initialize blank in case we get an 1801 Error
  outbound_gateway_settings = callGAPI(adm.outboundGateway(), u'get', domainName=GC_Values[GC_DOMAIN])
  try:
    for entry in outbound_gateway_settings[u'entry'][u'apps$property']:
      if entry[u'name'] == u'smartHost':
        print u'Outbound Gateway Smart Host: %s' % entry[u'value']
      elif entry[u'name'] == u'smtpMode':
        print u'Outbound Gateway SMTP Mode: %s' % entry[u'value']
  except KeyError:
    # no gateway configured: report both values as None
    print u'Outbound Gateway Smart Host: None'
    print u'Outbound Gateway SMTP Mode: None'
def doDeleteUser():
cd = buildGAPIObject(u'directory')
user_email = sys.argv[3]
if user_email[:4].lower() == u'uid:':
user_email = user_email[4:]
elif user_email.find(u'@') == -1:
user_email = u'%s@%s' % (user_email, GC_Values[GC_DOMAIN])
print u"Deleting account for %s" % (user_email)
callGAPI(cd.users(), u'delete', userKey=user_email)
def doUndeleteUser():
  """Restore a deleted user: gam undelete user <email|uid:id> [ou|org <path>].

  Undelete requires the account's unique id. When given an email address,
  the deleted-users list is searched for an exact match; if the address is
  ambiguous, candidates are listed with timestamps and the command aborts.
  """
  cd = buildGAPIObject(u'directory')
  user = sys.argv[3].lower()
  user_uid = False
  orgUnit = u'/'
  i = 4
  while i < len(sys.argv):
    if sys.argv[i].lower() in [u'ou', u'org']:
      orgUnit = sys.argv[i+1]
      i += 2
    else:
      print u'ERROR: %s is not a valid argument for "gam undelete user"' % sys.argv[i]
      sys.exit(2)
  if user[:4].lower() == u'uid:':
    user_uid = user[4:]
  elif user.find(u'@') == -1:
    user = u'%s@%s' % (user, GC_Values[GC_DOMAIN])
  if not user_uid:
    # resolve the email address to a uid via the deleted-users listing
    print u'Looking up UID for %s...' % user
    deleted_users = callGAPIpages(cd.users(), u'list', u'users',
                                  customer=GC_Values[GC_CUSTOMER_ID], showDeleted=True, maxResults=GC_Values[GC_USER_MAX_RESULTS])
    matching_users = list()
    for deleted_user in deleted_users:
      if str(deleted_user[u'primaryEmail']).lower() == user:
        matching_users.append(deleted_user)
    if len(matching_users) < 1:
      print u'ERROR: could not find deleted user with that address.'
      sys.exit(3)
    elif len(matching_users) > 1:
      # ambiguous: show each candidate with identifying timestamps and abort
      print u'ERROR: more than one matching deleted %s user. Please select the correct one to undelete and specify with "gam undelete user uid:<uid>"' % user
      print u''
      for matching_user in matching_users:
        print u' uid:%s ' % matching_user[u'id']
        for attr_name in [u'creationTime', u'lastLoginTime', u'deletionTime']:
          try:
            if matching_user[attr_name] == u'1970-01-01T00:00:00.000Z':
              matching_user[attr_name] = u'Never'
            print u' %s: %s ' % (attr_name, matching_user[attr_name])
          except KeyError:
            pass
        print
      sys.exit(3)
    else:
      user_uid = matching_users[0][u'id']
  print u"Undeleting account for %s" % user
  callGAPI(cd.users(), u'undelete', userKey=user_uid, body={u'orgUnitPath': orgUnit})
def doDeleteGroup():
cd = buildGAPIObject(u'directory')
group = sys.argv[3]
if group[:4].lower() == u'uid:':
group = group[4:]
elif group.find(u'@') == -1:
group = u'%s@%s' % (group, GC_Values[GC_DOMAIN])
print u"Deleting group %s" % group
callGAPI(cd.groups(), u'delete', groupKey=group)
def doDeleteAlias(alias_email=None):
  """Delete an email alias: gam delete alias [user|group] <alias>.

  Without an explicit user/group qualifier, the user-aliases endpoint is
  tried first and the group endpoint is used as a fallback.
  """
  cd = buildGAPIObject(u'directory')
  is_user = is_group = False
  if alias_email == None:
    alias_email = sys.argv[3]
    if alias_email.lower() == u'user':
      is_user = True
      alias_email = sys.argv[4]
    elif alias_email.lower() == u'group':
      is_group = True
      alias_email = sys.argv[4]
  if alias_email.find(u'@') == -1:
    alias_email = u'%s@%s' % (alias_email, GC_Values[GC_DOMAIN])
  print u"Deleting alias %s" % alias_email
  if is_user or (not is_user and not is_group):
    try:
      callGAPI(cd.users().aliases(), u'delete', throw_reasons=[u'invalid', u'badRequest', u'notFound'], userKey=alias_email, alias=alias_email)
      return
    except googleapiclient.errors.HttpError, e:
      error = json.loads(e.content)
      reason = error[u'error'][u'errors'][0][u'reason']
      if reason == u'notFound':
        print u'Error: The alias %s does not exist' % alias_email
        sys.exit(4)
      # invalid/badRequest: fall through and try the group aliases endpoint
  if not is_user or (not is_user and not is_group):
    callGAPI(cd.groups().aliases(), u'delete', groupKey=alias_email, alias=alias_email)
def doDeleteResourceCalendar():
resId = sys.argv[3]
cd = buildGAPIObject(u'directory')
print u"Deleting resource calendar %s" % resId
callGAPI(cd.resources().calendars(), u'delete',
customer=GC_Values[GC_CUSTOMER_ID], calendarResourceId=resId)
def doDeleteOrg():
cd = buildGAPIObject(u'directory')
name = sys.argv[3]
if name[0] == u'/':
name = name[1:]
print u"Deleting organization %s" % name
callGAPI(cd.orgunits(), u'delete', customerId=GC_Values[GC_CUSTOMER_ID], orgUnitPath=name)
def output_csv(csv_list, titles, list_type, todrive):
  """Write rows as CSV to stdout, or upload the CSV to Google Drive.

  csv_list: list of row dicts keyed by the names in titles.
  titles: CSV field names (DictWriter fieldnames).
  list_type: label used in the Drive file title / notification email subject.
  todrive: when true, buffer the CSV and upload it instead of printing.
  """
  csv.register_dialect(u'nixstdout', lineterminator=u'\n')
  if todrive:
    string_file = StringIO.StringIO()
    writer = csv.DictWriter(string_file, fieldnames=titles, dialect=u'nixstdout', quoting=csv.QUOTE_MINIMAL)
  else:
    writer = csv.DictWriter(sys.stdout, fieldnames=titles, dialect=u'nixstdout', quoting=csv.QUOTE_MINIMAL)
  writer.writerows(csv_list)
  if todrive:
    columns = len(csv_list[0])
    rows = len(csv_list)
    cell_count = rows * columns
    convert = True
    # too big to convert to a Google Sheet: upload as a plain CSV file instead
    if cell_count > 500000 or columns > 256:
      print u'{0}{1}'.format(WARNING_PREFIX, MESSAGE_RESULTS_TOO_LARGE_FOR_GOOGLE_SPREADSHEET)
      convert = False
    drive = buildGAPIObject(u'drive')
    result = callGAPI(drive.files(), u'insert', convert=convert,
                      body={u'description': u' '.join(sys.argv), u'title': u'%s - %s' % (GC_Values[GC_DOMAIN], list_type), u'mimeType': u'text/csv'},
                      media_body=googleapiclient.http.MediaInMemoryUpload(string_file.getvalue(), mimetype=u'text/csv'))
    file_url = result[u'alternateLink']
    if GC_Values[GC_NO_BROWSER]:
      # headless mode: email the link instead of opening a browser
      msg_txt = u'Drive file uploaded to:\n %s' % file_url
      msg_subj = u'%s - %s' % (GC_Values[GC_DOMAIN], list_type)
      send_email(msg_subj, msg_txt)
      print msg_txt
    else:
      import webbrowser
      webbrowser.open(file_url)
def flatten_json(structure, key=u'', path=u'', flattened=None):
  """Recursively flatten a nested dict/list structure into a single-level dict.

  Result keys are dotted paths (e.g. u'name.givenName', u'emails.0.address').
  Dict entries named u'kind' or u'etag' are dropped, and the epoch timestamp
  sentinel u'1970-01-01T00:00:00.000Z' is rendered as u'Never'.

  Args:
    structure: dict, list, or scalar value to flatten.
    key: name of *structure* within its parent (u'' at the top level).
    path: dotted path of the parent container.
    flattened: accumulator dict shared across recursive calls; created
      when None (never use a mutable default argument here).

  Returns:
    The accumulated flat dict.
  """
  if flattened is None:
    flattened = {}
  if not isinstance(structure, (dict, list)):
    # Scalar: record it under its full dotted path.
    flattened[((path + u'.') if path else u'') + key] = structure
  elif isinstance(structure, list):
    for i, item in enumerate(structure):
      flatten_json(item, u'%d' % i, u'.'.join(filter(None, [path, key])), flattened)
  else:
    for new_key, value in structure.items():
      if new_key in [u'kind', u'etag']:
        continue
      if value == u'1970-01-01T00:00:00.000Z':
        value = u'Never'
      flatten_json(value, new_key, u'.'.join(filter(None, [path, key])), flattened)
  return flattened
def doPrintUsers():
  """Print a CSV of all users in the account (gam print users).

  CLI arguments from argv[3] onward select which user fields to include,
  ordering/filtering options (query, domain, orderby, deleted_only, ...),
  and the optional group/license join feeds. Output goes through
  output_csv(), optionally uploaded to Drive with the u'todrive' flag.
  """
  cd = buildGAPIObject(u'directory')
  user_fields = [u'primaryEmail',]
  fields = u''
  customer = GC_Values[GC_CUSTOMER_ID]
  domain = None
  query = None
  projection = u'basic'
  customFieldMask = None
  getGroupFeed = getLicenseFeed = email_parts = False
  todrive = False
  viewType = deleted_only = orderBy = sortOrder = None
  i = 3
  # Parse CLI arguments starting at argv[3]; each branch consumes one or
  # two tokens and either toggles a flag or appends an API field name.
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'allfields':
      # fields=None makes the API return every field (no field mask).
      fields = None
      i += 1
    elif sys.argv[i].lower() == u'custom':
      user_fields.append(u'customSchemas')
      if sys.argv[i+1].lower() == u'all':
        projection = u'full'
      else:
        projection = u'custom'
        customFieldMask = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'todrive':
      todrive = True
      i += 1
    elif sys.argv[i].lower() in [u'deleted_only', u'only_deleted']:
      deleted_only = True
      i += 1
    elif sys.argv[i].lower() == u'orderby':
      orderBy = sys.argv[i+1]
      if orderBy.lower() not in [u'email', u'familyname', u'givenname', u'firstname', u'lastname']:
        print u'ERROR: orderby should be email, familyName or givenName. Got %s' % orderBy
        sys.exit(2)
      elif orderBy.lower() in [u'familyname', u'lastname']:
        orderBy = u'familyName'
      elif orderBy.lower() in [u'givenname', u'firstname']:
        orderBy = u'givenName'
      i += 2
    elif sys.argv[i].lower() == u'userview':
      viewType = u'domain_public'
      i += 1
    elif sys.argv[i].lower() in [u'ascending', u'descending']:
      sortOrder = sys.argv[i].upper()
      i += 1
    elif sys.argv[i].lower() == u'domain':
      domain = sys.argv[i+1]
      # customer and domain are mutually exclusive in the Directory API.
      customer = None
      i += 2
    elif sys.argv[i].lower() == u'query':
      query = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() in [u'firstname', u'givenname', u'lastname', u'familyName', u'fullname']:
      user_fields.append(u'name')
      i += 1
    elif sys.argv[i].lower() == u'ou':
      user_fields.append(u'orgUnitPath')
      i += 1
    elif sys.argv[i].lower() == u'suspended':
      user_fields.append(u'suspended')
      user_fields.append(u'suspensionReason')
      i += 1
    elif sys.argv[i].lower() == u'ismailboxsetup':
      user_fields.append(u'isMailboxSetup')
      i += 1
    elif sys.argv[i].lower() == u'changepassword':
      user_fields.append(u'changePasswordAtNextLogin')
      i += 1
    elif sys.argv[i].lower() == u'agreed2terms':
      user_fields.append(u'agreedToTerms')
      i += 1
    elif sys.argv[i].lower() == u'admin':
      user_fields.append(u'isAdmin')
      user_fields.append(u'isDelegatedAdmin')
      i += 1
    elif sys.argv[i].lower() == u'gal':
      user_fields.append(u'includeInGlobalAddressList')
      i += 1
    elif sys.argv[i].lower() in [u'photo', u'photourl']:
      user_fields.append(u'thumbnailPhotoUrl')
      i += 1
    elif sys.argv[i].lower() == u'id':
      user_fields.append(u'id')
      i += 1
    elif sys.argv[i].lower() == u'creationtime':
      user_fields.append(u'creationTime')
      i += 1
    elif sys.argv[i].lower() == u'lastlogintime':
      user_fields.append(u'lastLoginTime')
      i += 1
    elif sys.argv[i].lower() in [u'nicknames', u'aliases']:
      user_fields.append(u'aliases')
      user_fields.append(u'nonEditableAliases')
      i += 1
    elif sys.argv[i].lower() in [u'im', u'ims']:
      user_fields.append(u'ims')
      i += 1
    elif sys.argv[i].lower() in [u'emails', u'email']:
      user_fields.append(u'emails')
      i += 1
    elif sys.argv[i].lower().replace(u'_', u'') in [u'externalids', u'externalid']:
      user_fields.append(u'externalIds')
      i += 1
    elif sys.argv[i].lower() in [u'relation', u'relations']:
      user_fields.append(u'relations')
      i += 1
    elif sys.argv[i].lower() in [u'address', u'addresses']:
      user_fields.append(u'addresses')
      i += 1
    elif sys.argv[i].lower() in [u'organization', u'organizations']:
      user_fields.append(u'organizations')
      i += 1
    elif sys.argv[i].lower() in [u'phone', u'phones']:
      user_fields.append(u'phones')
      i += 1
    elif sys.argv[i].lower() == u'groups':
      getGroupFeed = True
      i += 1
    elif sys.argv[i].lower() in [u'license', u'licenses', u'licence', u'licences']:
      getLicenseFeed = True
      i += 1
    elif sys.argv[i].lower() in [u'emailpart', u'emailparts', u'username']:
      email_parts = True
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam print users"' % sys.argv[i]
      sys.exit(2)
  # Build the API field mask from the collected field names (deduplicated),
  # unless u'allfields' cleared it.
  if fields != None:
    user_fields = set(user_fields)
    fields = u'nextPageToken,users(%s)' % u','.join(user_fields)
  sys.stderr.write(u"Getting all users in Google Apps account (may take some time on a large account)...\n")
  page_message = u'Got %%total_items%% users: %%first_item%% - %%last_item%%\n'
  all_users = callGAPIpages(cd.users(), u'list', u'users', page_message=page_message,
                            message_attribute=u'primaryEmail', customer=customer, domain=domain, fields=fields,
                            showDeleted=deleted_only, orderBy=orderBy, sortOrder=sortOrder, viewType=viewType,
                            query=query, projection=projection, customFieldMask=customFieldMask, maxResults=GC_Values[GC_USER_MAX_RESULTS])
  titles = [u'primaryEmail',]
  attributes = []
  # Flatten each user record; column titles are discovered as we go.
  for user in all_users:
    if email_parts:
      try:
        user_email = user[u'primaryEmail']
        if user_email.find(u'@') != -1:
          user[u'primaryEmailLocal'] = user_email[:user_email.find(u'@')]
          user[u'primaryEmailDomain'] = user_email[user_email.find(u'@')+1:]
      except KeyError:
        pass
    attributes.append(flatten_json(user))
    for item in attributes[-1]:
      if item not in titles:
        titles.append(item)
  # Sort columns alphabetically, but keep primaryEmail first.
  titles.remove(u'primaryEmail')
  titles = sorted(titles)
  titles = [u'primaryEmail'] + titles
  header = {}
  for title in titles:
    header[title] = title
  attributes.insert(0, header)
  if getGroupFeed:
    # One extra API round-trip per user to gather group memberships.
    total_users = len(attributes) - 1
    user_count = 1
    titles.append(u'Groups')
    attributes[0].update(Groups=u'Groups')
    for user in attributes[1:]:
      user_email = user[u'primaryEmail']
      sys.stderr.write(u"Getting Group Membership for %s (%s/%s)\r\n" % (user_email, user_count, total_users))
      groups = callGAPIpages(cd.groups(), u'list', u'groups', userKey=user_email)
      grouplist = u''
      for groupname in groups:
        grouplist += groupname[u'email']+' '
      if grouplist[-1:] == u' ':
        grouplist = grouplist[:-1]
      user.update(Groups=grouplist)
      user_count += 1
  if getLicenseFeed:
    # Join against the full license listing, matched on lowercased email.
    titles.append(u'Licenses')
    attributes[0].update(Licenses=u'Licenses')
    licenses = doPrintLicenses(return_list=True)
    if len(licenses) > 1:
      for user in attributes[1:]:
        user_licenses = []
        for u_license in licenses:
          if u_license[u'userId'].lower() == user[u'primaryEmail'].lower():
            user_licenses.append(u_license[u'skuId'])
        user.update(Licenses=u' '.join(user_licenses))
  output_csv(attributes, titles, u'Users', todrive)
def doPrintGroups():
  """Print a CSV of all groups in the account (gam print groups).

  CLI arguments from argv[3] onward select extra columns (name, id,
  description, aliases, ...), membership roles to expand (members,
  owners, managers), and optionally the full group settings.
  """
  cd = buildGAPIObject(u'directory')
  i = 3
  printname = printdesc = printid = members = owners = managers = settings = admin_created = aliases = todrive = False
  customer = GC_Values[GC_CUSTOMER_ID]
  usedomain = usemember = None
  listDelimiter = u'\n'
  # First row is the CSV header; extended in lockstep with titles.
  group_attributes = [{u'Email': u'Email'}]
  titles = [u'Email']
  fields = u'nextPageToken,groups(email)'
  # Parse CLI arguments; field-selecting options also extend the API mask.
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'domain':
      usedomain = sys.argv[i+1].lower()
      # customer and domain are mutually exclusive in the Directory API.
      customer = None
      i += 2
    elif sys.argv[i].lower() == u'todrive':
      todrive = True
      i += 1
    elif sys.argv[i].lower() == u'delimiter':
      listDelimiter = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'member':
      usemember = sys.argv[i+1].lower()
      customer = None
      i += 2
    elif sys.argv[i].lower() == u'name':
      fields += u',groups(name)'
      printname = True
      group_attributes[0].update(Name=u'Name')
      titles.append(u'Name')
      i += 1
    elif sys.argv[i].lower() == u'admincreated':
      fields += u',groups(adminCreated)'
      admin_created = True
      group_attributes[0].update(Admin_Created=u'Admin_Created')
      titles.append(u'Admin_Created')
      i += 1
    elif sys.argv[i].lower() == u'description':
      fields += u',groups(description)'
      group_attributes[0].update(Description=u'Description')
      titles.append(u'Description')
      printdesc = True
      i += 1
    elif sys.argv[i].lower() == u'id':
      fields += u',groups(id)'
      group_attributes[0].update(ID=u'ID')
      titles.append(u'ID')
      printid = True
      i += 1
    elif sys.argv[i].lower() == u'aliases':
      fields += u',groups(aliases,nonEditableAliases)'
      group_attributes[0].update(Aliases=u'Aliases')
      group_attributes[0].update(NonEditableAliases=u'NonEditableAliases')
      titles.append(u'Aliases')
      titles.append(u'NonEditableAliases')
      aliases = True
      i += 1
    elif sys.argv[i].lower() == u'members':
      group_attributes[0].update(Members=u'Members')
      titles.append(u'Members')
      members = True
      i += 1
    elif sys.argv[i].lower() == u'owners':
      group_attributes[0].update(Owners=u'Owners')
      titles.append(u'Owners')
      owners = True
      i += 1
    elif sys.argv[i].lower() == u'managers':
      group_attributes[0].update(Managers=u'Managers')
      titles.append(u'Managers')
      managers = True
      i += 1
    elif sys.argv[i].lower() == u'settings':
      settings = True
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam print groups"' % sys.argv[i]
      sys.exit(2)
  sys.stderr.write(u"Retrieving All Groups for Google Apps account (may take some time on a large account)...\n")
  page_message = u'Got %%num_items%% groups: %%first_item%% - %%last_item%%\n'
  all_groups = callGAPIpages(cd.groups(), u'list', u'groups', page_message=page_message,
                             message_attribute=u'email', customer=customer, domain=usedomain, userKey=usemember, fields=fields)
  total_groups = len(all_groups)
  count = 0
  for group_vals in all_groups:
    count += 1
    group = {}
    group.update({u'Email': group_vals[u'email']})
    if printname:
      try:
        group.update({u'Name': group_vals[u'name']})
      except KeyError:
        pass
    if printdesc:
      try:
        group.update({u'Description': group_vals[u'description']})
      except KeyError:
        pass
    if printid:
      try:
        group.update({u'ID': group_vals[u'id']})
      except KeyError:
        pass
    if admin_created:
      try:
        group.update({u'Admin_Created': group_vals[u'adminCreated']})
      except KeyError:
        pass
    if aliases:
      try:
        group.update({u'Aliases': ' '.join(group_vals[u'aliases'])})
      except KeyError:
        pass
      try:
        group.update({u'NonEditableAliases': ' '.join(group_vals[u'nonEditableAliases'])})
      except KeyError:
        pass
    if members or owners or managers:
      # One members.list call per group, restricted to the requested roles.
      roles = list()
      if members:
        roles.append(u'members')
      if owners:
        roles.append(u'owners')
      if managers:
        roles.append(u'managers')
      roles = u','.join(roles)
      sys.stderr.write(u' Getting %s for %s (%s of %s)\n' % (roles, group_vals[u'email'], count, total_groups))
      page_message = u'Got %%num_items%% members: %%first_item%% - %%last_item%%\n'
      all_group_members = callGAPIpages(cd.members(), u'list', u'members', page_message=page_message,
                                        message_attribute=u'email', groupKey=group_vals[u'email'], roles=roles, fields=u'nextPageToken,members(email,id,role)')
      if members:
        all_true_members = list()
      if managers:
        all_managers = list()
      if owners:
        all_owners = list()
      for member in all_group_members:
        member_email = member.get(u'email', member.get(u'id', None))
        if not member_email:
          sys.stderr.write(u' Not sure what to do with: %s' % member)
          continue
        try:
          if members and member[u'role'] == u'MEMBER':
            all_true_members.append(member_email)
          elif managers and member[u'role'] == u'MANAGER':
            all_managers.append(member_email)
          elif owners and member[u'role'] == u'OWNER':
            all_owners.append(member_email)
        except KeyError:
          # Members with no role reported are treated as plain members.
          all_true_members.append(member_email)
      if members:
        group.update({u'Members': listDelimiter.join(all_true_members)})
      if managers:
        group.update({u'Managers': listDelimiter.join(all_managers)})
      if owners:
        group.update({u'Owners': listDelimiter.join(all_owners)})
    if settings:
      sys.stderr.write(u" Retrieving Settings for group %s (%s of %s)...\r\n" % (group_vals[u'email'], count, total_groups))
      gs = buildGAPIObject(u'groupssettings')
      # NOTE(review): this rebinds the `settings` boolean flag to the API
      # response dict; it keeps working only because a non-empty dict is
      # truthy on subsequent iterations. Confirm this reuse is intended.
      settings = callGAPI(gs.groups(), u'get', retry_reasons=[u'serviceLimit'], groupUniqueId=group_vals[u'email'])
      for key in settings:
        if key in [u'email', u'name', u'description', u'kind', u'etag']:
          continue
        setting_value = settings[key]
        if setting_value == None:
          setting_value = u''
        if key not in titles:
          group_attributes[0][key] = key
          titles.append(key)
        group.update({key: setting_value})
    group_attributes.append(group)
  output_csv(group_attributes, titles, u'Groups', todrive)
def doPrintOrgs():
cd = buildGAPIObject(u'directory')
i = 3
printname = printdesc = printparent = printinherit = todrive = False
listType = u'all'
orgUnitPath = u"/"
org_attributes = [{}]
fields = u'organizationUnits(orgUnitPath)'
titles = []
while i < len(sys.argv):
if sys.argv[i].lower() == u'allfields':
fields = None
i += 1
elif sys.argv[i].lower() == u'name':
printname = True
org_attributes[0].update(Name=u'Name')
fields += u',organizationUnits(name)'
titles.append(u'Name')
i += 1
elif sys.argv[i].lower() == u'toplevelonly':
listType = u'children'
i += 1
elif sys.argv[i].lower() == u'from_parent':
orgUnitPath = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'todrive':
todrive = True
i += 1
elif sys.argv[i].lower() == u'description':
printdesc = True
fields += u',organizationUnits(description)'
org_attributes[0].update(Description=u'Description')
titles.append(u'Description')
i += 1
elif sys.argv[i].lower() == u'parent':
printparent = True
fields += u',organizationUnits(parentOrgUnitPath)'
org_attributes[0].update(Parent=u'Parent')
titles.append(u'Parent')
i += 1
elif sys.argv[i].lower() == u'inherit':
printinherit = True
fields += u',organizationUnits(blockInheritance)'
org_attributes[0].update(InheritanceBlocked=u'InheritanceBlocked')
titles.append(u'InheritanceBlocked')
i += 1
else:
print u'ERROR: %s is not a valid argument for "gam print orgs"' % sys.argv[i]
sys.exit(2)
if fields:
org_attributes[0][u'Path'] = u'Path'
titles.append(u'Path')
sys.stderr.write(u"Retrieving All Organizational Units for your account (may take some time on large domain)...")
orgs = callGAPI(cd.orgunits(), u'list', customerId=GC_Values[GC_CUSTOMER_ID], fields=fields, type=listType, orgUnitPath=orgUnitPath)
sys.stderr.write(u"done\n")
if not u'organizationUnits' in orgs:
print u'0 org units in this Google Apps instance...'
return
for org_vals in orgs[u'organizationUnits']:
orgUnit = {}
if not fields:
orgUnit = flatten_json(org_vals)
for row in orgUnit:
if row not in titles:
titles.append(row)
org_attributes[0][row] = row
else:
orgUnit.update({u'Path': org_vals[u'orgUnitPath']})
if printname:
name = org_vals[u'name']
if name == None:
name = u''
orgUnit.update({u'Name': name})
if printdesc:
try:
desc = org_vals[u'description']
if desc == None:
desc = u''
except KeyError:
pass
orgUnit.update({u'Description': desc})
if printparent:
parent = org_vals[u'parentOrgUnitPath']
if parent == None:
parent = u''
orgUnit.update({u'Parent': parent})
if printinherit:
try:
orgUnit.update({u'InheritanceBlocked': org_vals[u'blockInheritance']})
except KeyError:
pass
org_attributes.append(orgUnit)
output_csv(org_attributes, titles, u'Orgs', todrive)
def doPrintAliases():
cd = buildGAPIObject(u'directory')
todrive = False
i = 3
while i < len(sys.argv):
if sys.argv[i].lower() == u'todrive':
todrive = True
i += 1
else:
print u'ERROR: %s is not a valid argument for "gam print aliases"' % sys.argv[i]
sys.exit(2)
alias_attributes = []
alias_attributes.append({u'Alias': u'Alias'})
alias_attributes[0].update(Target=u'Target')
alias_attributes[0].update(TargetType=u'TargetType')
titles = [u'Alias', u'Target', u'TargetType']
sys.stderr.write(u"Retrieving All User Aliases for %s organization (may take some time on large domain)...\n" % GC_Values[GC_DOMAIN])
page_message = u'Got %%num_items%% users %%first_item%% - %%last_item%%\n'
all_users = callGAPIpages(cd.users(), u'list', u'users', page_message=page_message,
message_attribute=u'primaryEmail', customer=GC_Values[GC_CUSTOMER_ID],
fields=u'users(primaryEmail,aliases),nextPageToken', maxResults=GC_Values[GC_USER_MAX_RESULTS])
for user in all_users:
try:
for alias in user[u'aliases']:
alias_attributes.append({u'Alias': alias, u'Target': user[u'primaryEmail'], u'TargetType': u'User'})
except KeyError:
continue
sys.stderr.write(u"Retrieving All User Aliases for %s organization (may take some time on large domain)...\n" % GC_Values[GC_DOMAIN])
page_message = u'Got %%num_items%% groups %%first_item%% - %%last_item%%\n'
all_groups = callGAPIpages(cd.groups(), u'list', u'groups', page_message=page_message,
message_attribute=u'email', customer=GC_Values[GC_CUSTOMER_ID],
fields=u'groups(email,aliases),nextPageToken')
for group in all_groups:
try:
for alias in group[u'aliases']:
alias_attributes.append({u'Alias': alias, u'Target': group[u'email'], u'TargetType': u'Group'})
except KeyError:
continue
output_csv(alias_attributes, titles, u'Aliases', todrive)
def doPrintGroupMembers():
  """Print a CSV of group memberships (gam print group-members).

  Optional CLI args: todrive, group <email> (restrict to one group;
  otherwise every group in the account is listed).
  """
  cd = buildGAPIObject(u'directory')
  todrive = all_groups = False
  i = 3
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'todrive':
      todrive = True
      i += 1
    elif sys.argv[i].lower() == u'group':
      # Single-group mode: mimic the API result shape with a one-item list.
      all_groups = [{u'email': sys.argv[i+1].lower()}]
      i += 2
    else:
      print u'ERROR: %s is not a valid argument for "gam print group-members"' % sys.argv[i]
      sys.exit(2)
  member_attributes = [{u'group': u'group'},]
  if not all_groups:
    all_groups = callGAPIpages(cd.groups(), u'list', u'groups', message_attribute=u'email',
                               customer=GC_Values[GC_CUSTOMER_ID], fields=u'nextPageToken,groups(email)')
  total_groups = len(all_groups)
  i = 1
  for group in all_groups:
    group_email = group[u'email']
    sys.stderr.write(u'Getting members for %s (%s/%s)\n' % (group_email, i, total_groups))
    group_members = callGAPIpages(cd.members(), u'list', u'members', message_attribute=u'email', groupKey=group_email)
    for member in group_members:
      member_attr = {u'group': group_email}
      for title in member:
        if title in [u'kind', u'etag']:
          continue
        # Register previously-unseen columns in the header row.
        try:
          member_attributes[0][title]
        except KeyError:
          member_attributes[0][title] = title
        member_attr[title] = member[title]
      member_attributes.append(member_attr)
    i += 1
  titles = member_attributes[0].keys()
  output_csv(member_attributes, titles, u'Group Members', todrive)
def doPrintMobileDevices():
cd = buildGAPIObject(u'directory')
mobile_attributes = [{}]
titles = []
todrive = False
query = orderBy = sortOrder = None
i = 3
while i < len(sys.argv):
if sys.argv[i].lower() == u'query':
query = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'todrive':
todrive = True
i += 1
elif sys.argv[i].lower() == u'orderby':
orderBy = sys.argv[i+1].lower()
allowed_values = [u'deviceid', u'email', u'lastsync', u'model', u'name', u'os', u'status', u'type']
if orderBy.lower() not in allowed_values:
print u'ERROR: orderBy must be one of %s. Got %s' % (u', u'.join(allowed_values), orderBy)
sys.exit(2)
elif orderBy == u'lastsync':
orderBy = u'lastSync'
elif orderBy == u'deviceid':
orderBy = u'deviceId'
i += 2
elif sys.argv[i].lower() in [u'ascending', u'descending']:
sortOrder = sys.argv[i].upper()
i += 1
else:
print u'ERROR: %s is not a valid argument for "gam print mobile"' % sys.argv[i]
sys.exit(2)
sys.stderr.write(u'Retrieving All Mobile Devices for organization (may take some time for large accounts)...\n')
page_message = u'Got %%num_items%% mobile devices...\n'
all_mobile = callGAPIpages(cd.mobiledevices(), u'list', u'mobiledevices', page_message=page_message,
customerId=GC_Values[GC_CUSTOMER_ID], query=query,
orderBy=orderBy, sortOrder=sortOrder, maxResults=GC_Values[GC_DEVICE_MAX_RESULTS])
for mobile in all_mobile:
mobiledevice = dict()
for title in mobile:
try:
if title in [u'kind', u'etag', u'applications']:
continue
try:
mobile_attributes[0][title]
except KeyError:
mobile_attributes[0][title] = title
titles.append(title)
if title in [u'name', u'email']:
mobiledevice[title] = mobile[title][0]
else:
mobiledevice[title] = mobile[title]
except KeyError:
pass
mobile_attributes.append(mobiledevice)
output_csv(mobile_attributes, titles, u'Mobile', todrive)
def doPrintCrosDevices():
cd = buildGAPIObject(u'directory')
cros_attributes = [{u'deviceId': u'deviceId'}]
titles = [u'deviceId',]
todrive = False
query = projection = orderBy = sortOrder = None
noLists = False
selectAttrib = None
i = 3
while i < len(sys.argv):
my_arg = sys.argv[i].lower().replace(u'_', u'')
if my_arg == u'query':
query = sys.argv[i+1]
i += 2
elif my_arg == u'todrive':
todrive = True
i += 1
elif my_arg == u'nolists':
noLists = True
selectAttrib = None
i += 1
elif my_arg == u'recentusers':
selectAttrib = u'recentUsers'
noLists = False
i += 1
elif my_arg in [u'timeranges', u'activetimeranges']:
selectAttrib = u'activeTimeRanges'
noLists = False
i += 1
elif my_arg == u'orderby':
orderBy = sys.argv[i+1].lower().replace(u'_', u'')
allowed_values = [u'location', u'user', u'lastsync', u'notes', u'serialnumber', u'status', u'supportenddate']
if orderBy not in allowed_values:
print u'ERROR: orderBy must be one of %s. Got %s' % (u', u'.join(allowed_values), orderBy)
sys.exit(2)
elif orderBy == u'location':
orderBy = u'annotatedLocation'
elif orderBy == u'user':
orderBy = u'annotatedUser'
elif orderBy == u'lastsync':
orderBy = u'lastSync'
elif orderBy == u'serialnumber':
orderBy = u'serialNumber'
elif orderBy == u'supportEndDate':
orderBy = u'supportEndDate'
i += 2
elif my_arg in [u'ascending', u'descending']:
sortOrder = my_arg.upper()
i += 1
elif my_arg in [u'basic', u'full']:
projection = my_arg.upper()
i += 1
else:
print u'ERROR: %s is not a valid argument for "gam print cros"' % sys.argv[i]
sys.exit(2)
if selectAttrib:
projection = u'FULL'
sys.stderr.write(u'Retrieving All Chrome OS Devices for organization (may take some time for large accounts)...\n')
page_message = u'Got %%num_items%% Chrome devices...\n'
all_cros = callGAPIpages(cd.chromeosdevices(), u'list', u'chromeosdevices', page_message=page_message,
query=query, customerId=GC_Values[GC_CUSTOMER_ID], projection=projection,
orderBy=orderBy, sortOrder=sortOrder, maxResults=GC_Values[GC_DEVICE_MAX_RESULTS])
if all_cros:
if (not noLists) and (not selectAttrib):
for cros in all_cros:
cros_attributes.append(flatten_json(cros))
for item in cros_attributes[-1]:
if item not in cros_attributes[0]:
cros_attributes[0][item] = item
titles.append(item)
else:
attribMap = dict()
for cros in all_cros:
row = dict()
for attrib in cros:
if attrib in [u'kind', u'etag', u'recentUsers', u'activeTimeRanges']:
continue
if attrib not in cros_attributes[0]:
cros_attributes[0][attrib] = attrib
titles.append(attrib)
row[attrib] = cros[attrib]
if noLists or (selectAttrib not in cros) or (not cros[selectAttrib]):
cros_attributes.append(row)
else:
if not attribMap:
for attrib in cros[selectAttrib][0]:
xattrib = u'%s.%s' % (selectAttrib, attrib)
if xattrib not in cros_attributes[0]:
cros_attributes[0][xattrib] = xattrib
titles.append(xattrib)
attribMap[attrib] = xattrib
for item in cros[selectAttrib]:
new_row = row.copy()
for attrib in item:
if isinstance(item[attrib], (bool, int)):
new_row[attribMap[attrib]] = str(item[attrib])
else:
new_row[attribMap[attrib]] = item[attrib]
cros_attributes.append(new_row)
output_csv(cros_attributes, titles, u'CrOS', todrive)
def doPrintLicenses(return_list=False, skus=None):
lic = buildGAPIObject(u'licensing')
products = [u'Google-Apps', u'Google-Drive-storage', u'Google-Coordinate', u'Google-Vault']
licenses = []
lic_attributes = [{}]
todrive = False
i = 3
while i < len(sys.argv) and not return_list:
if sys.argv[i].lower() == u'todrive':
todrive = True
i += 1
elif sys.argv[i].lower() in [u'products', u'product']:
products = sys.argv[i+1].split(u',')
i += 2
elif sys.argv[i].lower() in [u'sku', u'skus']:
skus = sys.argv[i+1].split(u',')
i += 2
else:
print u'ERROR: %s is not a valid argument for "gam print licenses"' % sys.argv[i]
sys.exit(2)
if skus:
for sku in skus:
product, sku = getProductAndSKU(sku)
page_message = u'Got %%%%total_items%%%% Licenses for %s...\n' % sku
try:
licenses += callGAPIpages(lic.licenseAssignments(), u'listForProductAndSku', u'items', throw_reasons=[u'invalid', u'forbidden'], page_message=page_message,
customerId=GC_Values[GC_DOMAIN], productId=product, skuId=sku, fields=u'items(productId,skuId,userId),nextPageToken')
except googleapiclient.errors.HttpError:
licenses += []
else:
for productId in products:
page_message = u'Got %%%%total_items%%%% Licenses for %s...\n' % productId
try:
licenses += callGAPIpages(lic.licenseAssignments(), u'listForProduct', u'items', throw_reasons=[u'invalid', u'forbidden'], page_message=page_message,
customerId=GC_Values[GC_DOMAIN], productId=productId, fields=u'items(productId,skuId,userId),nextPageToken')
except googleapiclient.errors.HttpError:
licenses = +[]
for u_license in licenses:
a_license = dict()
for title in u_license:
if title in [u'kind', u'etags', u'selfLink']:
continue
if title not in lic_attributes[0]:
lic_attributes[0][title] = title
a_license[title] = u_license[title]
lic_attributes.append(a_license)
if return_list:
return lic_attributes
output_csv(lic_attributes, lic_attributes[0], u'Licenses', todrive)
def doPrintTokens():
  """Print a CSV of OAuth tokens issued to users (gam print tokens).

  Optional CLI args: todrive, or an entity-type selector from
  usergroup_types followed by the entity value (defaults to all users).
  """
  cd = buildGAPIObject(u'directory')
  todrive = False
  i = 3
  entity_type = u'all'
  entity = u'users'
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'todrive':
      todrive = True
      i += 1
    elif sys.argv[i].lower() in usergroup_types:
      entity_type = sys.argv[i].lower()
      entity = sys.argv[i+1].lower()
      i += 2
    else:
      print u'ERROR: %s is not a valid argument for "gam print tokens"' % sys.argv[i]
      sys.exit(2)
  all_users = getUsersToModify(entity_type=entity_type, entity=entity, silent=False)
  titles = [u'user', u'displayText', u'clientId', u'nativeApp', u'anonymous', u'scopes']
  token_attributes = [{}]
  for title in titles:
    token_attributes[0][title] = title
  # One tokens.list call per user; extra columns are discovered as we go.
  for user in all_users:
    sys.stderr.write(u' getting tokens for %s\n' % user)
    user_tokens = callGAPI(cd.tokens(), u'list', userKey=user)
    try:
      for user_token in user_tokens[u'items']:
        this_token = dict()
        this_token[u'user'] = user
        this_token[u'scopes'] = u' '.join(user_token[u'scopes'])
        for token_item in user_token:
          if token_item in [u'kind', u'etag', u'scopes']:
            continue
          this_token[token_item] = user_token[token_item]
          if token_item not in titles:
            titles.append(token_item)
            token_attributes[0][token_item] = token_item
        token_attributes.append(this_token)
    except KeyError:
      # User has no u'items' key, i.e. no tokens.
      pass
  output_csv(token_attributes, titles, u'OAuth Tokens', todrive)
def doPrintResourceCalendars():
  """Print a CSV of resource calendars (gam print resources).

  Optional CLI args: allfields, todrive, id, description, email, type.

  NOTE(review): u'allfields' sets fields to None, so a later
  u'description' or u'type' argument would crash on fields.append --
  argument order matters. Confirm intended.
  """
  cd = buildGAPIObject(u'directory')
  todrive = False
  fields = [u'resourceId', u'resourceName', u'resourceEmail']
  i = 3
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'allfields':
      fields = None
      i += 1
    elif sys.argv[i].lower() == u'todrive':
      todrive = True
      i += 1
    elif sys.argv[i].lower() == u'id':
      # Already in the default field list; accepted as a no-op.
      i += 1
    elif sys.argv[i].lower() == u'description':
      fields.append(u'resourceDescription')
      i += 1
    elif sys.argv[i].lower() == u'email':
      # Already in the default field list; accepted as a no-op.
      i += 1
    elif sys.argv[i].lower() == u'type':
      fields.append(u'resourceType')
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam print resources"' % sys.argv[i]
      sys.exit(2)
  if fields:
    fields = u'nextPageToken,items(%s)' % u','.join(fields)
  sys.stderr.write(u"Retrieving All Resource Calendars for your account (may take some time on a large domain)\n")
  page_message = u'Got %%total_items%% resources: %%first_item%% - %%last_item%%\n'
  resources = callGAPIpages(cd.resources().calendars(), u'list', u'items',
                            page_message=page_message, message_attribute=u'resourceId',
                            customer=GC_Values[GC_CUSTOMER_ID], fields=fields, maxResults=500)
  resources_attrib = [{u'resourceId':u'resourceId',
                       u'resourceEmail': u'resourceEmail',
                       u'resourceName': u'resourceName'}]
  for resource in resources:
    resource_attrib = {}
    for key, value in resource.items():
      if key in [u'kind', u'etags']:
        continue
      if key not in resources_attrib[0]:
        resources_attrib[0][key] = key
      resource_attrib[key] = value
    resources_attrib.append(resource_attrib)
  output_csv(resources_attrib, resources_attrib[0], u'Resources', todrive)
def doCreateMonitor():
  """Create an email monitor via the Audit API (gam audit monitor create).

  argv[4] is the source user to monitor, argv[5] the destination user
  receiving copies. Optional args tune the date range and which message
  classes (incoming/outgoing/drafts/chats) are captured or headers-only.
  """
  source_user = sys.argv[4].lower()
  destination_user = sys.argv[5].lower()
  #end_date defaults to 30 days in the future...
  end_date = (datetime.datetime.now() + datetime.timedelta(days=30)).strftime(u"%Y-%m-%d %H:%M")
  begin_date = None
  incoming_headers_only = outgoing_headers_only = drafts_headers_only = chats_headers_only = False
  drafts = chats = True
  i = 6
  while i < len(sys.argv):
    if sys.argv[i].lower() == u'end':
      end_date = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'begin':
      begin_date = sys.argv[i+1]
      i += 2
    elif sys.argv[i].lower() == u'incoming_headers':
      incoming_headers_only = True
      i += 1
    elif sys.argv[i].lower() == u'outgoing_headers':
      outgoing_headers_only = True
      i += 1
    elif sys.argv[i].lower() == u'nochats':
      chats = False
      i += 1
    elif sys.argv[i].lower() == u'nodrafts':
      drafts = False
      i += 1
    elif sys.argv[i].lower() == u'chat_headers':
      chats_headers_only = True
      i += 1
    elif sys.argv[i].lower() == u'draft_headers':
      drafts_headers_only = True
      i += 1
    else:
      print u'ERROR: %s is not a valid argument for "gam create monitor"' % sys.argv[i]
      sys.exit(2)
  audit = getAuditObject()
  # The Audit API takes a bare username; move any domain part onto the
  # audit object instead.
  if source_user.find(u'@') > 0:
    audit.domain = source_user[source_user.find(u'@')+1:]
    source_user = source_user[:source_user.find(u'@')]
  callGData(audit, u'createEmailMonitor', source_user=source_user, destination_user=destination_user, end_date=end_date, begin_date=begin_date,
            incoming_headers_only=incoming_headers_only, outgoing_headers_only=outgoing_headers_only,
            drafts=drafts, drafts_headers_only=drafts_headers_only, chats=chats, chats_headers_only=chats_headers_only)
def doShowMonitors():
  """List the email monitors configured for the user given in argv[4]."""
  user = sys.argv[4].lower()
  audit = getAuditObject()
  # The Audit API takes a bare username; move any domain part onto the
  # audit object instead.
  if user.find(u'@') > 0:
    audit.domain = user[user.find(u'@')+1:]
    user = user[:user.find(u'@')]
  results = callGData(audit, u'getEmailMonitors', user=user)
  print sys.argv[4].lower()+u' has the following monitors:'
  print u''
  for monitor in results:
    print u' Destination: '+monitor[u'destUserName']
    try:
      print u' Begin: '+monitor[u'beginDate']
    except KeyError:
      # No beginDate means the monitor started immediately.
      print u' Begin: immediately'
    print u' End: '+monitor[u'endDate']
    # NOTE(review): the Incoming label prints outgoingEmailMonitorLevel and
    # vice versa -- verify against the Audit API whether the labels or the
    # keys are swapped before changing either.
    print u' Monitor Incoming: '+monitor[u'outgoingEmailMonitorLevel']
    print u' Monitor Outgoing: '+monitor[u'incomingEmailMonitorLevel']
    print u' Monitor Chats: '+monitor[u'chatMonitorLevel']
    print u' Monitor Drafts: '+monitor[u'draftMonitorLevel']
    print u''
def doDeleteMonitor():
  """Remove the email monitor on argv[4] that copies mail to argv[5]."""
  monitored_user = sys.argv[4].lower()
  watcher_user = sys.argv[5].lower()
  audit = getAuditObject()
  at_pos = monitored_user.find(u'@')
  # The Audit API takes a bare username; move any domain part onto audit.
  if at_pos > 0:
    audit.domain = monitored_user[at_pos+1:]
    monitored_user = monitored_user[:at_pos]
  callGData(audit, u'deleteEmailMonitor', source_user=monitored_user, destination_user=watcher_user)
def doRequestActivity():
user = sys.argv[4].lower()
audit = getAuditObject()
if user.find(u'@') > 0:
audit.domain = user[user.find(u'@')+1:]
user = user[:user.find(u'@')]
results = callGData(audit, u'createAccountInformationRequest', user=user)
print u'Request successfully submitted:'
print u' Request ID: '+results[u'requestId']
print u' User: '+results[u'userEmailAddress']
print u' Status: '+results[u'status']
print u' Request Date: '+results[u'requestDate']
print u' Requested By: '+results[u'adminEmailAddress']
def doStatusActivityRequests():
  """Show the status of activity requests (gam audit activity status).

  With argv[4] (user) and argv[5] (request id) present, shows that one
  request; otherwise (IndexError on the missing args) lists every
  outstanding activity request in the account.
  """
  audit = getAuditObject()
  try:
    user = sys.argv[4].lower()
    # The Audit API takes a bare username; move any domain part onto audit.
    if user.find(u'@') > 0:
      audit.domain = user[user.find(u'@')+1:]
      user = user[:user.find(u'@')]
    request_id = sys.argv[5].lower()
    results = callGData(audit, u'getAccountInformationRequestStatus', user=user, request_id=request_id)
    print u''
    print u'  Request ID: '+results[u'requestId']
    print u'  User: '+results[u'userEmailAddress']
    print u'  Status: '+results[u'status']
    print u'  Request Date: '+results[u'requestDate']
    print u'  Requested By: '+results[u'adminEmailAddress']
    try:
      print u'  Number Of Files: '+results[u'numberOfFiles']
      for i in range(int(results[u'numberOfFiles'])):
        print u'  Url%s: %s' % (i, results[u'fileUrl%s' % i])
    except KeyError:
      # numberOfFiles is absent until the request completes.
      pass
    print u''
  except IndexError:
    results = callGData(audit, u'getAllAccountInformationRequestsStatus')
    print u'Current Activity Requests:'
    print u''
    for request in results:
      print u'  Request ID: '+request[u'requestId']
      print u'  User: '+request[u'userEmailAddress']
      print u'  Status: '+request[u'status']
      print u'  Request Date: '+request[u'requestDate']
      print u'  Requested By: '+request[u'adminEmailAddress']
      try:
        print u'  Number Of Files: '+request[u'numberOfFiles']
        for i in range(int(request[u'numberOfFiles'])):
          print u'  Url%s: %s' % (i, request[u'fileUrl%s' % i])
      except KeyError:
        pass
      print u''
def doDownloadActivityRequest():
user = sys.argv[4].lower()
request_id = sys.argv[5].lower()
audit = getAuditObject()
if user.find(u'@') > 0:
audit.domain = user[user.find(u'@')+1:]
user = user[:user.find(u'@')]
results = callGData(audit, u'getAccountInformationRequestStatus', user=user, request_id=request_id)
if results[u'status'] != u'COMPLETED':
systemErrorExit(4, MESSAGE_REQUEST_NOT_COMPLETE.format(results[u'status']))
if int(results.get(u'numberOfFiles', u'0')) < 1:
systemErrorExit(4, MESSAGE_REQUEST_COMPLETED_NO_FILES)
for i in range(0, int(results[u'numberOfFiles'])):
url = results[u'fileUrl'+str(i)]
filename = u'activity-'+user+'-'+request_id+'-'+unicode(i)+u'.txt.gpg'
print u'Downloading '+filename+u' ('+unicode(i+1)+u' of '+results[u'numberOfFiles']+')'
geturl(url, filename)
def doRequestExport():
begin_date = end_date = search_query = None
headers_only = include_deleted = False
user = sys.argv[4].lower()
i = 5
while i < len(sys.argv):
if sys.argv[i].lower() == u'begin':
begin_date = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'end':
end_date = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'search':
search_query = sys.argv[i+1]
i += 2
elif sys.argv[i].lower() == u'headersonly':
headers_only = True
i += 1
elif sys.argv[i].lower() == u'includedeleted':
include_deleted = True
i += 1
else:
print u'ERROR: %s is not a valid argument for "gam export request"' % sys.argv[i]
sys.exit(2)
audit = getAuditObject()
if user.find(u'@') > 0:
audit.domain = user[user.find(u'@')+1:]
user = user[:user.find(u'@')]
results = callGData(audit, u'createMailboxExportRequest', user=user, begin_date=begin_date, end_date=end_date, include_deleted=include_deleted,
search_query=search_query, headers_only=headers_only)
print u'Export request successfully submitted:'
print u' Request ID: '+results[u'requestId']
print u' User: '+results[u'userEmailAddress']
print u' Status: '+results[u'status']
print u' Request Date: '+results[u'requestDate']
print u' Requested By: '+results[u'adminEmailAddress']
print u' Include Deleted: '+results[u'includeDeleted']
print u' Requested Parts: '+results[u'packageContent']
try:
print u' Begin: '+results[u'beginDate']
except KeyError:
print u' Begin: account creation date'
try:
print u' End: '+results[u'endDate']
except KeyError:
print u' End: export request date'
def doDeleteExport():
  """Delete the mailbox export request named by user (sys.argv[4]) and request id (sys.argv[5])."""
  audit = getAuditObject()
  target = sys.argv[4].lower()
  at_pos = target.find(u'@')
  if at_pos > 0:
    audit.domain = target[at_pos+1:]
    target = target[:at_pos]
  request_id = sys.argv[5].lower()
  callGData(audit, u'deleteMailboxExportRequest', user=target, request_id=request_id)
def doDeleteActivityRequest():
  """Delete the activity request named by user (sys.argv[4]) and request id (sys.argv[5])."""
  audit = getAuditObject()
  target = sys.argv[4].lower()
  at_pos = target.find(u'@')
  if at_pos > 0:
    audit.domain = target[at_pos+1:]
    target = target[:at_pos]
  request_id = sys.argv[5].lower()
  callGData(audit, u'deleteAccountInformationRequest', user=target, request_id=request_id)
def doStatusExportRequests():
audit = getAuditObject()
try:
user = sys.argv[4].lower()
if user.find(u'@') > 0:
audit.domain = user[user.find(u'@')+1:]
user = user[:user.find(u'@')]
request_id = sys.argv[5].lower()
results = callGData(audit, u'getMailboxExportRequestStatus', user=user, request_id=request_id)
print u''
print u' Request ID: '+results[u'requestId']
print u' User: '+results[u'userEmailAddress']
print u' Status: '+results[u'status']
print u' Request Date: '+results[u'requestDate']
print u' Requested By: '+results[u'adminEmailAddress']
print u' Requested Parts: '+results[u'packageContent']
try:
print u' Request Filter: '+results[u'searchQuery']
except KeyError:
print u' Request Filter: None'
print u' Include Deleted: '+results[u'includeDeleted']
try:
print u' Number Of Files: '+results[u'numberOfFiles']
for i in range(int(results[u'numberOfFiles'])):
print u' Url%s: %s' % (i, results[u'fileUrl%s' % i])
except KeyError:
pass
except IndexError:
results = callGData(audit, u'getAllMailboxExportRequestsStatus')
print u'Current Export Requests:'
print u''
for request in results:
print u' Request ID: '+request[u'requestId']
print u' User: '+request[u'userEmailAddress']
print u' Status: '+request[u'status']
print u' Request Date: '+request[u'requestDate']
print u' Requested By: '+request[u'adminEmailAddress']
print u' Requested Parts: '+request[u'packageContent']
try:
print u' Request Filter: '+request[u'searchQuery']
except KeyError:
print u' Request Filter: None'
print u' Include Deleted: '+request[u'includeDeleted']
try:
print u' Number Of Files: '+request[u'numberOfFiles']
except KeyError:
pass
print u''
def doWatchExportRequest():
audit = getAuditObject()
user = sys.argv[4].lower()
if user.find(u'@') > 0:
audit.domain = user[user.find(u'@')+1:]
user = user[:user.find(u'@')]
request_id = sys.argv[5].lower()
while True:
results = callGData(audit, u'getMailboxExportRequestStatus', user=user, request_id=request_id)
if results[u'status'] != u'PENDING':
print u'status is %s. Sending email.' % results[u'status']
msg_txt = u"\n"
msg_txt += u" Request ID: %s\n" % results[u'requestId']
msg_txt += u" User: %s\n" % results[u'userEmailAddress']
msg_txt += u" Status: %s\n" % results[u'status']
msg_txt += u" Request Date: %s\n" % results[u'requestDate']
msg_txt += u" Requested By: %s\n" % results[u'adminEmailAddress']
msg_txt += u" Requested Parts: %s\n" % results[u'packageContent']
try:
msg_txt += u" Request Filter: %s\n" % results[u'searchQuery']
except KeyError:
msg_txt += u" Request Filter: None\n"
msg_txt += u" Include Deleted: %s\n" % results[u'includeDeleted']
try:
msg_txt += u" Number Of Files: %s\n" % results[u'numberOfFiles']
for i in range(int(results[u'numberOfFiles'])):
msg_txt += u" Url%s: %s\n" % (i, results[u'fileUrl%s' % i])
except KeyError:
pass
msg_subj = u'Export #%s for %s status is %s' % (results[u'requestId'], results[u'userEmailAddress'], results[u'status'])
send_email(msg_subj, msg_txt)
break
else:
print u'status still PENDING, will check again in 5 minutes...'
time.sleep(300)
def send_email(msg_subj, msg_txt, msg_rcpt=None):
  """Send a plain-text email via the Gmail API as the authorized admin.

  msg_rcpt defaults to the sender's own address when falsy.
  """
  from email.mime.text import MIMEText
  gmail = buildGAPIObject(u'gmail')
  sender_email = gmail._http.request.credentials.id_token[u'email']
  if not msg_rcpt:
    # No recipient given: send the report to ourselves.
    msg_rcpt = sender_email
  message = MIMEText(msg_txt)
  message[u'Subject'] = msg_subj
  message[u'From'] = sender_email
  message[u'To'] = msg_rcpt
  # The Gmail API expects the RFC 2822 message base64url-encoded in 'raw'.
  encoded = base64.urlsafe_b64encode(message.as_string())
  callGAPI(gmail.users().messages(), u'send', userId=sender_email, body={u'raw': encoded})
def doDownloadExportRequest():
user = sys.argv[4].lower()
request_id = sys.argv[5].lower()
audit = getAuditObject()
if user.find(u'@') > 0:
audit.domain = user[user.find(u'@')+1:]
user = user[:user.find(u'@')]
results = callGData(audit, u'getMailboxExportRequestStatus', user=user, request_id=request_id)
if results[u'status'] != u'COMPLETED':
systemErrorExit(4, MESSAGE_REQUEST_NOT_COMPLETE.format(results[u'status']))
if int(results.get(u'numberOfFiles', u'0')) < 1:
systemErrorExit(4, MESSAGE_REQUEST_COMPLETED_NO_FILES)
for i in range(0, int(results[u'numberOfFiles'])):
url = results[u'fileUrl'+str(i)]
filename = u'export-'+user+'-'+request_id+'-'+str(i)+u'.mbox.gpg'
#don't download existing files. This does not check validity of existing local
#file so partial/corrupt downloads will need to be deleted manually.
if os.path.isfile(filename):
continue
print u'Downloading '+filename+u' ('+unicode(i+1)+u' of '+results[u'numberOfFiles']+')'
geturl(url, filename)
def doUploadAuditKey():
  """Read a PGP public key from stdin and install it as the audit encryption key."""
  key_data = sys.stdin.read()
  audit = getAuditObject()
  callGData(audit, u'updatePGPKey', pgpkey=key_data)
def getUsersToModify(entity_type=None, entity=None, silent=False, return_uids=False, member_type=None):
  """Resolve a command-line entity spec into a list of users (or CrOS device ids).

  entity_type and entity default to sys.argv[1]/sys.argv[2]. Supported types:
  user, users, group, ou/org, ou_and_children/ou_and_child, query,
  license(s)/licence(s), file, courseparticipants/teachers/students,
  all (users|cros), cros.

  silent suppresses progress output on stderr; return_uids returns Google ids
  instead of email addresses; member_type restricts which group member roles
  are listed. Exits via sys.exit on an unrecognized entity_type/entity.
  """
  # got_uids: True when the branch below already fetched ids alongside emails,
  # so no extra per-user lookup is needed for return_uids.
  got_uids = False
  if entity_type == None:
    entity_type = sys.argv[1].lower()
  if entity == None:
    entity = sys.argv[2]
  cd = buildGAPIObject(u'directory')
  if entity_type == u'user':
    # Single literal user.
    users = [entity,]
  elif entity_type == u'users':
    # Comma- and/or space-separated list of users.
    users = entity.replace(u',', u' ').split()
  elif entity_type == u'group':
    got_uids = True
    group = entity
    if member_type == None:
      member_type_message = u'all members'
    else:
      member_type_message = u'%ss' % member_type.lower()
    if group.find(u'@') == -1:
      # Bare group name: qualify with the default domain.
      group = u'%s@%s' % (group, GC_Values[GC_DOMAIN])
    page_message = None
    if not silent:
      sys.stderr.write(u"Getting %s of %s (may take some time for large groups)..." % (member_type_message, group))
      page_message = u'Got %%%%total_items%%%% %s...' % member_type_message
    members = callGAPIpages(cd.members(), u'list', u'members', page_message=page_message,
                            groupKey=group, roles=member_type, fields=u'nextPageToken,members(email,id)')
    users = []
    for member in members:
      if return_uids:
        users.append(member[u'id'])
      else:
        users.append(member[u'email'])
  elif entity_type in [u'ou', u'org']:
    got_uids = True
    ou = entity
    if ou[0] != u'/':
      ou = u'/%s' % ou
    users = []
    page_message = None
    if not silent:
      sys.stderr.write(u"Getting all users in the Google Apps organization (may take some time on a large domain)...\n")
      page_message = u'Got %%total_items%% users...'
    members = callGAPIpages(cd.users(), u'list', u'users', page_message=page_message,
                            customer=GC_Values[GC_CUSTOMER_ID], fields=u'nextPageToken,users(primaryEmail,id,orgUnitPath)',
                            query=u"orgUnitPath='%s'" % ou, maxResults=GC_Values[GC_USER_MAX_RESULTS])
    for member in members:
      # The query also returns users in child OUs; keep only exact matches.
      if ou.lower() != member[u'orgUnitPath'].lower():
        continue
      if return_uids:
        users.append(member[u'id'])
      else:
        users.append(member[u'primaryEmail'])
    if not silent:
      sys.stderr.write(u"%s users are directly in the OU.\n" % len(users))
  elif entity_type in [u'ou_and_children', u'ou_and_child']:
    # Same as ou/org but without filtering out child-OU members.
    got_uids = True
    ou = entity
    if ou[0] != u'/':
      ou = u'/%s' % ou
    users = []
    page_message = None
    if not silent:
      sys.stderr.write(u"Getting all users in the Google Apps organization (may take some time on a large domain)...\n")
      page_message = u'Got %%total_items%% users..'
    members = callGAPIpages(cd.users(), u'list', u'users', page_message=page_message,
                            customer=GC_Values[GC_CUSTOMER_ID], fields=u'nextPageToken,users(primaryEmail,id)',
                            query=u"orgUnitPath='%s'" % ou, maxResults=GC_Values[GC_USER_MAX_RESULTS])
    for member in members:
      if return_uids:
        users.append(member[u'id'])
      else:
        users.append(member[u'primaryEmail'])
    if not silent:
      sys.stderr.write(u"done.\r\n")
  elif entity_type in [u'query',]:
    # Arbitrary Directory API user query string.
    got_uids = True
    users = []
    if not silent:
      sys.stderr.write(u"Getting all users that match query %s (may take some time on a large domain)...\n" % entity)
    page_message = u'Got %%total_items%% users...'
    members = callGAPIpages(cd.users(), u'list', u'users', page_message=page_message,
                            customer=GC_Values[GC_CUSTOMER_ID], fields=u'nextPageToken,users(primaryEmail,id)',
                            query=entity, maxResults=GC_Values[GC_USER_MAX_RESULTS])
    for member in members:
      if return_uids:
        users.append(member[u'id'])
      else:
        users.append(member[u'primaryEmail'])
    if not silent:
      sys.stderr.write(u"done.\r\n")
  elif entity_type in [u'license', u'licenses', u'licence', u'licences']:
    # All users holding any of the given license SKUs.
    users = []
    licenses = doPrintLicenses(return_list=True, skus=entity.split(u','))
    for row in licenses[1:]: # skip header
      try:
        users.append(row[u'userId'])
      except KeyError:
        pass
  elif entity_type == u'file':
    # One user per CSV row; the last column of each row is taken.
    users = []
    filename = entity
    usernames = csv.reader(open(filename, u'rb'))
    for row in usernames:
      try:
        users.append(row.pop())
      except IndexError:
        pass
  elif entity_type in [u'courseparticipants', u'teachers', u'students']:
    croom = buildGAPIObject(u'classroom')
    users = []
    # Non-numeric course ids are treated as aliases (d: prefix).
    if not entity.isdigit() and entity[:2] != u'd:':
      entity = u'd:%s' % entity
    if entity_type in [u'courseparticipants', u'teachers']:
      page_message = u'Got %%total_items%% teachers...'
      teachers = callGAPIpages(croom.courses().teachers(), u'list', u'teachers', page_message=page_message, courseId=entity)
      for teacher in teachers:
        email = teacher[u'profile'].get(u'emailAddress', None)
        if email:
          users.append(email)
    if entity_type in [u'courseparticipants', u'students']:
      page_message = u'Got %%total_items%% students...'
      students = callGAPIpages(croom.courses().students(), u'list', u'students', page_message=page_message, courseId=entity)
      for student in students:
        email = student[u'profile'].get(u'emailAddress', None)
        if email:
          users.append(email)
  elif entity_type == u'all':
    got_uids = True
    users = []
    if entity.lower() == u'users':
      if not silent:
        sys.stderr.write(u"Getting all users in Google Apps account (may take some time on a large account)...\n")
      page_message = u'Got %%total_items%% users...'
      all_users = callGAPIpages(cd.users(), u'list', u'users', page_message=page_message,
                                customer=GC_Values[GC_CUSTOMER_ID],
                                fields=u'nextPageToken,users(primaryEmail,suspended,id)', maxResults=GC_Values[GC_USER_MAX_RESULTS])
      for member in all_users:
        # Suspended accounts are excluded from "all users".
        if member[u'suspended'] == False:
          if return_uids:
            users.append(member[u'id'])
          else:
            users.append(member[u'primaryEmail'])
      if not silent:
        sys.stderr.write(u"done getting %s users.\r\n" % len(users))
    elif entity.lower() == u'cros':
      if not silent:
        sys.stderr.write(u"Getting all CrOS devices in Google Apps account (may take some time on a large account)...\n")
      all_cros = callGAPIpages(cd.chromeosdevices(), u'list', u'chromeosdevices',
                               customerId=GC_Values[GC_CUSTOMER_ID], fields=u'nextPageToken,chromeosdevices(deviceId)',
                               maxResults=GC_Values[GC_DEVICE_MAX_RESULTS])
      for member in all_cros:
        users.append(member[u'deviceId'])
      if not silent:
        sys.stderr.write(u"done getting %s CrOS devices.\r\n" % len(users))
    else:
      print u'ERROR: %s is not a valid argument for "gam all"' % entity
      sys.exit(3)
  elif entity_type == u'cros':
    # Literal list of CrOS device ids; entity is rewritten so the
    # normalization step below is skipped.
    users = entity.replace(u',', u' ').split()
    entity = u'cros'
  else:
    print u'ERROR: %s is not a valid argument for "gam"' % entity_type
    sys.exit(2)
  full_users = list()
  if entity != u'cros' and not got_uids:
    # Normalize bare usernames to full addresses; strip explicit uid: prefixes.
    for user in users:
      if user[:4] == u'uid:':
        full_users.append(user[4:])
      elif user.find(u'@') == -1:
        full_users.append(u'%s@%s' % (user, GC_Values[GC_DOMAIN]))
      else:
        full_users.append(user)
  else:
    full_users = users
  if return_uids and not got_uids:
    # Caller wants ids but this branch only collected addresses: look each up.
    new_full_users = list()
    for user in full_users:
      user_result = callGAPI(cd.users(), u'get', userKey=user, fields=u'id')
      new_full_users.append(user_result[u'id'])
    full_users = new_full_users
  return full_users
def OAuthInfo():
  """Report, per API, which OAuth scopes are requested and whether the
  service account can actually obtain a token for each one.

  Attempts a token refresh per requested scope; prints Authorized/DENIED
  (or the token error) for each, and the API access config message when
  any refresh failed.
  """
  configRequired = False
  print u'API Access, Admin: {0}'.format(GM_Globals[GM_ADMIN])
  i = 0
  for api in sorted(API_VER_MAPPING.keys()):
    i += 1
    _, http, service = getAPIversionHttpService(api)
    api_scopes = service._rootDesc[u'auth'][u'oauth2'][u'scopes']
    # Only scopes both offered by this API and selected in the scopes file.
    requested_scopes = list(set(api_scopes.keys()).intersection(GM_Globals[GM_GAMSCOPES_LIST]))
    print u' %2d) %s (%d/%d scopes)' % (i, service._rootDesc[u'title'].replace(u'Google ', u''), len(requested_scopes), len(api_scopes))
    if requested_scopes:
      for scope in requested_scopes:
        credentials = oauth2client.client.SignedJwtAssertionCredentials(GM_Globals[GM_OAUTH2SERVICE_ACCOUNT_EMAIL],
                                                                        GM_Globals[GM_OAUTH2SERVICE_KEY],
                                                                        scope=scope, user_agent=GAM_INFO, sub=GM_Globals[GM_ADMIN])
        try:
          # A successful refresh proves the scope is authorized for the admin.
          service._http = credentials.refresh(http)
          status = u'Authorized'
        except oauth2client.client.AccessTokenRefreshError, e:
          configRequired = True
          if e.message in OAUTH_TOKEN_ERRORS:
            status = u'DENIED'
          else:
            status = u'{0}Authentication Token Error - {1}'.format(ERROR_PREFIX, e)
        print u' {0}\n {1}\n Access: {2}'.format(api_scopes[scope][u'description'], scope, status)
    else:
      print u' Access: Not requested'
  if configRequired:
    print MESSAGE_API_ACCESS_CONFIG
def doDeleteOAuth():
  """Delete the GAM scopes file after a short visible countdown."""
  sys.stdout.write(u'Scopes file: {0}, will be Deleted in 3...'.format(GC_Values[GC_GAMSCOPES_JSON]))
  sys.stdout.flush()
  for tick in [u'2...', u'1...', u'boom!\n']:
    time.sleep(1)
    sys.stdout.write(tick)
    sys.stdout.flush()
  try:
    os.remove(GC_Values[GC_GAMSCOPES_JSON])
    sys.stdout.write(u'Scopes file: {0}, Deleted\n'.format(GC_Values[GC_GAMSCOPES_JSON]))
  except OSError as e:
    # Missing file / permission problems are reported as a warning, not fatal.
    sys.stderr.write(u'{0}{1}\n'.format(WARNING_PREFIX, e))
# Matches <name>@<domain.tld>; group(1) is the local part, group(2) the domain.
EMAIL_PATTERN = re.compile(r'^(.+)@(.+\..+)$')
# Human-readable format hint shown when an admin address fails EMAIL_PATTERN.
EMAIL_FORMAT_REQUIRED = u'<Name>@<Domain>'
# APIs where a single umbrella scope is requested instead of every sub-scope.
UBER_SCOPES = {u'gmail-v1': [u'https://mail.google.com/'],}
def doRequestOAuth():
  """Interactively select the APIs/scopes GAM's service account should use
  and write them (with admin and domain) to the GAM scopes JSON file.

  Presents a two-level text menu: an API list, then per-API scope lists.
  Mutates GM_Globals[GM_ADMIN]/[GM_GAMSCOPES_LIST] and GC_Values[GC_DOMAIN].
  """
  def _setAdminDomain(value):
    # Accept name@domain; derive GC_DOMAIN from the address when unset.
    ema = EMAIL_PATTERN.match(value)
    if ema:
      GM_Globals[GM_ADMIN] = value
      if not GC_Values[GC_DOMAIN]:
        GC_Values[GC_DOMAIN] = ema.group(2)
      return True
    print u'{0}Admin email address must be: {1}'.format(ERROR_PREFIX, EMAIL_FORMAT_REQUIRED)
    return False
  def _getAdminDomain():
    # Prompt until a syntactically valid admin address is entered.
    print u''
    while True:
      value = raw_input(u'Enter Admin email address: ').strip().lower()
      if _setAdminDomain(value):
        return
  def _select_default_scopes(apis):
    # Populate api[u'use_scopes'] with a minimal default scope set per API.
    for api_name, api in apis.items():
      if api_name in UBER_SCOPES:
        api[u'use_scopes'] = UBER_SCOPES[api_name]
      else:
        scopes = sorted(api[u'auth'][u'oauth2'][u'scopes'].keys())
        api[u'use_scopes'] = []
        # reduce # of scopes by checking if a scope is a substring of another
        # which should mean it covers same API operations. Add a . at end
        # to prevent things like directory.users removing directory.userschema
        i = 0
        count = len(scopes)
        while i < count:
          scope = scopes[i]
          api[u'use_scopes'].append(scope)
          i += 1
          scope += u'.'
          while (i < count) and scopes[i].startswith(scope):
            # .security sub-scopes are not implied by the parent; keep them.
            if scopes[i].endswith(u'.security'):
              api[u'use_scopes'].append(scopes[i])
            i += 1
  def _getSelection(limit):
    # Prompt until an integer in [0, limit] is entered.
    while True:
      selection = raw_input(u'Your selection: ')
      if selection:
        if selection.isdigit():
          selection = int(selection)
          if (selection >= 0) and (selection <= limit):
            return selection
          print u'ERROR: enter number in range 0-{0}'.format(limit)
        else:
          print u'ERROR: please enter numbers only'
  # Admin address: from argv[3] when given, otherwise prompt if unset.
  if len(sys.argv) > 3:
    GM_Globals[GM_ADMIN] = u''
    if not _setAdminDomain(sys.argv[3].lower()):
      _getAdminDomain()
  elif not GM_Globals[GM_ADMIN]:
    _getAdminDomain()
  all_apis = {}
  api_titles = {}
  for api in API_VER_MAPPING.keys():
    api_version, _, service = getAPIversionHttpService(api)
    all_apis[api_version] = service._rootDesc
    all_apis[api_version][u'title'] = all_apis[api_version][u'title'].replace(u'Google ', u'')
    api_titles[all_apis[api_version][u'title']] = api_version
  # Menu entries ordered alphabetically by API title.
  api_index = []
  for _, api_version in sorted(api_titles.items()):
    api_index.append(api_version)
  i = len(api_index)
  if GM_Globals[GM_GAMSCOPES_LIST]:
    # Preselect whatever the existing scopes file already requests.
    for api in all_apis:
      all_apis[api][u'use_scopes'] = list(set(all_apis[api][u'auth'][u'oauth2'][u'scopes'].keys()).intersection(GM_Globals[GM_GAMSCOPES_LIST]))
  else:
    _select_default_scopes(all_apis)
  if not GM_Globals[GM_ADMIN]:
    _getAdminDomain()
  # Top-level menu loop: one entry per API plus defaults/unselect/cancel/continue.
  while True:
    #os.system([u'clear', u'cls'][GM_Globals[GM_WINDOWS]])
    print u'Select the APIs to use with GAM.'
    print
    for n in range(i):
      api = all_apis[api_index[n]]
      api_scopes = api[u'auth'][u'oauth2'][u'scopes'].keys()
      num_scopes_selected = len(api[u'use_scopes'])
      num_scopes_total = len(api_scopes)
      if num_scopes_selected > 0:
        select_value = u'*'
      else:
        select_value = u' '
      print u'[%s] %2d) %s (%d/%d scopes)' % (select_value, n, api[u'title'], num_scopes_selected, num_scopes_total)
    print
    print u' %2d) Select defaults for all APIs (allow all GAM commands)' % (i)
    print u' %2d) Unselect all APIs' % (i+1)
    print u' %2d) Cancel' % (i+2)
    print u' %2d) Continue' % (i+3)
    print
    selection = _getSelection(i+3)
    if selection == i: # defaults
      _select_default_scopes(all_apis)
    elif selection == i+1: # unselect all
      for api in all_apis.keys():
        all_apis[api][u'use_scopes'] = []
    elif selection == i+3: # continue
      # Flatten the per-API selections, dedupe, and persist the scopes file.
      GM_Globals[GM_GAMSCOPES_LIST] = []
      for api in all_apis.keys():
        GM_Globals[GM_GAMSCOPES_LIST] += all_apis[api][u'use_scopes']
      GM_Globals[GM_GAMSCOPES_LIST] = list(set(GM_Globals[GM_GAMSCOPES_LIST]))
      if not GM_Globals[GM_GAMSCOPES_LIST]:
        print u'YOU MUST SELECT AT LEAST ONE SCOPE'
        continue
      writeFile(GC_Values[GC_GAMSCOPES_JSON], json.dumps({u'scopes': GM_Globals[GM_GAMSCOPES_LIST],
                                                          u'admin': GM_Globals[GM_ADMIN],
                                                          u'domain': GC_Values[GC_DOMAIN]}))
      print u'Scopes file: {0}, Created'.format(GC_Values[GC_GAMSCOPES_JSON])
      print MESSAGE_PLEASE_AUTHORIZE_SERVICE_ACCOUNT.format(len(GM_Globals[GM_GAMSCOPES_LIST]), u','.join(GM_Globals[GM_GAMSCOPES_LIST]))
      return
    elif selection == i+2: # cancel
      return
    else: # select
      api = api_index[selection]
      api_scopes = sorted(all_apis[api][u'auth'][u'oauth2'][u'scopes'].keys())
      if len(api_scopes) == 1:
        # Single-scope API: the menu entry itself toggles the scope.
        if len(all_apis[api][u'use_scopes']) == 1:
          all_apis[api][u'use_scopes'] = []
        else:
          all_apis[api][u'use_scopes'] = api_scopes
      else:
        # Per-API scope menu: toggle individual scopes or apply bulk actions.
        while True:
          #os.system([u'clear', u'cls'][GM_Globals[GM_WINDOWS]])
          print
          x = 0
          for scope in api_scopes:
            if scope in all_apis[api][u'use_scopes']:
              select_value = u'*'
            else:
              select_value = u' '
            print u'[%s] %2d) %s\n %s' % (select_value, x, all_apis[api][u'auth'][u'oauth2'][u'scopes'][scope][u'description'], scope)
            x += 1
          print
          print u' %2d) Select defaults for this API (allow all GAM commands)' % (x)
          print u' %2d) Select read-only scopes' % (x+1)
          print u' %2d) Unselect all scopes' % (x+2)
          print u' %2d) Cancel' % (x+3)
          print u' %2d) Back to all APIs' % (x+4)
          print
          selection = _getSelection(x+4)
          if selection < x: # select
            if api_scopes[selection] in all_apis[api][u'use_scopes']:
              all_apis[api][u'use_scopes'].remove(api_scopes[selection])
            else:
              all_apis[api][u'use_scopes'].append(api_scopes[selection])
          elif selection == x: # defaults
            just_this_api = {api: all_apis[api]}
            _select_default_scopes(just_this_api)
            all_apis[api][u'use_scopes'] = just_this_api[api][u'use_scopes']
          elif selection == x+1: # read-only
            all_apis[api][u'use_scopes'] = []
            for scope in api_scopes:
              if scope.endswith(u'.readonly'):
                all_apis[api][u'use_scopes'].append(scope)
          elif selection == x+2: # unselect all
            all_apis[api][u'use_scopes'] = []
          elif selection == x+4: # back
            break
          else: # cancel
            return
def batch_worker():
  """Daemon worker loop: run queued gam command lines as subprocesses until the process exits."""
  while True:
    job = GM_Globals[GM_BATCH_QUEUE].get()
    subprocess.call(job, stderr=subprocess.STDOUT)
    GM_Globals[GM_BATCH_QUEUE].task_done()
def run_batch(items):
import Queue, threading
total_items = len(items)
current_item = 0
python_cmd = [sys.executable.lower(),]
if not getattr(sys, u'frozen', False): # we're not frozen
python_cmd.append(os.path.realpath(sys.argv[0]))
num_worker_threads = min(total_items, GC_Values[GC_NUM_THREADS])
GM_Globals[GM_BATCH_QUEUE] = Queue.Queue(maxsize=num_worker_threads) # GM_Globals[GM_BATCH_QUEUE].put() gets blocked when trying to create more items than there are workers
print u'starting %s worker threads...' % num_worker_threads
for i in range(num_worker_threads):
t = threading.Thread(target=batch_worker)
t.daemon = True
t.start()
for item in items:
current_item += 1
if not current_item % 100:
print u'starting job %s / %s' % (current_item, total_items)
if item[0] == u'commit-batch':
sys.stderr.write(u'commit-batch - waiting for running processes to finish before proceeding...')
GM_Globals[GM_BATCH_QUEUE].join()
sys.stderr.write(u'done with commit-batch\n')
continue
GM_Globals[GM_BATCH_QUEUE].put(python_cmd+item)
GM_Globals[GM_BATCH_QUEUE].join()
#
# Process command line arguments, find substitutions
# An argument containing instances of ~~xxx~~ has xxx replaced by the value of field xxx from the CSV file
# An argument containing exactly ~xxx is replaced by the value of field xxx from the CSV file
# Otherwise, the argument is preserved as is
#
# SubFields is a dictionary; the key is the argument number, the value is a list of tuples that mark
# the substitution (fieldname, start, end).
# Example: update user '~User' address type work unstructured '~~Street~~, ~~City~~, ~~State~~ ~~ZIP~~' primary
# {2: [('User', 0, 5)], 7: [('Street', 0, 10), ('City', 12, 20), ('State', 22, 31), ('ZIP', 32, 39)]}
#
def getSubFields(i, fieldNames):
  """Scan sys.argv[i:] for CSV substitution markers.

  Returns (GAM_argv, subFields): GAM_argv is the argument list starting at i,
  and subFields maps argument index -> [(fieldName, start, end), ...] spans
  to be replaced by CSV row values (see processSubFields). Exits via
  csvFieldErrorExit on a marker naming an unknown CSV column.
  """
  marker_re = re.compile(r'~~(.+?)~~')
  sub_fields = {}
  gam_argv = []
  arg_index = 0
  for myarg in sys.argv[i:]:
    spans = list(marker_re.finditer(myarg))
    if spans:
      # ~~field~~ markers embedded anywhere in the argument.
      for span in spans:
        fieldName = span.group(1)
        if fieldName not in fieldNames:
          csvFieldErrorExit(fieldName, fieldNames)
        sub_fields.setdefault(arg_index, [])
        sub_fields[arg_index].append((fieldName, span.start(), span.end()))
      gam_argv.append(myarg)
    elif myarg[0] == u'~':
      # A whole-argument ~field marker.
      fieldName = myarg[1:]
      if fieldName not in fieldNames:
        csvFieldErrorExit(fieldName, fieldNames)
      sub_fields[arg_index] = [(fieldName, 0, len(myarg))]
      gam_argv.append(myarg)
    else:
      gam_argv.append(myarg)
    arg_index += 1
  return (gam_argv, sub_fields)
#
def processSubFields(GAM_argv, row, subFields):
  """Return a copy of GAM_argv with each marked span replaced by CSV row values.

  subFields maps argument index -> [(fieldName, start, end), ...] as produced
  by getSubFields; spans whose row value is empty are replaced with nothing.
  GAM_argv and row are not modified.
  """
  argv = GAM_argv[:]
  # .items() (not the Python-2-only .iteritems()) keeps this working on Python 3.
  for GAM_argvI, fields in subFields.items():
    oargv = argv[GAM_argvI]
    pieces = []
    pos = 0
    for fieldName, start, end in fields:
      # Keep the literal text between the previous span and this one.
      pieces.append(oargv[pos:start])
      if row[fieldName]:
        pieces.append(row[fieldName])
      pos = end
    pieces.append(oargv[pos:])
    argv[GAM_argvI] = u''.join(pieces)
  return argv
# Main
reload(sys)
sys.setdefaultencoding(u'UTF-8')
try:
if GM_Globals[GM_WINDOWS]:
sys.argv = win32_unicode_argv() # cleanup sys.argv on Windows
SetGlobalVariables()
if sys.argv[1].lower() == u'batch':
import shlex
filename = sys.argv[2]
if (filename == u'-') and (GC_Values[GC_DEBUG_LEVEL] > 0):
systemErrorExit(2, MESSAGE_BATCH_CSV_DASH_DEBUG_INCOMPATIBLE.format(u'batch'))
f = openFile(filename)
items = list()
for line in f:
argv = shlex.split(line)
if not argv:
continue
if (argv[0] in [u'#', u' ', u''] or len(argv) < 2) and argv != [u'commit-batch']:
continue
elif argv[0] not in [u'gam', u'commit-batch']:
print u'ERROR: "%s" is not a valid gam command' % line
continue
if argv[0] == u'gam':
argv = argv[1:]
items.append(argv)
closeFile(f)
run_batch(items)
sys.exit(0)
elif sys.argv[1].lower() == u'csv':
filename = sys.argv[2]
if (filename == u'-') and (GC_Values[GC_DEBUG_LEVEL] > 0):
systemErrorExit(2, MESSAGE_BATCH_CSV_DASH_DEBUG_INCOMPATIBLE.format(u'csv'))
f = openFile(filename)
input_file = csv.DictReader(f)
if sys.argv[3].lower() != u'gam':
print u'ERROR: "gam csv <filename>" should be followed by a full GAM command...'
sys.exit(3)
GAM_argv, subFields = getSubFields(4, input_file.fieldnames)
items = list()
for row in input_file:
items.append(processSubFields(GAM_argv, row, subFields))
closeFile(f)
run_batch(items)
sys.exit(0)
elif sys.argv[1].lower() == u'version':
doGAMVersion()
sys.exit(0)
elif sys.argv[1].lower() == u'create':
if sys.argv[2].lower() == u'user':
doCreateUser()
elif sys.argv[2].lower() == u'group':
doCreateGroup()
elif sys.argv[2].lower() in [u'nickname', u'alias']:
doCreateAlias()
elif sys.argv[2].lower() in [u'org', u'ou']:
doCreateOrg()
elif sys.argv[2].lower() == u'resource':
doCreateResourceCalendar()
elif sys.argv[2].lower() in [u'verify', u'verification']:
doSiteVerifyShow()
elif sys.argv[2].lower() in [u'schema']:
doCreateOrUpdateUserSchema()
elif sys.argv[2].lower() in [u'course', u'class']:
doCreateCourse()
elif sys.argv[2].lower() in [u'transfer', u'datatransfer']:
doCreateDataTranfer()
elif sys.argv[2].lower() in [u'domain',]:
doCreateDomain()
elif sys.argv[2].lower() in [u'domainalias', u'aliasdomain']:
doCreateDomainAlias()
elif sys.argv[2].lower() in [u'admin']:
doCreateAdmin()
else:
print u'ERROR: %s is not a valid argument for "gam create"' % sys.argv[2]
sys.exit(2)
sys.exit(0)
elif sys.argv[1].lower() == u'update':
if sys.argv[2].lower() == u'user':
doUpdateUser([sys.argv[3],], 4)
elif sys.argv[2].lower() == u'group':
doUpdateGroup()
elif sys.argv[2].lower() in [u'nickname', u'alias']:
doUpdateAlias()
elif sys.argv[2].lower() in [u'ou', u'org']:
doUpdateOrg()
elif sys.argv[2].lower() == u'resource':
doUpdateResourceCalendar()
elif sys.argv[2].lower() == u'instance':
doUpdateInstance()
elif sys.argv[2].lower() == u'cros':
doUpdateCros()
elif sys.argv[2].lower() == u'mobile':
doUpdateMobile()
elif sys.argv[2].lower() in [u'notification', u'notifications']:
doUpdateNotification()
elif sys.argv[2].lower() in [u'verify', u'verification']:
doSiteVerifyAttempt()
elif sys.argv[2].lower() in [u'schema', u'schemas']:
doCreateOrUpdateUserSchema()
elif sys.argv[2].lower() in [u'course', u'class']:
doUpdateCourse()
elif sys.argv[2].lower() in [u'printer', u'print']:
doUpdatePrinter()
elif sys.argv[2].lower() in [u'domain',]:
doUpdateDomain()
elif sys.argv[2].lower() in [u'customer',]:
doUpdateCustomer()
else:
print u'ERROR: %s is not a valid argument for "gam update"' % sys.argv[2]
sys.exit(2)
sys.exit(0)
elif sys.argv[1].lower() == u'info':
if sys.argv[2].lower() == u'user':
doGetUserInfo()
elif sys.argv[2].lower() == u'group':
doGetGroupInfo()
elif sys.argv[2].lower() in [u'nickname', u'alias']:
doGetAliasInfo()
elif sys.argv[2].lower() == u'instance':
doGetInstanceInfo()
elif sys.argv[2].lower() in [u'org', u'ou']:
doGetOrgInfo()
elif sys.argv[2].lower() == u'resource':
doGetResourceCalendarInfo()
elif sys.argv[2].lower() == u'cros':
doGetCrosInfo()
elif sys.argv[2].lower() == u'mobile':
doGetMobileInfo()
elif sys.argv[2].lower() in [u'notifications', u'notification']:
doGetNotifications()
elif sys.argv[2].lower() in [u'verify', u'verification']:
doGetSiteVerifications()
elif sys.argv[2].lower() in [u'schema', u'schemas']:
doGetUserSchema()
elif sys.argv[2].lower() in [u'course', u'class']:
doGetCourseInfo()
elif sys.argv[2].lower() in [u'printer', u'print']:
doGetPrinterInfo()
elif sys.argv[2].lower() in [u'transfer', u'datatransfer']:
doGetDataTransferInfo()
elif sys.argv[2].lower() in [u'customer',]:
doGetCustomerInfo()
elif sys.argv[2].lower() in [u'domain',]:
doGetDomainInfo()
elif sys.argv[2].lower() in [u'domainalias', u'aliasdomain']:
doGetDomainAliasInfo()
else:
print u'ERROR: %s is not a valid argument for "gam info"' % sys.argv[2]
sys.exit(2)
sys.exit(0)
elif sys.argv[1].lower() == u'delete':
if sys.argv[2].lower() == u'user':
doDeleteUser()
elif sys.argv[2].lower() == u'group':
doDeleteGroup()
elif sys.argv[2].lower() in [u'nickname', u'alias']:
doDeleteAlias()
elif sys.argv[2].lower() == u'org':
doDeleteOrg()
elif sys.argv[2].lower() == u'resource':
doDeleteResourceCalendar()
elif sys.argv[2].lower() == u'mobile':
doDeleteMobile()
elif sys.argv[2].lower() in [u'notification', u'notifications']:
doDeleteNotification()
elif sys.argv[2].lower() in [u'schema', u'schemas']:
doDelSchema()
elif sys.argv[2].lower() in [u'course', u'class']:
doDelCourse()
elif sys.argv[2].lower() in [u'printer', u'printers']:
doDelPrinter()
elif sys.argv[2].lower() in [u'domain',]:
doDelDomain()
elif sys.argv[2].lower() in [u'domainalias',]:
doDelDomainAlias()
elif sys.argv[2].lower() in [u'admin',]:
doDelAdmin()
else:
print u'ERROR: %s is not a valid argument for "gam delete"' % sys.argv[2]
sys.exit(2)
sys.exit(0)
elif sys.argv[1].lower() == u'undelete':
if sys.argv[2].lower() == u'user':
doUndeleteUser()
else:
print u'ERROR: %s is not a valid argument for "gam undelete"' % sys.argv[2]
sys.exit(2)
sys.exit(0)
elif sys.argv[1].lower() == u'audit':
if sys.argv[2].lower() == u'monitor':
if sys.argv[3].lower() == u'create':
doCreateMonitor()
elif sys.argv[3].lower() == u'list':
doShowMonitors()
elif sys.argv[3].lower() == u'delete':
doDeleteMonitor()
else:
print u'ERROR: %s is not a valid argument for "gam audit monitor"' % sys.argv[3]
sys.exit(2)
elif sys.argv[2].lower() == u'activity':
if sys.argv[3].lower() == u'request':
doRequestActivity()
elif sys.argv[3].lower() == u'status':
doStatusActivityRequests()
elif sys.argv[3].lower() == u'download':
doDownloadActivityRequest()
elif sys.argv[3].lower() == u'delete':
doDeleteActivityRequest()
else:
print u'ERROR: %s is not a valid argument for "gam audit activity"' % sys.argv[3]
sys.exit(2)
elif sys.argv[2].lower() == u'export':
if sys.argv[3].lower() == u'status':
doStatusExportRequests()
elif sys.argv[3].lower() == u'watch':
doWatchExportRequest()
elif sys.argv[3].lower() == u'download':
doDownloadExportRequest()
elif sys.argv[3].lower() == u'request':
doRequestExport()
elif sys.argv[3].lower() == u'delete':
doDeleteExport()
else:
print u'ERROR: %s is not a valid argument for "gam audit export"' % sys.argv[3]
sys.exit(2)
elif sys.argv[2].lower() == u'uploadkey':
doUploadAuditKey()
else:
print u'ERROR: %s is not a valid argument for "gam audit"' % sys.argv[2]
sys.exit(2)
sys.exit(0)
elif sys.argv[1].lower() == u'print':
if sys.argv[2].lower() == u'users':
doPrintUsers()
elif sys.argv[2].lower() == u'nicknames' or sys.argv[2].lower() == u'aliases':
doPrintAliases()
elif sys.argv[2].lower() == u'groups':
doPrintGroups()
elif sys.argv[2].lower() in [u'group-members', u'groups-members']:
doPrintGroupMembers()
elif sys.argv[2].lower() in [u'orgs', u'ous']:
doPrintOrgs()
elif sys.argv[2].lower() == u'resources':
doPrintResourceCalendars()
elif sys.argv[2].lower() == u'cros':
doPrintCrosDevices()
elif sys.argv[2].lower() == u'mobile':
doPrintMobileDevices()
elif sys.argv[2].lower() in [u'license', u'licenses', u'licence', u'licences']:
doPrintLicenses()
elif sys.argv[2].lower() in [u'token', u'tokens']:
doPrintTokens()
elif sys.argv[2].lower() in [u'schema', u'schemas']:
doPrintUserSchemas()
elif sys.argv[2].lower() in [u'courses', u'classes']:
doPrintCourses()
elif sys.argv[2].lower() in [u'course-participants', u'class-participants']:
doPrintCourseParticipants()
elif sys.argv[2].lower() in [u'printers']:
doPrintPrinters()
elif sys.argv[2].lower() in [u'printjobs']:
doPrintPrintJobs()
elif sys.argv[2].lower() in [u'transfers', u'datatransfers']:
doPrintDataTransfers()
elif sys.argv[2].lower() in [u'transferapps']:
doPrintTransferApps()
elif sys.argv[2].lower() in [u'domains']:
doPrintDomains()
elif sys.argv[2].lower() in [u'admins']:
doPrintAdmins()
elif sys.argv[2].lower() in [u'roles', u'adminroles']:
doPrintAdminRoles()
else:
print u'ERROR: %s is not a valid argument for "gam print"' % sys.argv[2]
sys.exit(2)
sys.exit(0)
elif sys.argv[1].lower() in [u'oauth', u'oauth2']:
if sys.argv[2].lower() in [u'request', u'create']:
doRequestOAuth()
elif sys.argv[2].lower() in [u'info', u'verify']:
OAuthInfo()
elif sys.argv[2].lower() in [u'delete', u'revoke']:
doDeleteOAuth()
else:
print u'ERROR: %s is not a valid argument for "gam oauth"' % sys.argv[2]
sys.exit(2)
sys.exit(0)
elif sys.argv[1].lower() == u'calendar':
if sys.argv[3].lower() == u'showacl':
doCalendarShowACL()
elif sys.argv[3].lower() == u'add':
doCalendarAddACL()
elif sys.argv[3].lower() in [u'del', u'delete']:
doCalendarDelACL()
elif sys.argv[3].lower() == u'update':
doCalendarUpdateACL()
elif sys.argv[3].lower() == u'wipe':
doCalendarWipeData()
elif sys.argv[3].lower() == u'addevent':
doCalendarAddEvent()
else:
print u'ERROR: %s is not a valid argument for "gam calendar"' % sys.argv[3]
sys.exit(2)
sys.exit(0)
elif sys.argv[1].lower() == u'printer':
if sys.argv[3].lower() == u'showacl':
doPrinterShowACL()
elif sys.argv[3].lower() == u'add':
doPrinterAddACL()
elif sys.argv[3].lower() in [u'del', u'delete', u'remove']:
doPrinterDelACL()
elif sys.argv[3].lower() == u'register':
doPrinterRegister()
else:
print u'ERROR: invalid argument for "gam printer..."'
sys.exit(2)
sys.exit(0)
elif sys.argv[1].lower() == u'printjob':
if sys.argv[3].lower() == u'delete':
doDeletePrintJob()
elif sys.argv[3].lower() == u'cancel':
doCancelPrintJob()
elif sys.argv[3].lower() == u'submit':
doPrintJobSubmit()
elif sys.argv[3].lower() == u'fetch':
doPrintJobFetch()
elif sys.argv[3].lower() == u'resubmit':
doPrintJobResubmit()
else:
print u'ERROR: %s is not a valid argument for "gam printjob"' % sys.argv[3]
sys.exit(2)
sys.exit(0)
elif sys.argv[1].lower() == u'report':
showReport()
sys.exit(0)
elif sys.argv[1].lower() == u'whatis':
doWhatIs()
sys.exit(0)
elif sys.argv[1].lower() in [u'course', u'class']:
if sys.argv[3].lower() in [u'add', u'create']:
doAddCourseParticipant()
sys.exit(0)
elif sys.argv[3].lower() in [u'del', u'delete', u'remove']:
doDelCourseParticipant()
sys.exit(0)
elif sys.argv[3].lower() == u'sync':
doSyncCourseParticipants()
sys.exit(0)
else:
print u'ERROR: %s is not a valid argument for "gam course"' % sys.argv[3]
sys.exit(2)
users = getUsersToModify()
command = sys.argv[3].lower()
if command == u'print':
for user in users:
print user
sys.exit(0)
try:
if (GC_Values[GC_AUTO_BATCH_MIN] > 0) and (len(users) > GC_Values[GC_AUTO_BATCH_MIN]):
items = []
for user in users:
items.append([u'user', user] + sys.argv[3:])
run_batch(items)
sys.exit(0)
except TypeError:
pass
if command == u'transfer':
transferWhat = sys.argv[4].lower()
if transferWhat == u'drive':
transferDriveFiles(users)
elif transferWhat == u'seccals':
transferSecCals(users)
else:
print u'ERROR: %s is not a valid argument for "gam <users> transfer"' % sys.argv[4]
sys.exit(2)
elif command == u'show':
readWhat = sys.argv[4].lower()
if readWhat in [u'labels', u'label']:
showLabels(users)
elif readWhat == u'profile':
showProfile(users)
elif readWhat == u'calendars':
showCalendars(users)
elif readWhat == u'calsettings':
showCalSettings(users)
elif readWhat == u'drivesettings':
showDriveSettings(users)
elif readWhat == u'drivefileacl':
showDriveFileACL(users)
elif readWhat == u'filelist':
showDriveFiles(users)
elif readWhat == u'filetree':
showDriveFileTree(users)
elif readWhat == u'fileinfo':
showDriveFileInfo(users)
elif readWhat == u'sendas':
showSendAs(users)
elif readWhat == u'gmailprofile':
showGmailProfile(users)
elif readWhat in [u'sig', u'signature']:
getSignature(users)
elif readWhat == u'forward':
getForward(users)
elif readWhat in [u'pop', u'pop3']:
getPop(users)
elif readWhat in [u'imap', u'imap4']:
getImap(users)
elif readWhat == u'vacation':
getVacation(users)
elif readWhat in [u'delegate', u'delegates']:
getDelegates(users)
elif readWhat in [u'backupcode', u'backupcodes', u'verificationcodes']:
doGetBackupCodes(users)
elif readWhat in [u'asp', u'asps', u'applicationspecificpasswords']:
doGetASPs(users)
elif readWhat in [u'token', u'tokens', u'oauth', u'3lo']:
doGetTokens(users)
elif readWhat in [u'driveactivity']:
doDriveActivity(users)
else:
print u'ERROR: %s is not a valid argument for "gam <users> show"' % sys.argv[4]
sys.exit(2)
elif command == u'trash':
if sys.argv[4].lower() in [u'message', u'messages']:
doDeleteMessages(trashOrDelete=u'trash', users=users)
else:
print u'ERROR: %s is not a valid argument for "gam <users> trash"' % sys.argv[4]
sys.exit(2)
elif command == u'delete' or command == u'del':
delWhat = sys.argv[4].lower()
if delWhat == u'delegate':
deleteDelegate(users)
elif delWhat == u'calendar':
deleteCalendar(users)
elif delWhat == u'label':
doDeleteLabel(users)
elif delWhat in [u'message', u'messages']:
doDeleteMessages(trashOrDelete=u'delete', users=users)
elif delWhat == u'photo':
deletePhoto(users)
elif delWhat in [u'license', u'licence']:
doLicense(users, u'delete')
elif delWhat in [u'backupcode', u'backupcodes', u'verificationcodes']:
doDelBackupCodes(users)
elif delWhat in [u'asp', u'asps', u'applicationspecificpasswords']:
doDelASP(users)
elif delWhat in [u'token', u'tokens', u'oauth', u'3lo']:
doDelTokens(users)
elif delWhat in [u'group', u'groups']:
doRemoveUsersGroups(users)
elif delWhat in [u'alias', u'aliases']:
doRemoveUsersAliases(users)
elif delWhat in [u'emptydrivefolders']:
deleteEmptyDriveFolders(users)
elif delWhat in [u'drivefile']:
deleteDriveFile(users)
elif delWhat in [u'drivefileacl', u'drivefileacls']:
delDriveFileACL(users)
else:
print u'ERROR: %s is not a valid argument for "gam <users> delete"' % sys.argv[4]
sys.exit(2)
elif command == u'add':
addWhat = sys.argv[4].lower()
if addWhat == u'calendar':
addCalendar(users)
elif addWhat == u'drivefile':
createDriveFile(users)
elif addWhat in [u'license', u'licence']:
doLicense(users, u'insert')
elif addWhat in [u'drivefileacl', u'drivefileacls']:
addDriveFileACL(users)
elif addWhat in [u'label', u'labels']:
doLabel(users, 5)
else:
print u'ERROR: %s is not a valid argument for "gam <users> add"' % sys.argv[4]
sys.exit(2)
elif command == u'update':
if sys.argv[4].lower() == u'calendar':
updateCalendar(users)
elif sys.argv[4].lower() == u'calattendees':
changeCalendarAttendees(users)
elif sys.argv[4].lower() == u'photo':
doPhoto(users)
elif sys.argv[4].lower() in [u'license', u'licence']:
doLicense(users, u'patch')
elif sys.argv[4].lower() == u'user':
doUpdateUser(users, 5)
elif sys.argv[4].lower() in [u'backupcode', u'backupcodes', u'verificationcodes']:
doGenBackupCodes(users)
elif sys.argv[4].lower() in [u'drivefile']:
doUpdateDriveFile(users)
elif sys.argv[4].lower() in [u'drivefileacls', u'drivefileacl']:
updateDriveFileACL(users)
elif sys.argv[4].lower() in [u'label', u'labels']:
renameLabels(users)
elif sys.argv[4].lower() in [u'labelsettings']:
updateLabels(users)
else:
print u'ERROR: %s is not a valid argument for "gam <users> update"' % sys.argv[4]
sys.exit(2)
elif command in [u'deprov', u'deprovision']:
doDeprovUser(users)
elif command == u'get':
if sys.argv[4].lower() == u'photo':
getPhoto(users)
elif sys.argv[4].lower() == u'drivefile':
downloadDriveFile(users)
elif command == u'profile':
doProfile(users)
elif command == u'imap':
doImap(users)
elif command in [u'pop', u'pop3']:
doPop(users)
elif command == u'sendas':
doSendAs(users)
elif command == u'language':
doLanguage(users)
elif command in [u'utf', u'utf8', u'utf-8', u'unicode']:
doUTF(users)
elif command == u'pagesize':
doPageSize(users)
elif command == u'shortcuts':
doShortCuts(users)
elif command == u'arrows':
doArrows(users)
elif command == u'snippets':
doSnippets(users)
elif command == u'label':
doLabel(users, 4)
elif command == u'filter':
doFilter(users)
elif command == u'forward':
doForward(users)
elif command in [u'sig', u'signature']:
doSignature(users)
elif command == u'vacation':
doVacation(users)
elif command == u'webclips':
doWebClips(users)
elif command in [u'delegate', u'delegates']:
doDelegates(users)
else:
print u'ERROR: %s is not a valid argument for "gam"' % command
sys.exit(2)
except IndexError:
showUsage()
sys.exit(2)
except KeyboardInterrupt:
sys.exit(50)
except socket.error, e:
sys.stderr.write(u'{0}{1}\n'.format(ERROR_PREFIX, e))
sys.exit(3)
except MemoryError:
sys.stderr.write(u'{0}{1}\n'.format(ERROR_PREFIX, MESSAGE_GAM_OUT_OF_MEMORY))
sys.exit(99)
|
fibbo.py | #!/usr/bin/env python3
# Hardware simulation of fibbo
# Author Malcolm Davis
from queue import Queue
import threading
import sys
# One queue per "wire" in the simulated hardware diagram: each stage blocks
# on its input queue(s) and pushes results onto its output queue(s).
ck = Queue()
ak = Queue()
fk = Queue()
bk = Queue()
dk = Queue()
ek = Queue()
# Global run flag polled by every stage; printMod flips it to stop the rig.
cont = True
# Upper bound for printed Fibonacci values; optional first CLI argument.
# NOTE(review): this name shadows the builtin max().
if(len(sys.argv)>1):
    max = int(sys.argv[1])
else:
    max = 10000
def addMod():
    """Adder stage: consume one value from ck and one from fk, emit the sum on ak."""
    while cont:
        total = ck.get(True) + fk.get(True)
        ak.put(total)
def splitMod():
    """Fan-out stage: copy each value arriving on bk onto ek, ck and dk."""
    while cont:
        item = bk.get(True)
        for out_queue in (ek, ck, dk):
            out_queue.put(item)
def delay1Mod():
    """One-cycle delay register initialised to 1.

    Emits a single 1 on bk, then forever forwards the adder's output (ak).
    """
    primed = False
    while cont:
        if not primed:
            bk.put(1)
            primed = True
        else:
            bk.put(ak.get())
def delay0Mod():
    """One-cycle delay register initialised to 0.

    Emits a single 0 on fk, then forever forwards the splitter's ek output.
    """
    primed = False
    while cont:
        if not primed:
            fk.put(0)
            primed = True
        else:
            fk.put(ek.get())
def printMod():
    """Sink stage: print values from dk until one exceeds max, then stop the rig."""
    global cont
    while cont:
        current = dk.get()
        if current > max:
            # Signal every stage to stop looping, then quit ourselves.
            cont = False
            break
        print(current)
def main():
    """Start the five pipeline stages and wait for the printer to finish.

    The four worker stages loop on blocking ``Queue.get()`` calls and never
    re-check the stop flag once the printer halts, so they are started as
    daemon threads — otherwise they keep the process alive forever after the
    last value is printed.  The printer is joined so the process exits
    cleanly right after it stops.
    """
    for stage in (addMod, splitMod, delay1Mod, delay0Mod):
        threading.Thread(target=stage, daemon=True).start()
    printer = threading.Thread(target=printMod)
    printer.start()
    printer.join()
if __name__ == "__main__":
main() |
scheduler.py | import logging
import os
import signal
import time
import traceback
from datetime import datetime
from multiprocessing import Process
from .job import Job
from .queue import Queue
from .registry import ScheduledJobRegistry
from .utils import current_timestamp, enum
from .logutils import setup_loghandlers
from redis import Redis
# Redis key templates: per-queue scheduler bookkeeping and per-queue locks
# (the lock key is what acquire_locks()/heartbeat() SET and expire).
SCHEDULER_KEY_TEMPLATE = 'rq:scheduler:%s'
SCHEDULER_LOCKING_KEY_TEMPLATE = 'rq:scheduler-lock:%s'

logger = logging.getLogger(__name__)

# Route "rq.scheduler" records through a handler with a compact time format.
setup_loghandlers(
    level=logging.INFO,
    name="rq.scheduler",
    log_format="%(asctime)s: %(message)s",
    date_format="%H:%M:%S"
)
class RQScheduler(object):
    """Moves due jobs from scheduled-job registries onto their queues.

    One scheduler instance may cover several queues.  A per-queue Redis lock
    (see ``get_locking_key``) ensures at most one scheduler process works a
    given queue; the lock TTL is refreshed by ``heartbeat()`` on every loop.
    """

    # STARTED: scheduler has been started but sleeping
    # WORKING: scheduler is in the midst of scheduling jobs
    # STOPPED: scheduler is in stopped condition
    Status = enum(
        'SchedulerStatus',
        STARTED='started',
        WORKING='working',
        STOPPED='stopped'
    )

    def __init__(self, queues, connection, interval=1):
        """
        Args:
            queues: iterable of Queue objects and/or queue name strings.
            connection: Redis client; only its connection kwargs are kept,
                because the client itself can't be pickled across the fork
                performed in ``start()``.
            interval: seconds to sleep between scheduling passes.
        """
        self._queue_names = set(parse_names(queues))
        self._acquired_locks = set()
        self._scheduled_job_registries = []
        self.lock_acquisition_time = None
        # Store only the kwargs so a fresh connection can be built lazily
        # (and rebuilt in the child process after forking).
        self._connection_kwargs = connection.connection_pool.connection_kwargs
        self._connection = None
        self.interval = interval
        self._stop_requested = False
        self._status = self.Status.STOPPED
        self._process = None

    @property
    def connection(self):
        """Lazily-built Redis connection created from the stored kwargs."""
        if not self._connection:
            # parser_class is a connection-pool option, not a Redis() kwarg.
            self._connection_kwargs.pop('parser_class', None)
            self._connection = Redis(**self._connection_kwargs)
        return self._connection

    @property
    def acquired_locks(self):
        """Names of the queues this scheduler currently holds locks for."""
        return self._acquired_locks

    @property
    def status(self):
        """Current SchedulerStatus value."""
        return self._status

    @property
    def should_reacquire_locks(self):
        """Returns True if lock_acquisition_time is longer than 10 minutes ago"""
        if self._queue_names == self.acquired_locks:
            # Already holding every lock we want; nothing to reacquire.
            return False
        if not self.lock_acquisition_time:
            return True
        return (datetime.now() - self.lock_acquisition_time).total_seconds() > 600

    def acquire_locks(self, auto_start=False):
        """Returns names of queue it successfully acquires lock on"""
        successful_locks = set()
        pid = os.getpid()
        logger.info("Trying to acquire locks for %s", ", ".join(self._queue_names))
        for name in self._queue_names:
            # TTL must outlive the gap until the first heartbeat() refresh.
            # A fixed ex=5 would let the lock expire whenever interval > 5;
            # use interval + 5 to match the TTL heartbeat() sets.
            if self.connection.set(self.get_locking_key(name), pid, nx=True,
                                   ex=self.interval + 5):
                successful_locks.add(name)

        # Always reset _scheduled_job_registries when acquiring locks
        self._scheduled_job_registries = []
        self._acquired_locks = self._acquired_locks.union(successful_locks)
        self.lock_acquisition_time = datetime.now()

        # If auto_start is requested and scheduler is not started,
        # run self.start()
        if self._acquired_locks and auto_start:
            if not self._process:
                self.start()

        return successful_locks

    def prepare_registries(self, queue_names=None):
        """Prepare scheduled job registries for use"""
        self._scheduled_job_registries = []
        if not queue_names:
            queue_names = self._acquired_locks
        for name in queue_names:
            self._scheduled_job_registries.append(
                ScheduledJobRegistry(name, connection=self.connection)
            )

    @classmethod
    def get_locking_key(cls, name):
        """Returns scheduler key for a given queue name"""
        return SCHEDULER_LOCKING_KEY_TEMPLATE % name

    def enqueue_scheduled_jobs(self):
        """Enqueue jobs whose timestamp is in the past"""
        self._status = self.Status.WORKING

        # Registries are built lazily after locks are acquired.
        if not self._scheduled_job_registries and self._acquired_locks:
            self.prepare_registries()

        for registry in self._scheduled_job_registries:
            timestamp = current_timestamp()

            # TODO: try to use Lua script to make get_jobs_to_schedule()
            # and remove_jobs() atomic
            job_ids = registry.get_jobs_to_schedule(timestamp)
            if not job_ids:
                continue

            queue = Queue(registry.name, connection=self.connection)

            with self.connection.pipeline() as pipeline:
                # This should be done in bulk
                for job_id in job_ids:
                    job = Job.fetch(job_id, connection=self.connection)
                    queue.enqueue_job(job, pipeline=pipeline)
                registry.remove_jobs(timestamp)
                pipeline.execute()
        self._status = self.Status.STARTED

    def _install_signal_handlers(self):
        """Installs signal handlers for handling SIGINT and SIGTERM
        gracefully.
        """
        signal.signal(signal.SIGINT, self.request_stop)
        signal.signal(signal.SIGTERM, self.request_stop)

    def request_stop(self, signum=None, frame=None):
        """Toggle self._stop_requested that's checked on every loop"""
        self._stop_requested = True

    def heartbeat(self):
        """Updates the TTL on scheduler keys and the locks"""
        logger.debug("Scheduler sending heartbeat to %s", ", ".join(self.acquired_locks))
        if len(self._queue_names) > 1:
            with self.connection.pipeline() as pipeline:
                for name in self._queue_names:
                    key = self.get_locking_key(name)
                    pipeline.expire(key, self.interval + 5)
                pipeline.execute()
        else:
            # Single queue: no pipeline needed for one round trip.
            key = self.get_locking_key(next(iter(self._queue_names)))
            self.connection.expire(key, self.interval + 5)

    def stop(self):
        """Release every queue lock and mark the scheduler stopped."""
        logger.info("Scheduler stopping, releasing locks for %s...",
                    ','.join(self._queue_names))
        keys = [self.get_locking_key(name) for name in self._queue_names]
        self.connection.delete(*keys)
        self._status = self.Status.STOPPED

    def start(self):
        """Fork a child process that runs the scheduling loop; returns it."""
        self._status = self.Status.STARTED
        # Redis instance can't be pickled across processes so we need to
        # clean this up before forking
        self._connection = None
        self._process = Process(target=run, args=(self,), name='Scheduler')
        self._process.start()
        return self._process

    def work(self):
        """Main loop: schedule due jobs, heartbeat the locks, sleep, repeat."""
        self._install_signal_handlers()
        while True:
            if self._stop_requested:
                self.stop()
                break

            if self.should_reacquire_locks:
                self.acquire_locks()

            self.enqueue_scheduled_jobs()
            self.heartbeat()
            time.sleep(self.interval)
def run(scheduler):
    """Drive *scheduler* to completion, logging any crash before re-raising."""
    pid = os.getpid()
    logger.info("Scheduler for %s started with PID %s",
                ','.join(scheduler._queue_names), pid)
    try:
        scheduler.work()
    except BaseException:  # noqa  -- deliberately broad: log then re-raise
        logger.error(
            'Scheduler [PID %s] raised an exception.\n%s',
            pid, traceback.format_exc()
        )
        raise
    logger.info("Scheduler with PID %s has stopped", pid)
def parse_names(queues_or_names):
    """Given a list of strings or queues, returns queue names"""
    return [
        item.name if isinstance(item, Queue) else str(item)
        for item in queues_or_names
    ]
|
livereload_tests.py | #!/usr/bin/env python
import contextlib
import email
import io
import os
import sys
import threading
import time
import unittest
from pathlib import Path
from unittest import mock
from mkdocs.livereload import LiveReloadServer
from mkdocs.tests.base import tempdir
class FakeRequest:
    """In-memory substitute for a client socket.

    The request handler reads the canned request bytes via ``makefile()``
    and everything it writes back is captured in ``out_file``.
    """

    def __init__(self, content):
        self.out_file = io.BytesIO()
        # The handler closes its write side when done; neutralize close()
        # so the test can still inspect the captured response afterwards.
        self.out_file.close = lambda: None
        self.in_file = io.BytesIO(content.encode())

    def makefile(self, *args, **kwargs):
        # Whatever mode is requested, hand back the request bytes.
        return self.in_file

    def sendall(self, data):
        # Accumulate the handler's response.
        self.out_file.write(data)
@contextlib.contextmanager
def testing_server(root, builder=lambda: None, mount_path="/"):
    """Create the server and start most of its parts, but don't listen on a socket.

    Yields the LiveReloadServer.  The shutdown/join cleanup runs in a
    ``finally`` block so the server and its build thread are torn down even
    when the caller's body raises (e.g. a failing assertion) — otherwise
    the daemon build thread and observer would leak across tests.
    """
    with mock.patch("socket.socket"):
        server = LiveReloadServer(
            builder,
            host="localhost",
            port=0,
            root=root,
            mount_path=mount_path,
            build_delay=0.1,
            bind_and_activate=False,
        )
    server.setup_environ()
    server.observer.start()
    thread = threading.Thread(target=server._build_loop, daemon=True)
    thread.start()
    try:
        yield server
    finally:
        server.shutdown()
        thread.join()
def do_request(server, content):
    """Run one fake HTTP request against *server*; return (headers, body text).

    The parsed status line is smuggled into the returned headers under the
    pseudo-header "_status".
    """
    request = FakeRequest(content + " HTTP/1.1")
    server.RequestHandlerClass(request, ("127.0.0.1", 0), server)
    raw = request.out_file.getvalue()

    head, _, body = raw.partition(b"\r\n\r\n")
    status_line, _, header_bytes = head.partition(b"\r\n")

    parsed = email.message_from_bytes(header_bytes)
    parsed["_status"] = status_line.split(None, 1)[1].decode()
    return parsed, body.decode()
SCRIPT_REGEX = (
r'<script src="/js/livereload.js"></script><script>livereload\([0-9]+, [0-9]+\);</script>'
)
class BuildTests(unittest.TestCase):
@tempdir({"test.css": "div { color: red; }"})
def test_serves_normal_file(self, site_dir):
with testing_server(site_dir) as server:
headers, output = do_request(server, "GET /test.css")
self.assertEqual(output, "div { color: red; }")
self.assertEqual(headers["_status"], "200 OK")
self.assertEqual(headers.get("content-length"), str(len(output)))
@tempdir({"docs/foo.docs": "docs1", "mkdocs.yml": "yml1"})
@tempdir({"foo.site": "original"})
def test_basic_rebuild(self, site_dir, origin_dir):
docs_dir = Path(origin_dir, "docs")
started_building = threading.Event()
def rebuild():
started_building.set()
Path(site_dir, "foo.site").write_text(
Path(docs_dir, "foo.docs").read_text() + Path(origin_dir, "mkdocs.yml").read_text()
)
with testing_server(site_dir, rebuild) as server:
server.watch(docs_dir, rebuild)
server.watch(Path(origin_dir, "mkdocs.yml"), rebuild)
time.sleep(0.01)
_, output = do_request(server, "GET /foo.site")
self.assertEqual(output, "original")
Path(docs_dir, "foo.docs").write_text("docs2")
self.assertTrue(started_building.wait(timeout=10))
started_building.clear()
_, output = do_request(server, "GET /foo.site")
self.assertEqual(output, "docs2yml1")
Path(origin_dir, "mkdocs.yml").write_text("yml2")
self.assertTrue(started_building.wait(timeout=10))
started_building.clear()
_, output = do_request(server, "GET /foo.site")
self.assertEqual(output, "docs2yml2")
@tempdir({"foo.docs": "a"})
@tempdir({"foo.site": "original"})
def test_rebuild_after_delete(self, site_dir, docs_dir):
started_building = threading.Event()
def rebuild():
started_building.set()
Path(site_dir, "foo.site").unlink()
with testing_server(site_dir, rebuild) as server:
server.watch(docs_dir, rebuild)
time.sleep(0.01)
Path(docs_dir, "foo.docs").write_text("b")
self.assertTrue(started_building.wait(timeout=10))
with self.assertLogs("mkdocs.livereload"):
_, output = do_request(server, "GET /foo.site")
self.assertIn("404", output)
@tempdir({"aaa": "something"})
def test_rebuild_after_rename(self, site_dir):
started_building = threading.Event()
with testing_server(site_dir, started_building.set) as server:
server.watch(site_dir)
time.sleep(0.01)
Path(site_dir, "aaa").rename(Path(site_dir, "bbb"))
self.assertTrue(started_building.wait(timeout=10))
@tempdir()
def test_no_rebuild_on_edit(self, site_dir):
started_building = threading.Event()
with open(Path(site_dir, "test"), "wb") as f:
time.sleep(0.01)
with testing_server(site_dir, started_building.set) as server:
server.watch(site_dir)
time.sleep(0.01)
f.write(b"hi\n")
f.flush()
self.assertFalse(started_building.wait(timeout=0.2))
@tempdir({"foo.docs": "a"})
@tempdir({"foo.site": "original"})
def test_custom_action_warns(self, site_dir, docs_dir):
started_building = threading.Event()
def rebuild():
started_building.set()
content = Path(docs_dir, "foo.docs").read_text()
Path(site_dir, "foo.site").write_text(content * 5)
with testing_server(site_dir) as server:
with self.assertWarnsRegex(DeprecationWarning, "func") as cm:
server.watch(docs_dir, rebuild)
time.sleep(0.01)
self.assertIn("livereload_tests.py", cm.filename)
Path(docs_dir, "foo.docs").write_text("b")
self.assertTrue(started_building.wait(timeout=10))
_, output = do_request(server, "GET /foo.site")
self.assertEqual(output, "bbbbb")
@tempdir({"foo.docs": "docs1"})
@tempdir({"foo.extra": "extra1"})
@tempdir({"foo.site": "original"})
def test_multiple_dirs_can_cause_rebuild(self, site_dir, extra_dir, docs_dir):
started_building = threading.Barrier(2)
def rebuild():
started_building.wait(timeout=10)
content1 = Path(docs_dir, "foo.docs").read_text()
content2 = Path(extra_dir, "foo.extra").read_text()
Path(site_dir, "foo.site").write_text(content1 + content2)
with testing_server(site_dir, rebuild) as server:
server.watch(docs_dir)
server.watch(extra_dir)
time.sleep(0.01)
Path(docs_dir, "foo.docs").write_text("docs2")
started_building.wait(timeout=10)
_, output = do_request(server, "GET /foo.site")
self.assertEqual(output, "docs2extra1")
Path(extra_dir, "foo.extra").write_text("extra2")
started_building.wait(timeout=10)
_, output = do_request(server, "GET /foo.site")
self.assertEqual(output, "docs2extra2")
@tempdir({"foo.docs": "docs1"})
@tempdir({"foo.extra": "extra1"})
@tempdir({"foo.site": "original"})
def test_multiple_dirs_changes_rebuild_only_once(self, site_dir, extra_dir, docs_dir):
started_building = threading.Event()
def rebuild():
self.assertFalse(started_building.is_set())
started_building.set()
content1 = Path(docs_dir, "foo.docs").read_text()
content2 = Path(extra_dir, "foo.extra").read_text()
Path(site_dir, "foo.site").write_text(content1 + content2)
with testing_server(site_dir, rebuild) as server:
server.watch(docs_dir)
server.watch(extra_dir)
time.sleep(0.01)
_, output = do_request(server, "GET /foo.site")
Path(docs_dir, "foo.docs").write_text("docs2")
Path(extra_dir, "foo.extra").write_text("extra2")
self.assertTrue(started_building.wait(timeout=10))
_, output = do_request(server, "GET /foo.site")
self.assertEqual(output, "docs2extra2")
@tempdir({"foo.docs": "a"})
@tempdir({"foo.site": "original"})
def test_change_is_detected_while_building(self, site_dir, docs_dir):
before_finished_building = threading.Barrier(2)
can_finish_building = threading.Event()
def rebuild():
content = Path(docs_dir, "foo.docs").read_text()
Path(site_dir, "foo.site").write_text(content * 5)
before_finished_building.wait(timeout=10)
self.assertTrue(can_finish_building.wait(timeout=10))
with testing_server(site_dir, rebuild) as server:
server.watch(docs_dir)
time.sleep(0.01)
Path(docs_dir, "foo.docs").write_text("b")
before_finished_building.wait(timeout=10)
Path(docs_dir, "foo.docs").write_text("c")
can_finish_building.set()
_, output = do_request(server, "GET /foo.site")
self.assertEqual(output, "bbbbb")
before_finished_building.wait(timeout=10)
_, output = do_request(server, "GET /foo.site")
self.assertEqual(output, "ccccc")
@tempdir(
{
"normal.html": "<html><body>hello</body></html>",
"no_body.html": "<p>hi",
"empty.html": "",
"multi_body.html": "<body>foo</body><body>bar</body>",
}
)
def test_serves_modified_html(self, site_dir):
with testing_server(site_dir) as server:
headers, output = do_request(server, "GET /normal.html")
self.assertRegex(output, fr"^<html><body>hello{SCRIPT_REGEX}</body></html>$")
self.assertEqual(headers.get("content-type"), "text/html")
self.assertEqual(headers.get("content-length"), str(len(output)))
_, output = do_request(server, "GET /no_body.html")
self.assertRegex(output, fr"^<p>hi{SCRIPT_REGEX}$")
headers, output = do_request(server, "GET /empty.html")
self.assertRegex(output, fr"^{SCRIPT_REGEX}$")
self.assertEqual(headers.get("content-length"), str(len(output)))
_, output = do_request(server, "GET /multi_body.html")
self.assertRegex(output, fr"^<body>foo</body><body>bar{SCRIPT_REGEX}</body>$")
@tempdir({"index.html": "<body>aaa</body>", "foo/index.html": "<body>bbb</body>"})
def test_serves_modified_index(self, site_dir):
with testing_server(site_dir) as server:
headers, output = do_request(server, "GET /")
self.assertRegex(output, fr"^<body>aaa{SCRIPT_REGEX}</body>$")
self.assertEqual(headers["_status"], "200 OK")
self.assertEqual(headers.get("content-type"), "text/html")
self.assertEqual(headers.get("content-length"), str(len(output)))
_, output = do_request(server, "GET /foo/")
self.assertRegex(output, fr"^<body>bbb{SCRIPT_REGEX}</body>$")
@tempdir({"я.html": "<body>aaa</body>", "测试2/index.html": "<body>bbb</body>"})
def test_serves_with_unicode_characters(self, site_dir):
with testing_server(site_dir) as server:
_, output = do_request(server, "GET /я.html")
self.assertRegex(output, fr"^<body>aaa{SCRIPT_REGEX}</body>$")
_, output = do_request(server, "GET /%D1%8F.html")
self.assertRegex(output, fr"^<body>aaa{SCRIPT_REGEX}</body>$")
with self.assertLogs("mkdocs.livereload"):
headers, _ = do_request(server, "GET /%D1.html")
self.assertEqual(headers["_status"], "404 Not Found")
_, output = do_request(server, "GET /测试2/")
self.assertRegex(output, fr"^<body>bbb{SCRIPT_REGEX}</body>$")
_, output = do_request(server, "GET /%E6%B5%8B%E8%AF%952/index.html")
self.assertRegex(output, fr"^<body>bbb{SCRIPT_REGEX}</body>$")
@tempdir()
def test_serves_js(self, site_dir):
with testing_server(site_dir) as server:
for mount_path in "/", "/sub/":
server.mount_path = mount_path
headers, output = do_request(server, "GET /js/livereload.js")
self.assertIn("function livereload", output)
self.assertEqual(headers["_status"], "200 OK")
self.assertEqual(headers.get("content-type"), "application/javascript")
@tempdir()
def test_serves_polling_instantly(self, site_dir):
with testing_server(site_dir) as server:
_, output = do_request(server, "GET /livereload/0/0")
self.assertTrue(output.isdigit())
@tempdir()
@tempdir()
def test_serves_polling_after_event(self, site_dir, docs_dir):
with testing_server(site_dir) as server:
initial_epoch = server._visible_epoch
server.watch(docs_dir)
time.sleep(0.01)
Path(docs_dir, "foo.docs").write_text("b")
_, output = do_request(server, f"GET /livereload/{initial_epoch}/0")
self.assertNotEqual(server._visible_epoch, initial_epoch)
self.assertEqual(output, str(server._visible_epoch))
@tempdir()
def test_serves_polling_with_timeout(self, site_dir):
with testing_server(site_dir) as server:
server.poll_response_timeout = 0.2
initial_epoch = server._visible_epoch
start_time = time.monotonic()
_, output = do_request(server, f"GET /livereload/{initial_epoch}/0")
self.assertGreaterEqual(time.monotonic(), start_time + 0.2)
self.assertEqual(output, str(initial_epoch))
@tempdir()
def test_error_handler(self, site_dir):
with testing_server(site_dir) as server:
server.error_handler = lambda code: b"[%d]" % code
with self.assertLogs("mkdocs.livereload") as cm:
headers, output = do_request(server, "GET /missing")
self.assertEqual(headers["_status"], "404 Not Found")
self.assertEqual(output, "[404]")
self.assertRegex(
"\n".join(cm.output),
r'^WARNING:mkdocs.livereload:.*"GET /missing HTTP/1.1" code 404',
)
@tempdir()
def test_bad_error_handler(self, site_dir):
self.maxDiff = None
with testing_server(site_dir) as server:
server.error_handler = lambda code: 0 / 0
with self.assertLogs("mkdocs.livereload") as cm:
headers, output = do_request(server, "GET /missing")
self.assertEqual(headers["_status"], "404 Not Found")
self.assertIn("404", output)
self.assertRegex(
"\n".join(cm.output), r"Failed to render an error message[\s\S]+/missing.+code 404"
)
@tempdir(
{
"test.html": "<!DOCTYPE html>\nhi",
"test.xml": '<?xml version="1.0" encoding="UTF-8"?>\n<foo></foo>',
"test.css": "div { color: red; }",
"test.js": "use strict;",
"test.json": '{"a": "b"}',
}
)
def test_mime_types(self, site_dir):
with testing_server(site_dir) as server:
headers, _ = do_request(server, "GET /test.html")
self.assertEqual(headers.get("content-type"), "text/html")
headers, _ = do_request(server, "GET /test.xml")
self.assertIn(headers.get("content-type"), ["text/xml", "application/xml"])
headers, _ = do_request(server, "GET /test.css")
self.assertEqual(headers.get("content-type"), "text/css")
headers, _ = do_request(server, "GET /test.js")
self.assertEqual(headers.get("content-type"), "application/javascript")
headers, _ = do_request(server, "GET /test.json")
self.assertEqual(headers.get("content-type"), "application/json")
@tempdir({"index.html": "<body>aaa</body>", "sub/sub.html": "<body>bbb</body>"})
def test_serves_from_mount_path(self, site_dir):
with testing_server(site_dir, mount_path="/sub") as server:
headers, output = do_request(server, "GET /sub/")
self.assertRegex(output, fr"^<body>aaa{SCRIPT_REGEX}</body>$")
self.assertEqual(headers.get("content-type"), "text/html")
_, output = do_request(server, "GET /sub/sub/sub.html")
self.assertRegex(output, fr"^<body>bbb{SCRIPT_REGEX}</body>$")
with self.assertLogs("mkdocs.livereload"):
headers, _ = do_request(server, "GET /sub/sub.html")
self.assertEqual(headers["_status"], "404 Not Found")
@tempdir()
def test_redirects_to_mount_path(self, site_dir):
with testing_server(site_dir, mount_path="/mount/path") as server:
with self.assertLogs("mkdocs.livereload"):
headers, _ = do_request(server, "GET /")
self.assertEqual(headers["_status"], "302 Found")
self.assertEqual(headers.get("location"), "/mount/path/")
@tempdir({"mkdocs.yml": "original", "mkdocs2.yml": "original"}, prefix="tmp_dir")
@tempdir(prefix="origin_dir")
@tempdir({"subdir/foo.md": "original"}, prefix="dest_docs_dir")
def test_watches_direct_symlinks(self, dest_docs_dir, origin_dir, tmp_dir):
try:
Path(origin_dir, "docs").symlink_to(dest_docs_dir, target_is_directory=True)
Path(origin_dir, "mkdocs.yml").symlink_to(Path(tmp_dir, "mkdocs.yml"))
except NotImplementedError: # PyPy on Windows
self.skipTest("Creating symlinks not supported")
started_building = threading.Event()
def wait_for_build():
result = started_building.wait(timeout=10)
started_building.clear()
with self.assertLogs("mkdocs.livereload"):
do_request(server, "GET /")
return result
with testing_server(tmp_dir, started_building.set) as server:
server.watch(Path(origin_dir, "docs"))
server.watch(Path(origin_dir, "mkdocs.yml"))
time.sleep(0.01)
Path(tmp_dir, "mkdocs.yml").write_text("edited")
self.assertTrue(wait_for_build())
Path(dest_docs_dir, "subdir", "foo.md").write_text("edited")
self.assertTrue(wait_for_build())
Path(origin_dir, "unrelated.md").write_text("foo")
self.assertFalse(started_building.wait(timeout=0.2))
@tempdir(["file_dest_1.md", "file_dest_2.md", "file_dest_unused.md"], prefix="tmp_dir")
@tempdir(["file_under.md"], prefix="dir_to_link_to")
@tempdir()
def test_watches_through_symlinks(self, docs_dir, dir_to_link_to, tmp_dir):
try:
Path(docs_dir, "link1.md").symlink_to(Path(tmp_dir, "file_dest_1.md"))
Path(docs_dir, "linked_dir").symlink_to(dir_to_link_to, target_is_directory=True)
Path(dir_to_link_to, "sublink.md").symlink_to(Path(tmp_dir, "file_dest_2.md"))
except NotImplementedError: # PyPy on Windows
self.skipTest("Creating symlinks not supported")
started_building = threading.Event()
def wait_for_build():
result = started_building.wait(timeout=10)
started_building.clear()
with self.assertLogs("mkdocs.livereload"):
do_request(server, "GET /")
return result
with testing_server(docs_dir, started_building.set) as server:
server.watch(docs_dir)
time.sleep(0.01)
Path(tmp_dir, "file_dest_1.md").write_text("edited")
self.assertTrue(wait_for_build())
Path(dir_to_link_to, "file_under.md").write_text("edited")
self.assertTrue(wait_for_build())
Path(tmp_dir, "file_dest_2.md").write_text("edited")
self.assertTrue(wait_for_build())
Path(docs_dir, "link1.md").unlink()
self.assertTrue(wait_for_build())
Path(tmp_dir, "file_dest_unused.md").write_text("edited")
self.assertFalse(started_building.wait(timeout=0.2))
@tempdir(prefix="site_dir")
@tempdir(["docs/unused.md", "README.md"], prefix="origin_dir")
def test_watches_through_relative_symlinks(self, origin_dir, site_dir):
docs_dir = Path(origin_dir, "docs")
old_cwd = os.getcwd()
os.chdir(docs_dir)
try:
Path(docs_dir, "README.md").symlink_to(Path("..", "README.md"))
except NotImplementedError: # PyPy on Windows
self.skipTest("Creating symlinks not supported")
finally:
os.chdir(old_cwd)
started_building = threading.Event()
with testing_server(docs_dir, started_building.set) as server:
server.watch(docs_dir)
time.sleep(0.01)
Path(origin_dir, "README.md").write_text("edited")
self.assertTrue(started_building.wait(timeout=10))
@tempdir()
def test_watch_with_broken_symlinks(self, docs_dir):
    # The watcher must tolerate circular and dangling (broken) symlinks
    # without crashing, and still pick up ordinary edits.
    Path(docs_dir, "subdir").mkdir()
    try:
        if sys.platform != "win32":
            # Circular self-link — only created off-Windows; presumably
            # problematic there (TODO confirm).
            Path(docs_dir, "subdir", "circular").symlink_to(Path(docs_dir))
        Path(docs_dir, "broken_1").symlink_to(Path(docs_dir, "oh no"))
        Path(docs_dir, "broken_2").symlink_to(Path(docs_dir, "oh no"), target_is_directory=True)
        Path(docs_dir, "broken_3").symlink_to(Path(docs_dir, "broken_2"))  # link to a broken link
    except NotImplementedError:  # PyPy on Windows
        self.skipTest("Creating symlinks not supported")
    started_building = threading.Event()

    with testing_server(docs_dir, started_building.set) as server:
        server.watch(docs_dir)
        time.sleep(0.01)  # let the observer start

        # A normal file write must still be detected despite the bad links.
        Path(docs_dir, "subdir", "test").write_text("test")
        self.assertTrue(started_building.wait(timeout=10))
|
depthai_record.py | #!/usr/bin/env python3
from pathlib import Path
from multiprocessing import Queue
from threading import Thread
import depthai as dai
from enum import Enum
import cv2
class EncodingQuality(Enum):
    """On-device video encoding presets, ordered from lossless to bandwidth-saving."""

    BEST = 1    # Lossless MJPEG
    HIGH = 2    # MJPEG Quality=97 (default)
    MEDIUM = 3  # MJPEG Quality=93
    LOW = 4     # H265 BitrateKbps=10000
class Record():
    """Records DepthAI camera streams (color/left/right and optionally
    preview/disparity/depth) to disk, encoding on-device where possible.

    Frames are pulled from device output queues elsewhere and pushed into
    self.frame_q; a writer thread (run) appends them to per-stream files.
    """

    def __init__(self, path: Path, device) -> None:
        # Streams saved by default; adjust via set_save_streams().
        self.save = ['color', 'left', 'right']
        self.fps = 30
        self.timelapse = -1  # -1 disables timelapse mode
        self.device = device
        self.quality = EncodingQuality.HIGH
        self.rotate = -1  # cv2 rotate code; -1 means no rotation
        self.preview = False
        # A device with more than one connected camera has a stereo pair.
        self.stereo = 1 < len(device.getConnectedCameras())
        self.mxid = device.getMxId()
        self.path = self.create_folder(path, self.mxid)
        calibData = device.readCalibration()
        # Persist the device calibration alongside the recording.
        calibData.eepromToJsonFile(str(self.path / "calib.json"))
        self.convert_mp4 = False

    def run(self):
        """Writer loop: drains self.frame_q and appends frames to per-stream
        files. Runs on the thread started in start(); a queued None stops it.
        """
        files = {}

        def create_video_file(name):
            # Depth frames go to the rosbag writer; every other stream is an
            # encoded bitstream written straight to disk.
            if name == 'depth':  # or (name=='color' and 'depth' in self.save):
                files[name] = self.depthAiBag
            else:
                ext = 'h265' if self.quality == EncodingQuality.LOW else 'mjpeg'
                files[name] = open(str(self.path / f"{name}.{ext}"), 'wb')
            # if name == "color": fourcc = "I420"
            # elif name == "depth": fourcc = "Y16 "  # 16-bit uncompressed greyscale image
            # else: fourcc = "GREY"  # Simple, single Y plane for monochrome images.
            # files[name] = VideoWriter(str(path / f"{name}.avi"), VideoWriter_fourcc(*fourcc), fps, sizes[name], isColor=name=="color")

        while True:
            try:
                frames = self.frame_q.get()
                if frames is None:  # sentinel: stop recording
                    break
                for name in frames:
                    if name not in files:  # File wasn't created yet
                        create_video_file(name)
                    # if self.rotate != -1:  # Doesn't work atm
                    #     frames[name] = cv2.rotate(frames[name], self.rotate)
                    files[name].write(frames[name])
                    # frames[name].tofile(files[name])
            except KeyboardInterrupt:
                break
        # Close all files - Can't use ExitStack with VideoWriter
        for name in files:
            files[name].close()
        print('Exiting store frame thread')

    def start(self):
        """Builds the pipeline, starts the writer thread, starts the device
        pipeline and creates the host-side output queues."""
        if not self.stereo:  # If device doesn't have stereo camera pair
            if "left" in self.save: self.save.remove("left")
            if "right" in self.save: self.save.remove("right")
            if "disparity" in self.save: self.save.remove("disparity")
            if "depth" in self.save: self.save.remove("depth")
        if self.preview: self.save.append('preview')
        if 0 < self.timelapse:
            self.fps = 5  # timelapse captures at a reduced rate
        self.pipeline, self.nodes = self.create_pipeline()
        if "depth" in self.save:
            from libraries.depthai_rosbags import DepthAiBags
            res = ['depth']
            # If rotate 90 degrees
            # NOTE(review): res holds a single element here, so res[1] raises
            # IndexError when rotate is 0 or 2; res is also unused afterwards.
            # Confirm intent — a (width, height) swap was probably meant.
            if self.rotate in [0,2]: res = (res[1], res[0])
            self.depthAiBag = DepthAiBags(self.path, self.device, self.get_sizes(), rgb='color' in self.save)
        self.frame_q = Queue(20)
        self.process = Thread(target=self.run)
        self.process.start()
        self.device.startPipeline(self.pipeline)
        self.queues = []
        # Timelapse only needs the latest frame; otherwise buffer a few.
        maxSize = 1 if 0 < self.timelapse else 10
        for stream in self.save:
            self.queues.append({
                'q': self.device.getOutputQueue(name=stream, maxSize=maxSize, blocking=False),
                'msgs': [],
                'name': stream,
                'mxid': self.mxid
            })

    def set_fps(self, fps):
        # Capture frame rate (start() overrides this to 5 in timelapse mode).
        self.fps = fps

    def set_timelapse(self, timelapse):
        self.timelapse = timelapse

    def set_quality(self, quality: EncodingQuality):
        self.quality = quality

    def set_preview(self, preview: bool):
        self.preview = preview

    '''
    Available values for `angle`:
    - cv2.ROTATE_90_CLOCKWISE (0)
    - cv2.ROTATE_180 (1)
    - cv2.ROTATE_90_COUNTERCLOCKWISE (2)
    '''
    def set_rotate(self, angle):
        # Rotation is deliberately disabled: the raise makes the assignment
        # below unreachable, so self.rotate stays -1.
        raise Exception("Rotating not yet supported!")
        # Currently RealSense Viewer throws error "memory access violation". Debug.
        self.rotate = angle

    # Which streams to save to the disk (on the host)
    def set_save_streams(self, save_streams):
        self.save = save_streams
        print('save', self.save)

    def get_sizes(self):
        """Returns a mapping of enabled stream name -> frame size."""
        dict = {}  # NOTE(review): local name shadows the builtin `dict`
        if "color" in self.save: dict['color'] = self.nodes['color'].getVideoSize()
        if "right" in self.save: dict['right'] = self.nodes['right'].getResolutionSize()
        if "left" in self.save: dict['left'] = self.nodes['left'].getResolutionSize()
        if "disparity" in self.save: dict['disparity'] = self.nodes['left'].getResolutionSize()
        if "depth" in self.save: dict['depth'] = self.nodes['left'].getResolutionSize()
        return dict

    def create_folder(self, path: Path, mxid: str):
        """Creates and returns the first unused `<i>-<mxid>` folder under path."""
        i = 0
        while True:
            i += 1
            recordings_path = path / f"{i}-{str(mxid)}"
            if not recordings_path.is_dir():
                recordings_path.mkdir(parents=True, exist_ok=False)
                return recordings_path

    def create_pipeline(self):
        """Assembles the DepthAI pipeline for the enabled streams.

        Returns (pipeline, nodes) and also stores both on self.
        """
        pipeline = dai.Pipeline()
        nodes = {}

        def create_mono(name):
            # 720P mono camera on the matching board socket.
            nodes[name] = pipeline.create(dai.node.MonoCamera)
            nodes[name].setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
            socket = dai.CameraBoardSocket.LEFT if name == "left" else dai.CameraBoardSocket.RIGHT
            nodes[name].setBoardSocket(socket)
            nodes[name].setFps(self.fps)

        def stream_out(name, fps, out, noEnc=False):
            # Create XLinkOutputs for the stream
            xout = pipeline.create(dai.node.XLinkOut)
            xout.setStreamName(name)
            if noEnc:  # send raw frames to the host, no on-device encoding
                out.link(xout.input)
                return

            encoder = pipeline.create(dai.node.VideoEncoder)
            profile = dai.VideoEncoderProperties.Profile.H265_MAIN if self.quality == EncodingQuality.LOW else dai.VideoEncoderProperties.Profile.MJPEG
            encoder.setDefaultProfilePreset(fps, profile)

            # Map the quality preset onto encoder settings.
            if self.quality == EncodingQuality.BEST:
                encoder.setLossless(True)
            elif self.quality == EncodingQuality.HIGH:
                encoder.setQuality(97)
            elif self.quality == EncodingQuality.MEDIUM:
                encoder.setQuality(93)
            elif self.quality == EncodingQuality.LOW:
                encoder.setBitrateKbps(10000)
            out.link(encoder.input)
            encoder.bitstream.link(xout.input)

        if "color" in self.save:
            nodes['color'] = pipeline.create(dai.node.ColorCamera)
            nodes['color'].setBoardSocket(dai.CameraBoardSocket.RGB)
            # RealSense Viewer expects RGB color order
            nodes['color'].setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)
            nodes['color'].setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K)
            nodes['color'].setIspScale(1,2)  # 1080P
            nodes['color'].setFps(self.fps)

            if self.preview:
                nodes['color'].setPreviewSize(640, 360)
                stream_out("preview", None, nodes['color'].preview, noEnc=True)

            # TODO change out to .isp instead of .video when ImageManip will support I420 -> NV12
            # Don't encode color stream if we save depth; as we will be saving color frames in rosbags as well
            stream_out("color", nodes['color'].getFps(), nodes['color'].video)  #, noEnc='depth' in self.save)

        if True in (el in ["left", "disparity", "depth"] for el in self.save):
            create_mono("left")
            if "left" in self.save:
                stream_out("left", nodes['left'].getFps(), nodes['left'].out)

        if True in (el in ["right", "disparity", "depth"] for el in self.save):
            create_mono("right")
            if "right" in self.save:
                stream_out("right", nodes['right'].getFps(), nodes['right'].out)

        if True in (el in ["disparity", "depth"] for el in self.save):
            nodes['stereo'] = pipeline.create(dai.node.StereoDepth)
            nodes['stereo'].initialConfig.setConfidenceThreshold(255)
            nodes['stereo'].initialConfig.setMedianFilter(dai.StereoDepthProperties.MedianFilter.KERNEL_7x7)
            # TODO: configurable
            nodes['stereo'].setLeftRightCheck(True)
            nodes['stereo'].setExtendedDisparity(False)

            if "disparity" not in self.save and "depth" in self.save:
                nodes['stereo'].setSubpixel(True)  # For better depth visualization

            # if "depth" and "color" in self.save:  # RGB depth alignment
            #     nodes['color'].setIspScale(1,3)  # 4k -> 720P
            #     # For now, RGB needs fixed focus to properly align with depth.
            #     # This value was used during calibration
            #     nodes['color'].initialControl.setManualFocus(130)
            #     nodes['stereo'].setDepthAlign(dai.CameraBoardSocket.RGB)

            nodes['left'].out.link(nodes['stereo'].left)
            nodes['right'].out.link(nodes['stereo'].right)

            if "disparity" in self.save:
                stream_out("disparity", nodes['right'].getFps(), nodes['stereo'].disparity)
            if "depth" in self.save:
                stream_out('depth', None, nodes['stereo'].depth, noEnc=True)

        self.nodes = nodes
        self.pipeline = pipeline
        return pipeline, nodes
|
test_stuff.py | from multiprocessing import Process, Manager
from random import randrange
def f():
    """Worker: repeatedly read random entries (keys 0-24) from the
    module-level dict `d` and print them."""
    for _ in range(99999):
        idx = randrange(0, 25)
        print(d[idx])
if __name__ == '__main__':
    # Shared lookup table read by the worker function `f`.
    # NOTE(review): workers access `d` as a module global — this relies on
    # fork-style process start; confirm behaviour on spawn (Windows/macOS).
    d = {key: str(key) for key in range(100)}

    worker_count = 28
    children = []
    for _ in range(worker_count):
        child = Process(target=f)
        child.start()
        children.append(child)

    for child in children:
        child.join()
engine.py | """
"""
import logging
import smtplib
from abc import ABC
from datetime import datetime
from email.message import EmailMessage
from queue import Empty, Queue
from threading import Thread
from typing import Any
from vnpy.event import Event, EventEngine
from .app import BaseApp
from .event import (
EVENT_TICK,
EVENT_ORDER,
EVENT_TRADE,
EVENT_POSITION,
EVENT_ACCOUNT,
EVENT_CONTRACT,
EVENT_LOG
)
from .gateway import BaseGateway
from .object import CancelRequest, LogData, OrderRequest, SubscribeRequest
from .setting import SETTINGS
from .utility import Singleton, get_folder_path
class MainEngine:
    """
    Acts as the core of VN Trader.

    Owns the event engine and the registries of gateways, function engines
    and apps, and exposes a facade for connecting, subscribing and trading.
    """

    def __init__(self, event_engine: EventEngine = None):
        """Use the provided event engine, or create one, and start it."""
        if event_engine:
            self.event_engine = event_engine
        else:
            self.event_engine = EventEngine()
        self.event_engine.start()

        self.gateways = {}  # gateway_name -> gateway instance
        self.engines = {}   # engine_name -> engine instance
        self.apps = {}      # app_name -> app instance

        self.init_engines()

    def add_engine(self, engine_class: Any):
        """
        Add function engine.
        """
        engine = engine_class(self, self.event_engine)
        self.engines[engine.engine_name] = engine

    def add_gateway(self, gateway_class: BaseGateway):
        """
        Add gateway.
        """
        gateway = gateway_class(self.event_engine)
        self.gateways[gateway.gateway_name] = gateway

    def add_app(self, app_class: BaseApp):
        """
        Add app, and register the app's engine class as a function engine.
        """
        app = app_class()
        self.apps[app.app_name] = app

        self.add_engine(app.engine_class)

    def init_engines(self):
        """
        Init all engines.
        """
        self.add_engine(LogEngine)
        self.add_engine(OmsEngine)
        self.add_engine(EmailEngine)

    def write_log(self, msg: str, source: str = ""):
        """
        Put log event with specific message.
        """
        log = LogData(msg=msg, gateway_name=source)
        event = Event(EVENT_LOG, log)
        self.event_engine.put(event)

    def get_gateway(self, gateway_name: str):
        """
        Return gateway object by name; logs and returns None when missing.
        """
        gateway = self.gateways.get(gateway_name, None)
        if not gateway:
            self.write_log(f"找不到底层接口:{gateway_name}")
        return gateway

    def get_engine(self, engine_name: str):
        """
        Return engine object by name; logs and returns None when missing.
        """
        engine = self.engines.get(engine_name, None)
        if not engine:
            self.write_log(f"找不到引擎:{engine_name}")
        return engine

    def get_default_setting(self, gateway_name: str):
        """
        Get default setting dict of a specific gateway.
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            return gateway.get_default_setting()
        return None

    def get_all_gateway_names(self):
        """
        Get all names of gateways added in main engine.
        """
        return list(self.gateways.keys())

    def get_all_apps(self):
        """
        Get all app objects.
        """
        return list(self.apps.values())

    def connect(self, setting: dict, gateway_name: str):
        """
        Start connection of a specific gateway.
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            gateway.connect(setting)

    def subscribe(self, req: SubscribeRequest, gateway_name: str):
        """
        Subscribe tick data update of a specific gateway.
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            gateway.subscribe(req)

    def send_order(self, req: OrderRequest, gateway_name: str):
        """
        Send new order request to a specific gateway.

        Returns the gateway's order id, or "" when the gateway is unknown.
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            return gateway.send_order(req)
        else:
            return ""

    def cancel_order(self, req: CancelRequest, gateway_name: str):
        """
        Send cancel order request to a specific gateway.
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            gateway.cancel_order(req)

    def close(self):
        """
        Make sure every gateway and app is closed properly before
        programme exit.
        """
        # Stop event engine first to prevent new timer event.
        self.event_engine.stop()

        for engine in self.engines.values():
            engine.close()

        for gateway in self.gateways.values():
            gateway.close()
class BaseEngine(ABC):
    """
    Abstract class for implementing an function engine.
    """

    def __init__(
        self,
        main_engine: MainEngine,
        event_engine: EventEngine,
        engine_name: str,
    ):
        """Keep references to the main engine, event engine and own name."""
        self.engine_name = engine_name
        self.event_engine = event_engine
        self.main_engine = main_engine

    def close(self):
        """Hook for subclasses to release resources on shutdown."""
        pass
class LogEngine(BaseEngine):
    """
    Processes log event and output with logging module.
    """

    # NOTE(review): `__metaclass__` is Python-2 syntax and is ignored by
    # Python 3, so this class is NOT actually enforced as a singleton here.
    # The Python-3 spelling would be `metaclass=Singleton` in the class
    # header — check for metaclass conflicts with ABC before changing it.
    __metaclass__ = Singleton

    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        """Configure the logger from SETTINGS; a no-op when log.active is off."""
        super(LogEngine, self).__init__(main_engine, event_engine, "log")

        if not SETTINGS["log.active"]:
            return

        self.level = SETTINGS["log.level"]

        self.logger = logging.getLogger("VN Trader")
        self.logger.setLevel(self.level)

        self.formatter = logging.Formatter(
            "%(asctime)s  %(levelname)s: %(message)s"
        )

        self.add_null_handler()

        if SETTINGS["log.console"]:
            self.add_console_handler()

        if SETTINGS["log.file"]:
            self.add_file_handler()

        self.register_event()

    def add_null_handler(self):
        """
        Add null handler for logger.
        """
        null_handler = logging.NullHandler()
        self.logger.addHandler(null_handler)

    def add_console_handler(self):
        """
        Add console output of log.
        """
        console_handler = logging.StreamHandler()
        console_handler.setLevel(self.level)
        console_handler.setFormatter(self.formatter)
        self.logger.addHandler(console_handler)

    def add_file_handler(self):
        """
        Add file output of log.
        """
        today_date = datetime.now().strftime("%Y%m%d")
        filename = f"vt_{today_date}.log"
        log_path = get_folder_path("log")
        file_path = log_path.joinpath(filename)

        file_handler = logging.FileHandler(
            file_path, mode="w", encoding="utf8"
        )
        file_handler.setLevel(self.level)
        file_handler.setFormatter(self.formatter)
        self.logger.addHandler(file_handler)

    def register_event(self):
        """Subscribe to log events from the event engine."""
        self.event_engine.register(EVENT_LOG, self.process_log_event)

    def process_log_event(self, event: Event):
        """
        Output log event data with logging function.
        """
        log = event.data
        self.logger.log(log.level, log.msg)
class OmsEngine(BaseEngine):
    """
    Provides order management system function for VN Trader.

    Caches the latest tick/order/trade/position/account/contract data from
    events and exposes query helpers directly on the main engine.
    """

    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        """Initialise the data caches and hook up event handlers."""
        super(OmsEngine, self).__init__(main_engine, event_engine, "oms")

        self.ticks = {}          # vt_symbol -> latest tick
        self.orders = {}         # vt_orderid -> latest order
        self.trades = {}         # vt_tradeid -> trade
        self.positions = {}      # vt_positionid -> latest position
        self.accounts = {}       # vt_accountid -> latest account
        self.contracts = {}      # vt_symbol -> contract
        self.active_orders = {}  # vt_orderid -> still-active orders only

        self.add_function()
        self.register_event()

    def add_function(self):
        """Add query function to main engine."""
        self.main_engine.get_tick = self.get_tick
        self.main_engine.get_order = self.get_order
        self.main_engine.get_trade = self.get_trade
        self.main_engine.get_position = self.get_position
        self.main_engine.get_account = self.get_account
        self.main_engine.get_contract = self.get_contract
        self.main_engine.get_all_ticks = self.get_all_ticks
        self.main_engine.get_all_orders = self.get_all_orders
        self.main_engine.get_all_trades = self.get_all_trades
        self.main_engine.get_all_positions = self.get_all_positions
        self.main_engine.get_all_accounts = self.get_all_accounts
        self.main_engine.get_all_contracts = self.get_all_contracts
        self.main_engine.get_all_active_orders = self.get_all_active_orders

    def register_event(self):
        """Subscribe to all data events that feed the caches."""
        self.event_engine.register(EVENT_TICK, self.process_tick_event)
        self.event_engine.register(EVENT_ORDER, self.process_order_event)
        self.event_engine.register(EVENT_TRADE, self.process_trade_event)
        self.event_engine.register(EVENT_POSITION, self.process_position_event)
        self.event_engine.register(EVENT_ACCOUNT, self.process_account_event)
        self.event_engine.register(EVENT_CONTRACT, self.process_contract_event)

    def process_tick_event(self, event: Event):
        """Cache the latest tick for its symbol."""
        tick = event.data
        self.ticks[tick.vt_symbol] = tick

    def process_order_event(self, event: Event):
        """Cache the order and keep active_orders in sync."""
        order = event.data
        self.orders[order.vt_orderid] = order

        # If order is active, then update data in dict.
        if order.is_active():
            self.active_orders[order.vt_orderid] = order
        # Otherwise, pop inactive order from in dict
        elif order.vt_orderid in self.active_orders:
            self.active_orders.pop(order.vt_orderid)

    def process_trade_event(self, event: Event):
        """Cache the trade by its id."""
        trade = event.data
        self.trades[trade.vt_tradeid] = trade

    def process_position_event(self, event: Event):
        """Cache the latest position by its id."""
        position = event.data
        self.positions[position.vt_positionid] = position

    def process_account_event(self, event: Event):
        """Cache the latest account snapshot by its id."""
        account = event.data
        self.accounts[account.vt_accountid] = account

    def process_contract_event(self, event: Event):
        """Cache the contract by its symbol."""
        contract = event.data
        self.contracts[contract.vt_symbol] = contract

    def get_tick(self, vt_symbol):
        """
        Get latest market tick data by vt_symbol.
        """
        return self.ticks.get(vt_symbol, None)

    def get_order(self, vt_orderid):
        """
        Get latest order data by vt_orderid.
        """
        return self.orders.get(vt_orderid, None)

    def get_trade(self, vt_tradeid):
        """
        Get trade data by vt_tradeid.
        """
        return self.trades.get(vt_tradeid, None)

    def get_position(self, vt_positionid):
        """
        Get latest position data by vt_positionid.
        """
        return self.positions.get(vt_positionid, None)

    def get_account(self, vt_accountid):
        """
        Get latest account data by vt_accountid.
        """
        return self.accounts.get(vt_accountid, None)

    def get_contract(self, vt_symbol):
        """
        Get contract data by vt_symbol.
        """
        return self.contracts.get(vt_symbol, None)

    def get_all_ticks(self):
        """
        Get all tick data.
        """
        return list(self.ticks.values())

    def get_all_orders(self):
        """
        Get all order data.
        """
        return list(self.orders.values())

    def get_all_trades(self):
        """
        Get all trade data.
        """
        return list(self.trades.values())

    def get_all_positions(self):
        """
        Get all position data.
        """
        return list(self.positions.values())

    def get_all_accounts(self):
        """
        Get all account data.
        """
        return list(self.accounts.values())

    def get_all_contracts(self):
        """
        Get all contract data.
        """
        return list(self.contracts.values())

    def get_all_active_orders(self, vt_symbol: str = ""):
        """
        Get all active orders by vt_symbol.

        If vt_symbol is empty, return all active orders.
        """
        if not vt_symbol:
            return list(self.active_orders.values())
        else:
            active_orders = [
                order
                for order in self.active_orders.values()
                if order.vt_symbol == vt_symbol
            ]
            return active_orders
class EmailEngine(BaseEngine):
    """
    Provides email sending function for VN Trader.

    Emails are queued and delivered asynchronously by a worker thread that
    is started lazily on the first send_email call.
    """

    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        """Create the (not yet started) worker thread and message queue."""
        super(EmailEngine, self).__init__(main_engine, event_engine, "email")

        self.thread = Thread(target=self.run)
        self.queue = Queue()
        self.active = False  # worker-loop run flag

        # Expose send_email directly on the main engine facade.
        self.main_engine.send_email = self.send_email

    def send_email(self, subject: str, content: str, receiver: str = ""):
        """
        Queue an email for asynchronous delivery.

        When receiver is empty, the configured default receiver is used.
        """
        # Start email engine when sending first email.
        if not self.active:
            self.start()

        # Use default receiver if not specified.
        if not receiver:
            receiver = SETTINGS["email.receiver"]

        msg = EmailMessage()
        msg["From"] = SETTINGS["email.sender"]
        # Fix: honor the explicit receiver argument — the original always
        # wrote SETTINGS["email.receiver"] here, silently ignoring it.
        msg["To"] = receiver
        msg["Subject"] = subject
        msg.set_content(content)

        self.queue.put(msg)

    def run(self):
        """Worker loop: drain the queue and deliver messages over SMTP-SSL."""
        while self.active:
            try:
                msg = self.queue.get(block=True, timeout=1)

                with smtplib.SMTP_SSL(
                    SETTINGS["email.server"], SETTINGS["email.port"]
                ) as smtp:
                    smtp.login(
                        SETTINGS["email.username"], SETTINGS["email.password"]
                    )
                    smtp.send_message(msg)
            except Empty:
                # No message within the timeout; loop to re-check self.active.
                pass

    def start(self):
        """Start the worker thread."""
        self.active = True
        self.thread.start()

    def close(self):
        """Stop the worker thread and wait for it to exit."""
        if not self.active:
            return

        self.active = False
        self.thread.join()
|
sublist3r.py | #!/usr/bin/env python
# coding: utf-8
# Sublist3r v1.0
# By Ahmed Aboul-Ela - twitter.com/aboul3la
# modules in standard library
import re
import sys
import os
import argparse
import time
import hashlib
import random
import multiprocessing
import threading
import socket
import json
from collections import Counter
# external modules
from subbrute import subbrute
import dns.resolver
import requests
# Python 2.x and 3.x compatiablity
if sys.version > '3':
    # NOTE(review): lexicographic string comparison of sys.version; works for
    # 2.x/3.x but sys.version_info would be the robust check.
    import urllib.parse as urlparse
    import urllib.parse as urllib
else:
    import urlparse
    import urllib

# In case you cannot install some of the required development packages
# there's also an option to disable the SSL warning:
try:
    import requests.packages.urllib3
    requests.packages.urllib3.disable_warnings()
except:
    pass

# Check if we are running this on windows platform
is_windows = sys.platform.startswith('win')

# Console Colors
if is_windows:
    # Windows deserves coloring too :D
    G = '\033[92m'  # green
    Y = '\033[93m'  # yellow
    B = '\033[94m'  # blue
    R = '\033[91m'  # red
    W = '\033[0m'   # white
    try:
        import win_unicode_console , colorama
        win_unicode_console.enable()
        colorama.init()
        # Now the unicode will work ^_^
    except:
        print("[!] Error: Coloring libraries not installed, no coloring will be used [Check the readme]")
        # NOTE(review): the doubled assignment chain is redundant —
        # `G = Y = B = R = W = ''` alone would have the same effect.
        G = Y = B = R = W = G = Y = B = R = W = ''
else:
    G = '\033[92m'  # green
    Y = '\033[93m'  # yellow
    B = '\033[94m'  # blue
    R = '\033[91m'  # red
    W = '\033[0m'   # white
def no_color():
    """Disable terminal coloring by blanking every ANSI color global."""
    global G, Y, B, R, W
    G = ''
    Y = ''
    B = ''
    R = ''
    W = ''
def banner():
    """Print the colored Sublist3r ASCII-art banner (red art, yellow credit)."""
    print("""%s
 ____ _ _ _ _ _____
/ ___| _ _| |__ | (_)___| |_|___ / _ __
\___ \| | | | '_ \| | / __| __| |_ \| '__|
___) | |_| | |_) | | \__ \ |_ ___) | |
|____/ \__,_|_.__/|_|_|___/\__|____/|_|%s%s
# Coded By Ahmed Aboul-Ela - @aboul3la
""" % (R, W, Y))
def parser_error(errmsg):
    """Show the banner, usage hint and the error message, then exit."""
    banner()
    usage_line = "Usage: python " + sys.argv[0] + " [Options] use -h for help"
    print(usage_line)
    print(R + "Error: " + errmsg + W)
    sys.exit()
def parse_args():
    """Build the command-line parser and return the parsed options."""
    epilog_text = '\tExample: \r\npython ' + sys.argv[0] + " -d google.com"
    parser = argparse.ArgumentParser(epilog=epilog_text)
    # Route argparse errors through our custom handler and rename the section.
    parser.error = parser_error
    parser._optionals.title = "OPTIONS"
    parser.add_argument('-d', '--domain', help="Domain name to enumerate it's subdomains", required=True)
    parser.add_argument('-b', '--bruteforce', help='Enable the subbrute bruteforce module', nargs='?', default=False)
    parser.add_argument('-p', '--ports', help='Scan the found subdomains against specified tcp ports')
    parser.add_argument('-v', '--verbose', help='Enable Verbosity and display results in realtime', nargs='?', default=False)
    parser.add_argument('-t', '--threads', help='Number of threads to use for subbrute bruteforce', type=int, default=30)
    parser.add_argument('-e', '--engines', help='Specify a comma-separated list of search engines')
    parser.add_argument('-o', '--output', help='Save the results to text file')
    parser.add_argument('-n', '--no-color', help='Output without color', default=False, action='store_true')
    return parser.parse_args()
def write_file(filename, subdomains):
    """Save the subdomain list to *filename*, one entry per line.

    The file is opened in text mode, which already translates '\\n' into the
    platform's line separator on write; the original wrote os.linesep, which
    produces doubled carriage returns ('\\r\\r\\n') on Windows.
    """
    # saving subdomains results to output file
    print("%s[-] Saving results to file: %s%s%s%s" % (Y, W, R, filename, W))
    with open(str(filename), 'wt') as f:
        for subdomain in subdomains:
            f.write(subdomain + "\n")
def subdomain_sorting_key(hostname):
    """Sorting key for subdomains

    This sorting key orders subdomains from the top-level domain at the right
    reading left, then moving '^' and 'www' to the top of their group. For
    example, the following list is sorted correctly:

        [
            'example.com',
            'www.example.com',
            'a.example.com',
            'www.a.example.com',
            'b.a.example.com',
            'b.example.com',
            'example.net',
            'www.example.net',
            'a.example.net',
        ]
    """
    labels = hostname.split('.')
    labels.reverse()
    # A leading 'www' label sorts just after its bare parent domain.
    if labels[-1] == 'www':
        return labels[:-1], 1
    return labels, 0
class enumratorBase(object):
    """Base class for search-engine subdomain enumerators.

    Subclasses supply the engine-specific URL template, result parsing
    (extract_domains), query generation and rate-limit handling.
    """

    def __init__(self, base_url, engine_name, domain, subdomains=None, silent=False, verbose=True):
        subdomains = subdomains or []
        # Normalize the target to a bare hostname.
        self.domain = urlparse.urlparse(domain).netloc
        self.session = requests.Session()
        self.subdomains = []  # subdomains found by this enumerator
        self.timeout = 25     # per-request timeout, seconds
        self.base_url = base_url
        self.engine_name = engine_name
        self.silent = silent
        self.verbose = verbose
        # Browser-like headers to reduce the chance of being blocked.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.8',
            'Accept-Encoding': 'gzip',
        }
        self.print_banner()

    def print_(self, text):
        # Print unless running in silent mode.
        if not self.silent:
            print(text)
        return

    def print_banner(self):
        """Subclass can override this if they want a fancy banner :)"""
        self.print_(G + "[-] Searching now in %s.." % (self.engine_name) + W)
        return

    def send_req(self, query, page_no=1):
        """Fill the engine URL template and fetch the page.

        Any network error yields a None response (mapped to 0 by get_response).
        """
        url = self.base_url.format(query=query, page_no=page_no)
        try:
            resp = self.session.get(url, headers=self.headers, timeout=self.timeout)
        except Exception:
            resp = None
        return self.get_response(resp)

    def get_response(self, response):
        # Returns the response body, or 0 when the request failed.
        if response is None:
            return 0
        return response.text if hasattr(response, "text") else response.content

    def check_max_subdomains(self, count):
        # True when the per-query subdomain limit is reached;
        # MAX_DOMAINS == 0 means "no limit".
        if self.MAX_DOMAINS == 0:
            return False
        return count >= self.MAX_DOMAINS

    def check_max_pages(self, num):
        # True when the page limit is reached; MAX_PAGES == 0 means "no limit".
        if self.MAX_PAGES == 0:
            return False
        return num >= self.MAX_PAGES

    # override
    def extract_domains(self, resp):
        """Child class should override this function."""
        return

    # override
    def check_response_errors(self, resp):
        """Child class should override this function.

        The function should return True if there are no errors and False otherwise.
        """
        return True

    def should_sleep(self):
        """Some enumerators require sleeping to avoid bot detections, like the Google enumerator."""
        return

    def generate_query(self):
        """Child class should override this function."""
        return

    def get_page(self, num):
        """Child class that uses a different pagination counter should override this function."""
        return num + 10

    def enumerate(self, altquery=False):
        """Main loop: query, parse and paginate until a limit or repeated page stops it."""
        flag = True
        page_no = 0
        prev_links = []
        retries = 0

        while flag:
            query = self.generate_query()
            count = query.count(self.domain)  # finding the number of subdomains found so far

            # if we reached the maximum number of subdomains in the search query
            # then we should go over the pages
            if self.check_max_subdomains(count):
                page_no = self.get_page(page_no)

            if self.check_max_pages(page_no):  # maximum pages for Google to avoid getting blocked
                return self.subdomains
            resp = self.send_req(query, page_no)

            # check if any error occured
            if not self.check_response_errors(resp):
                return self.subdomains
            links = self.extract_domains(resp)

            # if the previous page hyperlinks were similar to the current one, then maybe we have reached the last page
            if links == prev_links:
                retries += 1
                page_no = self.get_page(page_no)

                # make another retry, maybe it isn't the last page
                if retries >= 3:
                    return self.subdomains

            prev_links = links
            self.should_sleep()

        return self.subdomains
class enumratorBaseThreaded(multiprocessing.Process, enumratorBase):
    """Runs an enumerator in its own process, publishing results to a shared
    collection.

    NOTE(review): the default `lock=threading.Lock()` is evaluated once at
    definition time, so every instance created without an explicit lock
    shares the same Lock object — confirm this sharing is intentional.
    """

    def __init__(self, base_url, engine_name, domain, subdomains=None, q=None, lock=threading.Lock(), silent=False, verbose=True):
        subdomains = subdomains or []
        enumratorBase.__init__(self, base_url, engine_name, domain, subdomains, silent=silent, verbose=verbose)
        multiprocessing.Process.__init__(self)
        self.lock = lock
        # q is an append-able shared collection for found subdomains.
        self.q = q
        return

    def run(self):
        """Process entry point: enumerate and publish each found subdomain."""
        domain_list = self.enumerate()
        for domain in domain_list:
            self.q.append(domain)
class GoogleEnum(enumratorBaseThreaded):
    """Google search-engine subdomain enumerator."""

    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        base_url = "https://google.com/search?q={query}&btnG=Search&hl=en-US&biw=&bih=&gbv=1&start={page_no}&filter=0"
        self.engine_name = "Google"
        self.MAX_DOMAINS = 11
        self.MAX_PAGES = 200
        super(GoogleEnum, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        self.q = q
        return

    def extract_domains(self, resp):
        """Pull subdomain names out of Google's result markup (<cite> tags)."""
        links_list = list()
        # Raw string avoids the invalid '\/' escape (DeprecationWarning on
        # modern Python); the pattern itself is unchanged.
        link_regx = re.compile(r'<cite.*?>(.*?)<\/cite>')
        try:
            links_list = link_regx.findall(resp)
            for link in links_list:
                # Strip any markup Google nests inside the cite element.
                link = re.sub('<span.*>', '', link)
                if not link.startswith('http'):
                    link = "http://" + link
                subdomain = urlparse.urlparse(link).netloc
                if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
                    if self.verbose:
                        self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
                    self.subdomains.append(subdomain.strip())
        except Exception:
            pass
        return links_list

    def check_response_errors(self, resp):
        """Return False (after logging) when Google is rate-limiting us.

        Fix: the original also tested `type(resp) is unicode`, which raises
        NameError on Python 3 whenever resp is not a str (e.g. the integer 0
        returned for a failed request); isinstance(resp, str) is sufficient.
        """
        if isinstance(resp, str) and 'Our systems have detected unusual traffic' in resp:
            self.print_(R + "[!] Error: Google probably now is blocking our requests" + W)
            self.print_(R + "[~] Finished now the Google Enumeration ..." + W)
            return False
        return True

    def should_sleep(self):
        # Throttle to avoid Google's bot detection.
        time.sleep(5)
        return

    def generate_query(self):
        """Build a site: query excluding www and already-found subdomains."""
        if self.subdomains:
            fmt = 'site:{domain} -www.{domain} -{found}'
            found = ' -'.join(self.subdomains[:self.MAX_DOMAINS - 2])
            query = fmt.format(domain=self.domain, found=found)
        else:
            query = "site:{domain} -www.{domain}".format(domain=self.domain)
        return query
class YahooEnum(enumratorBaseThreaded):
    """Yahoo search-engine subdomain enumerator."""

    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        base_url = "https://search.yahoo.com/search?p={query}&b={page_no}"
        self.engine_name = "Yahoo"
        self.MAX_DOMAINS = 10
        self.MAX_PAGES = 0  # 0 means no page limit
        super(YahooEnum, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        self.q = q
        return

    def extract_domains(self, resp):
        """Collect subdomains from Yahoo result markup (two layout variants)."""
        link_regx2 = re.compile('<span class=" fz-.*? fw-m fc-12th wr-bw.*?">(.*?)</span>')
        link_regx = re.compile('<span class="txt"><span class=" cite fw-xl fz-15px">(.*?)</span>')
        links_list = []
        try:
            links = link_regx.findall(resp)
            links2 = link_regx2.findall(resp)
            links_list = links + links2
            for link in links_list:
                # Strip Yahoo's <b>/</b> highlighting tags.
                link = re.sub("<(\/)?b>", "", link)
                if not link.startswith('http'):
                    link = "http://" + link
                subdomain = urlparse.urlparse(link).netloc
                # Discard hosts outside the target domain.
                if not subdomain.endswith(self.domain):
                    continue
                if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
                    if self.verbose:
                        self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
                    self.subdomains.append(subdomain.strip())
        except Exception:
            pass

        return links_list

    def should_sleep(self):
        # Yahoo requires no throttling.
        return

    def get_page(self, num):
        # Yahoo's `b` parameter advances in steps of 10 results.
        return num + 10

    def generate_query(self):
        """Build a site: query excluding already-found subdomains."""
        if self.subdomains:
            fmt = 'site:{domain} -domain:www.{domain} -domain:{found}'
            found = ' -domain:'.join(self.subdomains[:77])
            query = fmt.format(domain=self.domain, found=found)
        else:
            query = "site:{domain}".format(domain=self.domain)
        return query
class AskEnum(enumratorBaseThreaded):
    """Ask.com search-engine subdomain enumerator."""

    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        base_url = 'http://www.ask.com/web?q={query}&page={page_no}&qid=8D6EE6BF52E0C04527E51F64F22C4534&o=0&l=dir&qsrc=998&qo=pagination'
        self.engine_name = "Ask"
        self.MAX_DOMAINS = 11
        self.MAX_PAGES = 0
        enumratorBaseThreaded.__init__(self, base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        self.q = q
        return

    def extract_domains(self, resp):
        """Collect subdomains from Ask result pages."""
        url_pattern = re.compile('<p class="web-result-url">(.*?)</p>')
        found = list()
        try:
            found = url_pattern.findall(resp)
            for hit in found:
                if not hit.startswith('http'):
                    hit = "http://" + hit
                host = urlparse.urlparse(hit).netloc
                if host not in self.subdomains and host != self.domain:
                    if self.verbose:
                        self.print_("%s%s: %s%s" % (R, self.engine_name, W, host))
                    self.subdomains.append(host.strip())
        except Exception:
            pass
        return found

    def get_page(self, num):
        # Ask paginates one page at a time.
        return num + 1

    def generate_query(self):
        """Build a site: query excluding www and already-found subdomains."""
        if not self.subdomains:
            return "site:{domain} -www.{domain}".format(domain=self.domain)
        found = ' -'.join(self.subdomains[:self.MAX_DOMAINS])
        return 'site:{domain} -www.{domain} -{found}'.format(domain=self.domain, found=found)
class BingEnum(enumratorBaseThreaded):
    """Subdomain enumerator backed by Bing web search."""

    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        base_url = 'https://www.bing.com/search?q={query}&go=Submit&first={page_no}'
        self.engine_name = "Bing"
        self.MAX_DOMAINS = 30
        self.MAX_PAGES = 0
        # NOTE: verbose is intentionally not forwarded to the base class here;
        # it is stored directly on the instance below, matching the original.
        super(BingEnum, self).__init__(base_url, self.engine_name, domain,
                                       subdomains, q=q, silent=silent)
        self.q = q
        self.verbose = verbose
        return

    def extract_domains(self, resp):
        """Pull result links out of a Bing results page; record new subdomains."""
        pattern_algo = re.compile('<li class="b_algo"><h2><a href="(.*?)"')
        pattern_title = re.compile('<div class="b_title"><h2><a href="(.*?)"')
        found = list()
        try:
            found = pattern_algo.findall(resp) + pattern_title.findall(resp)
            for entry in found:
                entry = re.sub('<(\/)?strong>|<span.*?>|<|>', '', entry)
                if not entry.startswith('http'):
                    entry = "http://" + entry
                host = urlparse.urlparse(entry).netloc
                if host in self.subdomains or host == self.domain:
                    continue
                if self.verbose:
                    self.print_("%s%s: %s%s" % (R, self.engine_name, W, host))
                self.subdomains.append(host.strip())
        except Exception:
            pass
        return found

    def generate_query(self):
        """Build the Bing dork using the domain: operator."""
        base = "domain:{domain} -www.{domain}".format(domain=self.domain)
        if not self.subdomains:
            return base
        excluded = ' -'.join(self.subdomains[:self.MAX_DOMAINS])
        return base + ' -' + excluded
class BaiduEnum(enumratorBaseThreaded):
    """Subdomain enumerator backed by Baidu web search.

    Baidu's exclusion operators handle few terms, so instead of excluding
    every known subdomain this engine tracks the two most frequently seen
    hosts (``querydomain``) and excludes only those on the next query.
    """

    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        base_url = 'https://www.baidu.com/s?pn={page_no}&wd={query}&oq={query}'
        self.engine_name = "Baidu"
        self.MAX_DOMAINS = 2
        self.MAX_PAGES = 760
        enumratorBaseThreaded.__init__(self, base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        # Starts as the bare domain; replaced by a tuple of the two most
        # common hosts once extract_domains stops finding anything new.
        self.querydomain = self.domain
        self.q = q
        return

    def extract_domains(self, resp):
        """Pull result hosts out of a Baidu results page; record new subdomains.

        When no new subdomain is found on a page, switch ``querydomain`` to
        the two most frequent hosts seen so they can be excluded next query.
        """
        links = list()
        found_newdomain = False
        subdomain_list = []
        link_regx = re.compile('<a.*?class="c-showurl".*?>(.*?)</a>')
        try:
            links = link_regx.findall(resp)
            for link in links:
                # Strip tags, angle brackets and spaces from the display URL.
                link = re.sub('<.*?>|>|<| ', '', link)
                if not link.startswith('http'):
                    link = "http://" + link
                subdomain = urlparse.urlparse(link).netloc
                if subdomain.endswith(self.domain):
                    subdomain_list.append(subdomain)
                    if subdomain not in self.subdomains and subdomain != self.domain:
                        found_newdomain = True
                        if self.verbose:
                            self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
                        self.subdomains.append(subdomain.strip())
        except Exception:
            pass
        if not found_newdomain and subdomain_list:
            self.querydomain = self.findsubs(subdomain_list)
        return links

    def findsubs(self, subdomains):
        """Return a tuple of the two most frequent hosts in *subdomains*
        (second element is '' when there is only one distinct host)."""
        count = Counter(subdomains)
        subdomain1 = max(count, key=count.get)
        count.pop(subdomain1, "None")
        subdomain2 = max(count, key=count.get) if count else ''
        return (subdomain1, subdomain2)

    def check_response_errors(self, resp):
        """Baidu responses are not checked for block/captcha pages."""
        return True

    def should_sleep(self):
        """Randomized delay between requests to avoid Baidu rate limiting."""
        time.sleep(random.randint(2, 5))
        return

    def generate_query(self):
        """Build the Baidu dork, excluding the current ``querydomain`` pair.

        Note: ``querydomain`` is a 2-tuple here, so the join produces
        ``host1 -site:host2``.
        """
        if self.subdomains and self.querydomain != self.domain:
            found = ' -site:'.join(self.querydomain)
            query = "site:{domain} -site:www.{domain} -site:{found} ".format(domain=self.domain, found=found)
        else:
            query = "site:{domain} -site:www.{domain}".format(domain=self.domain)
        return query
class NetcraftEnum(enumratorBaseThreaded):
    """Subdomain enumerator backed by Netcraft's searchdns service."""

    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        self.base_url = 'https://searchdns.netcraft.com/?restriction=site+ends+with&host={domain}'
        self.engine_name = "Netcraft"
        self.lock = threading.Lock()
        super(NetcraftEnum, self).__init__(self.base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        self.q = q
        return

    def req(self, url, cookies=None):
        """GET *url* with the session headers and optional cookies; None on failure."""
        cookies = cookies or {}
        try:
            resp = self.session.get(url, headers=self.headers, timeout=self.timeout, cookies=cookies)
        except Exception as e:
            self.print_(e)
            resp = None
        return resp

    def should_sleep(self):
        """Small randomized delay between pages to stay under rate limits."""
        time.sleep(random.randint(1, 2))
        return

    def get_next(self, resp):
        """Extract the absolute URL of the 'Next Page' link from a results page."""
        link_regx = re.compile('<a.*?href="(.*?)">Next Page')
        link = link_regx.findall(resp)
        url = 'http://searchdns.netcraft.com' + link[0]
        return url

    def create_cookies(self, cookie):
        """Build the cookie dict Netcraft expects, including the JS-verification hash."""
        cookies = dict()
        cookies_list = cookie[0:cookie.find(';')].split("=")
        cookies[cookies_list[0]] = cookies_list[1]
        # hashlib.sha1 requires utf-8 encoded str
        cookies['netcraft_js_verification_response'] = hashlib.sha1(urllib.unquote(cookies_list[1]).encode('utf-8')).hexdigest()
        return cookies

    def get_cookies(self, headers):
        """Derive the verification cookies from a response's Set-Cookie header."""
        if 'set-cookie' in headers:
            cookies = self.create_cookies(headers['set-cookie'])
        else:
            cookies = {}
        return cookies

    def enumerate(self):
        """Walk Netcraft's paginated results, collecting subdomains.

        Fixes over the original: the unreachable ``break`` after ``return``
        is removed, and a failed request no longer crashes on ``None``
        (``resp.headers`` / substring test) -- we return what we have.
        """
        # Prime the JS-verification cookie with a throwaway query first.
        start_url = self.base_url.format(domain='example.com')
        resp = self.req(start_url)
        cookies = self.get_cookies(resp.headers) if resp is not None else {}
        url = self.base_url.format(domain=self.domain)
        while True:
            resp = self.get_response(self.req(url, cookies))
            if resp is None:
                # Network error or empty response: stop with what we have.
                return self.subdomains
            self.extract_domains(resp)
            if 'Next Page' not in resp:
                return self.subdomains
            url = self.get_next(resp)
            self.should_sleep()

    def extract_domains(self, resp):
        """Pull hosts out of a results page; record new subdomains of the target."""
        links_list = list()
        link_regx = re.compile('<a class="results-table__host" href="(.*?)"')
        try:
            links_list = link_regx.findall(resp)
            for link in links_list:
                subdomain = urlparse.urlparse(link).netloc
                if not subdomain.endswith(self.domain):
                    continue
                if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
                    if self.verbose:
                        self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
                    self.subdomains.append(subdomain.strip())
        except Exception:
            pass
        return links_list
class DNSdumpster(enumratorBaseThreaded):
    """Subdomain enumerator backed by dnsdumpster.com, with live DNS validation.

    Unlike the search-engine enumerators, this one also resolves each
    candidate host (A record via Google DNS) and exposes only resolvable
    hosts in ``live_subdomains``.
    """

    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        base_url = 'https://dnsdumpster.com/'
        self.live_subdomains = []
        self.engine_name = "DNSdumpster"
        self.threads = 70
        # Caps the number of concurrent DNS lookups in check_host().
        self.lock = threading.BoundedSemaphore(value=self.threads)
        self.q = q
        super(DNSdumpster, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        return

    def check_host(self, host):
        """Return True and record *host* in live_subdomains if it has an A record."""
        is_valid = False
        Resolver = dns.resolver.Resolver()
        Resolver.nameservers = ['8.8.8.8', '8.8.4.4']
        self.lock.acquire()
        try:
            ip = Resolver.query(host, 'A')[0].to_text()
            if ip:
                if self.verbose:
                    self.print_("%s%s: %s%s" % (R, self.engine_name, W, host))
                is_valid = True
                self.live_subdomains.append(host)
        except Exception:
            # Unresolvable hosts are simply skipped (best-effort validation).
            pass
        finally:
            # Always release the semaphore, even if dnspython raises.
            self.lock.release()
        return is_valid

    def req(self, req_method, url, params=None):
        """Issue a GET or POST to dnsdumpster with the required Referer header."""
        params = params or {}
        headers = dict(self.headers)
        headers['Referer'] = 'https://dnsdumpster.com'
        try:
            if req_method == 'GET':
                resp = self.session.get(url, headers=headers, timeout=self.timeout)
            else:
                resp = self.session.post(url, data=params, headers=headers, timeout=self.timeout)
        except Exception as e:
            self.print_(e)
            resp = None
        return self.get_response(resp)

    def get_csrftoken(self, resp):
        """Extract the Django CSRF token from the dnsdumpster landing page."""
        csrf_regex = re.compile('<input type="hidden" name="csrfmiddlewaretoken" value="(.*?)">', re.S)
        token = csrf_regex.findall(resp)[0]
        return token.strip()

    def enumerate(self):
        """Submit the domain to dnsdumpster, then resolve all candidates.

        Fix over the original: threads are started first and joined
        afterwards, so DNS checks actually run concurrently (the original
        joined each thread immediately after starting it, serializing them).
        """
        resp = self.req('GET', self.base_url)
        token = self.get_csrftoken(resp)
        params = {'csrfmiddlewaretoken': token, 'targetip': self.domain}
        post_resp = self.req('POST', self.base_url, params)
        self.extract_domains(post_resp)
        workers = []
        for subdomain in self.subdomains:
            t = threading.Thread(target=self.check_host, args=(subdomain,))
            t.start()
            workers.append(t)
        for t in workers:
            t.join()
        return self.live_subdomains

    def extract_domains(self, resp):
        """Parse the Host Records table out of the response HTML."""
        tbl_regex = re.compile('<a name="hostanchor"><\/a>Host Records.*?<table.*?>(.*?)</table>', re.S)
        link_regex = re.compile('<td class="col-md-4">(.*?)<br>', re.S)
        links = []
        try:
            results_tbl = tbl_regex.findall(resp)[0]
        except IndexError:
            results_tbl = ''
        links_list = link_regex.findall(results_tbl)
        links = list(set(links_list))
        for link in links:
            subdomain = link.strip()
            if not subdomain.endswith(self.domain):
                continue
            if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
                self.subdomains.append(subdomain.strip())
        return links
class Virustotal(enumratorBaseThreaded):
    """Subdomain enumerator backed by VirusTotal's UI subdomains endpoint."""

    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        base_url = 'https://www.virustotal.com/ui/domains/{domain}/subdomains'
        self.engine_name = "Virustotal"
        self.lock = threading.Lock()
        self.q = q
        super(Virustotal, self).__init__(base_url, self.engine_name, domain,
                                         subdomains, q=q, silent=silent, verbose=verbose)
        self.url = self.base_url.format(domain=self.domain)
        return

    # the main send_req need to be rewritten
    def send_req(self, url):
        """GET *url*; print errors and normalize through get_response."""
        try:
            response = self.session.get(url, headers=self.headers, timeout=self.timeout)
        except Exception as e:
            self.print_(e)
            response = None
        return self.get_response(response)

    # once the send_req is rewritten we don't need to call this function, the stock one should be ok
    def enumerate(self):
        """Follow VirusTotal's paginated JSON cursor until it is exhausted."""
        while self.url != '':
            payload = json.loads(self.send_req(self.url))
            if 'error' in payload:
                self.print_(R + "[!] Error: Virustotal probably now is blocking our requests" + W)
                break
            has_next = 'links' in payload and 'next' in payload['links']
            self.url = payload['links']['next'] if has_next else ''
            self.extract_domains(payload)
        return self.subdomains

    def extract_domains(self, resp):
        """Record domain-type entries from the already-parsed JSON payload."""
        # resp is already parsed as json
        try:
            for item in resp['data']:
                if item['type'] != 'domain':
                    continue
                host = item['id']
                if not host.endswith(self.domain):
                    continue
                if host != self.domain and host not in self.subdomains:
                    if self.verbose:
                        self.print_("%s%s: %s%s" % (R, self.engine_name, W, host))
                    self.subdomains.append(host.strip())
        except Exception:
            pass
class ThreatCrowd(enumratorBaseThreaded):
    """Subdomain enumerator backed by the ThreatCrowd domain-report API."""

    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        base_url = 'https://www.threatcrowd.org/searchApi/v2/domain/report/?domain={domain}'
        self.engine_name = "ThreatCrowd"
        self.lock = threading.Lock()
        self.q = q
        super(ThreatCrowd, self).__init__(base_url, self.engine_name, domain,
                                          subdomains, q=q, silent=silent, verbose=verbose)
        return

    def req(self, url):
        """GET *url*; swallow network errors and normalize via get_response."""
        try:
            response = self.session.get(url, headers=self.headers, timeout=self.timeout)
        except Exception:
            response = None
        return self.get_response(response)

    def enumerate(self):
        """Single-request enumeration: fetch the JSON report and mine it."""
        report = self.req(self.base_url.format(domain=self.domain))
        self.extract_domains(report)
        return self.subdomains

    def extract_domains(self, resp):
        """Record every reported subdomain that belongs to the target domain."""
        try:
            for candidate in json.loads(resp)['subdomains']:
                host = candidate.strip()
                if not host.endswith(self.domain):
                    continue
                if host != self.domain and host not in self.subdomains:
                    if self.verbose:
                        self.print_("%s%s: %s%s" % (R, self.engine_name, W, host))
                    self.subdomains.append(host.strip())
        except Exception:
            pass
class CrtSearch(enumratorBaseThreaded):
    """Subdomain enumerator backed by crt.sh certificate-transparency logs."""

    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        base_url = 'https://crt.sh/?q=%25.{domain}'
        self.engine_name = "SSL Certificates"
        self.lock = threading.Lock()
        self.q = q
        super(CrtSearch, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        return

    def req(self, url):
        """GET *url*; swallow network errors and normalize via get_response."""
        try:
            resp = self.session.get(url, headers=self.headers, timeout=self.timeout)
        except Exception:
            resp = None
        return self.get_response(resp)

    def enumerate(self):
        """Fetch the crt.sh listing for %.domain and mine it for hostnames."""
        url = self.base_url.format(domain=self.domain)
        resp = self.req(url)
        if resp:
            self.extract_domains(resp)
        return self.subdomains

    def extract_domains(self, resp):
        """Parse certificate names out of the crt.sh HTML table.

        Wildcard names are skipped; e-mail style entries are reduced to the
        part after the '@'.
        """
        link_regx = re.compile('<TD>(.*?)</TD>')
        try:
            links = link_regx.findall(resp)
            for link in links:
                link = link.strip()
                # A single cell may list several names separated by <BR>.
                subdomains = link.split('<BR>') if '<BR>' in link else [link]
                for subdomain in subdomains:
                    if not subdomain.endswith(self.domain) or '*' in subdomain:
                        continue
                    if '@' in subdomain:
                        subdomain = subdomain[subdomain.find('@')+1:]
                    if subdomain not in self.subdomains and subdomain != self.domain:
                        if self.verbose:
                            self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
                        self.subdomains.append(subdomain.strip())
        except Exception as e:
            # Fix: use the engine's silenceable printer instead of a bare
            # print(), so parse errors respect silent mode like every other
            # engine in this file.
            self.print_(e)
class PassiveDNS(enumratorBaseThreaded):
    """Subdomain enumerator backed by the api.sublist3r.com passive-DNS API."""

    def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        base_url = 'https://api.sublist3r.com/search.php?domain={domain}'
        self.engine_name = "PassiveDNS"
        self.lock = threading.Lock()
        self.q = q
        super(PassiveDNS, self).__init__(base_url, self.engine_name, domain,
                                         subdomains, q=q, silent=silent, verbose=verbose)
        return

    def req(self, url):
        """GET *url*; swallow network errors and normalize via get_response."""
        try:
            response = self.session.get(url, headers=self.headers, timeout=self.timeout)
        except Exception:
            response = None
        return self.get_response(response)

    def enumerate(self):
        """Single-request enumeration: the API returns a JSON list of hosts."""
        resp = self.req(self.base_url.format(domain=self.domain))
        if resp:
            self.extract_domains(resp)
        return self.subdomains

    def extract_domains(self, resp):
        """Record every host from the JSON list that is new to us."""
        try:
            for host in json.loads(resp):
                if host != self.domain and host not in self.subdomains:
                    if self.verbose:
                        self.print_("%s%s: %s%s" % (R, self.engine_name, W, host))
                    self.subdomains.append(host.strip())
        except Exception:
            pass
class portscan():
    """Tiny TCP connect() scanner run across the discovered subdomains."""

    def __init__(self, subdomains, ports):
        self.subdomains = subdomains
        self.ports = ports
        self.threads = 20
        # Caps the number of hosts being scanned concurrently.
        self.lock = threading.BoundedSemaphore(value=self.threads)

    def port_scan(self, host, ports):
        """Attempt a TCP connect to each port on *host*; print any open ones.

        Fixes over the original: the socket is closed in a finally clause
        (the original leaked the fd when connect_ex raised), and the
        semaphore is released in a finally clause so an unexpected error
        cannot permanently consume a scan slot.
        """
        openports = []
        self.lock.acquire()
        try:
            for port in ports:
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                try:
                    s.settimeout(2)
                    # connect_ex returns 0 on success instead of raising.
                    if s.connect_ex((host, int(port))) == 0:
                        openports.append(port)
                except Exception:
                    # Unresolvable host / bad port value: skip, keep scanning.
                    pass
                finally:
                    s.close()
        finally:
            self.lock.release()
        if len(openports) > 0:
            print("%s%s%s - %sFound open ports:%s %s%s%s" % (G, host, W, R, W, Y, ', '.join(openports), W))

    def run(self):
        """Kick off one scanning thread per subdomain (bounded by the semaphore)."""
        for subdomain in self.subdomains:
            t = threading.Thread(target=self.port_scan, args=(subdomain, self.ports))
            t.start()
def main(domain, threads, savefile, ports, silent, verbose, enable_bruteforce, engines):
    """
    Enumerate subdomains of *domain* across all chosen search engines.

    :param domain: target domain (bare, without scheme)
    :param threads: thread count handed to the subbrute bruteforce module
    :param savefile: optional path to write the sorted results to
    :param ports: optional comma-separated port list to scan on each result
    :param silent: suppress all console output when True
    :param verbose: print each subdomain as it is discovered
    :param enable_bruteforce: run the subbrute wordlist module (defaults on when None)
    :param engines: comma-separated engine names, or None for all engines
    :return: sorted list of unique subdomains (empty list on invalid domain)
    """
    bruteforce_list = set()
    search_list = set()

    # multiprocessing.Manager proxies are problematic on Windows; use a plain list.
    if is_windows:
        subdomains_queue = list()
    else:
        subdomains_queue = multiprocessing.Manager().list()

    # Bruteforce defaults to enabled unless explicitly disabled.
    if enable_bruteforce or enable_bruteforce is None:
        enable_bruteforce = True

    # Validate domain
    domain_check = re.compile("^(http|https)?[a-zA-Z0-9]+([\-\.]{1}[a-zA-Z0-9]+)*\.[a-zA-Z]{2,}$")
    if not domain_check.match(domain):
        if not silent:
            print(R + "Error: Please enter a valid domain" + W)
        return []

    # Fix: prepend a scheme only when neither http:// nor https:// is present.
    # The original used `or`, which is always true (no string starts with
    # both prefixes) and therefore unconditionally prepended 'http://'.
    if not domain.startswith('http://') and not domain.startswith('https://'):
        domain = 'http://' + domain

    parsed_domain = urlparse.urlparse(domain)

    if not silent:
        print(B + "[-] Enumerating subdomains now for %s" % parsed_domain.netloc + W)

    if verbose and not silent:
        print(Y + "[-] verbosity is enabled, will show the subdomains results in realtime" + W)

    supported_engines = {'baidu': BaiduEnum,
                         'yahoo': YahooEnum,
                         'google': GoogleEnum,
                         'bing': BingEnum,
                         'ask': AskEnum,
                         'netcraft': NetcraftEnum,
                         'dnsdumpster': DNSdumpster,
                         'virustotal': Virustotal,
                         'threatcrowd': ThreatCrowd,
                         'ssl': CrtSearch,
                         'passivedns': PassiveDNS
                         }

    chosenEnums = []
    if engines is None:
        chosenEnums = [
            BaiduEnum, YahooEnum, GoogleEnum, BingEnum, AskEnum,
            NetcraftEnum, DNSdumpster, Virustotal, ThreatCrowd,
            CrtSearch, PassiveDNS
        ]
    else:
        engines = engines.split(',')
        for engine in engines:
            if engine.lower() in supported_engines:
                chosenEnums.append(supported_engines[engine.lower()])

    # Start the engines enumeration (each engine runs in its own thread).
    enums = [enum(domain, [], q=subdomains_queue, silent=silent, verbose=verbose) for enum in chosenEnums]
    for enum in enums:
        enum.start()
    for enum in enums:
        enum.join()

    subdomains = set(subdomains_queue)
    for subdomain in subdomains:
        search_list.add(subdomain)

    if enable_bruteforce:
        if not silent:
            print(G + "[-] Starting bruteforce module now using subbrute.." + W)
        record_type = False
        path_to_file = os.path.dirname(os.path.realpath(__file__))
        subs = os.path.join(path_to_file, 'subbrute', 'names.txt')
        resolvers = os.path.join(path_to_file, 'subbrute', 'resolvers.txt')
        process_count = threads
        output = False
        json_output = False
        bruteforce_list = subbrute.print_target(parsed_domain.netloc, record_type, subs, resolvers, process_count, output, json_output, search_list, verbose)

    subdomains = search_list.union(bruteforce_list)

    if subdomains:
        subdomains = sorted(subdomains, key=subdomain_sorting_key)

        if savefile:
            write_file(savefile, subdomains)

        if not silent:
            print(Y + "[-] Total Unique Subdomains Found: %s" % len(subdomains) + W)

        if ports:
            if not silent:
                print(G + "[-] Start port scan now for the following ports: %s%s" % (Y, ports) + W)
            ports = ports.split(',')
            pscan = portscan(subdomains, ports)
            pscan.run()
        elif not silent:
            for subdomain in subdomains:
                print(G + subdomain + W)
    return subdomains
def interactive():
    """Command-line entry point: parse args, print the banner, run main()."""
    args = parse_args()
    verbose = args.verbose
    # --verbose with no value (None) also means verbose.
    if verbose or verbose is None:
        verbose = True
    if args.no_color:
        no_color()
    banner()
    main(args.domain, args.threads, args.output, args.ports,
         silent=False, verbose=verbose,
         enable_bruteforce=args.bruteforce, engines=args.engines)
if __name__ == "__main__":
    # Run the interactive CLI only when executed as a script.
    interactive()
|
test_dota_base_q.py | # -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import tensorflow as tf
import cv2
import numpy as np
import math
from tqdm import tqdm
import argparse
from multiprocessing import Queue, Process
from utils import tools
from libs.label_name_dict.label_dict import LabelMap
from libs.utils.draw_box_in_img import DrawBox
from libs.utils.coordinate_convert import forward_convert, backward_convert
from libs.utils import nms_rotate
from libs.utils.rotate_polygon_nms import rotate_gpu_nms
def parse_args():
    """Build and parse the command-line arguments for DOTA testing."""
    ap = argparse.ArgumentParser('Start testing.')
    ap.add_argument('--test_dir', dest='test_dir', type=str,
                    default='/data/DOTA/test/images/',
                    help='evaluate imgs dir ')
    ap.add_argument('--gpus', dest='gpus', type=str,
                    default='0,1,2,3,4,5,6,7',
                    help='gpu id')
    ap.add_argument('--show_box', '-s', default=False, action='store_true')
    ap.add_argument('--multi_scale', '-ms', default=False, action='store_true')
    ap.add_argument('--flip_img', '-f', default=False, action='store_true')
    # NOTE: default=np.inf bypasses type=int when the flag is omitted,
    # matching the original behavior (np.inf means "all images").
    ap.add_argument('--num_imgs', dest='num_imgs', type=int, default=np.inf,
                    help='test image number')
    ap.add_argument('--h_len', dest='h_len', type=int, default=600,
                    help='image height')
    ap.add_argument('--w_len', dest='w_len', type=int, default=600,
                    help='image width')
    ap.add_argument('--h_overlap', dest='h_overlap', type=int, default=150,
                    help='height overlap')
    ap.add_argument('--w_overlap', dest='w_overlap', type=int, default=150,
                    help='width overlap')
    return ap.parse_args()
class TestDOTA(object):
    """Multi-GPU, sliding-window evaluation driver for rotated detection on DOTA.

    Splits the test images across the GPUs listed in --gpus, runs each large
    image through the detector as overlapping crops (optionally multi-scale
    and with horizontal/vertical flip augmentation), merges the per-crop
    detections with rotated NMS, and either visualizes the results or writes
    DOTA Task1 submission files.
    """

    def __init__(self, cfgs):
        """Store configs, parse CLI args, and build label<->name lookup maps."""
        self.cfgs = cfgs
        self.args = parse_args()
        label_map = LabelMap(cfgs)
        self.name_label_map, self.label_name_map = label_map.name2label(), label_map.label2name()

    def worker(self, gpu_id, images, det_net, result_queue):
        """Detection worker for one GPU.

        Builds the TF1 graph once, restores the checkpoint, then processes
        every image in *images*, pushing one result dict per image onto
        *result_queue* with keys 'boxes' (8-coordinate quadrilaterals, as
        written out in Task1 format below), 'scores', 'labels', 'image_id'.
        """
        # Pin this process to a single GPU before any TF op is created.
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

        img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3])  # is RGB. not BGR
        img_batch = tf.cast(img_plac, tf.float32)

        # v1d ResNet backbones expect [0,1]-scaled, mean/std-normalized input;
        # other backbones only subtract the pixel mean.
        if self.cfgs.NET_NAME in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:
            img_batch = (img_batch / 255 - tf.constant(self.cfgs.PIXEL_MEAN_)) / tf.constant(self.cfgs.PIXEL_STD)
        else:
            img_batch = img_batch - tf.constant(self.cfgs.PIXEL_MEAN)

        img_batch = tf.expand_dims(img_batch, axis=0)

        detection_boxes, detection_scores, detection_category = det_net.build_whole_detection_network(
            input_img_batch=img_batch)

        init_op = tf.group(
            tf.global_variables_initializer(),
            tf.local_variables_initializer()
        )

        restorer, restore_ckpt = det_net.get_restorer()

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:
            sess.run(init_op)
            if not restorer is None:
                restorer.restore(sess, restore_ckpt)
                print('restore model %d ...' % gpu_id)

            for img_path in images:
                # if 'P0016' not in img_path:
                #     continue
                img = cv2.imread(img_path)

                box_res_rotate = []
                label_res_rotate = []
                score_res_rotate = []

                imgH = img.shape[0]
                imgW = img.shape[1]

                # Normalize IMG_SHORT_SIDE_LEN to a list; keep only the first
                # scale unless --multi_scale was requested.
                img_short_side_len_list = self.cfgs.IMG_SHORT_SIDE_LEN if isinstance(self.cfgs.IMG_SHORT_SIDE_LEN, list) else [
                    self.cfgs.IMG_SHORT_SIDE_LEN]
                img_short_side_len_list = [img_short_side_len_list[0]] if not self.args.multi_scale else img_short_side_len_list

                # Zero-pad images smaller than one crop so slicing below is valid.
                if imgH < self.args.h_len:
                    temp = np.zeros([self.args.h_len, imgW, 3], np.float32)
                    temp[0:imgH, :, :] = img
                    img = temp
                    imgH = self.args.h_len

                if imgW < self.args.w_len:
                    temp = np.zeros([imgH, self.args.w_len, 3], np.float32)
                    temp[:, 0:imgW, :] = img
                    img = temp
                    imgW = self.args.w_len

                # Slide an (h_len x w_len) window with the configured overlap;
                # the last window in each dimension is clamped to the image edge.
                for hh in range(0, imgH, self.args.h_len - self.args.h_overlap):
                    if imgH - hh - 1 < self.args.h_len:
                        hh_ = imgH - self.args.h_len
                    else:
                        hh_ = hh
                    for ww in range(0, imgW, self.args.w_len - self.args.w_overlap):
                        if imgW - ww - 1 < self.args.w_len:
                            ww_ = imgW - self.args.w_len
                        else:
                            ww_ = ww
                        src_img = img[hh_:(hh_ + self.args.h_len), ww_:(ww_ + self.args.w_len), :]

                        for short_size in img_short_side_len_list:
                            # Resize the crop so its short side equals short_size,
                            # capping the long side at IMG_MAX_LENGTH.
                            max_len = self.cfgs.IMG_MAX_LENGTH
                            if self.args.h_len < self.args.w_len:
                                new_h, new_w = short_size, min(int(short_size * float(self.args.w_len) / self.args.h_len), max_len)
                            else:
                                new_h, new_w = min(int(short_size * float(self.args.h_len) / self.args.w_len), max_len), short_size
                            img_resize = cv2.resize(src_img, (new_w, new_h))

                            # BGR (cv2) -> RGB via ::-1 before feeding the graph.
                            resized_img, det_boxes_r_, det_scores_r_, det_category_r_ = \
                                sess.run(
                                    [img_batch, detection_boxes, detection_scores, detection_category],
                                    feed_dict={img_plac: img_resize[:, :, ::-1]}
                                )

                            resized_h, resized_w = resized_img.shape[1], resized_img.shape[2]
                            src_h, src_w = src_img.shape[0], src_img.shape[1]

                            if len(det_boxes_r_) > 0:
                                # det_boxes_r_ = forward_convert(det_boxes_r_, False)
                                # Rescale detections back to crop resolution,
                                # then offset into full-image coordinates.
                                det_boxes_r_[:, 0::2] *= (src_w / resized_w)
                                det_boxes_r_[:, 1::2] *= (src_h / resized_h)

                                for ii in range(len(det_boxes_r_)):
                                    box_rotate = det_boxes_r_[ii]
                                    box_rotate[0::2] = box_rotate[0::2] + ww_
                                    box_rotate[1::2] = box_rotate[1::2] + hh_
                                    box_res_rotate.append(box_rotate)
                                    label_res_rotate.append(det_category_r_[ii])
                                    score_res_rotate.append(det_scores_r_[ii])

                            if self.args.flip_img:
                                # Horizontal flip pass: un-flip x via (src_w - x).
                                det_boxes_r_flip, det_scores_r_flip, det_category_r_flip = \
                                    sess.run(
                                        [detection_boxes, detection_scores, detection_category],
                                        feed_dict={img_plac: cv2.flip(img_resize, flipCode=1)[:, :, ::-1]}
                                    )
                                if len(det_boxes_r_flip) > 0:
                                    det_boxes_r_flip = forward_convert(det_boxes_r_flip, False)
                                    det_boxes_r_flip[:, 0::2] *= (src_w / resized_w)
                                    det_boxes_r_flip[:, 1::2] *= (src_h / resized_h)

                                    for ii in range(len(det_boxes_r_flip)):
                                        box_rotate = det_boxes_r_flip[ii]
                                        box_rotate[0::2] = (src_w - box_rotate[0::2]) + ww_
                                        box_rotate[1::2] = box_rotate[1::2] + hh_
                                        box_res_rotate.append(box_rotate)
                                        label_res_rotate.append(det_category_r_flip[ii])
                                        score_res_rotate.append(det_scores_r_flip[ii])

                                # Vertical flip pass: un-flip y via (src_h - y).
                                det_boxes_r_flip, det_scores_r_flip, det_category_r_flip = \
                                    sess.run(
                                        [detection_boxes, detection_scores, detection_category],
                                        feed_dict={img_plac: cv2.flip(img_resize, flipCode=0)[:, :, ::-1]}
                                    )
                                if len(det_boxes_r_flip) > 0:
                                    det_boxes_r_flip = forward_convert(det_boxes_r_flip, False)
                                    det_boxes_r_flip[:, 0::2] *= (src_w / resized_w)
                                    det_boxes_r_flip[:, 1::2] *= (src_h / resized_h)

                                    for ii in range(len(det_boxes_r_flip)):
                                        box_rotate = det_boxes_r_flip[ii]
                                        box_rotate[0::2] = box_rotate[0::2] + ww_
                                        box_rotate[1::2] = (src_h - box_rotate[1::2]) + hh_
                                        box_res_rotate.append(box_rotate)
                                        label_res_rotate.append(det_category_r_flip[ii])
                                        score_res_rotate.append(det_scores_r_flip[ii])

                box_res_rotate = np.array(box_res_rotate)
                label_res_rotate = np.array(label_res_rotate)
                score_res_rotate = np.array(score_res_rotate)

                box_res_rotate_ = []
                label_res_rotate_ = []
                score_res_rotate_ = []

                # Per-class rotated-NMS IoU thresholds for merging the
                # overlapping-crop duplicates.
                threshold = {'roundabout': 0.1, 'tennis-court': 0.3, 'swimming-pool': 0.1, 'storage-tank': 0.2,
                             'soccer-ball-field': 0.3, 'small-vehicle': 0.2, 'ship': 0.2, 'plane': 0.3,
                             'large-vehicle': 0.1, 'helicopter': 0.2, 'harbor': 0.0001, 'ground-track-field': 0.3,
                             'bridge': 0.0001, 'basketball-court': 0.3, 'baseball-diamond': 0.3}

                for sub_class in range(1, self.cfgs.CLASS_NUM + 1):
                    index = np.where(label_res_rotate == sub_class)[0]
                    if len(index) == 0:
                        continue
                    tmp_boxes_r = box_res_rotate[index]
                    tmp_label_r = label_res_rotate[index]
                    tmp_score_r = score_res_rotate[index]

                    # Convert quads to (x, y, w, h, theta) for the GPU NMS kernel.
                    tmp_boxes_r_ = backward_convert(tmp_boxes_r, False)

                    # try:
                    #     inx = nms_rotate.nms_rotate_cpu(boxes=np.array(tmp_boxes_r_),
                    #                                     scores=np.array(tmp_score_r),
                    #                                     iou_threshold=threshold[self.label_name_map[sub_class]],
                    #                                     max_output_size=5000)
                    #
                    # except:
                    tmp_boxes_r_ = np.array(tmp_boxes_r_)
                    tmp = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
                    tmp[:, 0:-1] = tmp_boxes_r_
                    tmp[:, -1] = np.array(tmp_score_r)

                    # Note: the IoU of two same rectangles is 0, which is calculated by rotate_gpu_nms
                    jitter = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
                    jitter[:, 0] += np.random.rand(tmp_boxes_r_.shape[0], ) / 1000
                    inx = rotate_gpu_nms(np.array(tmp, np.float32) + np.array(jitter, np.float32),
                                         float(threshold[self.label_name_map[sub_class]]), 0)

                    box_res_rotate_.extend(np.array(tmp_boxes_r)[inx])
                    score_res_rotate_.extend(np.array(tmp_score_r)[inx])
                    label_res_rotate_.extend(np.array(tmp_label_r)[inx])

                result_dict = {'boxes': np.array(box_res_rotate_), 'scores': np.array(score_res_rotate_),
                               'labels': np.array(label_res_rotate_), 'image_id': img_path}
                result_queue.put_nowait(result_dict)

    def test_dota(self, det_net, real_test_img_list, txt_name):
        """Fan images out to one worker process per GPU and consume results.

        Depending on --show_box, either draws the detections onto the images
        or appends them to per-class DOTA Task1 submission files, recording
        each finished image name in *txt_name* for resumability.
        """
        save_path = os.path.join('./test_dota', self.cfgs.VERSION)

        nr_records = len(real_test_img_list)
        pbar = tqdm(total=nr_records)
        gpu_num = len(self.args.gpus.strip().split(','))

        nr_image = math.ceil(nr_records / gpu_num)
        result_queue = Queue(500)
        procs = []

        # Each GPU gets a contiguous slice of the image list.
        for i, gpu_id in enumerate(self.args.gpus.strip().split(',')):
            start = i * nr_image
            end = min(start + nr_image, nr_records)
            split_records = real_test_img_list[start:end]
            proc = Process(target=self.worker, args=(int(gpu_id), split_records, det_net, result_queue))
            print('process:%d, start:%d, end:%d' % (i, start, end))
            proc.start()
            procs.append(proc)

        for i in range(nr_records):
            res = result_queue.get()

            if self.args.show_box:
                # Visualization branch: draw boxes above VIS_SCORE and save.
                nake_name = res['image_id'].split('/')[-1]
                tools.makedirs(os.path.join(save_path, 'dota_img_vis'))
                draw_path = os.path.join(save_path, 'dota_img_vis', nake_name)
                draw_img = np.array(cv2.imread(res['image_id']), np.float32)

                # detected_boxes = backward_convert(res['boxes'], with_label=False)

                detected_indices = res['scores'] >= self.cfgs.VIS_SCORE
                detected_scores = res['scores'][detected_indices]
                detected_boxes = res['boxes'][detected_indices]
                detected_categories = res['labels'][detected_indices]

                drawer = DrawBox(self.cfgs)

                final_detections = drawer.draw_boxes_with_label_and_scores(draw_img,
                                                                           boxes=detected_boxes,
                                                                           labels=detected_categories,
                                                                           scores=detected_scores,
                                                                           method=2,
                                                                           is_csl=False,
                                                                           in_graph=False)
                cv2.imwrite(draw_path, final_detections)

            else:
                # Submission branch: append each detection to its class file
                # in DOTA Task1 format: image_id score x1 y1 x2 y2 x3 y3 x4 y4.
                CLASS_DOTA = self.name_label_map.keys()
                write_handle = {}

                tools.makedirs(os.path.join(save_path, 'dota_res'))
                for sub_class in CLASS_DOTA:
                    if sub_class == 'back_ground':
                        continue
                    write_handle[sub_class] = open(os.path.join(save_path, 'dota_res', 'Task1_%s.txt' % sub_class), 'a+')

                for i, rbox in enumerate(res['boxes']):
                    command = '%s %.3f %.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f\n' % (res['image_id'].split('/')[-1].split('.')[0],
                                                                                     res['scores'][i],
                                                                                     rbox[0], rbox[1], rbox[2], rbox[3],
                                                                                     rbox[4], rbox[5], rbox[6], rbox[7],)
                    write_handle[self.label_name_map[res['labels'][i]]].write(command)

                for sub_class in CLASS_DOTA:
                    if sub_class == 'back_ground':
                        continue
                    write_handle[sub_class].close()

                # Record the finished image so get_test_image can skip it on rerun.
                fw = open(txt_name, 'a+')
                fw.write('{}\n'.format(res['image_id'].split('/')[-1]))
                fw.close()

            pbar.set_description("Test image %s" % res['image_id'].split('/')[-1])
            pbar.update(1)

        for p in procs:
            p.join()

    def get_test_image(self):
        """Return the list of test image paths still to be processed.

        In submission mode, images already listed in VERSION.txt are skipped
        so an interrupted run can resume; in --show_box mode all images are
        returned. Honors --num_imgs as an upper bound.
        """
        txt_name = '{}.txt'.format(self.cfgs.VERSION)
        if not self.args.show_box:
            if not os.path.exists(txt_name):
                fw = open(txt_name, 'w')
                fw.close()

            fr = open(txt_name, 'r')
            img_filter = fr.readlines()
            print('****************************' * 3)
            print('Already tested imgs:', img_filter)
            print('****************************' * 3)
            fr.close()

            test_imgname_list = [os.path.join(self.args.test_dir, img_name) for img_name in os.listdir(self.args.test_dir)
                                 if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff')) and
                                 (img_name + '\n' not in img_filter)]
        else:
            test_imgname_list = [os.path.join(self.args.test_dir, img_name) for img_name in os.listdir(self.args.test_dir)
                                 if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff'))]

        assert len(test_imgname_list) != 0, 'test_dir has no imgs there.' \
                                            ' Note that, we only support img format of (.jpg, .png, and .tiff) '

        if self.args.num_imgs == np.inf:
            real_test_img_list = test_imgname_list
        else:
            real_test_img_list = test_imgname_list[: self.args.num_imgs]

        return real_test_img_list
|
multi_lead_autoencoder.py | """
Creates a deterministic autoencoder for dimension reduction of 4-lead ECG signals. Saves the encoded and reconstructed signals to the data folder.
"""
import numpy as np
import os
import threading
from tensorflow import keras
from tensorflow.keras.layers import Dense, Flatten, Reshape, Input, InputLayer, Dropout
from tensorflow.keras.models import Sequential, Model
def read_in(file_index, normalized):
    """
    Load one patient's heartbeat array from the Working_Data folder.
    :param file_index: patient number as string
    :param normalized: 1 to load the normalized file, anything else for the original
    :return: npy array of patient data across 4 leads
    """
    if normalized == 1:
        prefix = "Normalized_Fixed_Dim_HBs_Idx"
    else:
        prefix = "Fixed_Dim_HBs_Idx"
    return np.load(os.path.join("Working_Data", prefix + file_index + ".npy"))
def build_autoencoder(sig_shape, encode_size):
    """
    Builds a deterministic autoencoder, returning both the encoder and decoder models
    :param sig_shape: shape of input signal
    :param encode_size: dimension that we want to reduce to
    :return: encoder, decoder models
    """
    # Encoder: flatten, then funnel 200 -> 125 -> 100 -> 50 -> 25 -> encode_size.
    encoder = Sequential()
    encoder.add(InputLayer(sig_shape))
    encoder.add(Flatten())
    # encoder.add(Dense(350, activation = 'tanh'))
    encoder.add(Dense(200, activation = 'tanh', kernel_initializer='glorot_normal'))
    encoder.add(Dense(125, activation='relu', kernel_initializer='glorot_normal'))
    encoder.add(Dense(100, activation = 'relu', kernel_initializer='glorot_normal'))
    encoder.add(Dense(50, activation='relu', kernel_initializer='glorot_normal'))
    encoder.add(Dense(25, activation = 'relu', kernel_initializer='glorot_normal'))
    # Linear bottleneck layer (no activation) for the latent code.
    encoder.add(Dense(encode_size))

    # Decoder: mirror image of the encoder, ending with a linear layer
    # reshaped back to the original signal shape.
    decoder = Sequential()
    decoder.add(InputLayer((encode_size,)))
    decoder.add(Dense(25, activation = 'relu',kernel_initializer='glorot_normal'))
    decoder.add(Dense(50, activation='relu', kernel_initializer='glorot_normal'))
    decoder.add(Dense(100, activation = 'relu',kernel_initializer='glorot_normal'))
    decoder.add(Dense(125, activation='relu', kernel_initializer='glorot_normal'))
    decoder.add(Dense(200, activation = 'tanh',kernel_initializer='glorot_normal'))
    decoder.add(Dense(np.prod(sig_shape), activation = 'linear'))
    decoder.add(Reshape(sig_shape))

    return encoder, decoder
def training_ae(num_epochs, reduced_dim, file_index):
    """
    Training function for the deterministic autoencoder; saves the encoded and
    reconstructed arrays to Working_Data.

    :param num_epochs: number of epochs to use
    :param reduced_dim: goal dimension
    :param file_index: patient number
    :return: None
    """
    data = read_in(file_index, 1)
    signal_shape = data.shape[1:]
    encoder, decoder = build_autoencoder(signal_shape, reduced_dim)
    # Wire encoder -> decoder into a single trainable model.
    inp = Input(signal_shape)
    autoencoder = Model(inp, decoder(encoder(inp)))
    autoencoder.compile(optimizer='Adam', loss='mse')
    autoencoder.fit(x=data, y=data, epochs=num_epochs)
    encoded = encoder.predict(data)
    reconstruction = decoder.predict(encoded)
    # Both outputs share the same "<dim>d_Idx<patient>.npy" suffix.
    suffix = str(reduced_dim) + "d_Idx" + str(file_index) + ".npy"
    np.save(os.path.join("Working_Data", "reconstructed_ae_" + suffix), reconstruction)
    np.save(os.path.join("Working_Data", "reduced_ae_" + suffix), encoded)
def run_over(num_epochs, encoded_dim):
    """
    Run the training autoencoder over every patient index.

    :param num_epochs: number of epochs per patient
    :param encoded_dim: latent dimension to reduce to
    :return: None; saves reconstructed and dim-reduced arrays per patient
    """
    # Patient indices with usable fixed-dimension heartbeat files.
    patient_ids = ['1', '4', '5', '6', '7', '8', '10', '11', '12', '14', '16', '17',
                   '18', '19', '20', '21', '22', '25', '27', '28', '30', '31', '32',
                   '33', '34', '35', '37', '38', '39', '40', '41', '42', '44', '45',
                   '46', '47', '48', '49', '50', '52', '53', '54', '55', '56']
    for pid in patient_ids:
        print(f"Starting on index: {pid}")
        training_ae(num_epochs, encoded_dim, pid)
        print(f"Completed {pid} reconstruction and encoding")
if __name__ == "__main__":
    # Train autoencoders for latent dims 5-9, then 10-14, running five
    # dimensions concurrently per batch and joining between batches.
    for dim_batch in (range(5, 10), range(10, 15)):
        workers = []
        for enc_dim in dim_batch:
            worker = threading.Thread(target=run_over, args=(50, enc_dim))
            worker.start()
            workers.append(worker)
        for worker in workers:
            worker.join()
|
test_index.py | """
For testing index operations, including `create_index`, `get_index_info` and `drop_index` interfaces
"""
import logging
import pytest
import time
import pdb
import threading
from multiprocessing import Pool, Process
import numpy
import sklearn.preprocessing
from milvus import IndexType, MetricType
from utils import *
# Number of fixture vectors inserted per test and their dimensionality.
nb = 6000
dim = 128
index_file_size = 10
# Shared fixture data: nb random vectors, L2-normalized so L2/IP distances
# are comparable; converted to plain lists for the client API.
vectors = gen_vectors(nb, dim)
vectors = sklearn.preprocessing.normalize(vectors, axis=1, norm='l2')
vectors = vectors.tolist()
BUILD_TIMEOUT = 300  # seconds allowed for index-building test cases
nprobe = 1  # default search nprobe
tag = "1970-01-01"  # partition tag used by partition-related cases
NLIST = 4046  # valid nlist for IVF-family indexes
INVALID_NLIST = 100000000  # deliberately out-of-range nlist
class TestIndexBase:
    @pytest.fixture(
        scope="function",
        params=gen_index()
    )
    def get_index(self, request, connect):
        """Parametrized index config; skip combos the server mode can't build."""
        param = request.param
        if str(connect._cmd("mode")[1]) == "CPU" and param["index_type"] == IndexType.IVF_SQ8H:
            pytest.skip("sq8h not support in CPU mode")
        if str(connect._cmd("mode")[1]) == "GPU" and param["index_type"] == IndexType.IVF_PQ:
            pytest.skip("ivfpq not support in GPU mode")
        return param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in CPU mode")
if str(connect._cmd("mode")[1]) == "GPU":
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("ivfpq not support in GPU mode")
return request.param
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index(self, connect, collection, get_simple_index):
        '''
        target: test create index interface
        method: create collection and add vectors in it, create index
        expected: return code equals to 0, and search success
        '''
        index_param = get_simple_index["index_param"]
        index_type = get_simple_index["index_type"]
        logging.getLogger().info(get_simple_index)
        # NOTE(review): insert status is overwritten below without being
        # asserted; only the create_index status is checked.
        status, ids = connect.insert(collection, vectors)
        status = connect.create_index(collection, index_type, index_param)
        assert status.OK()
    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index_no_vectors(self, connect, collection, get_simple_index):
        '''
        target: test create index interface on an empty collection
        method: create collection with no vectors in it, create index
        expected: return code equals to 0 (index build succeeds on empty data)
        '''
        index_param = get_simple_index["index_param"]
        index_type = get_simple_index["index_type"]
        logging.getLogger().info(get_simple_index)
        status = connect.create_index(collection, index_type, index_param)
        assert status.OK()
    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index_partition(self, connect, collection, get_simple_index):
        '''
        target: test create index interface with a partition
        method: create collection, create partition, add vectors into the
                partition, then create index on the collection
        expected: return code equals to 0
        '''
        index_param = get_simple_index["index_param"]
        index_type = get_simple_index["index_type"]
        logging.getLogger().info(get_simple_index)
        # Partition/insert statuses are intentionally not asserted here;
        # the case targets create_index only.
        status = connect.create_partition(collection, tag)
        status, ids = connect.insert(collection, vectors, partition_tag=tag)
        status = connect.create_index(collection, index_type, index_param)
        assert status.OK()
    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index_partition_flush(self, connect, collection, get_simple_index):
        '''
        target: test create index interface with a partition, after an explicit flush
        method: create collection, create partition, add vectors into the
                partition, flush, then create index
        expected: return code equals to 0
        '''
        index_param = get_simple_index["index_param"]
        index_type = get_simple_index["index_type"]
        logging.getLogger().info(get_simple_index)
        status = connect.create_partition(collection, tag)
        status, ids = connect.insert(collection, vectors, partition_tag=tag)
        # Explicit flush: index is built over persisted segments.
        connect.flush()
        status = connect.create_index(collection, index_type, index_param)
        assert status.OK()
# @pytest.mark.level(2)
# def test_create_index_without_connect(self, dis_connect, collection):
# '''
# target: test create index without connection
# method: create collection and add vectors in it, check if added successfully
# expected: raise exception
# '''
# nlist = NLIST
# index_type = IndexType.IVF_SQ8
# index_param = {"nlist": nlist}
# with pytest.raises(Exception) as e:
# status = dis_connect.create_index(collection, index_type, index_param)
    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index_search_with_query_vectors(self, connect, collection, get_simple_index):
        '''
        target: test create index interface, then search with several query vectors
        method: create collection, add vectors, create index, search
        expected: search succeeds and returns one result list per query vector
        '''
        index_param = get_simple_index["index_param"]
        index_type = get_simple_index["index_type"]
        logging.getLogger().info(get_simple_index)
        status, ids = connect.insert(collection, vectors)
        status = connect.create_index(collection, index_type, index_param)
        logging.getLogger().info(connect.get_index_info(collection))
        # Query with the first three inserted vectors so matches must exist.
        query_vecs = [vectors[0], vectors[1], vectors[2]]
        top_k = 5
        search_param = get_search_param(index_type)
        status, result = connect.search(collection, top_k, query_vecs, params=search_param)
        assert status.OK()
        assert len(result) == len(query_vecs)
        logging.getLogger().info(result)
    @pytest.mark.timeout(BUILD_TIMEOUT)
    @pytest.mark.level(2)
    def test_create_index_multithread(self, connect, collection, args):
        '''
        target: test create index interface called concurrently from threads
        method: insert vectors, then build the same index from 8 threads,
                each with its own client connection
        expected: all builds succeed and an exact-match search still works
        '''
        status, ids = connect.insert(collection, vectors)
        def build(connect):
            # `connect` here is the per-thread client passed via args=(m,).
            status = connect.create_index(collection, IndexType.IVFLAT, {"nlist": NLIST})
            assert status.OK()
        threads_num = 8
        threads = []
        for i in range(threads_num):
            m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
            t = threading.Thread(target=build, args=(m,))
            threads.append(t)
            t.start()
            # Stagger thread start-up slightly.
            time.sleep(0.2)
        for t in threads:
            t.join()
        # Searching for an inserted vector must return itself at distance 0.
        query_vec = [vectors[0]]
        top_k = 1
        search_param = {"nprobe": nprobe}
        status, result = connect.search(collection, top_k, query_vec, params=search_param)
        assert len(result) == 1
        assert len(result[0]) == top_k
        assert result[0][0].distance == 0.0
@pytest.mark.level(2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_multithread_multicollection(self, connect, args):
'''
target: test create index interface with multiprocess
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
threads_num = 8
loop_num = 8
threads = []
collection = []
j = 0
while j < (threads_num*loop_num):
collection_name = gen_unique_str("test_create_index_multiprocessing")
collection.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_type': IndexType.FLAT,
'store_raw_vector': False}
connect.create_collection(param)
j = j + 1
def create_index():
i = 0
while i < loop_num:
# assert connect.has_collection(collection[ids*process_num+i])
status, ids = connect.insert(collection[ids*threads_num+i], vectors)
status = connect.create_index(collection[ids*threads_num+i], IndexType.IVFLAT, {"nlist": NLIST})
assert status.OK()
query_vec = [vectors[0]]
top_k = 1
search_param = {"nprobe": nprobe}
status, result = connect.search(collection[ids*threads_num+i], top_k, query_vec, params=search_param)
assert len(result) == 1
assert len(result[0]) == top_k
assert result[0][0].distance == 0.0
i = i + 1
for i in range(threads_num):
m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
ids = i
t = threading.Thread(target=create_index, args=(m, ids))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
    @pytest.mark.timeout(BUILD_TIMEOUT)
    @pytest.mark.level(2)
    def test_create_index_a_multithreads(self, connect, collection, args):
        '''
        target: interleave index building and entity counting from threads
        method: insert vectors, then run 8 threads alternating between
                create_index and count_entities, each with its own client
        expected: every build succeeds and the count always equals nb
        '''
        status, ids = connect.insert(collection, vectors)
        def build(connect):
            status = connect.create_index(collection, IndexType.IVFLAT, {"nlist": NLIST})
            assert status.OK()
        def count(connect):
            status, count = connect.count_entities(collection)
            assert status.OK()
            assert count == nb
        threads_num = 8
        threads = []
        # NOTE(review): `uri` is computed but never used.
        uri = "tcp://%s:%s" % (args["ip"], args["port"])
        for i in range(threads_num):
            m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
            # Even threads build the index, odd threads count entities.
            if(i % 2 == 0):
                p = threading.Thread(target=build, args=(m,))
            else:
                p = threading.Thread(target=count, args=(m,))
            threads.append(p)
            p.start()
            time.sleep(0.2)
        for p in threads:
            p.join()
# TODO: enable
    @pytest.mark.timeout(BUILD_TIMEOUT)
    @pytest.mark.level(2)
    def _test_create_index_multiprocessing(self, connect, collection, args):
        '''
        (disabled: leading underscore keeps pytest from collecting it)
        target: test create index interface with multiprocess
        method: insert vectors, build the same index from 8 processes,
                each with its own client connection
        expected: all builds succeed and an exact-match search still works
        '''
        status, ids = connect.insert(collection, vectors)
        def build(connect):
            status = connect.create_index(collection, IndexType.IVFLAT, {"nlist": NLIST})
            assert status.OK()
        process_num = 8
        processes = []
        for i in range(process_num):
            m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
            p = Process(target=build, args=(m,))
            processes.append(p)
            p.start()
            time.sleep(0.2)
        for p in processes:
            p.join()
        query_vec = [vectors[0]]
        top_k = 1
        search_param = {"nprobe": nprobe}
        status, result = connect.search(collection, top_k, query_vec, params=search_param)
        assert len(result) == 1
        assert len(result[0]) == top_k
        assert result[0][0].distance == 0.0
# TODO: enable
@pytest.mark.timeout(BUILD_TIMEOUT)
def _test_create_index_multiprocessing_multicollection(self, connect, args):
'''
target: test create index interface with multiprocess
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
process_num = 8
loop_num = 8
processes = []
collection = []
j = 0
while j < (process_num*loop_num):
collection_name = gen_unique_str("test_create_index_multiprocessing")
collection.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_type': IndexType.FLAT,
'store_raw_vector': False}
connect.create_collection(param)
j = j + 1
def create_index():
i = 0
while i < loop_num:
# assert connect.has_collection(collection[ids*process_num+i])
status, ids = connect.insert(collection[ids*process_num+i], vectors)
status = connect.create_index(collection[ids*process_num+i], IndexType.IVFLAT, {"nlist": NLIST})
assert status.OK()
query_vec = [vectors[0]]
top_k = 1
search_param = {"nprobe": nprobe}
status, result = connect.search(collection[ids*process_num+i], top_k, query_vec, params=search_param)
assert len(result) == 1
assert len(result[0]) == top_k
assert result[0][0].distance == 0.0
i = i + 1
for i in range(process_num):
m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
ids = i
p = Process(target=create_index, args=(m,ids))
processes.append(p)
p.start()
time.sleep(0.2)
for p in processes:
p.join()
    def test_create_index_collection_not_existed(self, connect):
        '''
        target: test create index interface when the collection does not exist
        method: call create_index with a fresh, never-created collection name
        expected: status is not OK (create index fails)
        '''
        # Unique name guarantees the collection was never created.
        collection_name = gen_unique_str(self.__class__.__name__)
        nlist = NLIST
        index_type = IndexType.IVF_SQ8
        index_param = {"nlist": nlist}
        status = connect.create_index(collection_name, index_type, index_param)
        assert not status.OK()
    def test_create_index_collection_None(self, connect):
        '''
        target: test create index interface when collection name is None
        method: call create_index with collection_name=None
        expected: the client raises an exception
        '''
        collection_name = None
        nlist = NLIST
        index_type = IndexType.IVF_SQ8
        index_param = {"nlist": nlist}
        with pytest.raises(Exception) as e:
            status = connect.create_index(collection_name, index_type, index_param)
    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index_no_vectors_then_insert(self, connect, collection, get_simple_index):
        '''
        target: creating an index on an empty collection must not break later inserts
        method: create index on an empty collection, then insert vectors
        expected: the insert after indexing succeeds
        '''
        index_param = get_simple_index["index_param"]
        index_type = get_simple_index["index_type"]
        status = connect.create_index(collection, index_type, index_param)
        status, ids = connect.insert(collection, vectors)
        # The assertion checks the *insert* status, not create_index.
        assert status.OK()
    @pytest.mark.level(2)
    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_same_index_repeatedly(self, connect, collection, get_simple_index):
        '''
        target: check that an index can be created twice with identical params
        method: call create_index twice with the same index type and params
        expected: the second call also returns OK (idempotent)
        '''
        index_param = get_simple_index["index_param"]
        index_type = get_simple_index["index_type"]
        status = connect.create_index(collection, index_type, index_param)
        status = connect.create_index(collection, index_type, index_param)
        assert status.OK()
    @pytest.mark.level(2)
    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_different_index_repeatedly(self, connect, collection):
        '''
        target: check that re-creating an index with different params replaces it
        method: build IVF_SQ8 then IVFLAT on the same collection
        expected: both builds return OK; get_index_info reports the second
                  (most recent) index type and its nlist
        '''
        nlist = NLIST
        status, ids = connect.insert(collection, vectors)
        index_type_1 = IndexType.IVF_SQ8
        index_type_2 = IndexType.IVFLAT
        indexs = [{"index_type": index_type_1, "index_param": {"nlist": nlist}}, {"index_type": index_type_2, "index_param": {"nlist": nlist}}]
        logging.getLogger().info(indexs)
        for index in indexs:
            status = connect.create_index(collection, index["index_type"], index["index_param"])
            assert status.OK()
        # The last create_index wins: the server reports index_type_2.
        status, result = connect.get_index_info(collection)
        assert result._params["nlist"] == nlist
        assert result._collection_name == collection
        assert result._index_type == index_type_2
"""
******************************************************************
The following cases are used to test `get_index_info` function
******************************************************************
"""
    def test_get_index_info(self, connect, collection, get_index):
        '''
        target: test describe-index interface
        method: create index, then call get_index_info
        expected: reported params, collection name and index type match what
                  was created
        '''
        index_param = get_index["index_param"]
        index_type = get_index["index_type"]
        logging.getLogger().info(get_index)
        # status, ids = connect.insert(collection, vectors)
        status = connect.create_index(collection, index_type, index_param)
        # Only verify the description when the build itself succeeded; some
        # parametrized combos may legitimately fail to build.
        if status.OK():
            status, result = connect.get_index_info(collection)
            logging.getLogger().info(result)
            assert result._params == index_param
            assert result._collection_name == collection
            assert result._index_type == index_type
    def test_describe_and_drop_index_multi_collections(self, connect, get_simple_index):
        '''
        target: create, describe and drop indexes across multiple L2 collections
        method: create 10 collections, insert and build an index in each; then
                describe all, drop all, and describe again
        expected: descriptions match the created index; after drop the index
                  type reverts to FLAT
        '''
        nq = 100
        # NOTE: shadows the module-level `vectors` with a smaller local batch.
        vectors = gen_vectors(nq, dim)
        collection_list = []
        for i in range(10):
            collection_name = gen_unique_str()
            collection_list.append(collection_name)
            param = {'collection_name': collection_name,
                     'dimension': dim,
                     'index_file_size': index_file_size,
                     'metric_type': MetricType.L2}
            connect.create_collection(param)
            index_param = get_simple_index["index_param"]
            index_type = get_simple_index["index_type"]
            logging.getLogger().info(get_simple_index)
            status, ids = connect.insert(collection_name=collection_name, records=vectors)
            status = connect.create_index(collection_name, index_type, index_param)
            assert status.OK()
        for i in range(10):
            status, result = connect.get_index_info(collection_list[i])
            logging.getLogger().info(result)
            assert result._params == index_param
            assert result._collection_name == collection_list[i]
            assert result._index_type == index_type
        for i in range(10):
            status = connect.drop_index(collection_list[i])
            assert status.OK()
            # Dropping the index reverts the collection to the default FLAT.
            status, result = connect.get_index_info(collection_list[i])
            logging.getLogger().info(result)
            assert result._collection_name == collection_list[i]
            assert result._index_type == IndexType.FLAT
# @pytest.mark.level(2)
# def test_get_index_info_without_connect(self, dis_connect, collection):
# '''
# target: test describe index without connection
# method: describe index, and check if describe successfully
# expected: raise exception
# '''
# with pytest.raises(Exception) as e:
# status = dis_connect.get_index_info(collection)
    def test_get_index_info_collection_not_existed(self, connect):
        '''
        target: test describe-index when the collection does not exist
        method: call get_index_info with a fresh, never-created collection name
        expected: status is not OK
        '''
        collection_name = gen_unique_str(self.__class__.__name__)
        status, result = connect.get_index_info(collection_name)
        assert not status.OK()
    def test_get_index_info_collection_None(self, connect):
        '''
        target: test describe-index when the collection name is None
        method: call get_index_info with collection_name=None
        expected: the client raises an exception
        '''
        collection_name = None
        with pytest.raises(Exception) as e:
            status = connect.get_index_info(collection_name)
    def test_get_index_info_not_create(self, connect, collection):
        '''
        target: test describe-index before any index has been created
        method: insert vectors, then call get_index_info without create_index
        expected: status OK (the default/FLAT description is returned)
        '''
        status, ids = connect.insert(collection, vectors)
        status, result = connect.get_index_info(collection)
        logging.getLogger().info(result)
        assert status.OK()
        # assert result._params["nlist"] == index_params["nlist"]
        # assert result._collection_name == collection
        # assert result._index_type == index_params["index_type"]
"""
******************************************************************
The following cases are used to test `drop_index` function
******************************************************************
"""
    def test_drop_index(self, connect, collection, get_simple_index):
        '''
        target: test drop-index interface
        method: create index, then drop it
        expected: drop returns OK and the index reverts to the default FLAT
        '''
        index_param = get_simple_index["index_param"]
        index_type = get_simple_index["index_type"]
        # status, ids = connect.insert(collection, vectors)
        status = connect.create_index(collection, index_type, index_param)
        assert status.OK()
        status, result = connect.get_index_info(collection)
        logging.getLogger().info(result)
        status = connect.drop_index(collection)
        assert status.OK()
        # After drop, the description falls back to FLAT.
        status, result = connect.get_index_info(collection)
        logging.getLogger().info(result)
        assert result._collection_name == collection
        assert result._index_type == IndexType.FLAT
    @pytest.mark.level(2)
    def test_drop_index_repeatly(self, connect, collection, get_simple_index):
        '''
        target: test dropping the same index twice in a row
        method: create index, drop it, then drop again
        expected: both drops return OK (drop is idempotent) and the index
                  reverts to FLAT
        '''
        index_param = get_simple_index["index_param"]
        index_type = get_simple_index["index_type"]
        # status, ids = connect.insert(collection, vectors)
        status = connect.create_index(collection, index_type, index_param)
        assert status.OK()
        status, result = connect.get_index_info(collection)
        logging.getLogger().info(result)
        status = connect.drop_index(collection)
        assert status.OK()
        # Second drop on an already-dropped index must still succeed.
        status = connect.drop_index(collection)
        assert status.OK()
        status, result = connect.get_index_info(collection)
        logging.getLogger().info(result)
        assert result._collection_name == collection
        assert result._index_type == IndexType.FLAT
# @pytest.mark.level(2)
# def test_drop_index_without_connect(self, dis_connect, collection):
# '''
# target: test drop index without connection
# method: drop index, and check if drop successfully
# expected: raise exception
# '''
# with pytest.raises(Exception) as e:
# status = dis_connect.drop_index(collection)
    def test_drop_index_collection_not_existed(self, connect):
        '''
        target: test drop-index when the collection does not exist
        method: call drop_index with a fresh, never-created collection name
        expected: status is not OK
        '''
        collection_name = gen_unique_str(self.__class__.__name__)
        status = connect.drop_index(collection_name)
        assert not status.OK()
    def test_drop_index_collection_None(self, connect):
        '''
        target: test drop-index when the collection name is None
        method: call drop_index with collection_name=None
        expected: the client raises an exception
        '''
        collection_name = None
        with pytest.raises(Exception) as e:
            status = connect.drop_index(collection_name)
    def test_drop_index_collection_not_create(self, connect, collection):
        '''
        target: test drop-index when no index was ever created
        method: insert vectors, then call drop_index without create_index
        expected: status OK (dropping a nonexistent index is a no-op)
        '''
        status, ids = connect.insert(collection, vectors)
        status, result = connect.get_index_info(collection)
        logging.getLogger().info(result)
        # no create index
        status = connect.drop_index(collection)
        logging.getLogger().info(status)
        assert status.OK()
    @pytest.mark.level(2)
    def test_create_drop_index_repeatly(self, connect, collection, get_simple_index):
        '''
        target: test repeated create/drop cycles with the same index params
        method: create index then drop it, twice in a loop
        expected: every call returns OK; after each drop the index is FLAT
        '''
        index_param = get_simple_index["index_param"]
        index_type = get_simple_index["index_type"]
        # status, ids = connect.insert(collection, vectors)
        for i in range(2):
            status = connect.create_index(collection, index_type, index_param)
            assert status.OK()
            status, result = connect.get_index_info(collection)
            logging.getLogger().info(result)
            status = connect.drop_index(collection)
            assert status.OK()
            status, result = connect.get_index_info(collection)
            logging.getLogger().info(result)
            assert result._collection_name == collection
            assert result._index_type == IndexType.FLAT
    def test_create_drop_index_repeatly_different_index_params(self, connect, collection):
        '''
        target: test repeated create/drop cycles with different index params
        method: create IVFLAT, drop; create IVF_SQ8, drop
        expected: every call returns OK; after each drop the index is FLAT
        '''
        nlist = NLIST
        indexs = [{"index_type": IndexType.IVFLAT, "index_param": {"nlist": nlist}}, {"index_type": IndexType.IVF_SQ8, "index_param": {"nlist": nlist}}]
        # status, ids = connect.insert(collection, vectors)
        for i in range(2):
            # Use a different index type on each pass.
            status = connect.create_index(collection, indexs[i]["index_type"], indexs[i]["index_param"])
            assert status.OK()
            status, result = connect.get_index_info(collection)
            logging.getLogger().info(result)
            status = connect.drop_index(collection)
            assert status.OK()
            status, result = connect.get_index_info(collection)
            logging.getLogger().info(result)
            assert result._collection_name == collection
            assert result._index_type == IndexType.FLAT
class TestIndexIP:
    @pytest.fixture(
        scope="function",
        params=gen_index()
    )
    def get_index(self, request, connect):
        """Parametrized index config for IP-metric collections; skip unsupported combos."""
        param = request.param
        if str(connect._cmd("mode")[1]) == "CPU" and param["index_type"] == IndexType.IVF_SQ8H:
            pytest.skip("sq8h not support in CPU mode")
        if str(connect._cmd("mode")[1]) == "GPU" and param["index_type"] == IndexType.IVF_PQ:
            pytest.skip("ivfpq not support in GPU mode")
        if param["index_type"] == IndexType.RNSG:
            pytest.skip("rnsg not support in ip")
        return param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in CPU mode")
if str(connect._cmd("mode")[1]) == "GPU":
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("ivfpq not support in GPU mode")
if request.param["index_type"] == IndexType.RNSG:
pytest.skip("rnsg not support in ip")
return request.param
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
    @pytest.mark.level(2)
    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index(self, connect, ip_collection, get_simple_index):
        '''
        target: test create index interface on an IP-metric collection
        method: insert vectors, then create index
        expected: create_index returns OK
        '''
        index_param = get_simple_index["index_param"]
        index_type = get_simple_index["index_type"]
        logging.getLogger().info(get_simple_index)
        status, ids = connect.insert(ip_collection, vectors)
        status = connect.create_index(ip_collection, index_type, index_param)
        assert status.OK()
    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index_collection(self, connect, ip_collection, get_simple_index):
        '''
        target: test create index on an IP collection that has a partition
        method: create partition, insert vectors into it, create index on the
                collection
        expected: create_index returns OK
        '''
        index_param = get_simple_index["index_param"]
        index_type = get_simple_index["index_type"]
        logging.getLogger().info(get_simple_index)
        status = connect.create_partition(ip_collection, tag)
        status, ids = connect.insert(ip_collection, vectors, partition_tag=tag)
        status = connect.create_index(ip_collection, index_type, index_param)
        assert status.OK()
# @pytest.mark.level(2)
# def test_create_index_without_connect(self, dis_connect, ip_collection):
# '''
# target: test create index without connection
# method: create collection and add vectors in it, check if added successfully
# expected: raise exception
# '''
# nlist = NLIST
# index_type = IndexType.IVF_SQ8
# index_param = {"nlist": nlist}
# with pytest.raises(Exception) as e:
# status = dis_connect.create_index(ip_collection, index_type, index_param)
    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index_search_with_query_vectors(self, connect, ip_collection, get_simple_index):
        '''
        target: create index on an IP collection, then search with several queries
        method: insert vectors, create index with no manual flush, search
        expected: search succeeds and returns one result list per query vector
        '''
        index_param = get_simple_index["index_param"]
        index_type = get_simple_index["index_type"]
        logging.getLogger().info(get_simple_index)
        logging.getLogger().info(connect.get_collection_info(ip_collection))
        status, ids = connect.insert(ip_collection, vectors)
        status = connect.create_index(ip_collection, index_type, index_param)
        logging.getLogger().info(connect.get_index_info(ip_collection))
        logging.getLogger().info(connect.get_collection_stats(ip_collection))
        # Query with inserted vectors so matches must exist.
        query_vecs = [vectors[0], vectors[1], vectors[2]]
        top_k = 5
        search_param = get_search_param(index_type)
        status, result = connect.search(ip_collection, top_k, query_vecs, params=search_param)
        logging.getLogger().info(result)
        assert status.OK()
        assert len(result) == len(query_vecs)
# TODO: enable
    @pytest.mark.timeout(BUILD_TIMEOUT)
    @pytest.mark.level(2)
    def _test_create_index_multiprocessing(self, connect, ip_collection, args):
        '''
        (disabled: leading underscore keeps pytest from collecting it)
        target: test create index on an IP collection with multiprocess
        method: insert vectors, build the same index from 8 processes,
                each with its own client connection
        expected: all builds succeed and an exact-match search still works
        '''
        status, ids = connect.insert(ip_collection, vectors)
        def build(connect):
            status = connect.create_index(ip_collection, IndexType.IVFLAT, {"nlist": NLIST})
            assert status.OK()
        process_num = 8
        processes = []
        for i in range(process_num):
            m = get_milvus(args["ip"], args["port"], handler=args["handler"])
            p = Process(target=build, args=(m,))
            processes.append(p)
            p.start()
            time.sleep(0.2)
        for p in processes:
            p.join()
        query_vec = [vectors[0]]
        top_k = 1
        search_param = {"nprobe": nprobe}
        status, result = connect.search(ip_collection, top_k, query_vec, params=search_param)
        assert len(result) == 1
        assert len(result[0]) == top_k
        assert result[0][0].distance == 0.0
# TODO: enable
@pytest.mark.timeout(BUILD_TIMEOUT)
def _test_create_index_multiprocessing_multicollection(self, connect, args):
'''
target: test create index interface with multiprocess
method: create collection and add vectors in it, create index
expected: return code equals to 0, and search success
'''
process_num = 8
loop_num = 8
processes = []
collection = []
j = 0
while j < (process_num*loop_num):
collection_name = gen_unique_str("test_create_index_multiprocessing")
collection.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim}
connect.create_collection(param)
j = j + 1
def create_index():
i = 0
while i < loop_num:
# assert connect.has_collection(collection[ids*process_num+i])
status, ids = connect.insert(collection[ids*process_num+i], vectors)
status = connect.create_index(collection[ids*process_num+i], IndexType.IVFLAT, {"nlist": NLIST})
assert status.OK()
query_vec = [vectors[0]]
top_k = 1
search_param = {"nprobe": nprobe}
status, result = connect.search(collection[ids*process_num+i], top_k, query_vec, params=search_param)
assert len(result) == 1
assert len(result[0]) == top_k
assert result[0][0].distance == 0.0
i = i + 1
for i in range(process_num):
m = get_milvus(args["ip"], args["port"], handler=args["handler"])
ids = i
p = Process(target=create_index, args=(m,ids))
processes.append(p)
p.start()
time.sleep(0.2)
for p in processes:
p.join()
def test_create_index_no_vectors(self, connect, ip_collection):
    '''
    target: build an index on a collection that holds no vectors
    method: create the collection, insert nothing, then create an IVF_SQ8 index
    expected: the build still reports success
    '''
    # Building an index on an empty collection must be accepted.
    status = connect.create_index(ip_collection, IndexType.IVF_SQ8, {"nlist": NLIST})
    assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_no_vectors_then_insert(self, connect, ip_collection, get_simple_index):
    '''
    target: build an index on an empty collection and keep inserting afterwards
    method: create index first, then insert vectors
    expected: the subsequent insert succeeds
    '''
    idx_type = get_simple_index["index_type"]
    idx_param = get_simple_index["index_param"]
    # Index first, data second: the later insert must not be affected.
    connect.create_index(ip_collection, idx_type, idx_param)
    status, ids = connect.insert(ip_collection, vectors)
    assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_same_index_repeatedly(self, connect, ip_collection):
    '''
    target: building the same index twice with identical params must be harmless
    method: insert vectors, create the same IVF_SQ8 index twice, then search
    expected: second build succeeds and a top-1 self-search returns one hit
    '''
    idx_param = {"nlist": NLIST}
    status, ids = connect.insert(ip_collection, vectors)
    # Issue the identical build request twice in a row.
    for _ in range(2):
        status = connect.create_index(ip_collection, IndexType.IVF_SQ8, idx_param)
    assert status.OK()
    top_k = 1
    status, result = connect.search(ip_collection, top_k, [vectors[0]],
                                    params={"nprobe": nprobe})
    assert len(result) == 1
    assert len(result[0]) == top_k
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_different_index_repeatedly(self, connect, ip_collection):
    '''
    target: rebuilding with different params must leave the latest index in place
    method: build IVF_SQ8, then rebuild as IVFLAT, then describe the index
    expected: every build succeeds; describe reports the second index type
    '''
    status, ids = connect.insert(ip_collection, vectors)
    first_type, second_type = IndexType.IVF_SQ8, IndexType.IVFLAT
    plans = [{"index_type": t, "index_param": {"nlist": NLIST}}
             for t in (first_type, second_type)]
    logging.getLogger().info(plans)
    for plan in plans:
        status = connect.create_index(ip_collection, plan["index_type"], plan["index_param"])
        assert status.OK()
    # The most recent create_index wins: describe must report the second type.
    status, info = connect.get_index_info(ip_collection)
    assert info._params["nlist"] == NLIST
    assert info._collection_name == ip_collection
    assert info._index_type == second_type
"""
******************************************************************
The following cases are used to test `get_index_info` function
******************************************************************
"""
def test_get_index_info(self, connect, ip_collection, get_simple_index):
    '''
    target: describe an index after building it
    method: create the parameterized index, then call get_index_info
    expected: reported name/type/params match what was built (GPU + IVF_PQ
              falls back to FLAT)
    '''
    log = logging.getLogger()
    log.info(get_simple_index)
    idx_type = get_simple_index["index_type"]
    idx_param = get_simple_index["index_param"]
    status = connect.create_index(ip_collection, idx_type, idx_param)
    status, info = connect.get_index_info(ip_collection)
    log.info(info)
    assert info._collection_name == ip_collection
    status, mode = connect._cmd("mode")
    if str(mode) == "GPU" and idx_type == IndexType.IVF_PQ:
        # GPU deployments cannot build IVF_PQ; the server keeps FLAT instead.
        assert info._index_type == IndexType.FLAT
        assert info._params["nlist"] == NLIST
    else:
        assert info._index_type == idx_type
        assert info._params == idx_param
def test_get_index_info_partition(self, connect, ip_collection, get_simple_index):
    '''
    target: describe an index when the data lives in a partition
    method: create partition, insert into it, build index, call get_index_info
    expected: reported name/type/params match what was built
    '''
    log = logging.getLogger()
    log.info(get_simple_index)
    idx_type = get_simple_index["index_type"]
    idx_param = get_simple_index["index_param"]
    connect.create_partition(ip_collection, tag)
    status, ids = connect.insert(ip_collection, vectors, partition_tag=tag)
    status = connect.create_index(ip_collection, idx_type, idx_param)
    status, info = connect.get_index_info(ip_collection)
    log.info(info)
    assert info._params == idx_param
    assert info._collection_name == ip_collection
    assert info._index_type == idx_type
def test_get_index_info_partition_A(self, connect, ip_collection, get_simple_index):
    '''
    target: describe an index built on a collection with two empty partitions
    method: create two partitions, build index on the collection, describe it
    expected: reported name/type/params match what was built
    '''
    log = logging.getLogger()
    log.info(get_simple_index)
    idx_type = get_simple_index["index_type"]
    idx_param = get_simple_index["index_param"]
    # Two partitions, both left empty on purpose.
    for partition_tag in (tag, "new_tag"):
        connect.create_partition(ip_collection, partition_tag)
    status = connect.create_index(ip_collection, idx_type, idx_param)
    status, info = connect.get_index_info(ip_collection)
    log.info(info)
    assert info._params == idx_param
    assert info._collection_name == ip_collection
    assert info._index_type == idx_type
def test_describe_and_drop_index_multi_collections(self, connect, get_simple_index):
    '''
    target: test create, describe and drop index interface with multiple collections of IP
    method: create collections and add vectors in it, create index, call describe index
    expected: return code 0, and index instructure
    '''
    nq = 100
    vectors = gen_vectors(nq, dim)
    collection_list = []
    index_param = get_simple_index["index_param"]
    index_type = get_simple_index["index_type"]
    logging.getLogger().info(get_simple_index)
    for i in range(10):
        collection_name = gen_unique_str()
        collection_list.append(collection_name)
        param = {'collection_name': collection_name,
                 'dimension': dim,
                 'index_file_size': index_file_size,
                 'metric_type': MetricType.IP}
        connect.create_collection(param)
        # Fix: insert + create_index must run for EVERY collection. In the
        # original these two calls sat after the loop, so only the last of
        # the 10 collections was populated and indexed while the checks
        # below assert index info for all of them.
        status, ids = connect.insert(collection_name=collection_name, records=vectors)
        status = connect.create_index(collection_name, index_type, index_param)
        assert status.OK()
    # Every collection must now describe the index that was built on it.
    for i in range(10):
        status, result = connect.get_index_info(collection_list[i])
        logging.getLogger().info(result)
        assert result._params == index_param
        assert result._collection_name == collection_list[i]
        assert result._index_type == index_type
    # Dropping reverts each collection to the default FLAT index.
    for i in range(10):
        status = connect.drop_index(collection_list[i])
        assert status.OK()
        status, result = connect.get_index_info(collection_list[i])
        logging.getLogger().info(result)
        assert result._collection_name == collection_list[i]
        assert result._index_type == IndexType.FLAT
# @pytest.mark.level(2)
# def test_get_index_info_without_connect(self, dis_connect, ip_collection):
# '''
# target: test describe index without connection
# method: describe index, and check if describe successfully
# expected: raise exception
# '''
# with pytest.raises(Exception) as e:
# status = dis_connect.get_index_info(ip_collection)
def test_get_index_info_not_create(self, connect, ip_collection):
    '''
    target: describe the index of a collection that never had one created
    method: insert vectors, then call get_index_info without create_index
    expected: the describe call itself succeeds
    '''
    status, ids = connect.insert(ip_collection, vectors)
    # No index was built; describing must still return OK.
    status, info = connect.get_index_info(ip_collection)
    logging.getLogger().info(info)
    assert status.OK()
"""
******************************************************************
The following cases are used to test `drop_index` function
******************************************************************
"""
def test_drop_index(self, connect, ip_collection, get_simple_index):
    '''
    target: drop an index and fall back to the default FLAT index
    method: build the parameterized index, drop it, describe the index again
    expected: drop succeeds and describe reports IndexType.FLAT
    '''
    log = logging.getLogger()
    idx_type = get_simple_index["index_type"]
    idx_param = get_simple_index["index_param"]
    status, mode = connect._cmd("mode")
    assert status.OK()
    status = connect.create_index(ip_collection, idx_type, idx_param)
    if str(mode) == "GPU" and idx_type == IndexType.IVF_PQ:
        # IVF_PQ cannot be built in GPU mode, so this build must fail.
        assert not status.OK()
    else:
        assert status.OK()
    status, info = connect.get_index_info(ip_collection)
    log.info(info)
    status = connect.drop_index(ip_collection)
    assert status.OK()
    status, info = connect.get_index_info(ip_collection)
    log.info(info)
    assert info._collection_name == ip_collection
    assert info._index_type == IndexType.FLAT
def test_drop_index_partition(self, connect, ip_collection, get_simple_index):
    '''
    target: drop a collection-level index while the data lives in a partition
    method: create partition, insert into it, build index, then drop it
    expected: drop succeeds and describe reports IndexType.FLAT afterwards
    '''
    log = logging.getLogger()
    idx_type = get_simple_index["index_type"]
    idx_param = get_simple_index["index_param"]
    connect.create_partition(ip_collection, tag)
    status, ids = connect.insert(ip_collection, vectors, partition_tag=tag)
    status = connect.create_index(ip_collection, idx_type, idx_param)
    assert status.OK()
    status, info = connect.get_index_info(ip_collection)
    log.info(info)
    status = connect.drop_index(ip_collection)
    assert status.OK()
    status, info = connect.get_index_info(ip_collection)
    log.info(info)
    assert info._collection_name == ip_collection
    assert info._index_type == IndexType.FLAT
def test_drop_index_partition_C(self, connect, ip_collection, get_simple_index):
    '''
    target: drop an index on a collection that has several partitions
    method: create two partitions, insert into the default partition, build
            the index, then drop it
    expected: drop succeeds and describe reports IndexType.FLAT afterwards
    '''
    idx_type = get_simple_index["index_type"]
    idx_param = get_simple_index["index_param"]
    for partition_tag in (tag, "new_tag"):
        connect.create_partition(ip_collection, partition_tag)
    status, ids = connect.insert(ip_collection, vectors)
    status = connect.create_index(ip_collection, idx_type, idx_param)
    assert status.OK()
    status = connect.drop_index(ip_collection)
    assert status.OK()
    status, info = connect.get_index_info(ip_collection)
    logging.getLogger().info(info)
    assert info._collection_name == ip_collection
    assert info._index_type == IndexType.FLAT
@pytest.mark.level(2)
def test_drop_index_repeatly(self, connect, ip_collection, get_simple_index):
    '''
    target: dropping an index twice in a row must be harmless
    method: build the index once, call drop_index twice, describe the index
    expected: both drops succeed and describe reports IndexType.FLAT
    '''
    log = logging.getLogger()
    idx_type = get_simple_index["index_type"]
    idx_param = get_simple_index["index_param"]
    status, mode = connect._cmd("mode")
    assert status.OK()
    status = connect.create_index(ip_collection, idx_type, idx_param)
    if str(mode) == "GPU" and idx_type == IndexType.IVF_PQ:
        # IVF_PQ is not buildable in GPU mode.
        assert not status.OK()
    else:
        assert status.OK()
    status, info = connect.get_index_info(ip_collection)
    log.info(info)
    # Second drop targets an already-index-less collection; must still be OK.
    for _ in range(2):
        status = connect.drop_index(ip_collection)
        assert status.OK()
    status, info = connect.get_index_info(ip_collection)
    log.info(info)
    assert info._collection_name == ip_collection
    assert info._index_type == IndexType.FLAT
# @pytest.mark.level(2)
# def test_drop_index_without_connect(self, dis_connect, ip_collection):
# '''
# target: test drop index without connection
# method: drop index, and check if drop successfully
# expected: raise exception
# '''
# nlist = NLIST
# index_type = IndexType.IVFLAT
# index_param = {"nlist": nlist}
# with pytest.raises(Exception) as e:
# status = dis_connect.drop_index(ip_collection, index_type, index_param)
def test_drop_index_collection_not_create(self, connect, ip_collection):
    '''
    target: drop the index of a collection that never had one created
    method: insert vectors, then call drop_index directly
    expected: the drop is a no-op that still reports success
    '''
    status, ids = connect.insert(ip_collection, vectors)
    status, info = connect.get_index_info(ip_collection)
    logging.getLogger().info(info)
    # No create_index was issued before this drop.
    status = connect.drop_index(ip_collection)
    logging.getLogger().info(status)
    assert status.OK()
@pytest.mark.level(2)
def test_create_drop_index_repeatly(self, connect, ip_collection, get_simple_index):
    '''
    target: create and drop the same index repeatedly
    method: two rounds of create_index followed by drop_index, with the same params
    expected: every call succeeds; after each drop the described index is FLAT
    '''
    log = logging.getLogger()
    idx_type = get_simple_index["index_type"]
    idx_param = get_simple_index["index_param"]
    status, ids = connect.insert(ip_collection, vectors)
    for _ in range(2):
        status = connect.create_index(ip_collection, idx_type, idx_param)
        assert status.OK()
        status, info = connect.get_index_info(ip_collection)
        log.info(info)
        status = connect.drop_index(ip_collection)
        assert status.OK()
        status, info = connect.get_index_info(ip_collection)
        log.info(info)
        assert info._collection_name == ip_collection
        assert info._index_type == IndexType.FLAT
def test_create_drop_index_repeatly_different_index_params(self, connect, ip_collection):
    '''
    target: create and drop indexes repeatedly with different index params
    method: two rounds, each building a different index type, then dropping it
    expected: every call succeeds; describe always reflects the latest action
    '''
    log = logging.getLogger()
    plans = [
        {"index_type": IndexType.IVFLAT, "index_param": {"nlist": NLIST}},
        {"index_type": IndexType.IVF_SQ8, "index_param": {"nlist": NLIST}},
    ]
    status, ids = connect.insert(ip_collection, vectors)
    for plan in plans:
        status = connect.create_index(ip_collection, plan["index_type"], plan["index_param"])
        assert status.OK()
        status, info = connect.get_index_info(ip_collection)
        assert info._params == plan["index_param"]
        assert info._collection_name == ip_collection
        assert info._index_type == plan["index_type"]
        status, info = connect.get_index_info(ip_collection)
        log.info(info)
        status = connect.drop_index(ip_collection)
        assert status.OK()
        status, info = connect.get_index_info(ip_collection)
        log.info(info)
        assert info._collection_name == ip_collection
        assert info._index_type == IndexType.FLAT
class TestIndexJAC:
    """Index create/describe/drop tests for binary collections using the Jaccard metric."""
    # Binary vectors shared by every test in this class. NOTE: the
    # module-level `vectors` are FLOAT vectors — tests here must always use
    # `self.vectors`.
    tmp, vectors = gen_binary_vectors(nb, dim)

    @pytest.fixture(
        scope="function",
        params=gen_index()
    )
    def get_index(self, request, connect):
        if str(connect._cmd("mode")[1]) == "CPU":
            if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not support in CPU mode")
        if str(connect._cmd("mode")[1]) == "GPU":
            if request.param["index_type"] == IndexType.IVF_PQ:
                pytest.skip("ivfpq not support in GPU mode")
        return request.param

    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        if str(connect._cmd("mode")[1]) == "CPU":
            if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not support in CPU mode")
        if str(connect._cmd("mode")[1]) == "GPU":
            if request.param["index_type"] == IndexType.IVF_PQ:
                pytest.skip("ivfpq not support in GPU mode")
        return request.param

    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_jaccard_index(self, request, connect):
        logging.getLogger().info(request.param)
        # Jaccard collections only support FLAT and IVFLAT for now.
        if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT:
            return request.param
        else:
            pytest.skip("Skip index Temporary")

    """
    ******************************************************************
      The following cases are used to test `create_index` function
    ******************************************************************
    """
    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index(self, connect, jac_collection, get_jaccard_index):
        '''
        target: test create index interface
        method: create collection and add vectors in it, create index
        expected: return code equals to 0, and search success
        '''
        index_param = get_jaccard_index["index_param"]
        index_type = get_jaccard_index["index_type"]
        logging.getLogger().info(get_jaccard_index)
        status, ids = connect.insert(jac_collection, self.vectors)
        status = connect.create_index(jac_collection, index_type, index_param)
        if index_type != IndexType.FLAT and index_type != IndexType.IVFLAT:
            assert not status.OK()
        else:
            assert status.OK()

    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index_partition(self, connect, jac_collection, get_jaccard_index):
        '''
        target: test create index interface
        method: create collection, create partition, and add vectors in it, create index
        expected: return code equals to 0, and search success
        '''
        index_param = get_jaccard_index["index_param"]
        index_type = get_jaccard_index["index_type"]
        logging.getLogger().info(get_jaccard_index)
        status = connect.create_partition(jac_collection, tag)
        status, ids = connect.insert(jac_collection, self.vectors, partition_tag=tag)
        status = connect.create_index(jac_collection, index_type, index_param)
        assert status.OK()

    # @pytest.mark.level(2)
    # def test_create_index_without_connect(self, dis_connect, jac_collection):
    #     '''
    #     target: test create index without connection
    #     method: create collection and add vectors in it, check if added successfully
    #     expected: raise exception
    #     '''
    #     nlist = NLIST
    #     index_param = {"nlist": nlist}
    #     with pytest.raises(Exception) as e:
    #         status = dis_connect.create_index(jac_collection, IndexType.IVF_SQ8, index_param)

    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index_search_with_query_vectors(self, connect, jac_collection, get_jaccard_index):
        '''
        target: test create index interface, search with more query vectors
        method: create collection and add vectors in it, create index
        expected: return code equals to 0, and search success
        '''
        index_param = get_jaccard_index["index_param"]
        index_type = get_jaccard_index["index_type"]
        logging.getLogger().info(get_jaccard_index)
        status, ids = connect.insert(jac_collection, self.vectors)
        status = connect.create_index(jac_collection, index_type, index_param)
        logging.getLogger().info(connect.get_index_info(jac_collection))
        query_vecs = [self.vectors[0], self.vectors[1], self.vectors[2]]
        top_k = 5
        search_param = get_search_param(index_type)
        status, result = connect.search(jac_collection, top_k, query_vecs, params=search_param)
        logging.getLogger().info(result)
        assert status.OK()
        assert len(result) == len(query_vecs)

    """
    ******************************************************************
      The following cases are used to test `get_index_info` function
    ******************************************************************
    """
    def test_get_index_info(self, connect, jac_collection, get_jaccard_index):
        '''
        target: test describe index interface
        method: create collection and add vectors in it, create index, call describe index
        expected: return code 0, and index instructure
        '''
        index_param = get_jaccard_index["index_param"]
        index_type = get_jaccard_index["index_type"]
        logging.getLogger().info(get_jaccard_index)
        status = connect.create_index(jac_collection, index_type, index_param)
        status, result = connect.get_index_info(jac_collection)
        logging.getLogger().info(result)
        assert result._collection_name == jac_collection
        assert result._index_type == index_type
        assert result._params == index_param

    def test_get_index_info_partition(self, connect, jac_collection, get_jaccard_index):
        '''
        target: test describe index interface
        method: create collection, create partition and add vectors in it, create index, call describe index
        expected: return code 0, and index instructure
        '''
        index_param = get_jaccard_index["index_param"]
        index_type = get_jaccard_index["index_type"]
        logging.getLogger().info(get_jaccard_index)
        status = connect.create_partition(jac_collection, tag)
        # Fix: insert the class-level binary vectors; the original used the
        # module-level float `vectors`, which do not match a binary collection.
        status, ids = connect.insert(jac_collection, self.vectors, partition_tag=tag)
        status = connect.create_index(jac_collection, index_type, index_param)
        status, result = connect.get_index_info(jac_collection)
        logging.getLogger().info(result)
        assert result._params == index_param
        assert result._collection_name == jac_collection
        assert result._index_type == index_type

    """
    ******************************************************************
      The following cases are used to test `drop_index` function
    ******************************************************************
    """
    def test_drop_index(self, connect, jac_collection, get_jaccard_index):
        '''
        target: test drop index interface
        method: create collection and add vectors in it, create index, call drop index
        expected: return code 0, and default index param
        '''
        index_param = get_jaccard_index["index_param"]
        index_type = get_jaccard_index["index_type"]
        status, mode = connect._cmd("mode")
        assert status.OK()
        status = connect.create_index(jac_collection, index_type, index_param)
        assert status.OK()
        status, result = connect.get_index_info(jac_collection)
        logging.getLogger().info(result)
        status = connect.drop_index(jac_collection)
        assert status.OK()
        status, result = connect.get_index_info(jac_collection)
        logging.getLogger().info(result)
        assert result._collection_name == jac_collection
        assert result._index_type == IndexType.FLAT

    def test_drop_index_partition(self, connect, jac_collection, get_jaccard_index):
        '''
        target: test drop index interface
        method: create collection, create partition and add vectors in it, create index on collection, call drop collection index
        expected: return code 0, and default index param
        '''
        index_param = get_jaccard_index["index_param"]
        index_type = get_jaccard_index["index_type"]
        status = connect.create_partition(jac_collection, tag)
        # Fix: use the class-level binary vectors here as well (was the
        # module-level float `vectors`).
        status, ids = connect.insert(jac_collection, self.vectors, partition_tag=tag)
        status = connect.create_index(jac_collection, index_type, index_param)
        assert status.OK()
        status, result = connect.get_index_info(jac_collection)
        logging.getLogger().info(result)
        status = connect.drop_index(jac_collection)
        assert status.OK()
        status, result = connect.get_index_info(jac_collection)
        logging.getLogger().info(result)
        assert result._collection_name == jac_collection
        assert result._index_type == IndexType.FLAT
class TestIndexBinary:
tmp, vectors = gen_binary_vectors(nb, dim)
@pytest.fixture(
    scope="function",
    params=gen_index()
)
def get_index(self, request, connect):
    # Skip parameter combinations this deployment cannot build.
    index_type = request.param["index_type"]
    if str(connect._cmd("mode")[1]) == "CPU" and index_type == IndexType.IVF_SQ8H:
        pytest.skip("sq8h not support in CPU mode")
    if index_type in (IndexType.IVF_PQ, IndexType.HNSW):
        pytest.skip("Skip PQ Temporary")
    return request.param
@pytest.fixture(
    scope="function",
    params=gen_simple_index()
)
def get_simple_index(self, request, connect):
    # Skip parameter combinations this deployment cannot build.
    index_type = request.param["index_type"]
    if str(connect._cmd("mode")[1]) == "CPU" and index_type == IndexType.IVF_SQ8H:
        pytest.skip("sq8h not support in CPU mode")
    if index_type in (IndexType.IVF_PQ, IndexType.HNSW):
        pytest.skip("Skip PQ Temporary")
    return request.param
@pytest.fixture(
    scope="function",
    params=gen_simple_index()
)
def get_hamming_index(self, request, connect):
    logging.getLogger().info(request.param)
    # Hamming collections currently only support FLAT and IVFLAT.
    if request.param["index_type"] in (IndexType.IVFLAT, IndexType.FLAT):
        return request.param
    pytest.skip("Skip index Temporary")
@pytest.fixture(
    scope="function",
    params=gen_simple_index()
)
def get_substructure_index(self, request, connect):
    logging.getLogger().info(request.param)
    # Substructure metric only works with the FLAT index.
    if request.param["index_type"] != IndexType.FLAT:
        pytest.skip("Skip index Temporary")
    return request.param
@pytest.fixture(
    scope="function",
    params=gen_simple_index()
)
def get_superstructure_index(self, request, connect):
    logging.getLogger().info(request.param)
    # Superstructure metric only works with the FLAT index.
    if request.param["index_type"] != IndexType.FLAT:
        pytest.skip("Skip index Temporary")
    return request.param
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, ham_collection, get_hamming_index):
    '''
    target: build an index on a hamming (binary) collection
    method: insert the class-level binary vectors, then create the index
    expected: only FLAT / IVFLAT builds succeed; anything else fails
    '''
    logging.getLogger().info(get_hamming_index)
    idx_type = get_hamming_index["index_type"]
    idx_param = get_hamming_index["index_param"]
    status, ids = connect.insert(ham_collection, self.vectors)
    status = connect.create_index(ham_collection, idx_type, idx_param)
    if idx_type in (IndexType.FLAT, IndexType.IVFLAT):
        assert status.OK()
    else:
        assert not status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition(self, connect, ham_collection, get_hamming_index):
    '''
    target: build an index when the binary vectors live in a partition
    method: create partition, insert into it, create index, count entities
    expected: build succeeds and the row count equals the insert size
    '''
    logging.getLogger().info(get_hamming_index)
    idx_type = get_hamming_index["index_type"]
    idx_param = get_hamming_index["index_param"]
    connect.create_partition(ham_collection, tag)
    status, ids = connect.insert(ham_collection, self.vectors, partition_tag=tag)
    status = connect.create_index(ham_collection, idx_type, idx_param)
    assert status.OK()
    status, row_count = connect.count_entities(ham_collection)
    assert row_count == len(self.vectors)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition_structure(self, connect, substructure_collection, get_substructure_index):
    '''
    target: build an index on a substructure collection with a partition
    method: create partition, insert into it, create index, count entities
    expected: build succeeds and the row count equals the insert size
    '''
    logging.getLogger().info(get_substructure_index)
    idx_type = get_substructure_index["index_type"]
    idx_param = get_substructure_index["index_param"]
    connect.create_partition(substructure_collection, tag)
    status, ids = connect.insert(substructure_collection, self.vectors, partition_tag=tag)
    status = connect.create_index(substructure_collection, idx_type, idx_param)
    assert status.OK()
    status, row_count = connect.count_entities(substructure_collection)
    assert row_count == len(self.vectors)
# @pytest.mark.level(2)
# def test_create_index_without_connect(self, dis_connect, ham_collection):
# '''
# target: test create index without connection
# method: create collection and add vectors in it, check if added successfully
# expected: raise exception
# '''
# nlist = NLIST
# index_param = {"nlist": nlist}
# with pytest.raises(Exception) as e:
# status = dis_connect.create_index(ham_collection, IndexType.IVF_SQ8, index_param)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors(self, connect, ham_collection, get_hamming_index):
    '''
    target: search a hamming collection with several query vectors after indexing
    method: insert binary vectors, build index, search with three queries
    expected: search succeeds and returns one result list per query vector
    '''
    log = logging.getLogger()
    log.info(get_hamming_index)
    idx_type = get_hamming_index["index_type"]
    idx_param = get_hamming_index["index_param"]
    status, ids = connect.insert(ham_collection, self.vectors)
    status = connect.create_index(ham_collection, idx_type, idx_param)
    log.info(connect.get_index_info(ham_collection))
    query_vecs = [self.vectors[i] for i in range(3)]
    top_k = 5
    status, result = connect.search(ham_collection, top_k, query_vecs,
                                    params=get_search_param(idx_type))
    log.info(result)
    assert status.OK()
    assert len(result) == len(query_vecs)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors_superstructure(self, connect, superstructure_collection, get_superstructure_index):
    '''
    target: search a superstructure collection with several queries after indexing
    method: insert binary vectors, build index, search with three queries
    expected: search succeeds and returns one result list per query vector
    '''
    log = logging.getLogger()
    log.info(get_superstructure_index)
    idx_type = get_superstructure_index["index_type"]
    idx_param = get_superstructure_index["index_param"]
    status, ids = connect.insert(superstructure_collection, self.vectors)
    status = connect.create_index(superstructure_collection, idx_type, idx_param)
    log.info(connect.get_index_info(superstructure_collection))
    query_vecs = [self.vectors[i] for i in range(3)]
    top_k = 5
    status, result = connect.search(superstructure_collection, top_k, query_vecs,
                                    params=get_search_param(idx_type))
    log.info(result)
    assert status.OK()
    assert len(result) == len(query_vecs)
"""
******************************************************************
The following cases are used to test `get_index_info` function
******************************************************************
"""
def test_get_index_info(self, connect, ham_collection, get_hamming_index):
    '''
    target: describe the index of a hamming collection
    method: create the parameterized index, then call get_index_info
    expected: reported name/type/params match what was built
    '''
    log = logging.getLogger()
    log.info(get_hamming_index)
    idx_type = get_hamming_index["index_type"]
    idx_param = get_hamming_index["index_param"]
    status = connect.create_index(ham_collection, idx_type, idx_param)
    status, info = connect.get_index_info(ham_collection)
    log.info(info)
    assert info._collection_name == ham_collection
    assert info._index_type == idx_type
    assert info._params == idx_param
def test_get_index_info_partition(self, connect, ham_collection, get_hamming_index):
    '''
    target: test describe index interface
    method: create collection, create partition and add vectors in it, create index, call describe index
    expected: return code 0, and index instructure
    '''
    index_param = get_hamming_index["index_param"]
    index_type = get_hamming_index["index_type"]
    logging.getLogger().info(get_hamming_index)
    status = connect.create_partition(ham_collection, tag)
    # Fix: insert the class-level binary vectors; the original used the
    # module-level float `vectors`, which do not match a binary (hamming)
    # collection.
    status, ids = connect.insert(ham_collection, self.vectors, partition_tag=tag)
    status = connect.create_index(ham_collection, index_type, index_param)
    status, result = connect.get_index_info(ham_collection)
    logging.getLogger().info(result)
    assert result._params == index_param
    assert result._collection_name == ham_collection
    assert result._index_type == index_type
def test_get_index_info_partition_superstructrue(self, connect, superstructure_collection, get_superstructure_index):
    '''
    target: test describe index interface
    method: create collection, create partition and add vectors in it, create index, call describe index
    expected: return code 0, and index instructure
    '''
    index_param = get_superstructure_index["index_param"]
    index_type = get_superstructure_index["index_type"]
    logging.getLogger().info(get_superstructure_index)
    status = connect.create_partition(superstructure_collection, tag)
    # Fix: insert the class-level binary vectors; the original used the
    # module-level float `vectors`, which do not match a binary collection.
    status, ids = connect.insert(superstructure_collection, self.vectors, partition_tag=tag)
    status = connect.create_index(superstructure_collection, index_type, index_param)
    status, result = connect.get_index_info(superstructure_collection)
    logging.getLogger().info(result)
    assert result._params == index_param
    assert result._collection_name == superstructure_collection
    assert result._index_type == index_type
"""
******************************************************************
The following cases are used to test `drop_index` function
******************************************************************
"""
def test_drop_index(self, connect, ham_collection, get_hamming_index):
    '''
    target: test drop index interface
    method: create collection and add vectors in it, create index, call drop index
    expected: return code 0, and default index param
    '''
    index_param = get_hamming_index["index_param"]
    index_type = get_hamming_index["index_type"]
    # Query server mode (CPU/GPU); also doubles as a connectivity check.
    status, mode = connect._cmd("mode")
    assert status.OK()
    # status, ids = connect.insert(ip_collection, vectors)
    status = connect.create_index(ham_collection, index_type, index_param)
    assert status.OK()
    status, result = connect.get_index_info(ham_collection)
    logging.getLogger().info(result)
    status = connect.drop_index(ham_collection)
    assert status.OK()
    # After dropping, the collection must fall back to the default FLAT index.
    status, result = connect.get_index_info(ham_collection)
    logging.getLogger().info(result)
    assert result._collection_name == ham_collection
    assert result._index_type == IndexType.FLAT
def test_drop_index_substructure(self, connect, substructure_collection, get_substructure_index):
    '''
    target: test drop index interface
    method: create collection and add vectors in it, create index, call drop index
    expected: return code 0, and default index param
    '''
    index_param = get_substructure_index["index_param"]
    index_type = get_substructure_index["index_type"]
    # Query server mode (CPU/GPU); also doubles as a connectivity check.
    status, mode = connect._cmd("mode")
    assert status.OK()
    status = connect.create_index(substructure_collection, index_type, index_param)
    assert status.OK()
    status, result = connect.get_index_info(substructure_collection)
    logging.getLogger().info(result)
    status = connect.drop_index(substructure_collection)
    assert status.OK()
    # After dropping, the collection must fall back to the default FLAT index.
    status, result = connect.get_index_info(substructure_collection)
    logging.getLogger().info(result)
    assert result._collection_name == substructure_collection
    assert result._index_type == IndexType.FLAT
def test_drop_index_partition(self, connect, ham_collection, get_hamming_index):
    '''
    target: test drop index interface
    method: create collection, create partition and add vectors in it, create index on collection, call drop collection index
    expected: return code 0, and default index param
    '''
    index_param = get_hamming_index["index_param"]
    index_type = get_hamming_index["index_type"]
    status = connect.create_partition(ham_collection, tag)
    status, ids = connect.insert(ham_collection, vectors, partition_tag=tag)
    status = connect.create_index(ham_collection, index_type, index_param)
    assert status.OK()
    status, result = connect.get_index_info(ham_collection)
    logging.getLogger().info(result)
    # Dropping the collection index must also clear the partition's index.
    status = connect.drop_index(ham_collection)
    assert status.OK()
    # After dropping, the collection must fall back to the default FLAT index.
    status, result = connect.get_index_info(ham_collection)
    logging.getLogger().info(result)
    assert result._collection_name == ham_collection
    assert result._index_type == IndexType.FLAT
class TestIndexCollectionInvalid(object):
    """
    Test create / describe / drop index interfaces with invalid collection names
    """

    @pytest.fixture(
        scope="function",
        params=gen_invalid_collection_names()
    )
    def get_collection_name(self, request):
        # Each param is one invalid collection name to exercise.
        yield request.param

    @pytest.mark.level(1)
    def test_create_index_with_invalid_collectionname(self, connect, get_collection_name):
        # Building an index on an invalid name must be rejected.
        bad_name = get_collection_name
        status = connect.create_index(bad_name, IndexType.IVF_SQ8, {"nlist": NLIST})
        assert not status.OK()

    @pytest.mark.level(1)
    def test_get_index_info_with_invalid_collectionname(self, connect, get_collection_name):
        # Describing an index on an invalid name must be rejected.
        bad_name = get_collection_name
        status, result = connect.get_index_info(bad_name)
        assert not status.OK()

    @pytest.mark.level(1)
    def test_drop_index_with_invalid_collectionname(self, connect, get_collection_name):
        # Dropping an index on an invalid name must be rejected.
        bad_name = get_collection_name
        status = connect.drop_index(bad_name)
        assert not status.OK()
class TestCreateIndexParamsInvalid(object):
    """
    Test Building index with invalid collection names, collection names not in db
    """
    @pytest.fixture(
        scope="function",
        params=gen_invalid_index()
    )
    def get_index(self, request):
        # Each param is a dict with an invalid index_type/index_param combo.
        yield request.param

    @pytest.mark.level(1)
    def test_create_index_with_invalid_index_params(self, connect, collection, get_index):
        index_param = get_index["index_param"]
        index_type = get_index["index_type"]
        logging.getLogger().info(get_index)
        # status, ids = connect.insert(collection, vectors)
        # A missing or non-enum index type raises on the client side;
        # otherwise the server is expected to reject the request.
        if (not index_type) or (not isinstance(index_type, IndexType)):
            with pytest.raises(Exception) as e:
                status = connect.create_index(collection, index_type, index_param)
        else:
            status = connect.create_index(collection, index_type, index_param)
            assert not status.OK()

    """
    Test Building index with invalid nlist
    """
    @pytest.fixture(
        scope="function",
        params=[IndexType.FLAT,IndexType.IVFLAT,IndexType.IVF_SQ8,IndexType.IVF_SQ8H]
    )
    def get_index_type(self, request):
        yield request.param

    def test_create_index_with_invalid_nlist(self, connect, collection, get_index_type):
        status, ids = connect.insert(collection, vectors)
        status = connect.create_index(collection, get_index_type, {"nlist": INVALID_NLIST})
        # FLAT ignores nlist, so only non-FLAT index types must fail here.
        if get_index_type != IndexType.FLAT:
            assert not status.OK()

    '''
    Test Building index with empty params
    '''
    def test_create_index_with_empty_param(self, connect, collection, get_index_type):
        logging.getLogger().info(get_index_type)
        status = connect.create_index(collection, get_index_type, {})
        # Empty params are rejected for every type except FLAT...
        if get_index_type != IndexType.FLAT :
            assert not status.OK()
        # ...and the collection keeps/reports the default FLAT index.
        status, result = connect.get_index_info(collection)
        logging.getLogger().info(result)
        assert result._collection_name == collection
        assert result._index_type == IndexType.FLAT
class TestIndexAsync:
    @pytest.fixture(scope="function", autouse=True)
    def skip_http_check(self, args):
        # The async API is not available over the HTTP handler.
        if args["handler"] == "HTTP":
            pytest.skip("skip in http mode")

    """
    ******************************************************************
    The following cases are used to test `create_index` function
    ******************************************************************
    """
    @pytest.fixture(
        scope="function",
        params=gen_index()
    )
    def get_index(self, request, connect):
        # Skip index types that the server's current mode does not support.
        if str(connect._cmd("mode")[1]) == "CPU":
            if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not support in CPU mode")
        if str(connect._cmd("mode")[1]) == "GPU":
            if request.param["index_type"] == IndexType.IVF_PQ:
                pytest.skip("ivfpq not support in GPU mode")
        return request.param

    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        if str(connect._cmd("mode")[1]) == "CPU":
            if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not support in CPU mode")
        if str(connect._cmd("mode")[1]) == "GPU":
            # if request.param["index_type"] == IndexType.IVF_PQ:
            # NOTE: temporarily restricted to IVF_FLAT while debugging GPU mode.
            if request.param["index_type"] not in [IndexType.IVF_FLAT]:
                # pytest.skip("ivfpq not support in GPU mode")
                pytest.skip("debug ivf_flat in GPU mode")
        return request.param

    def check_status(self, status):
        # Callback for the async API (currently unused -- see the
        # commented-out create_index call in test_create_index below).
        logging.getLogger().info("In callback check status")
        assert status.OK()

    """
    ******************************************************************
    The following cases are used to test `create_index` function
    ******************************************************************
    """
    @pytest.mark.timeout(BUILD_TIMEOUT)
    def test_create_index(self, connect, collection, get_simple_index):
        '''
        target: test create index interface
        method: create collection and add vectors in it, create index
        expected: return code equals to 0, and search success
        '''
        index_param = get_simple_index["index_param"]
        index_type = get_simple_index["index_type"]
        logging.getLogger().info(get_simple_index)
        vectors = gen_vectors(nb, dim)
        status, ids = connect.insert(collection, vectors)
        logging.getLogger().info("start index")
        # future = connect.create_index(collection, index_type, index_param, _async=True, _callback=self.check_status)
        future = connect.create_index(collection, index_type, index_param, _async=True)
        logging.getLogger().info("before result")
        # result() blocks until the asynchronous build finishes.
        status = future.result()
        assert status.OK()

    def test_create_index_with_invalid_collectionname(self, connect):
        # A whitespace-only collection name must be rejected by the server.
        collection_name = " "
        nlist = NLIST
        index_param = {"nlist": nlist}
        future = connect.create_index(collection_name, IndexType.IVF_SQ8, index_param, _async=True)
        status = future.result()
        assert not status.OK()
|
app_one_views.py | # -*- coding: utf-8 -*-
import datetime
import redis
from flask import Blueprint, request, session, make_response, current_app, redirect, url_for
from threading import Thread
from flask_mail import Mail, Message
from flask_restx import Api, Resource
from server.api.api_server import api_server
from server.config.config import Config
from server.dbs.db import engine_psql, session_psql, session_mysql, engine_mysql, redis_session
from server.models.example_model.example_model import User
import uuid
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
# Blueprint for the "app one" API; all Resource classes below are
# registered on this flask_restx Api and served wherever the blueprint
# is mounted.
app_one_api = Blueprint('app_one_api', __name__)
api_one = Api(app_one_api)
def verify_token(token):
    '''
    Validate a token previously issued by create_token().

    :param token: serialized token string (or None)
    :return: True if the token deserializes successfully, False otherwise
    '''
    # Must use the same private secret key that signed the token.
    s = Serializer(current_app.config["SECRET_KEY"])
    try:
        # Deserialize back into a dict; raises on tampering or expiry.
        data = s.loads(token)
        return True
    except Exception as ex:
        print(ex)
        return False
def create_token(user_id):
    '''
    Generate a signed session token for a user.

    :param user_id: user id embedded in the token payload
    :return: ASCII token string
    '''
    # First arg is the private secret key (shared app config);
    # second arg is the validity period in seconds (180 days here).
    s = Serializer(current_app.config["SECRET_KEY"], expires_in=3600*24*180)
    # Serialize the user id and decode the signed bytes to ASCII.
    token = s.dumps({"id": user_id}).decode("ascii")
    return token
@api_one.route('/register')
class Register(Resource):
    """Create a new account for a posted (mail, password) pair."""

    @staticmethod
    def post():
        """Register the posted credentials.

        Returns True (and logs the user in) when the mail address is new,
        False when an account with that mail already exists.
        """
        mail = request.json['mail']
        password = request.json['password']
        with session_psql() as sess:
            query = sess.query(User).filter(User.mail == mail).first()
            if query is None:
                # Fire-and-forget notification mail; must not block the request.
                thred = Thread(target=Register.userRegisterRemind, args=())
                thred.start()
                userid = uuid.uuid4().hex
                session['userid'] = userid
                session['token'] = create_token(user_id=userid)
                # session.permanent is a boolean flag; the actual lifetime is
                # controlled by PERMANENT_SESSION_LIFETIME in the app config.
                # (The old code assigned a timedelta here, which only "worked"
                # because a non-zero timedelta is truthy.)
                session.permanent = True
                # NOTE(review): the password is persisted as received; it
                # should be hashed (e.g. werkzeug generate_password_hash)
                # before storing -- confirm against User.check_password.
                obj = User(
                    mail=mail,
                    password=password,
                    uuid=userid
                )
                sess.add(obj)
                return True
            else:
                return False

    @staticmethod
    def userRegisterRemind():
        """Send a 'new user registered' notification mail (runs in a thread)."""
        mail = Mail(api_server)
        with api_server.app_context():
            message = Message(subject='AlanChat新用户注册提醒!', recipients=[api_server.config.get('MAIL_USERNAME'), ],
                              body='有新用户注册啦!')
            mail.send(message=message)
@api_one.route('/login')
class Login(Resource):
    """Authenticate a (mail, password) pair and open a session."""

    @staticmethod
    def post():
        """Log the user in.

        Returns False on unknown mail or wrong password, otherwise
        [True, name, avatar, uuid] and populates the session.
        """
        mail = request.json['mail']
        password = request.json['password']
        with session_psql() as sess:
            query = sess.query(User).filter(User.mail == mail).first()
            if query is None or not query.check_password(password):
                return False
            else:
                session['userid'] = query.uuid
                session['token'] = create_token(user_id=query.uuid)
                # session.permanent is a boolean flag; the lifetime itself is
                # set via PERMANENT_SESSION_LIFETIME in the app config.
                # (Previously a timedelta was assigned, which only "worked"
                # because a non-zero timedelta is truthy.)
                session.permanent = True
                return [True, query.name, query.avatar, query.uuid]
@api_one.route('/getUserInfo')
class GetUserInfo(Resource):
    """Return [name, avatar, uuid] for the currently logged-in user."""

    @staticmethod
    def get():
        userid = session['userid']
        with session_psql() as sess:
            row = sess.query(User.name, User.avatar, User.uuid).filter(User.uuid == userid).first()
            name, avatar, uid = row
            return [name, avatar, uid]
@api_one.route('/initChatList')
class InitChatList(Resource):
    """Build the initial chat list: one entry per registered user."""

    @staticmethod
    def get():
        """Return a list of chat entries for every user in the database.

        Each entry carries the user's display info plus a canned greeting
        message stamped with the current time.
        """
        userid = session['userid']  # raises KeyError when not logged in
        all_user = []
        with session_psql() as sess:
            query = sess.query(User).all()
            # enumerate() replaces the old range(len(...)) index loop.
            for i, data in enumerate(query):
                info = {
                    'id': i + 1,
                    'user': {
                        'name': data.name,
                        'img': data.avatar,
                        'user_id': data.uuid
                    },
                    'messages': [
                        {
                            'content': '你好,我们已经是新朋友了',
                            'date': str(datetime.datetime.now())
                        }
                    ],
                    'index': i + 1
                }
                all_user.append(info)
            return all_user
@api_one.route('/judgeLoginStatus')
class Wss(Resource):
    # NOTE(review): another class further down is also named `Wss`; routing
    # still works because flask_restx registers each route at decoration
    # time, but the duplicate name shadows this class at module level --
    # consider renaming.
    # Handles GET requests.
    @staticmethod
    def get():
        # True/False depending on whether the session token still verifies.
        res = verify_token(session.get('token'))
        return res
@api_one.route('/getSession')
class Wss(Resource):
    # NOTE(review): duplicate class name `Wss` (same as /judgeLoginStatus);
    # routing works, but the duplicate shadows the earlier class at module
    # level -- consider renaming.
    # Handles GET requests.
    @staticmethod
    def get():
        # Simple liveness/echo endpoint.
        return 'hello world'
@app_one_api.after_request
def af_req(resp):
    """Attach CORS and content-type headers to every blueprint response."""
    response = make_response(resp)
    extra_headers = {
        'Access-Control-Allow-Credentials': 'true',
        'Access-Control-Allow-Origin': Config.accessControlAllowOrigin,
        'Access-Control-Allow-Methods': 'PUT,POST,GET,DELETE,OPTIONS',
        'Access-Control-Allow-Headers': 'Content-Type, Content-Length, Authorization, Accept, '
                                        'X-Requested-With , yourHeaderFeild ',
        'X-Powered-By': '3.2.1',
        'Content-Type': 'application/json;charset=utf-8',
    }
    for key, value in extra_headers.items():
        response.headers[key] = value
    return response
@api_one.route('/test_redis')
class Redis(Resource):
    # Smoke-test endpoint: reads key 'name' from the shared redis session.
    # Handles GET requests.
    @staticmethod
    def get():
        return redis_session.get('name')
|
test_exception.py | # coding=utf-8
import threading
from qrpc.client import RpcClient
from qrpc.server import Server
def test_fault_exception():
    """A server-side RPCFaultException must propagate to the client with
    its original code and message."""
    from qrpc.exceptions import RPCFaultException
    HOST_PORT = ('127.0.0.1', 8080)
    zero_division_error = RPCFaultException(
        code=99,
        message="ZeroDivisionError: integer division or modulo by zero"
    )
    server = Server()

    @server.registe("service/div")
    def test_div(x, y):
        if y == 0:
            raise zero_division_error
        return x / y

    # daemon=True (setDaemon() is deprecated) so the server thread dies
    # together with the test process.
    t = threading.Thread(target=server.run, args=HOST_PORT, daemon=True)
    t.start()
    rpc = RpcClient(*HOST_PORT)
    div_result = rpc.service.div.call(x=1, y=0)
    try:
        div_result.data
    except RPCFaultException as e:
        assert e.code == zero_division_error.code
        assert e.message == zero_division_error.message
    else:
        # Previously the test passed silently when no exception was raised.
        raise AssertionError("expected RPCFaultException was not raised")
def test_communication_exception():
    """Calling a host/port where nothing listens must raise
    RPCCommunicationException on the client side."""
    from qrpc.exceptions import RPCCommunicationException
    SERVER_HOST_PORT = ('127.0.0.1', 8080)
    CLIENT_HOST_PORT = ('127.0.0.1', 9090)
    server = Server()

    @server.registe("service/hello")
    def test_hello(name=None):
        if name:
            return "hello " + name
        return "hello anonymous"

    # daemon=True (setDaemon() is deprecated) so the server thread dies
    # together with the test process.
    t = threading.Thread(target=server.run, args=SERVER_HOST_PORT, daemon=True)
    t.start()
    # The client deliberately targets a port with no server behind it.
    rpc = RpcClient(*CLIENT_HOST_PORT)
    hello_result = rpc.service.hello.call(name="world")
    try:
        hello_result.data
    except RPCCommunicationException:
        pass
    else:
        # Make sure that rpc call must throw RPCCommunicationException.
        # (A bare `raise` with no active exception would surface as a
        # confusing RuntimeError; fail explicitly instead.)
        raise AssertionError("expected RPCCommunicationException was not raised")
|
result_detail.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 Takeshi HASEGAWA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import datetime
import math
import os
import pickle
import re
import sys
import threading
import traceback
from datetime import datetime
import cv2
import numpy as np
from ikalog.api import APIClient
from ikalog.scenes.stateful_scene import StatefulScene
from ikalog.inputs.filters import OffsetFilter
from ikalog.utils import *
from ikalog.utils.player_name import *
class ResultDetail(StatefulScene):
def evaluate_image_accuracy(self, frame):
r_win = self.mask_win.match_score(frame)[1]
r_lose = self.mask_lose.match_score(frame)[1]
r_x = self.mask_x.match_score(frame)[1]
loss_win = (1.0 - r_win) ** 2
loss_lose = (1.0 - r_lose) ** 2
loss_x = (1.0 - r_x) ** 2
return 1.0 - math.sqrt((loss_win + loss_lose + loss_x) / 3)
#
# AKAZE ベースのオフセット/サイズ調整
#
def result_detail_normalizer(self, img):
    """Produce a feature image used for AKAZE keypoint matching.

    Blacks out the left portion of the screen that would generate
    unwanted keypoints, then combines white-ish and dark-ish masks into
    a single feature image.
    """
    # Remove regions that should not contribute keypoints.
    img = copy.deepcopy(img)
    cv2.rectangle(img, (0, 000), (680, 720), (0, 0, 0), -1)
    # Build the feature image from white and dark masks.
    white_filter = matcher.MM_WHITE()
    dark_filter = matcher.MM_DARK(visibility=(0, 16))
    img_w = white_filter(img)
    img_dark = 255 - dark_filter(img)
    img_features = img_dark + img_w
    # For the right strip (x 1000..1280) subtract the white mask instead;
    # presumably to suppress bright UI elements there -- TODO confirm.
    img_features[:, 1000:1280] = \
        img_dark[:, 1000:1280] - img_w[:, 1000:1280]
    # cv2.imshow('features', img_features)
    # cv2.waitKey(10000)
    return img_features
def get_keypoints(self, img):
    """Detect AKAZE keypoints in *img* and compute their descriptors.

    Returns the (keypoints, descriptors) pair from detectAndCompute().
    """
    akaze = cv2.AKAZE_create()
    return akaze.detectAndCompute(img, None)
def filter_matches(self, kp1, kp2, matches, ratio=0.75):
    """Apply Lowe's ratio test to knn match pairs.

    Keeps a match only when its best candidate is clearly better than
    the second-best one, then returns the matched point coordinates from
    both keypoint sets and the matched keypoint pairs.
    """
    good = [
        pair[0] for pair in matches
        if len(pair) == 2 and pair[0].distance < pair[1].distance * ratio
    ]
    mkp1 = [kp1[m.queryIdx] for m in good]
    mkp2 = [kp2[m.trainIdx] for m in good]
    p1 = np.float32([kp.pt for kp in mkp1])
    p2 = np.float32([kp.pt for kp in mkp2])
    kp_pairs = zip(mkp1, mkp2)
    return p1, p2, kp_pairs
def tuples_to_keypoints(self, tuples):
    """Rebuild cv2.KeyPoint objects from the plain tuples produced by
    keypoints_to_tuples() (cv2.KeyPoint itself is not picklable)."""
    return [
        cv2.KeyPoint(pt[0], pt[1], size, angle, response, octave, class_id)
        for pt, size, angle, response, octave, class_id in tuples
    ]
def keypoints_to_tuples(self, points):
    """Flatten cv2.KeyPoint objects into picklable plain tuples."""
    return [
        (p.pt, p.size, p.angle, p.response, p.octave, p.class_id)
        for p in points
    ]
def load_model_from_file(self, filename):
    """Load (geometry, keypoints, descriptors) pickled by save_model_to_file().

    Uses a context manager so the file handle is closed even when
    pickle.load raises (the old open/close pair leaked on error).
    """
    with open(filename, 'rb') as f:
        l = pickle.load(f)
    self.ref_image_geometry = l[0]
    self.ref_keypoints = self.tuples_to_keypoints(l[1])
    self.ref_descriptors = l[2]
def save_model_to_file(self, filename):
f = open(filename, 'wb')
pickle.dump([
self.ref_image_geometry,
self.keypoints_to_tuples(self.ref_keypoints),
self.ref_descriptors,
], f)
f.close()
def rebuild_model(self, dest_filename, src_filename=None, img=None, normalizer_func=None):
    """(Re)create the AKAZE model file from a reference image.

    Either *img* or *src_filename* must be provided; the optional
    *normalizer_func* preprocesses the image before keypoint extraction.
    The resulting keypoints/descriptors are stored on self and written
    to *dest_filename*.
    """
    if img is None:
        img = imread(src_filename, 0)
    assert img is not None
    if normalizer_func is not None:
        img = normalizer_func(img)
    assert img is not None
    self.ref_keypoints, self.ref_descriptors = \
        self.get_keypoints(img)
    # Keep only (height, width); channel count is irrelevant to matching.
    self.ref_image_geometry = img.shape[:2]
    self.save_model_to_file(dest_filename)
    IkaUtils.dprint('%s: Created model data %s' % (self, dest_filename))
def load_akaze_model(self):
    """Load the cached AKAZE model, rebuilding it from the reference
    image when the cache is missing, unreadable, or empty."""
    model_filename = IkaUtils.get_path(
        'data', 'result_detail_features.akaze.model')
    try:
        self.load_model_from_file(model_filename)
        # `is None` instead of `== None`; raise a real exception instead
        # of a bare `raise` (which has no active exception here).
        if self.ref_keypoints is None:
            raise ValueError('empty akaze model')
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        IkaUtils.dprint(
            '%s: Failed to load akaze model. trying to rebuild...' % self)
        self.rebuild_model(
            model_filename,
            img=imread('data/result_detail_features.png'),
            normalizer_func=self.result_detail_normalizer
        )
        self.load_model_from_file(model_filename)
def auto_warp(self, context):
    """Detect the screen offset via AKAZE matching and return the frame
    warped back to 1280x720, or None when it cannot be recovered."""
    # Estimate the display offset automatically (AKAZE-based).
    frame = context['engine'].get('frame', None)
    if frame is None:
        return None
    keypoints, descs = self.get_keypoints(
        self.result_detail_normalizer(frame))
    # NOTE: this local `matcher` shadows the imported `matcher` module
    # for the rest of the function.
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
    raw_matches = matcher.knnMatch(
        descs,
        trainDescriptors=self.ref_descriptors,
        k=2
    )
    p2, p1, kp_pairs = self.filter_matches(
        keypoints,
        self.ref_keypoints,
        raw_matches,
    )
    if len(p1) >= 4:
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
        print('%d / %d inliers/matched' % (np.sum(status), len(status)))
    else:
        H, status = None, None
        print('%d matches found, not enough for homography estimation' % len(p1))
        # NOTE(review): a bare `raise` with no active exception raises
        # RuntimeError; returning None or raising an explicit exception
        # would be clearer -- confirm intended behavior before changing.
        raise
    w = 1280
    h = 720
    corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
    pts2 = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
    # Project the reference corners through the homography to get the
    # source quad, then compute the inverse perspective transform.
    pts1 = np.float32(cv2.perspectiveTransform(
        corners.reshape(1, -1, 2), H).reshape(-1, 2) + (0, 0))
    M = cv2.getPerspectiveTransform(pts1, pts2)
    # out = cv2.drawKeypoints(img2, keypoints1, None)
    new_frame = cv2.warpPerspective(frame, M, (w, h))
    # Does the warped image still line up with the win/lose mask?
    matched = ImageUtils.match_with_mask(
        new_frame, self.winlose_gray, 0.997, 0.22)
    if matched:
        return new_frame
    IkaUtils.dprint('%s: auto_warp() function broke the image.' % self)
    return None
def adjust_method_generic(self, context, l):
    """Append candidate frames (resized for wrong resolution/aspect)
    plus the unmodified frame to *l*, each with an accuracy score."""
    frame = context['engine']['frame']
    # Measure the width of the black border above the WIN/LOSE display.
    img1 = frame[:30, 30:50, :]
    img2 = np.sum(img1, axis=2)
    img2 = np.sum(img2, axis=1)
    img3 = np.array(range(img2.shape[0]))
    # Zero out rows that contain any signal; the max surviving index is
    # the last fully-black row, i.e. the vertical margin.
    img3[img2 > 0] = 0
    v_margin = np.amax(img3)
    if v_margin > 0:
        my = v_margin + 1
        # Horizontal margin scaled to the 16:9 aspect ratio.
        mx = int(my * 1280 / 720)
        new_frame = cv2.resize(frame[my: -my, :], (1280, 720))
        l.append({
            'frame': new_frame,
            'score': self.evaluate_image_accuracy(new_frame),
            'desc': 'Wrong resolution & aspect'
        })
        new_frame = cv2.resize(frame[my: -my, mx:-mx], (1280, 720))
        l.append({
            'frame': new_frame,
            'score': self.evaluate_image_accuracy(new_frame),
            'desc': 'Wrong resolution'
        })
    # Always include the unmodified frame as an acceptable candidate.
    l.append({
        'frame': frame,
        'score': self.evaluate_image_accuracy(frame),
        'acceptable': True,
    })
def adjust_method_offset(self, context, l):
    """Search small x/y slide offsets and append the best-scoring shifted
    frame to *l* (only when a non-zero offset wins)."""
    # Detect slide offset
    filter = OffsetFilter(self)
    filter.enable()
    # OffsetFilter reads these attributes from its owner.
    self.out_width = 1280
    self.out_height = 720
    best_match = (context['engine']['frame'], 0.0, 0, 0)
    offset_list = [0, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5]
    gray_frame = cv2.cvtColor(context['engine']['frame'], cv2.COLOR_BGR2GRAY)
    for ox in offset_list:
        for oy in offset_list:
            filter.offset = (ox, oy)
            img = filter.execute(gray_frame)
            score = self.evaluate_image_accuracy(img)
            if best_match[1] < score:
                best_match = (img, score, ox, oy)
            if 0:
                l.append({
                    'frame': img,
                    'score': score,
                    'desc': 'Offset (%s, %s)' % (ox, oy),
                    # fixed: previously referenced undefined names (ax, ay),
                    # which would raise NameError if this debug path ran
                    'offset': (ox, oy)
                })
    if best_match[2] != 0 or best_match[3] != 0:
        filter.offset = (best_match[2], best_match[3])
        new_frame = filter.execute(context['engine']['frame'])
        l.append({
            'frame': new_frame,
            # fixed: report the winning offset's score, not the stale
            # `score` left over from the last loop iteration
            'score': best_match[1],
            'desc': 'Offset (%s, %s)' % (best_match[2], best_match[3]),
            'acceptable': True,
            'offset': (best_match[2], best_match[3]),
        })
def adjust_image(self, context):
    """Pick the best-normalized frame among candidates produced by the
    generic (resolution/aspect) and offset adjustment strategies."""
    l = []
    self.adjust_method_generic(context, l)
    self.adjust_method_offset(context, l)
    if len(l) > 0:
        # Highest accuracy score wins.
        best = sorted(l, key=lambda x: x['score'], reverse=True)[0]
        img = best['frame']
        if best.get('desc', None):
            # A non-empty desc means a corrective candidate beat the raw
            # frame, i.e. the capture setup is probably misconfigured.
            IkaUtils.dprint(
                '%s: Capture setting might be wrong. %s (recover score=%f)' %
                (self, best['desc'], best['score']))
            self._call_plugins_later(
                'on_result_detail_log',
                params={'desc': best['desc']}
            )
        if best.get('offset',None):
            self._call_plugins('on_result_detail_calibration', best.get('offset'))
    else:
        # Should not reach here
        IkaUtils.dprint('%s: [BUG] Failed to normalize image' % self)
        img = context['engine']['frame']
    if 0:
        # Debug dump of all candidate scores (disabled).
        for e in l:
            print(e['score'], e.get('desc', '(none)'))
    return img
def async_recoginiton_worker(self, context):
    """Background worker: classify each player's weapon image via the
    local and/or remote API clients, then fire the result plugins."""
    IkaUtils.dprint('%s: weapons recoginition started.' % self)
    weapons_list = []
    for player in context['game']['players']:
        weapons_list.append(player.get('img_weapon', None))
    # local classifier (if configured); failures are logged, not fatal
    try:
        if self._client_local is not None:
            weapon_response_list = self._client_local.recoginize_weapons(
                weapons_list)
            for entry_id in range(len(weapon_response_list)):
                context['game']['players'][entry_id]['weapon'] = \
                    weapon_response_list[entry_id]
    except:
        IkaUtils.dprint('Exception occured in weapon recoginization.')
        IkaUtils.dprint(traceback.format_exc())
    # remote classifier; its results overwrite the local ones
    try:
        if self._client_remote is not None:
            weapon_response_list = self._client_remote.recoginize_weapons(
                weapons_list)
            for entry_id in range(len(weapon_response_list)):
                context['game']['players'][entry_id]['weapon'] = \
                    weapon_response_list[entry_id]
    except:
        IkaUtils.dprint('Exception occured in weapon recoginization.')
        IkaUtils.dprint(traceback.format_exc())
    IkaUtils.dprint('%s: weapons recoginition done.' % self)
    if 0:
        # Disabled: per-kill name detection and kill statistics.
        self._detect_names_per_my_kill(context)
        self._analyze_kills_per_weapon(context)
        self._analyze_kills_per_player(context)
    self._call_plugins_later('on_result_detail')
    self._call_plugins_later('on_game_individual_result')
def is_entry_me(self, img_entry):
    """Decide from brightness whether this scoreboard row is the player's.

    The leftmost 43px of the player's own row contains a bright marker;
    we threshold it and compare the white-pixel mass against a fixed
    fraction of the region.
    """
    # Convert to grayscale if needed (entry images may be BGR or gray).
    if len(img_entry.shape) > 2 and img_entry.shape[2] != 1:
        img_me = cv2.cvtColor(img_entry[:, 0:43], cv2.COLOR_BGR2GRAY)
    else:
        img_me = img_entry[:, 0:43]
    img_me = cv2.threshold(img_me, 230, 255, cv2.THRESH_BINARY)[1]
    me_score = np.sum(img_me)
    # Normalize against 10% of the fully-white mass of a 43x45 region.
    # The divisor is a constant, so the old try/except ZeroDivisionError
    # was dead code and has been removed.
    me_score_normalized = me_score / (43 * 45 * 255 / 10)
    # print("score=%3.3f" % me_score_normalized)
    return (me_score_normalized > 1)
def guess_fest_title_ja(self, img_fest_title):
    """Extract (gender, level, team) from a Japanese Splatfest title image.

    Returns (None, None, None) when the crop is too narrow to contain a
    title; team recognition is not implemented here (always None).
    """
    img_fest_title_hsv = cv2.cvtColor(img_fest_title, cv2.COLOR_BGR2HSV)
    # Yellow-ish hue AND high brightness -> fest title text mask.
    yellow = cv2.inRange(img_fest_title_hsv[:, :, 0], 32 - 2, 32 + 2)
    yellow2 = cv2.inRange(img_fest_title_hsv[:, :, 2], 240, 255)
    img_fest_title_mask = yellow & yellow2
    is_fes = np.sum(img_fest_title_mask) > img_fest_title_mask.shape[
        0] * img_fest_title_mask.shape[1] * 16
    # Count text pixels per column (rows collapsed).
    img_fest_title_hist = np.sum(
        img_fest_title_mask / 255, axis=0)
    a = np.array(range(len(img_fest_title_hist)), dtype=np.int32)
    b = np.extract(img_fest_title_hist > 0, a)
    x1 = np.amin(b)
    x2 = np.amax(b)
    if (x2 - x1) < 4:
        return None, None, None
    # Crop to the minimal bounding box.
    img_fest_title_new = img_fest_title[:, x1:x2]
    # Boy/girl suffix: crop a fixed-width strip from the right edge.
    gender_x1 = x2 - 36
    gender_x2 = x2
    img_fest_gender = img_fest_title_mask[:, gender_x1:gender_x2]
    # Level prefix strip (futsuu-no / makoto-no / super / charisma / eien).
    img_fest_level = img_fest_title_mask[:, 0:52]
    # Fixed: gender/level were previously left unbound (NameError at the
    # return below) whenever the recognizers were unset/falsy.
    gender = None
    level = None
    try:
        if self.fest_gender_recoginizer:
            gender = self.fest_gender_recoginizer.match(
                cv2.cvtColor(img_fest_gender, cv2.COLOR_GRAY2BGR))
    except:
        IkaUtils.dprint(traceback.format_exc())
        gender = None
    try:
        if self.fest_level_recoginizer:
            level = self.fest_level_recoginizer.match(
                cv2.cvtColor(img_fest_level, cv2.COLOR_GRAY2BGR))
    except:
        IkaUtils.dprint(traceback.format_exc())
        level = None
    team = None
    return gender, level, team
def guess_fest_title_en_NA(self, img_fest_title):
    """Stub: NA-English fest title recognition is not implemented."""
    IkaUtils.dprint(
        '%s: Fest recoginiton in this language is not implemented'
        % self
    )
    return (None,) * 3
def guess_fest_title_en_UK(self, img_fest_title):
    """Stub: UK-English fest title recognition is not implemented."""
    IkaUtils.dprint(
        '%s: Fest recoginiton in this language is not implemented'
        % self
    )
    return (None,) * 3
def guess_fest_title(self, img_fest_title):
    """Dispatch fest-title recognition to the handler of the first game
    language that has one; (None, None, None) if none is available."""
    handlers = {
        'ja': self.guess_fest_title_ja,
        'en_NA': self.guess_fest_title_en_NA,
        'en_UK': self.guess_fest_title_en_UK,
    }
    func = next(
        (handlers[lang] for lang in Localization.get_game_languages()
         if lang in handlers),
        None,
    )
    if func is None:
        IkaUtils.dprint(
            '%s: Fest recoginiton in this language is not implemented'
            % self
        )
        return None, None, None
    return func(img_fest_title)
def analyze_team_colors(self, context, img):
    """Sample one pixel per team area to estimate the team colors.

    Returns (my_team_color, counter_team_color), each a dict with 'rgb'
    and 'hsv' triplets. Requires context['game']['won'] to decide which
    sample position belongs to the player's team.
    """
    # Guess team colors from the screenshot.
    assert 'won' in context['game']
    assert img is not None
    # One sample at y~115 and one at y~452 (x=1228); when the player won,
    # the first sample is their team -- presumably the winning team is
    # rendered in the upper block. TODO confirm.
    if context['game']['won']:
        my_team_color_bgr = img[115:116, 1228:1229]
        counter_team_color_bgr = img[452:453, 1228:1229]
    else:
        counter_team_color_bgr = img[115:116, 1228:1229]
        my_team_color_bgr = img[452:453, 1228:1229]
    my_team_color = {
        'rgb': cv2.cvtColor(my_team_color_bgr, cv2.COLOR_BGR2RGB).tolist()[0][0],
        'hsv': cv2.cvtColor(my_team_color_bgr, cv2.COLOR_BGR2HSV).tolist()[0][0],
    }
    counter_team_color = {
        'rgb': cv2.cvtColor(counter_team_color_bgr, cv2.COLOR_BGR2RGB).tolist()[0][0],
        'hsv': cv2.cvtColor(counter_team_color_bgr, cv2.COLOR_BGR2HSV).tolist()[0][0],
    }
    return (my_team_color, counter_team_color)
def _detect_names_per_my_kill(self, context):
    """Attribute each of my kills to an enemy player by matching the
    kill-feed name image against the enemy team's name images."""
    all_players = context['game']['players']
    me = IkaUtils.getMyEntryFromContext(context)
    # Enemy team = everyone not on my team.
    counter_team = \
        list(filter(lambda x: x['team'] != me['team'], all_players))
    img_name_counter_team = \
        list(map(lambda e: e['img_name_normalized'], counter_team))
    ct_name_classifier = PlayerNameClassifier(img_name_counter_team)
    for kill_index in range(len(context['game'].get('kill_list', []))):
        kill = context['game']['kill_list'][kill_index]
        # Skip kills without a captured name image.
        if kill.get('img_kill_hid', None) is None:
            continue
        player_index = ct_name_classifier.predict(kill['img_kill_hid'])
        if player_index is None:
            continue
        if 1:
            IkaUtils.dprint('%s: my kill %d -> player %d' %
                            (self, kill_index, player_index))
        kill['player'] = counter_team[player_index]
def _analyze_kills_per_weapon(self, context):
    """Tally my kills by victim weapon into
    context['game']['kills_per_weapon'] and return the tally."""
    tally = {}
    for kill in context['game'].get('kill_list', []):
        if 'player' in kill:
            weapon = kill['player']['weapon']
            tally[weapon] = tally.get(weapon, 0) + 1
    context['game']['kills_per_weapon'] = tally
    IkaUtils.dprint('%s: _analyze_kills_per_weapon result: %s' % (self, tally))
    return tally
def _analyze_kills_per_player(self, context):
for kill in context['game'].get('kill_list', []):
if 'player' in kill:
player = kill['player']
player['my_kills'] = player.get('my_kills', 0) + 1
if 0:
IkaUtils.dprint('%s: _analyze_kills_per_player' % self)
for player in context['game']['players']:
IkaUtils.dprint(' player %d: my_kills = %d' % (
context['game']['players'].index(player),
player['my_kills']
))
def analyze_entry(self, img_entry):
    """Parse one scoreboard row into a result dict.

    Crops the rank/weapon/name/score/K/D sub-images, detects whether the
    row is the player's own and whether this was a Splatfest / ranked /
    turf-war result, then runs the number/udemae recognizers when they
    are configured. Returns the entry dict.
    """
    # Left edge of each player's info block (absolute x in the frame).
    entry_left = 610
    # Width of each player's info block.
    entry_width = 610
    # Height of each player's info block.
    entry_height = 46
    # Per-field start positions and widths inside one entry
    # (absolute x minus entry_left).
    entry_xoffset_weapon = 760 - entry_left
    entry_xoffset_weapon_me = 719 - entry_left
    entry_width_weapon = 47
    entry_xoffset_name = 809 - entry_left
    entry_xoffset_name_me = 770 - entry_left
    entry_width_name = 180
    entry_xoffset_nawabari_score = 995 - entry_left
    entry_width_nawabari_score = 115
    entry_xoffset_score_p = entry_xoffset_nawabari_score + entry_width_nawabari_score
    entry_width_score_p = 20
    entry_xoffset_kd = 1185 - entry_left
    entry_width_kd = 31
    entry_height_kd = 21
    # The player's own row uses slightly different x offsets.
    me = self.is_entry_me(img_entry)
    if me:
        weapon_left = entry_xoffset_weapon_me
        name_left = entry_xoffset_name_me
        rank_left = 2
    else:
        weapon_left = entry_xoffset_weapon
        name_left = entry_xoffset_name
        rank_left = 43
    img_rank = img_entry[20:45, rank_left:rank_left + 43]
    img_weapon = img_entry[:, weapon_left:weapon_left + entry_width_weapon]
    img_name = img_entry[:, name_left:name_left + entry_width_name]
    img_score = img_entry[
        :, entry_xoffset_nawabari_score:entry_xoffset_nawabari_score + entry_width_nawabari_score]
    img_score_p = img_entry[
        :, entry_xoffset_score_p:entry_xoffset_score_p + entry_width_score_p]
    ret, img_score_p_thresh = cv2.threshold(cv2.cvtColor(
        img_score_p, cv2.COLOR_BGR2GRAY), 230, 255, cv2.THRESH_BINARY)
    img_kills = img_entry[0:entry_height_kd,
                          entry_xoffset_kd:entry_xoffset_kd + entry_width_kd]
    img_deaths = img_entry[entry_height_kd:entry_height_kd *
                           2, entry_xoffset_kd:entry_xoffset_kd + entry_width_kd]
    img_fes_title = img_name[0:(entry_height // 2), :]
    # Splatfest title detection: yellow-ish hue AND high brightness
    # over the upper half of the name area.
    img_fes_title_hsv = cv2.cvtColor(img_fes_title, cv2.COLOR_BGR2HSV)
    yellow = cv2.inRange(img_fes_title_hsv[:, :, 0], 32 - 2, 32 + 2)
    yellow2 = cv2.inRange(img_fes_title_hsv[:, :, 2], 240, 255)
    img_fes_title_mask = yellow & yellow2
    is_fes = np.sum(img_fes_title_mask) > img_fes_title_mask.shape[
        0] * img_fes_title_mask.shape[1] * 16
    if is_fes:
        fes_gender, fes_level, fes_team = self.guess_fest_title(
            img_fes_title
        )
    # Not a fest result and a 'p' suffix visible (avg = 55.0) -> turf war
    # ("nawabari"); otherwise -> ranked battle.
    isRankedBattle = (not is_fes) and (
        np.average(img_score_p_thresh[:, :]) < 16)
    isNawabariBattle = (not is_fes) and (not isRankedBattle)
    entry = {
        "me": me,
        "img_rank": img_rank,
        "img_weapon": img_weapon,
        "img_name": img_name,
        "img_name_normalized": normalize_player_name(img_name),
        "img_score": img_score,
        "img_kills": img_kills,
        "img_deaths": img_deaths,
    }
    if is_fes:
        entry['img_fes_title'] = img_fes_title
        if fes_gender and ('ja' in fes_gender):
            entry['gender'] = fes_gender['ja']
        if fes_level and ('ja' in fes_level):
            entry['prefix'] = fes_level['ja']
        if fes_gender and ('en' in fes_gender):
            entry['gender_en'] = fes_gender['en']
        if fes_level and ('boy' in fes_level):
            entry['prefix_en'] = fes_level['boy']
    if self.udemae_recoginizer and isRankedBattle:
        try:
            entry['udemae_pre'] = self.udemae_recoginizer.match(
                entry['img_score']).upper()
        except:
            IkaUtils.dprint('Exception occured in Udemae recoginization.')
            IkaUtils.dprint(traceback.format_exc())
    if self.number_recoginizer:
        try:
            entry['rank'] = self.number_recoginizer.match_digits(
                entry['img_rank'])
            entry['kills'] = self.number_recoginizer.match_digits(
                entry['img_kills'])
            entry['deaths'] = self.number_recoginizer.match_digits(
                entry['img_deaths'])
            if isNawabariBattle:
                entry['score'] = self.number_recoginizer.match_digits(
                    entry['img_score'])
        except:
            IkaUtils.dprint('Exception occured in K/D recoginization.')
            IkaUtils.dprint(traceback.format_exc())
    return entry
def extract_entries(self, context, img=None):
    """Crop the eight per-player scoreboard rows out of *img*.

    When *img* is None the frame is first normalized via adjust_image().
    Returns a list of 8 sub-images, one per scoreboard slot (top block
    of four, then bottom block of four).
    """
    if img is None:
        img = self.adjust_image(context)
    # Geometry of a scoreboard row (left edge, width, height in pixels).
    entry_left = 610
    entry_width = 630
    entry_height = 45
    # Top y coordinate of each of the 8 rows.
    entry_top = [101, 166, 231, 296, 431, 496, 561, 626]
    # Comprehension replaces the old range(len(...)) index loop.
    return [
        img[top:top + entry_height, entry_left:entry_left + entry_width]
        for top in entry_top
    ]
def is_entries_still_sliding(self, img_entries):
    """Return the horizontal spread (in px) of the "x" marks across rows.

    While the scoreboard rows slide in, the "x" mark in front of each
    K/D count sits at a different x offset per row; once the animation
    has settled the spread drops to 0.

    NOTE(review): assumes at least one row produces white pixels in the
    probed band — np.amin/np.amax raise on an empty x_pos_list.
    """
    white_filter = matcher.MM_WHITE()
    array0to14 = np.array(range(15), dtype=np.int32)
    x_pos_list = []
    for img_entry in img_entries:
        # 13-px-wide band at absolute x=1173 (the "x" mark), rebased to
        # the entry rect which starts at x=610.
        img_XX = img_entry[:, 1173 - 610: 1173 + 13 - 610]  # -> 2D
        img_XX_hist = np.sum(white_filter(img_XX), axis=0)  # -> 1D
        # Column indices (0..14) that contain any white pixel.
        img_XX_hist_x = np.extract(img_XX_hist > 0, array0to14[
            0:img_XX_hist.shape[0]])
        if img_XX_hist_x.shape[0] == 0:
            continue
        # Mean x position of the mark within this row.
        img_XX_hist_x_avg = np.average(img_XX_hist_x)
        x_pos_list.append(img_XX_hist_x_avg)

    x_avg_min = np.amin(x_pos_list)
    x_avg_max = np.amax(x_pos_list)
    x_diff = int(x_avg_max - x_avg_min)

    if 0:  # debug
        print('is_entries_still_sliding: x_pos_list %s min %f max %f diff %d' %
              (x_pos_list, x_avg_min, x_avg_max, x_diff))

    return x_diff
def analyze(self, context):
    """Analyze the settled scoreboard frame and populate context['game'].

    Fills players, won, team colors, is_fes and a deep copy of the
    scoreboard frame, then schedules the 'on_result_detail_still'
    plugin hook.  Returns True.

    Fix: removed the unused local ``weapon_list``.
    """
    context['game']['players'] = []

    img = self.adjust_image(context)
    img_entries = self.extract_entries(context, img)

    # Adjust img_entries rect using result of
    # self.is_entries_still_sliding().
    # This allows more accurate weapon classification.
    diff_x = self.is_entries_still_sliding(img_entries)
    if diff_x > 0:
        white_filter = matcher.MM_WHITE()
        index = 7
        # Find the last player's index.
        while (0 < index) and \
                (np.sum(white_filter(img_entries[index])) < 1000):
            index -= 1

        # adjust the player's rect 3 times.
        for i in range(3):
            diff_x = self.is_entries_still_sliding(img_entries)
            img_entry = img_entries[index]
            w = img_entry.shape[1] - diff_x
            img_entries[index][:, 0: w] = img_entry[:, diff_x: w + diff_x]

        if 0:  # debug view of the adjusted rows
            cv2.imshow('a', img_entries[0])
            cv2.imshow('b', img_entries[index])
            cv2.waitKey(0)

    for entry_id in range(len(img_entries)):
        img_entry = img_entries[entry_id]
        e = self.analyze_entry(img_entry)
        if e.get('rank', None) is None:
            continue

        # team, rank_in_team: rows 0-3 belong to team 1 (upper half).
        e['team'] = 1 if entry_id < 4 else 2
        e['rank_in_team'] = entry_id + \
            1 if e['team'] == 1 else entry_id - 3

        # won: my row appearing in the upper half means a win.
        if e['me']:
            context['game']['won'] = (entry_id < 4)

        context['game']['players'].append(e)

        if 0:  # debug print without bulky image fields
            e_ = e.copy()
            for f in list(e.keys()):
                if f.startswith('img_'):
                    del e_[f]
            print(e_)

    if 0:
        worker = threading.Thread(
            target=self.async_recoginiton_worker, args=(context,))
        worker.start()
    else:
        self.async_recoginiton_worker(context)

    # Team colors (my team first, opposing team second).
    team_colors = self.analyze_team_colors(context, img)
    context['game']['my_team_color'] = team_colors[0]
    context['game']['counter_team_color'] = team_colors[1]

    # SplatFest: a fest-title prefix on the first player marks a fest match.
    # NOTE(review): raises IndexError if no player rows were recognized —
    # behavior preserved from the original.
    context['game']['is_fes'] = ('prefix' in context['game']['players'][0])

    # context['game']['timestamp'] = datetime.now()

    context['game']['image_scoreboard'] = \
        copy.deepcopy(context['engine']['frame'])

    self._call_plugins_later('on_result_detail_still')
    return True
def reset(self):
    """Reset per-match tracking state before the next detection cycle."""
    super(ResultDetail, self).reset()

    # Sentinel meaning "long ago" so the first match is never suppressed.
    long_ago = -100 * 1000
    self._last_event_msec = long_ago
    self._match_start_msec = long_ago
    self._last_frame = None
    self._diff_pixels = []
def _state_default(self, context):
    """Idle state: look for the win/lose scoreboard mask.

    On a hit, record the match start time and switch to the tracking
    state.  Returns whether the mask matched.
    """
    engine = context['engine']
    # Skip detection shortly after a previous match, while the in-game
    # timer scene is visible, or when no frame is available.
    if (self.matched_in(context, 30 * 1000)
            or self.is_another_scene_matched(context, 'GameTimerIcon')
            or engine['frame'] is None):
        return False

    hit = ImageUtils.match_with_mask(
        engine['frame'], self.winlose_gray, 0.997, 0.22)
    if hit:
        self._match_start_msec = engine['msec']
        self._switch_state(self._state_tracking)
    return hit
def _state_tracking(self, context):
    """Tracking state: wait until the scoreboard text has settled, then analyze.

    Match 1: rough match of the known win/lose mask.
    Match 2: once match 1 holds, wait for the white text to stabilize:
      - condition 1: zero white-pixel diff against the previous frame, or
      - condition 2: the "x" marks in front of the K/D counts are
        vertically aligned (rows no longer sliding).

    Fix: the history fallback compared ``np.amin(self._diff_pixels)``
    against the boolean ``matched_diffX`` (always False on this path,
    so the check could never succeed).  Per the original comment the
    intent is to accept when the minimum of the recent spread history
    equals the latest spread value ``diff_x``.
    """
    frame = context['engine']['frame']
    if frame is None:
        return False

    matched = ImageUtils.match_with_mask(
        context['engine']['frame'], self.winlose_gray, 0.997, 0.22)

    diff_pixels = None
    img_current_h_i16 = None
    matched_diff0 = False
    matched_diffX = False

    if matched:
        # Saturation channel of the bottom-right row region (last player).
        img_current_bgr = frame[626:626 + 45, 640:1280]
        img_current_hsv = cv2.cvtColor(img_current_bgr, cv2.COLOR_BGR2HSV)
        img_current_h_i16 = np.array(img_current_hsv[:, :, 1], np.int16)

    if matched and (self._last_frame is not None):
        # Count changed white-text pixels relative to the previous frame.
        img_diff = abs(img_current_h_i16 - self._last_frame)
        img_diff_u8 = np.array(img_diff, np.uint8)
        img_white = self._white_filter(img_current_bgr)
        img_diff_u8[img_white < 128] = 0
        img_diff_u8[img_diff_u8 < 16] = 0
        img_diff_u8[img_diff_u8 > 1] = 255
        diff_pixels = int(np.sum(img_diff_u8) / 255)

    if img_current_h_i16 is not None:
        self._last_frame = img_current_h_i16

    if diff_pixels is not None:
        matched_diff0 = (diff_pixels == 0)

    # When the white mask did not settle exactly, fall back to the "x"
    # mark alignment check.
    if (diff_pixels is not None) and (not matched_diff0):
        # FIXME: adjust_image is very expensive.
        img = self.adjust_image(context)
        img_entries = self.extract_entries(context, img)
        diff_x = self.is_entries_still_sliding(img_entries)
        matched_diffX = (diff_x == 0)

        if not matched_diffX:
            # Keep the last few spread values; accept as "settled" when
            # the minimum of the history equals the latest value.
            self._diff_pixels.append(diff_x)
            if len(self._diff_pixels) > 4:
                self._diff_pixels.pop(0)
            matched_diffX = \
                (np.amin(self._diff_pixels) == diff_x)

    # escaped:   >1000 ms without a match — we definitely left the scene.
    # matched2:  white text is stable (condition 1 or condition 2).
    # triggered: an event was already fired within the last 30 s.
    escaped = not self.matched_in(context, 1000)
    matched2 = matched_diff0 or matched_diffX
    triggered = self.matched_in(
        context, 30 * 1000, attr='_last_event_msec')

    if matched2 and (not triggered):
        self.analyze(context)
        self._last_event_msec = context['engine']['msec']
        triggered = True

    if matched:
        return True

    if escaped:
        if (not triggered) and (len(self._diff_pixels) > 0):
            IkaUtils.dprint(''.join((
                '%s: 戦績画面を検出しましたが静止画を認識できませんでした。考えられる原因\n' % self,
                ' ・HDMIキャプチャデバイスからのノイズ入力が多い\n',
                ' ・ブロックノイズが多いビデオファイルを処理している\n',
                ' ・正しいフォーマットで画像が入力されていない\n',
                ' min(diff_pixels): %s' % min(self._diff_pixels),
            )))

        self._match_start_msec = - 100 * 1000
        self._last_frame = None
        self._diff_pixels = []
        self._switch_state(self._state_default)

    return False
def dump(self, context):
    """Print a human-readable summary of the analyzed scoreboard.

    Fix: removed six locals (udemae/rank/kills/deaths/weapon/score)
    that were computed per player but never used — the print statement
    reads everything via ``e.get(...)``.
    """
    matched = True
    analyzed = True
    won = IkaUtils.getWinLoseText(
        context['game']['won'], win_text="win", lose_text="lose", unknown_text="unknown")
    fes = context['game'].get('is_fes', False)
    print("matched %s analyzed %s result %s fest %s" %
          (matched, analyzed, won, fes))
    print('--------')
    for e in context['game']['players']:
        me = '*' if e['me'] else ''
        if 'prefix' in e:
            # Strip the particle 'の' from the fest-title prefix.
            prefix_ = re.sub('の', '', e['prefix'])
            gender = e['gender']
        else:
            prefix_ = ''
            gender = ''

        print("team %s rank_in_team %s rank %s udemae %s %s/%s weapon %s score %s %s%s %s" % (
            e.get('team', None),
            e.get('rank_in_team', None),
            e.get('rank', None),
            e.get('udemae_pre', None),
            e.get('kills', None),
            e.get('deaths', None),
            e.get('weapon', None),
            e.get('score', None),
            prefix_, gender,
            me,))
    print('--------')
def _analyze(self, context):
frame = context['engine']['frame']
return True
def _init_scene(self, debug=False):
    """Load mask images and recognizer models used by this scene.

    Builds the WIN/LOSE header matchers and the per-player "x" mark
    matcher, loads a language-specific ``result_detail.png`` mask, and
    instantiates the number/udemae/fest recognizers plus the local API
    client.
    """
    # "WIN" header of the upper team.
    self.mask_win = IkaMatcher(
        651, 47, 99, 33,
        img_file='result_detail.png',
        threshold=0.60,
        orig_threshold=0.20,
        bg_method=matcher.MM_NOT_WHITE(),
        fg_method=matcher.MM_WHITE(),
        label='result_detail:WIN',
        debug=debug,
    )
    # "LOSE" header of the lower team.
    self.mask_lose = IkaMatcher(
        651, 378, 99, 33,
        img_file='result_detail.png',
        threshold=0.60,
        orig_threshold=0.40,
        bg_method=matcher.MM_NOT_WHITE(),
        fg_method=matcher.MM_WHITE(),
        label='result_detail:LOSE',
        debug=debug,
    )
    # "x" separator in front of the kill/death counts.
    self.mask_x = IkaMatcher(
        1173, 101, 14, 40,
        img_file='result_detail.png',
        threshold=0.60,
        orig_threshold=0.40,
        bg_method=matcher.MM_NOT_WHITE(),
        fg_method=matcher.MM_WHITE(),
        label='result_detail:X',
        debug=False,
    )

    # Pick the first game language whose mask file exists; otherwise
    # fall back to the language-neutral default mask.
    languages = Localization.get_game_languages()
    for lang in languages:
        mask_file = IkaUtils.get_path('masks', lang, 'result_detail.png')
        if os.path.exists(mask_file):
            break

    if not os.path.exists(mask_file):
        mask_file = IkaUtils.get_path('masks', 'result_detail.png')

    winlose = imread(mask_file)
    self.winlose_gray = cv2.cvtColor(winlose, cv2.COLOR_BGR2GRAY)

    self._white_filter = matcher.MM_WHITE()
    self.udemae_recoginizer = UdemaeRecoginizer()
    self.number_recoginizer = NumberRecoginizer()

    # for SplatFest (ja)
    self.fest_gender_recoginizer = character_recoginizer.FesGenderRecoginizer()
    self.fest_level_recoginizer = character_recoginizer.FesLevelRecoginizer()

    self.load_akaze_model()
    self._client_local = APIClient(local_mode=True)
    # self._client_remote = APIClient(local_mode=False, base_uri='http://localhost:8000')
    self._client_remote = None
# Allow running this scene module standalone for debugging.
if __name__ == "__main__":
    ResultDetail.main_func()
|
test.py | import pytest
import random
import threading
import time
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
# ClickHouse error codes treated as transient while we deliberately
# restart nodes during a move.
# NOTE(review): presumably read/network-style failures — confirm the
# codes 23/32/210 against ClickHouse's ErrorCodes list.
transient_ch_errors = [23, 32, 210]

cluster = ClickHouseCluster(__file__)

# Two shards (s0, s1) with two replicas each; all instances share the
# same remote_servers/merge_tree configs and run with ZooKeeper.
# stay_alive=True lets tests stop/start instances mid-test.
s0r0 = cluster.add_instance(
    "s0r0",
    main_configs=["configs/remote_servers.xml", "configs/merge_tree.xml"],
    stay_alive=True,
    with_zookeeper=True,
)
s0r1 = cluster.add_instance(
    "s0r1",
    main_configs=["configs/remote_servers.xml", "configs/merge_tree.xml"],
    stay_alive=True,
    with_zookeeper=True,
)
s1r0 = cluster.add_instance(
    "s1r0",
    main_configs=["configs/remote_servers.xml", "configs/merge_tree.xml"],
    stay_alive=True,
    with_zookeeper=True,
)
s1r1 = cluster.add_instance(
    "s1r1",
    main_configs=["configs/remote_servers.xml", "configs/merge_tree.xml"],
    stay_alive=True,
    with_zookeeper=True,
)
@pytest.fixture(scope="module")
def started_cluster():
    """Start the 2x2 test cluster once per module; always shut it down."""
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()
def test_move(started_cluster):
    """Basic MOVE PART between shards: move a part away, then back.

    Verifies row counts on every replica of both shards after each
    move completes.
    """
    # Create the replicated table on every replica of both shards.
    for shard_ix, rs in enumerate([[s0r0, s0r1], [s1r0, s1r1]]):
        for replica_ix, r in enumerate(rs):
            r.query(
                """
            DROP TABLE IF EXISTS test_move;
            CREATE TABLE test_move(v UInt64)
            ENGINE ReplicatedMergeTree('/clickhouse/shard_{}/tables/test_move', '{}')
            ORDER BY tuple()
            """.format(
                    shard_ix, r.name
                )
            )

    # Stop merges so the two inserts stay separate parts and the part
    # named 'all_0_0_0' still exists when we move it.
    s0r0.query("SYSTEM STOP MERGES test_move")
    s0r1.query("SYSTEM STOP MERGES test_move")

    s0r0.query("INSERT INTO test_move VALUES (1)")
    s0r0.query("INSERT INTO test_move VALUES (2)")

    assert "2" == s0r0.query("SELECT count() FROM test_move").strip()
    assert "0" == s1r0.query("SELECT count() FROM test_move").strip()

    s0r0.query(
        "ALTER TABLE test_move MOVE PART 'all_0_0_0' TO SHARD '/clickhouse/shard_1/tables/test_move'"
    )

    print(s0r0.query("SELECT * FROM system.part_moves_between_shards"))

    s0r0.query("SYSTEM START MERGES test_move")
    s0r0.query("OPTIMIZE TABLE test_move FINAL")

    wait_for_state("DONE", s0r0, "test_move")

    # After the move each shard holds exactly one row.
    for n in [s0r0, s0r1]:
        assert "1" == n.query("SELECT count() FROM test_move").strip()

    for n in [s1r0, s1r1]:
        assert "1" == n.query("SELECT count() FROM test_move").strip()

    # Move part back
    s1r0.query(
        "ALTER TABLE test_move MOVE PART 'all_0_0_0' TO SHARD '/clickhouse/shard_0/tables/test_move'"
    )

    wait_for_state("DONE", s1r0, "test_move")

    # Both rows are back on shard 0.
    for n in [s0r0, s0r1]:
        assert "2" == n.query("SELECT count() FROM test_move").strip()

    for n in [s1r0, s1r1]:
        assert "0" == n.query("SELECT count() FROM test_move").strip()
def test_deduplication_while_move(started_cluster):
    """Query-level deduplication must hold while a part move is in flight.

    A background invariant continuously queries the Distributed table
    and asserts each row is seen exactly once for the whole duration of
    the move.
    """
    for shard_ix, rs in enumerate([[s0r0, s0r1], [s1r0, s1r1]]):
        for replica_ix, r in enumerate(rs):
            r.query(
                """
            DROP TABLE IF EXISTS test_deduplication;
            CREATE TABLE test_deduplication(v UInt64)
            ENGINE ReplicatedMergeTree('/clickhouse/shard_{}/tables/test_deduplication', '{}')
            ORDER BY tuple()
            """.format(
                    shard_ix, r.name
                )
            )
            r.query(
                """
            DROP TABLE IF EXISTS test_deduplication_d;
            CREATE TABLE test_deduplication_d AS test_deduplication
            ENGINE Distributed('test_cluster', '', test_deduplication)
            """
            )

    # Keep parts unmerged so 'all_0_0_0' exists for the move.
    s0r0.query("SYSTEM STOP MERGES test_deduplication")
    s0r1.query("SYSTEM STOP MERGES test_deduplication")

    s0r0.query("INSERT INTO test_deduplication VALUES (1)")
    s0r0.query("INSERT INTO test_deduplication VALUES (2)")
    s0r1.query("SYSTEM SYNC REPLICA test_deduplication", timeout=20)

    assert "2" == s0r0.query("SELECT count() FROM test_deduplication").strip()
    assert "0" == s1r0.query("SELECT count() FROM test_deduplication").strip()

    s0r0.query(
        "ALTER TABLE test_deduplication MOVE PART 'all_0_0_0' TO SHARD '/clickhouse/shard_1/tables/test_deduplication'"
    )
    s0r0.query("SYSTEM START MERGES test_deduplication")

    expected = """
1
2
"""

    def deduplication_invariant_test():
        # A random node must always see each row exactly once through
        # the Distributed table, regardless of move progress.
        n = random.choice(list(started_cluster.instances.values()))
        assert TSV(
            n.query(
                "SELECT * FROM test_deduplication_d ORDER BY v",
                settings={"allow_experimental_query_deduplication": 1},
            )
        ) == TSV(expected)

        # https://github.com/ClickHouse/ClickHouse/issues/34089
        assert TSV(
            n.query(
                "SELECT count() FROM test_deduplication_d",
                settings={"allow_experimental_query_deduplication": 1},
            )
        ) == TSV("2")

        assert TSV(
            n.query(
                "SELECT count() FROM test_deduplication_d",
                settings={
                    "allow_experimental_query_deduplication": 1,
                    "allow_experimental_projection_optimization": 1,
                },
            )
        ) == TSV("2")

    deduplication_invariant = ConcurrentInvariant(deduplication_invariant_test)
    deduplication_invariant.start()

    wait_for_state("DONE", s0r0, "test_deduplication")

    deduplication_invariant.stop_and_assert_no_exception()
def test_part_move_step_by_step(started_cluster):
    """Walk a part move through every state-machine step.

    At each step one replica is stopped so the move blocks on the
    expected state; a background invariant meanwhile checks that
    queries through the Distributed table never see duplicate or
    missing rows.
    """
    for shard_ix, rs in enumerate([[s0r0, s0r1], [s1r0, s1r1]]):
        for replica_ix, r in enumerate(rs):
            r.query(
                """
            DROP TABLE IF EXISTS test_part_move_step_by_step;
            CREATE TABLE test_part_move_step_by_step(v UInt64)
            ENGINE ReplicatedMergeTree('/clickhouse/shard_{}/tables/test_part_move_step_by_step', '{}')
            ORDER BY tuple()
            """.format(
                    shard_ix, r.name
                )
            )
            r.query(
                """
            DROP TABLE IF EXISTS test_part_move_step_by_step_d;
            CREATE TABLE test_part_move_step_by_step_d AS test_part_move_step_by_step
            ENGINE Distributed('test_cluster', currentDatabase(), test_part_move_step_by_step)
            """
            )

    # Keep parts unmerged so 'all_0_0_0' exists for the move.
    s0r0.query("SYSTEM STOP MERGES test_part_move_step_by_step")
    s0r1.query("SYSTEM STOP MERGES test_part_move_step_by_step")

    s0r0.query("INSERT INTO test_part_move_step_by_step VALUES (1)")
    s0r0.query("INSERT INTO test_part_move_step_by_step VALUES (2)")
    s0r1.query("SYSTEM SYNC REPLICA test_part_move_step_by_step", timeout=20)

    assert "2" == s0r0.query("SELECT count() FROM test_part_move_step_by_step").strip()
    assert "0" == s1r0.query("SELECT count() FROM test_part_move_step_by_step").strip()

    expected = """
1
2
"""

    def deduplication_invariant_test():
        n = random.choice(list(started_cluster.instances.values()))
        try:
            assert TSV(
                n.query(
                    "SELECT * FROM test_part_move_step_by_step_d ORDER BY v",
                    settings={"allow_experimental_query_deduplication": 1},
                )
            ) == TSV(expected)
        except QueryRuntimeException as e:
            # ignore transient errors that are caused by us restarting nodes
            if e.returncode not in transient_ch_errors:
                raise e

    deduplication_invariant = ConcurrentInvariant(deduplication_invariant_test)
    deduplication_invariant.start()

    # Stop a source replica to prevent SYNC_SOURCE succeeding.
    s0r1.stop_clickhouse()

    s0r0.query(
        "ALTER TABLE test_part_move_step_by_step MOVE PART 'all_0_0_0' TO SHARD '/clickhouse/shard_1/tables/test_part_move_step_by_step'"
    )

    # Should hang on SYNC_SOURCE until all source replicas acknowledge new pinned UUIDs.
    wait_for_state(
        "SYNC_SOURCE",
        s0r0,
        "test_part_move_step_by_step",
        "Some replicas haven\\'t processed event",
    )
    deduplication_invariant.assert_no_exception()

    # Start all replicas in source shard but stop a replica in destination shard
    # to prevent SYNC_DESTINATION succeeding.
    s1r1.stop_clickhouse()
    s0r1.start_clickhouse()

    # After SYNC_SOURCE step no merges will be assigned.
    s0r0.query(
        "SYSTEM START MERGES test_part_move_step_by_step; OPTIMIZE TABLE test_part_move_step_by_step;"
    )
    s0r1.query(
        "SYSTEM START MERGES test_part_move_step_by_step; OPTIMIZE TABLE test_part_move_step_by_step;"
    )

    wait_for_state(
        "SYNC_DESTINATION",
        s0r0,
        "test_part_move_step_by_step",
        "Some replicas haven\\'t processed event",
    )
    deduplication_invariant.assert_no_exception()

    # Start previously stopped replica in destination shard to let SYNC_DESTINATION
    # succeed.
    # Stop the other replica in destination shard to prevent DESTINATION_FETCH succeed.
    s1r0.stop_clickhouse()
    s1r1.start_clickhouse()
    wait_for_state(
        "DESTINATION_FETCH",
        s0r0,
        "test_part_move_step_by_step",
        "Some replicas haven\\'t processed event",
    )
    deduplication_invariant.assert_no_exception()

    # Start previously stopped replica in destination shard to let DESTINATION_FETCH
    # succeed.
    # Stop the other replica in destination shard to prevent DESTINATION_ATTACH succeed.
    s1r1.stop_clickhouse()
    s1r0.start_clickhouse()
    wait_for_state(
        "DESTINATION_ATTACH",
        s0r0,
        "test_part_move_step_by_step",
        "Some replicas haven\\'t processed event",
    )
    deduplication_invariant.assert_no_exception()

    # Start all replicas in destination shard to let DESTINATION_ATTACH succeed.
    # Stop a source replica to prevent SOURCE_DROP succeeding.
    s0r0.stop_clickhouse()
    s1r1.start_clickhouse()
    wait_for_state(
        "SOURCE_DROP",
        s0r1,
        "test_part_move_step_by_step",
        "Some replicas haven\\'t processed event",
    )
    deduplication_invariant.assert_no_exception()

    s0r0.start_clickhouse()
    wait_for_state("DONE", s0r1, "test_part_move_step_by_step")
    deduplication_invariant.assert_no_exception()

    # No hung tasks in replication queue. Would timeout otherwise.
    for instance in started_cluster.instances.values():
        instance.query("SYSTEM SYNC REPLICA test_part_move_step_by_step")

    assert "1" == s0r0.query("SELECT count() FROM test_part_move_step_by_step").strip()
    assert "1" == s1r0.query("SELECT count() FROM test_part_move_step_by_step").strip()

    deduplication_invariant.stop_and_assert_no_exception()
def test_part_move_step_by_step_kill(started_cluster):
    """Same step-by-step walk as test_part_move_step_by_step, but the
    move is killed at DESTINATION_ATTACH and must roll back cleanly.

    Fix (consistency with test_part_move_step_by_step): added the
    missing ``deduplication_invariant.assert_no_exception()`` after
    the DESTINATION_FETCH wait, so an invariant failure at that step
    is reported immediately instead of at the next checkpoint.
    """
    for shard_ix, rs in enumerate([[s0r0, s0r1], [s1r0, s1r1]]):
        for replica_ix, r in enumerate(rs):
            r.query(
                """
            DROP TABLE IF EXISTS test_part_move_step_by_step_kill;
            CREATE TABLE test_part_move_step_by_step_kill(v UInt64)
            ENGINE ReplicatedMergeTree('/clickhouse/shard_{}/tables/test_part_move_step_by_step_kill', '{}')
            ORDER BY tuple()
            """.format(
                    shard_ix, r.name
                )
            )
            r.query(
                """
            DROP TABLE IF EXISTS test_part_move_step_by_step_kill_d;
            CREATE TABLE test_part_move_step_by_step_kill_d AS test_part_move_step_by_step_kill
            ENGINE Distributed('test_cluster', currentDatabase(), test_part_move_step_by_step_kill)
            """
            )

    # Keep parts unmerged so 'all_0_0_0' exists for the move.
    s0r0.query("SYSTEM STOP MERGES test_part_move_step_by_step_kill")
    s0r1.query("SYSTEM STOP MERGES test_part_move_step_by_step_kill")

    s0r0.query("INSERT INTO test_part_move_step_by_step_kill VALUES (1)")
    s0r0.query("INSERT INTO test_part_move_step_by_step_kill VALUES (2)")
    s0r1.query("SYSTEM SYNC REPLICA test_part_move_step_by_step_kill", timeout=20)

    assert (
        "2"
        == s0r0.query("SELECT count() FROM test_part_move_step_by_step_kill").strip()
    )
    assert (
        "0"
        == s1r0.query("SELECT count() FROM test_part_move_step_by_step_kill").strip()
    )

    expected = """
1
2
"""

    def deduplication_invariant_test():
        n = random.choice(list(started_cluster.instances.values()))
        try:
            assert TSV(
                n.query(
                    "SELECT * FROM test_part_move_step_by_step_kill_d ORDER BY v",
                    settings={"allow_experimental_query_deduplication": 1},
                )
            ) == TSV(expected)
        except QueryRuntimeException as e:
            # ignore transient errors that are caused by us restarting nodes
            if e.returncode not in transient_ch_errors:
                raise e

    deduplication_invariant = ConcurrentInvariant(deduplication_invariant_test)
    deduplication_invariant.start()

    # Stop a source replica to prevent SYNC_SOURCE succeeding.
    s0r1.stop_clickhouse()

    s0r0.query(
        "ALTER TABLE test_part_move_step_by_step_kill MOVE PART 'all_0_0_0' TO SHARD '/clickhouse/shard_1/tables/test_part_move_step_by_step_kill'"
    )

    # Should hang on SYNC_SOURCE until all source replicas acknowledge new pinned UUIDs.
    wait_for_state(
        "SYNC_SOURCE",
        s0r0,
        "test_part_move_step_by_step_kill",
        "Some replicas haven\\'t processed event",
    )
    deduplication_invariant.assert_no_exception()

    # Start all replicas in source shard but stop a replica in destination shard
    # to prevent SYNC_DESTINATION succeeding.
    s1r1.stop_clickhouse()
    s0r1.start_clickhouse()

    # After SYNC_SOURCE step no merges will be assigned.
    s0r0.query(
        "SYSTEM START MERGES test_part_move_step_by_step_kill; OPTIMIZE TABLE test_part_move_step_by_step_kill;"
    )
    s0r1.query(
        "SYSTEM START MERGES test_part_move_step_by_step_kill; OPTIMIZE TABLE test_part_move_step_by_step_kill;"
    )

    wait_for_state(
        "SYNC_DESTINATION",
        s0r0,
        "test_part_move_step_by_step_kill",
        "Some replicas haven\\'t processed event",
    )
    deduplication_invariant.assert_no_exception()

    # Start previously stopped replica in destination shard to let SYNC_DESTINATION
    # succeed.
    # Stop the other replica in destination shard to prevent DESTINATION_FETCH succeed.
    s1r0.stop_clickhouse()
    s1r1.start_clickhouse()
    wait_for_state(
        "DESTINATION_FETCH",
        s0r0,
        "test_part_move_step_by_step_kill",
        "Some replicas haven\\'t processed event",
    )
    deduplication_invariant.assert_no_exception()

    # Start previously stopped replica in destination shard to let DESTINATION_FETCH
    # succeed.
    # Stop the other replica in destination shard to prevent DESTINATION_ATTACH succeed.
    s1r1.stop_clickhouse()
    s1r0.start_clickhouse()
    wait_for_state(
        "DESTINATION_ATTACH",
        s0r0,
        "test_part_move_step_by_step_kill",
        "Some replicas haven\\'t processed event",
    )
    deduplication_invariant.assert_no_exception()

    # Rollback here.
    s0r0.query(
        """
        KILL PART_MOVE_TO_SHARD
        WHERE task_uuid = (SELECT task_uuid FROM system.part_moves_between_shards WHERE table = 'test_part_move_step_by_step_kill')
    """
    )

    wait_for_state(
        "DESTINATION_ATTACH",
        s0r0,
        "test_part_move_step_by_step_kill",
        assert_exception_msg="Some replicas haven\\'t processed event",
        assert_rollback=True,
    )

    s1r1.start_clickhouse()
    wait_for_state(
        "CANCELLED", s0r0, "test_part_move_step_by_step_kill", assert_rollback=True
    )
    deduplication_invariant.assert_no_exception()

    # No hung tasks in replication queue. Would timeout otherwise.
    for instance in started_cluster.instances.values():
        instance.query("SYSTEM SYNC REPLICA test_part_move_step_by_step_kill")

    # After rollback all rows are back on the source shard.
    assert (
        "2"
        == s0r0.query("SELECT count() FROM test_part_move_step_by_step_kill").strip()
    )
    assert (
        "0"
        == s1r0.query("SELECT count() FROM test_part_move_step_by_step_kill").strip()
    )

    deduplication_invariant.stop_and_assert_no_exception()
def test_move_not_permitted(started_cluster):
    """MOVE PART must be rejected for incompatible source/destination.

    Covers: moving to the same shard, differing column structure, and
    differing partition key expressions.
    """
    # Verify that invariants for part compatibility are checked.

    # Tests are executed in order. Make sure cluster is up if previous test
    # failed.
    s0r0.start_clickhouse()
    s1r0.start_clickhouse()

    for ix, n in enumerate([s0r0, s1r0]):
        # Column name differs between the shards (v_0 vs v_1).
        n.query(
            """
        DROP TABLE IF EXISTS not_permitted_columns;

        CREATE TABLE not_permitted_columns(v_{ix} UInt64)
        ENGINE ReplicatedMergeTree('/clickhouse/shard_{ix}/tables/not_permitted_columns', 'r')
        ORDER BY tuple();
        """.format(
                ix=ix
            )
        )

        # Partition key differs between the shards (date vs v).
        partition = "date"
        if ix > 0:
            partition = "v"

        n.query(
            """
        DROP TABLE IF EXISTS not_permitted_partition;
        CREATE TABLE not_permitted_partition(date Date, v UInt64)
        ENGINE ReplicatedMergeTree('/clickhouse/shard_{ix}/tables/not_permitted_partition', 'r')
        PARTITION BY ({partition})
        ORDER BY tuple();
        """.format(
                ix=ix, partition=partition
            )
        )

    s0r0.query("INSERT INTO not_permitted_columns VALUES (1)")
    s0r0.query("INSERT INTO not_permitted_partition VALUES ('2021-09-03', 1)")

    with pytest.raises(
        QueryRuntimeException,
        match="DB::Exception: Source and destination are the same",
    ):
        s0r0.query(
            "ALTER TABLE not_permitted_columns MOVE PART 'all_0_0_0' TO SHARD '/clickhouse/shard_0/tables/not_permitted_columns'"
        )

    with pytest.raises(
        QueryRuntimeException,
        match="DB::Exception: Table columns structure in ZooKeeper is different from local table structure.",
    ):
        s0r0.query(
            "ALTER TABLE not_permitted_columns MOVE PART 'all_0_0_0' TO SHARD '/clickhouse/shard_1/tables/not_permitted_columns'"
        )

    with pytest.raises(
        QueryRuntimeException,
        match="DB::Exception: Existing table metadata in ZooKeeper differs in partition key expression.",
    ):
        s0r0.query(
            "ALTER TABLE not_permitted_partition MOVE PART '20210903_0_0_0' TO SHARD '/clickhouse/shard_1/tables/not_permitted_partition'"
        )
def wait_for_state(
    desired_state,
    instance,
    test_table,
    assert_exception_msg=None,
    assert_rollback=False,
):
    """Poll system.part_moves_between_shards until the single task for
    *test_table* reaches *desired_state*.

    Optionally asserts that the task's last_exception contains
    *assert_exception_msg* (after letting it retry at least 3 times)
    and that its rollback bit is set.  Raises if a terminal state
    (DONE/CANCELLED) other than the desired one is reached.

    Fix: the retry-wait path used a bare ``continue`` that skipped the
    ``time.sleep`` at the bottom of the loop, busy-spinning queries
    against the server; it now sleeps before retrying.
    """
    last_debug_print_time = time.time()

    print("Waiting to reach state: {}".format(desired_state))
    if assert_exception_msg:
        print("  with exception contents: {}".format(assert_exception_msg))
    if assert_rollback:
        print("  and rollback: {}".format(assert_rollback))

    while True:
        tasks = TSV.toMat(
            instance.query(
                "SELECT state, num_tries, last_exception, rollback FROM system.part_moves_between_shards WHERE table = '{}'".format(
                    test_table
                )
            )
        )
        assert len(tasks) == 1, "only one task expected in this test"

        # Periodic progress output for long waits.
        if time.time() - last_debug_print_time > 30:
            last_debug_print_time = time.time()
            print("Current state: ", tasks)

        [state, num_tries, last_exception, rollback] = tasks[0]

        if state == desired_state:
            if assert_exception_msg and int(num_tries) < 3:
                # Let the task be retried a few times when expecting an exception
                # to make sure the exception is persistent and the code doesn't
                # accidentally continue to run when we expect it not to.
                time.sleep(0.1)
                continue

            if assert_exception_msg:
                assert assert_exception_msg in last_exception

            if assert_rollback:
                assert int(rollback) == 1, "rollback bit isn't set"

            break
        elif state in ["DONE", "CANCELLED"]:
            raise Exception(
                "Reached terminal state {}, but was waiting for {}".format(
                    state, desired_state
                )
            )

        time.sleep(0.1)
class ConcurrentInvariant:
    """Runs *invariant_test* repeatedly on a background thread.

    The first exception raised by the invariant stops the loop and is
    re-raised from assert_no_exception() / stop_and_assert_no_exception().
    """

    def __init__(self, invariant_test, loop_sleep=0.1):
        self.invariant_test = invariant_test
        self.loop_sleep = loop_sleep
        self.started = False
        self.exiting = False
        self.exception = None
        self.thread = threading.Thread(target=self._loop)

    def start(self):
        """Launch the checker thread; may only be called once."""
        if self.started:
            raise Exception("invariant thread already started")
        self.started = True
        self.thread.start()

    def stop_and_assert_no_exception(self):
        """Signal the loop to exit, wait for it, then re-raise any failure."""
        self._assert_started()
        self.exiting = True
        self.thread.join()
        self._reraise_if_failed()

    def assert_no_exception(self):
        """Re-raise the invariant's failure, if any, without stopping it."""
        self._assert_started()
        self._reraise_if_failed()

    def _reraise_if_failed(self):
        # Propagate the captured background failure to the caller.
        if self.exception:
            raise self.exception

    def _loop(self):
        try:
            while not self.exiting:
                self.invariant_test()
                time.sleep(self.loop_sleep)
        except Exception as e:
            # Remember the failure and stop looping; the main thread
            # re-raises it at the next checkpoint.
            self.exiting = True
            self.exception = e

    def _assert_started(self):
        if not self.started:
            raise Exception("invariant thread not started, forgot to call start?")
|
test_asyncore.py | import asyncore
import unittest
import select
import os
import socket
import sys
import time
import warnings
import errno
import struct
from test import support
from test.support import TESTFN, run_unittest, unlink
from io import BytesIO
from io import StringIO
# threading is optional on some minimal builds; dependent tests check
# for None before using it.
try:
    import threading
except ImportError:
    threading = None

HOST = support.HOST
# Unix domain sockets are only available where socket defines AF_UNIX.
HAS_UNIX_SOCKETS = hasattr(socket, 'AF_UNIX')
class dummysocket:
    """Minimal socket stand-in that records whether close() was called."""

    def __init__(self):
        self.closed = False

    def close(self):
        # Only the fact that close() happened matters to the tests.
        self.closed = True

    def fileno(self):
        # Arbitrary fixed descriptor; tests just need a stable int.
        return 42
class dummychannel:
    """Channel stand-in that owns a dummysocket and delegates close()."""

    def __init__(self):
        self.socket = dummysocket()

    def close(self):
        # Forward to the fake socket so tests can observe the close.
        self.socket.close()
class exitingdummy:
    """Handler whose event callbacks always raise asyncore.ExitNow.

    Used to verify that ExitNow propagates out of asyncore's dispatch
    helpers instead of being swallowed.
    """

    def __init__(self):
        pass

    def handle_read_event(self):
        raise asyncore.ExitNow()

    # Every event entry point shares the same raising implementation.
    handle_write_event = handle_read_event
    handle_close = handle_read_event
    handle_expt_event = handle_read_event
class crashingdummy:
    """Handler whose event callbacks always fail.

    Records via *error_handled* whether asyncore routed the failure to
    handle_error().
    """

    def __init__(self):
        self.error_handled = False

    def handle_read_event(self):
        raise Exception()

    # Every event entry point shares the same failing implementation.
    handle_write_event = handle_read_event
    handle_close = handle_read_event
    handle_expt_event = handle_read_event

    def handle_error(self):
        self.error_handled = True
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):
    """Accept one connection on *serv* and copy its payload into *buf*.

    Intended to run in a helper thread: reads until a newline arrives,
    ~200 reads happen, or ~3 s elapse — stripping newline bytes — then
    closes the server socket and signals *evt*.
    """
    try:
        serv.listen(5)
        conn, addr = serv.accept()
    except socket.timeout:
        # No client connected in time; fall through to cleanup.
        pass
    else:
        n = 200
        start = time.time()
        while n > 0 and time.time() - start < 3.0:
            r, w, e = select.select([conn], [], [], 0.1)
            if r:
                n -= 1
                data = conn.recv(10)
                # keep everything except for the newline terminator
                buf.write(data.replace(b'\n', b''))
                if b'\n' in data:
                    break
            time.sleep(0.01)
        conn.close()
    finally:
        serv.close()
        evt.set()
def bind_af_aware(sock, addr):
    """Bind *sock* to *addr*, removing a stale path first for AF_UNIX."""
    is_unix_socket = HAS_UNIX_SOCKETS and sock.family == socket.AF_UNIX
    if is_unix_socket:
        # A leftover filesystem entry would make bind() fail.
        unlink(addr)
    sock.bind(addr)
class HelperFunctionTests(unittest.TestCase):
    """Tests for asyncore's module-level helper functions."""

    def test_readwriteexc(self):
        # Check exception handling behavior of read, write and _exception

        # check that ExitNow exceptions in the object handler method
        # bubbles all the way up through asyncore read/write/_exception calls
        tr1 = exitingdummy()
        self.assertRaises(asyncore.ExitNow, asyncore.read, tr1)
        self.assertRaises(asyncore.ExitNow, asyncore.write, tr1)
        self.assertRaises(asyncore.ExitNow, asyncore._exception, tr1)

        # check that an exception other than ExitNow in the object handler
        # method causes the handle_error method to get called
        tr2 = crashingdummy()
        asyncore.read(tr2)
        self.assertEqual(tr2.error_handled, True)

        tr2 = crashingdummy()
        asyncore.write(tr2)
        self.assertEqual(tr2.error_handled, True)

        tr2 = crashingdummy()
        asyncore._exception(tr2)
        self.assertEqual(tr2.error_handled, True)

    # asyncore.readwrite uses constants in the select module that
    # are not present in Windows systems (see this thread:
    # http://mail.python.org/pipermail/python-list/2001-October/109973.html)
    # These constants should be present as long as poll is available
    @unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
    def test_readwrite(self):
        # Check that correct methods are called by readwrite()

        attributes = ('read', 'expt', 'write', 'closed', 'error_handled')

        expected = (
            (select.POLLIN, 'read'),
            (select.POLLPRI, 'expt'),
            (select.POLLOUT, 'write'),
            (select.POLLERR, 'closed'),
            (select.POLLHUP, 'closed'),
            (select.POLLNVAL, 'closed'),
        )

        class testobj:
            # Records which handler fired via one boolean flag each.
            def __init__(self):
                self.read = False
                self.write = False
                self.closed = False
                self.expt = False
                self.error_handled = False

            def handle_read_event(self):
                self.read = True

            def handle_write_event(self):
                self.write = True

            def handle_close(self):
                self.closed = True

            def handle_expt_event(self):
                self.expt = True

            def handle_error(self):
                self.error_handled = True

        for flag, expectedattr in expected:
            tobj = testobj()
            self.assertEqual(getattr(tobj, expectedattr), False)
            asyncore.readwrite(tobj, flag)

            # Only the attribute modified by the routine we expect to be
            # called should be True.
            for attr in attributes:
                self.assertEqual(getattr(tobj, attr), attr == expectedattr)

            # check that ExitNow exceptions in the object handler method
            # bubbles all the way up through asyncore readwrite call
            tr1 = exitingdummy()
            self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag)

            # check that an exception other than ExitNow in the object handler
            # method causes the handle_error method to get called
            tr2 = crashingdummy()
            self.assertEqual(tr2.error_handled, False)
            asyncore.readwrite(tr2, flag)
            self.assertEqual(tr2.error_handled, True)

    def test_closeall(self):
        self.closeall_check(False)

    def test_closeall_default(self):
        self.closeall_check(True)

    def closeall_check(self, usedefault):
        # Check that close_all() closes everything in a given map
        l = []
        testmap = {}
        for i in range(10):
            c = dummychannel()
            l.append(c)
            self.assertEqual(c.socket.closed, False)
            testmap[i] = c

        if usedefault:
            # Temporarily install our map as asyncore's global socket map.
            socketmap = asyncore.socket_map
            try:
                asyncore.socket_map = testmap
                asyncore.close_all()
            finally:
                testmap, asyncore.socket_map = asyncore.socket_map, socketmap
        else:
            asyncore.close_all(testmap)
        self.assertEqual(len(testmap), 0)

        for c in l:
            self.assertEqual(c.socket.closed, True)

    def test_compact_traceback(self):
        try:
            raise Exception("I don't like spam!")
        except:
            real_t, real_v, real_tb = sys.exc_info()
            r = asyncore.compact_traceback()
        else:
            self.fail("Expected exception")

        (f, function, line), t, v, info = r
        self.assertEqual(os.path.split(f)[-1], 'test_asyncore.py')
        self.assertEqual(function, 'test_compact_traceback')
        self.assertEqual(t, real_t)
        self.assertEqual(v, real_v)
        self.assertEqual(info, '[%s|%s|%s]' % (f, function, line))
class DispatcherTests(unittest.TestCase):
    def setUp(self):
        # No per-test fixtures needed; dispatchers are created inline.
        pass
    def tearDown(self):
        # Close any dispatchers left registered in the global socket map.
        asyncore.close_all()
    def test_basic(self):
        # A freshly constructed dispatcher reports itself both readable
        # and writable.
        d = asyncore.dispatcher()
        self.assertEqual(d.readable(), True)
        self.assertEqual(d.writable(), True)
def test_repr(self):
d = asyncore.dispatcher()
self.assertEqual(repr(d), '<asyncore.dispatcher at %#x>' % id(d))
def test_log(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log() (to stderr)
fp = StringIO()
stderr = sys.stderr
l1 = "Lovely spam! Wonderful spam!"
l2 = "I don't like spam!"
try:
sys.stderr = fp
d.log(l1)
d.log(l2)
finally:
sys.stderr = stderr
lines = fp.getvalue().splitlines()
self.assertEqual(lines, ['log: %s' % l1, 'log: %s' % l2])
def test_log_info(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log_info() (to stdout via print)
fp = StringIO()
stdout = sys.stdout
l1 = "Have you got anything without spam?"
l2 = "Why can't she have egg bacon spam and sausage?"
l3 = "THAT'S got spam in it!"
try:
sys.stdout = fp
d.log_info(l1, 'EGGS')
d.log_info(l2)
d.log_info(l3, 'SPAM')
finally:
sys.stdout = stdout
lines = fp.getvalue().splitlines()
expected = ['EGGS: %s' % l1, 'info: %s' % l2, 'SPAM: %s' % l3]
self.assertEqual(lines, expected)
def test_unhandled(self):
d = asyncore.dispatcher()
d.ignore_log_types = ()
# capture output of dispatcher.log_info() (to stdout via print)
fp = StringIO()
stdout = sys.stdout
try:
sys.stdout = fp
d.handle_expt()
d.handle_read()
d.handle_write()
d.handle_connect()
finally:
sys.stdout = stdout
lines = fp.getvalue().splitlines()
expected = ['warning: unhandled incoming priority event',
'warning: unhandled read event',
'warning: unhandled write event',
'warning: unhandled connect event']
self.assertEqual(lines, expected)
def test_issue_8594(self):
# XXX - this test is supposed to be removed in next major Python
# version
d = asyncore.dispatcher(socket.socket())
# make sure the error message no longer refers to the socket
# object but the dispatcher instance instead
self.assertRaisesRegex(AttributeError, 'dispatcher instance',
getattr, d, 'foo')
# cheap inheritance with the underlying socket is supposed
# to still work but a DeprecationWarning is expected
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
family = d.family
self.assertEqual(family, socket.AF_INET)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[0].category, DeprecationWarning))
def test_strerror(self):
# refers to bug #8573
err = asyncore._strerror(errno.EPERM)
if hasattr(os, 'strerror'):
self.assertEqual(err, os.strerror(errno.EPERM))
err = asyncore._strerror(-1)
self.assertTrue(err != "")
class dispatcherwithsend_noread(asyncore.dispatcher_with_send):
    """dispatcher_with_send variant that never selects for reading."""

    def readable(self):
        return False

    def handle_connect(self):
        pass


class DispatcherWithSendTests(unittest.TestCase):
    """Exercise asyncore.dispatcher_with_send against a live socket."""

    usepoll = False

    def setUp(self):
        pass

    def tearDown(self):
        asyncore.close_all()

    @unittest.skipUnless(threading, 'Threading required for this test.')
    @support.reap_threads
    def test_send(self):
        evt = threading.Event()
        sock = socket.socket()
        sock.settimeout(3)
        port = support.bind_port(sock)

        cap = BytesIO()
        args = (evt, cap, sock)
        t = threading.Thread(target=capture_server, args=args)
        t.start()
        try:
            # wait a little longer for the server to initialize (it sometimes
            # refuses connections on slow machines without this wait)
            time.sleep(0.2)

            data = b"Suppose there isn't a 16-ton weight?"
            d = dispatcherwithsend_noread()
            d.create_socket()
            d.connect((HOST, port))

            # give time for socket to connect
            time.sleep(0.1)

            d.send(data)
            d.send(data)
            d.send(b'\n')

            # pump the event loop until the out buffer drains (bounded)
            n = 1000
            while d.out_buffer and n > 0:
                asyncore.poll()
                n -= 1

            evt.wait()

            self.assertEqual(cap.getvalue(), data*2)
        finally:
            t.join()


class DispatcherWithSendTests_UsePoll(DispatcherWithSendTests):
    usepoll = True
@unittest.skipUnless(hasattr(asyncore, 'file_wrapper'),
                     'asyncore.file_wrapper required')
class FileWrapperTest(unittest.TestCase):
    """asyncore.file_wrapper / file_dispatcher over a real temp file."""

    def setUp(self):
        self.d = b"It's not dead, it's sleeping!"
        with open(TESTFN, 'wb') as file:
            file.write(self.d)

    def tearDown(self):
        unlink(TESTFN)

    def test_recv(self):
        fd = os.open(TESTFN, os.O_RDONLY)
        w = asyncore.file_wrapper(fd)
        os.close(fd)

        # the wrapper dup()s the fd, so the original can be closed safely
        self.assertNotEqual(w.fd, fd)
        self.assertNotEqual(w.fileno(), fd)
        self.assertEqual(w.recv(13), b"It's not dead")
        self.assertEqual(w.read(6), b", it's")
        w.close()
        self.assertRaises(OSError, w.read, 1)

    def test_send(self):
        d1 = b"Come again?"
        d2 = b"I want to buy some cheese."
        fd = os.open(TESTFN, os.O_WRONLY | os.O_APPEND)
        w = asyncore.file_wrapper(fd)
        os.close(fd)

        w.write(d1)
        w.send(d2)
        w.close()
        with open(TESTFN, 'rb') as file:
            self.assertEqual(file.read(), self.d + d1 + d2)

    @unittest.skipUnless(hasattr(asyncore, 'file_dispatcher'),
                         'asyncore.file_dispatcher required')
    def test_dispatcher(self):
        fd = os.open(TESTFN, os.O_RDONLY)
        data = []

        class FileDispatcher(asyncore.file_dispatcher):
            def handle_read(self):
                data.append(self.recv(29))

        s = FileDispatcher(fd)
        os.close(fd)
        asyncore.loop(timeout=0.01, use_poll=True, count=2)
        self.assertEqual(b"".join(data), self.d)
class BaseTestHandler(asyncore.dispatcher):
    """Dispatcher whose callbacks all fail loudly unless overridden.

    Concrete tests override exactly the callback under test (setting
    self.flag when it fires); any other callback firing is an error.
    """

    def __init__(self, sock=None):
        asyncore.dispatcher.__init__(self, sock)
        self.flag = False

    def handle_accept(self):
        raise Exception("handle_accept not supposed to be called")

    # asyncore invokes handle_accepted(sock, addr); the previous
    # zero-argument override would have raised a TypeError instead of
    # this diagnostic message, so accept the framework's arguments.
    def handle_accepted(self, sock, addr):
        raise Exception("handle_accepted not supposed to be called")

    def handle_connect(self):
        raise Exception("handle_connect not supposed to be called")

    def handle_expt(self):
        raise Exception("handle_expt not supposed to be called")

    def handle_close(self):
        raise Exception("handle_close not supposed to be called")

    def handle_error(self):
        raise
class BaseServer(asyncore.dispatcher):
    """A server which listens on an address and dispatches the
    connection to a handler.
    """

    def __init__(self, family, addr, handler=BaseTestHandler):
        asyncore.dispatcher.__init__(self)
        self.create_socket(family)
        self.set_reuse_addr()
        bind_af_aware(self.socket, addr)
        self.listen(5)
        self.handler = handler

    @property
    def address(self):
        # the (possibly kernel-assigned) address actually bound
        return self.socket.getsockname()

    def handle_accepted(self, sock, addr):
        # hand each accepted connection to a fresh handler instance
        self.handler(sock)

    def handle_error(self):
        raise


class BaseClient(BaseTestHandler):
    """Client that connects on construction; handle_connect is a no-op."""

    def __init__(self, family, address):
        BaseTestHandler.__init__(self)
        self.create_socket(family)
        self.connect(address)

    def handle_connect(self):
        pass
class BaseTestAPI(unittest.TestCase):
    """End-to-end checks of the dispatcher callback API.

    Concrete subclasses supply ``family``, ``addr`` and ``use_poll``.
    """

    def tearDown(self):
        asyncore.close_all()

    def loop_waiting_for_flag(self, instance, timeout=5):
        # poll the event loop until instance.flag is set or we time out
        timeout = float(timeout) / 100
        count = 100
        while asyncore.socket_map and count > 0:
            asyncore.loop(timeout=0.01, count=1, use_poll=self.use_poll)
            if instance.flag:
                return
            count -= 1
            time.sleep(timeout)
        self.fail("flag not set")

    def test_handle_connect(self):
        # make sure handle_connect is called on connect()

        class TestClient(BaseClient):
            def handle_connect(self):
                self.flag = True

        server = BaseServer(self.family, self.addr)
        client = TestClient(self.family, server.address)
        self.loop_waiting_for_flag(client)

    def test_handle_accept(self):
        # make sure handle_accept() is called when a client connects

        class TestListener(BaseTestHandler):
            def __init__(self, family, addr):
                BaseTestHandler.__init__(self)
                self.create_socket(family)
                bind_af_aware(self.socket, addr)
                self.listen(5)
                self.address = self.socket.getsockname()

            def handle_accept(self):
                self.flag = True

        server = TestListener(self.family, self.addr)
        client = BaseClient(self.family, server.address)
        self.loop_waiting_for_flag(server)

    def test_handle_accepted(self):
        # make sure handle_accepted() is called when a client connects

        class TestListener(BaseTestHandler):
            def __init__(self, family, addr):
                BaseTestHandler.__init__(self)
                self.create_socket(family)
                bind_af_aware(self.socket, addr)
                self.listen(5)
                self.address = self.socket.getsockname()

            def handle_accept(self):
                # delegate so the default implementation calls
                # handle_accepted() with the accepted pair
                asyncore.dispatcher.handle_accept(self)

            def handle_accepted(self, sock, addr):
                sock.close()
                self.flag = True

        server = TestListener(self.family, self.addr)
        client = BaseClient(self.family, server.address)
        self.loop_waiting_for_flag(server)

    def test_handle_read(self):
        # make sure handle_read is called on data received

        class TestClient(BaseClient):
            def handle_read(self):
                self.flag = True

        class TestHandler(BaseTestHandler):
            def __init__(self, conn):
                BaseTestHandler.__init__(self, conn)
                self.send(b'x' * 1024)

        server = BaseServer(self.family, self.addr, TestHandler)
        client = TestClient(self.family, server.address)
        self.loop_waiting_for_flag(client)

    def test_handle_write(self):
        # make sure handle_write is called

        class TestClient(BaseClient):
            def handle_write(self):
                self.flag = True

        server = BaseServer(self.family, self.addr)
        client = TestClient(self.family, server.address)
        self.loop_waiting_for_flag(client)

    def test_handle_close(self):
        # make sure handle_close is called when the other end closes
        # the connection

        class TestClient(BaseClient):
            def handle_read(self):
                # in order to make handle_close be called we are supposed
                # to make at least one recv() call
                self.recv(1024)

            def handle_close(self):
                self.flag = True
                self.close()

        class TestHandler(BaseTestHandler):
            def __init__(self, conn):
                BaseTestHandler.__init__(self, conn)
                self.close()

        server = BaseServer(self.family, self.addr, TestHandler)
        client = TestClient(self.family, server.address)
        self.loop_waiting_for_flag(client)

    def test_handle_close_after_conn_broken(self):
        # Check that ECONNRESET/EPIPE is correctly handled (issues #5661 and
        # #11265).

        data = b'\0' * 128

        class TestClient(BaseClient):
            def handle_write(self):
                self.send(data)

            def handle_close(self):
                self.flag = True
                self.close()

            def handle_expt(self):
                self.flag = True
                self.close()

        class TestHandler(BaseTestHandler):
            def handle_read(self):
                self.recv(len(data))
                self.close()

            def writable(self):
                return False

        server = BaseServer(self.family, self.addr, TestHandler)
        client = TestClient(self.family, server.address)
        self.loop_waiting_for_flag(client)

    @unittest.skipIf(sys.platform.startswith("sunos"),
                     "OOB support is broken on Solaris")
    def test_handle_expt(self):
        # Make sure handle_expt is called on OOB data received.
        # Note: this might fail on some platforms as OOB data is
        # tenuously supported and rarely used.
        if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
            self.skipTest("Not applicable to AF_UNIX sockets.")

        class TestClient(BaseClient):
            def handle_expt(self):
                self.socket.recv(1024, socket.MSG_OOB)
                self.flag = True

        class TestHandler(BaseTestHandler):
            def __init__(self, conn):
                BaseTestHandler.__init__(self, conn)
                self.socket.send(bytes(chr(244), 'latin-1'), socket.MSG_OOB)

        server = BaseServer(self.family, self.addr, TestHandler)
        client = TestClient(self.family, server.address)
        self.loop_waiting_for_flag(client)

    def test_handle_error(self):

        class TestClient(BaseClient):
            def handle_write(self):
                1.0 / 0  # deliberately raise ZeroDivisionError
            def handle_error(self):
                self.flag = True
                try:
                    raise
                except ZeroDivisionError:
                    pass
                else:
                    raise Exception("exception not raised")

        server = BaseServer(self.family, self.addr)
        client = TestClient(self.family, server.address)
        self.loop_waiting_for_flag(client)

    def test_connection_attributes(self):
        server = BaseServer(self.family, self.addr)
        client = BaseClient(self.family, server.address)

        # we start disconnected
        self.assertFalse(server.connected)
        self.assertTrue(server.accepting)
        # this can't be taken for granted across all platforms
        #self.assertFalse(client.connected)
        self.assertFalse(client.accepting)

        # execute some loops so that client connects to server
        asyncore.loop(timeout=0.01, use_poll=self.use_poll, count=100)
        self.assertFalse(server.connected)
        self.assertTrue(server.accepting)
        self.assertTrue(client.connected)
        self.assertFalse(client.accepting)

        # disconnect the client
        client.close()
        self.assertFalse(server.connected)
        self.assertTrue(server.accepting)
        self.assertFalse(client.connected)
        self.assertFalse(client.accepting)

        # stop serving
        server.close()
        self.assertFalse(server.connected)
        self.assertFalse(server.accepting)

    def test_create_socket(self):
        s = asyncore.dispatcher()
        s.create_socket(self.family)
        self.assertEqual(s.socket.family, self.family)
        SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
        self.assertEqual(s.socket.type, socket.SOCK_STREAM | SOCK_NONBLOCK)

    def test_bind(self):
        if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
            self.skipTest("Not applicable to AF_UNIX sockets.")
        s1 = asyncore.dispatcher()
        s1.create_socket(self.family)
        s1.bind(self.addr)
        s1.listen(5)
        port = s1.socket.getsockname()[1]

        s2 = asyncore.dispatcher()
        s2.create_socket(self.family)
        # EADDRINUSE indicates the socket was correctly bound
        self.assertRaises(socket.error, s2.bind, (self.addr[0], port))
def test_set_reuse_addr(self):
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
sock = socket.socket(self.family)
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except socket.error:
unittest.skip("SO_REUSEADDR not supported on this platform")
else:
# if SO_REUSEADDR succeeded for sock we expect asyncore
# to do the same
s = asyncore.dispatcher(socket.socket(self.family))
self.assertFalse(s.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
s.socket.close()
s.create_socket(self.family)
s.set_reuse_addr()
self.assertTrue(s.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
finally:
sock.close()
    @unittest.skipUnless(threading, 'Threading required for this test.')
    @support.reap_threads
    def test_quick_connect(self):
        # see: http://bugs.python.org/issue10340
        if self.family in (socket.AF_INET, getattr(socket, "AF_INET6", object())):
            server = BaseServer(self.family, self.addr)
            # run the event loop in the background while we connect
            t = threading.Thread(target=lambda: asyncore.loop(timeout=0.1,
                                                              count=500))
            t.start()
            # NOTE(review): the thread is never join()ed here; presumably
            # @support.reap_threads cleans it up -- confirm
            s = socket.socket(self.family, socket.SOCK_STREAM)
            s.settimeout(.2)
            # SO_LINGER with zero timeout forces a hard RST on close
            s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
                         struct.pack('ii', 1, 0))
            try:
                s.connect(server.address)
            except socket.error:
                pass
            finally:
                s.close()
class TestAPI_UseIPv4Sockets(BaseTestAPI):
    family = socket.AF_INET
    addr = (HOST, 0)

@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 support required')
class TestAPI_UseIPv6Sockets(BaseTestAPI):
    family = socket.AF_INET6
    addr = ('::1', 0)

@unittest.skipUnless(HAS_UNIX_SOCKETS, 'Unix sockets required')
class TestAPI_UseUnixSockets(BaseTestAPI):
    if HAS_UNIX_SOCKETS:
        family = socket.AF_UNIX
    addr = support.TESTFN

    def tearDown(self):
        # remove the filesystem socket node as well
        unlink(self.addr)
        BaseTestAPI.tearDown(self)

# concrete test matrix: address family x polling mechanism
class TestAPI_UseIPv4Select(TestAPI_UseIPv4Sockets):
    use_poll = False

@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseIPv4Poll(TestAPI_UseIPv4Sockets):
    use_poll = True

class TestAPI_UseIPv6Select(TestAPI_UseIPv6Sockets):
    use_poll = False

@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseIPv6Poll(TestAPI_UseIPv6Sockets):
    use_poll = True

class TestAPI_UseUnixSocketsSelect(TestAPI_UseUnixSockets):
    use_poll = False

@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
class TestAPI_UseUnixSocketsPoll(TestAPI_UseUnixSockets):
    use_poll = True
def test_main():
    """Run every asyncore test class through support.run_unittest()."""
    run_unittest(HelperFunctionTests, DispatcherTests, DispatcherWithSendTests,
                 DispatcherWithSendTests_UsePoll, FileWrapperTest,
                 TestAPI_UseIPv4Select, TestAPI_UseIPv4Poll,
                 TestAPI_UseIPv6Select, TestAPI_UseIPv6Poll,
                 TestAPI_UseUnixSocketsSelect, TestAPI_UseUnixSocketsPoll)

if __name__ == "__main__":
    test_main()
# ======================================================================
# _test_multiprocessing.py
# ======================================================================
#
# Unit tests for the multiprocessing package
#
import unittest
import queue as pyqueue
import contextlib
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import struct
import operator
import weakref
import test.support
import test.support.script_helper
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
def latin(s):
    """Encode *s* using the 'latin' (ISO-8859-1) codec."""
    return s.encode('latin')
def close_queue(queue):
    """Close a multiprocessing Queue and join its feeder thread.

    Non-multiprocessing queues (manager proxies, plain queues) are
    left untouched.
    """
    if not isinstance(queue, multiprocessing.queues.Queue):
        return
    queue.close()
    queue.join_thread()
#
# Constants
#

LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG

DELTA = 0.1
CHECK_TIMINGS = False     # making true makes tests take a lot longer
                          # and can sometimes cause some non-serious
                          # failures because some calls block a bit
                          # longer than expected
if CHECK_TIMINGS:
    TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
    TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1

# True unless the platform's semaphores cannot report their current value
HAVE_GETVALUE = not getattr(_multiprocessing,
                            'HAVE_BROKEN_SEM_GETVALUE', False)

WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
    """Wait for *handle* to become ready; a negative timeout blocks forever."""
    effective = None if (timeout is not None and timeout < 0.0) else timeout
    return wait([handle], effective)
# Highest file-descriptor number the OS allows, with a conservative
# fallback.  The bare ``except:`` here also swallowed KeyboardInterrupt
# and SystemExit; catch only the errors os.sysconf can actually raise.
try:
    MAXFD = os.sysconf("SC_OPEN_MAX")
except (AttributeError, ValueError, OSError):
    MAXFD = 256

# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#

try:
    from ctypes import Structure, c_int, c_double
except ImportError:
    # ctypes unavailable: provide placeholders so dependent tests can skip
    Structure = object
    c_int = c_double = None
def check_enough_semaphores():
    """Check that the system supports enough semaphores to run the test."""
    # minimum number of semaphores available according to POSIX
    nsems_min = 256
    try:
        nsems = os.sysconf("SC_SEM_NSEMS_MAX")
    except (AttributeError, ValueError):
        # sysconf not available or setting not available
        return
    # -1 means "no limit"; anything at or above the POSIX minimum is fine
    if nsems != -1 and nsems < nsems_min:
        raise unittest.SkipTest("The OS doesn't support enough semaphores "
                                "to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
    """Wrap a callable and record how long each invocation takes.

    The duration of the most recent call is available as ``elapsed``
    (None until the first call).
    """

    def __init__(self, func):
        self.func = func
        self.elapsed = None

    def __call__(self, *args, **kwds):
        start = time.time()
        try:
            return self.func(*args, **kwds)
        finally:
            # record the duration even when func raises
            self.elapsed = time.time() - start
#
# Base class for test cases
#
class BaseTestCase(object):
    """Mixin with helpers shared by every multiprocessing test case."""

    ALLOWED_TYPES = ('processes', 'manager', 'threads')

    def assertTimingAlmostEqual(self, a, b):
        # timing checks are only meaningful when CHECK_TIMINGS is enabled
        if CHECK_TIMINGS:
            self.assertAlmostEqual(a, b, 1)

    def assertReturnsIfImplemented(self, value, func, *args):
        # some proxy/platform variants raise NotImplementedError; that's ok
        try:
            res = func(*args)
        except NotImplementedError:
            return
        return self.assertEqual(value, res)

    # For the sanity of Windows users, rather than crashing or freezing in
    # multiple ways.
    def __reduce__(self, *args):
        raise NotImplementedError("shouldn't try to pickle a test case")

    __reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
    """Return the current value of a semaphore-like object.

    Tries the public get_value() API first, then the private attributes
    used by the various semaphore implementations.
    """
    try:
        return self.get_value()
    except AttributeError:
        pass
    try:
        return self._Semaphore__value
    except AttributeError:
        pass
    try:
        return self._value
    except AttributeError:
        raise NotImplementedError
#
# Testcases
#
class DummyCallable:
    """Picklable callable target used to verify target references are released."""
    def __call__(self, q, c):
        # the parent passes the callable to itself as an argument
        assert isinstance(c, DummyCallable)
        q.put(5)
class _TestProcess(BaseTestCase):
    """Tests of the Process API (also run against dummy threads)."""

    ALLOWED_TYPES = ('processes', 'threads')

    def test_current(self):
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        current = self.current_process()
        authkey = current.authkey

        self.assertTrue(current.is_alive())
        self.assertTrue(not current.daemon)
        self.assertIsInstance(authkey, bytes)
        self.assertTrue(len(authkey) > 0)
        self.assertEqual(current.ident, os.getpid())
        self.assertEqual(current.exitcode, None)

    def test_daemon_argument(self):
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        # By default uses the current process's daemon flag.
        proc0 = self.Process(target=self._test)
        self.assertEqual(proc0.daemon, self.current_process().daemon)
        proc1 = self.Process(target=self._test, daemon=True)
        self.assertTrue(proc1.daemon)
        proc2 = self.Process(target=self._test, daemon=False)
        self.assertFalse(proc2.daemon)

    @classmethod
    def _test(cls, q, *args, **kwds):
        # echo arguments and identity back to the parent through q
        current = cls.current_process()
        q.put(args)
        q.put(kwds)
        q.put(current.name)
        if cls.TYPE != 'threads':
            q.put(bytes(current.authkey))
            q.put(current.pid)

    def test_process(self):
        q = self.Queue(1)
        e = self.Event()
        args = (q, 1, 2)
        kwargs = {'hello':23, 'bye':2.54}
        name = 'SomeProcess'
        p = self.Process(
            target=self._test, args=args, kwargs=kwargs, name=name
            )
        p.daemon = True
        current = self.current_process()

        if self.TYPE != 'threads':
            self.assertEqual(p.authkey, current.authkey)
        self.assertEqual(p.is_alive(), False)
        self.assertEqual(p.daemon, True)
        self.assertNotIn(p, self.active_children())
        self.assertTrue(type(self.active_children()) is list)
        self.assertEqual(p.exitcode, None)

        p.start()

        self.assertEqual(p.exitcode, None)
        self.assertEqual(p.is_alive(), True)
        self.assertIn(p, self.active_children())

        self.assertEqual(q.get(), args[1:])
        self.assertEqual(q.get(), kwargs)
        self.assertEqual(q.get(), p.name)
        if self.TYPE != 'threads':
            self.assertEqual(q.get(), current.authkey)
            self.assertEqual(q.get(), p.pid)

        p.join()

        self.assertEqual(p.exitcode, 0)
        self.assertEqual(p.is_alive(), False)
        self.assertNotIn(p, self.active_children())
        close_queue(q)

    @classmethod
    def _test_terminate(cls):
        # sleep long enough that the parent must terminate() us
        time.sleep(100)

    def test_terminate(self):
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        p = self.Process(target=self._test_terminate)
        p.daemon = True
        p.start()

        self.assertEqual(p.is_alive(), True)
        self.assertIn(p, self.active_children())
        self.assertEqual(p.exitcode, None)

        join = TimingWrapper(p.join)

        self.assertEqual(join(0), None)
        self.assertTimingAlmostEqual(join.elapsed, 0.0)
        self.assertEqual(p.is_alive(), True)

        self.assertEqual(join(-1), None)
        self.assertTimingAlmostEqual(join.elapsed, 0.0)
        self.assertEqual(p.is_alive(), True)

        # XXX maybe terminating too soon causes the problems on Gentoo...
        time.sleep(1)

        p.terminate()

        if hasattr(signal, 'alarm'):
            # On the Gentoo buildbot waitpid() often seems to block forever.
            # We use alarm() to interrupt it if it blocks for too long.
            def handler(*args):
                raise RuntimeError('join took too long: %s' % p)
            old_handler = signal.signal(signal.SIGALRM, handler)
            try:
                signal.alarm(10)
                self.assertEqual(join(), None)
            finally:
                signal.alarm(0)
                signal.signal(signal.SIGALRM, old_handler)
        else:
            self.assertEqual(join(), None)

        self.assertTimingAlmostEqual(join.elapsed, 0.0)

        self.assertEqual(p.is_alive(), False)
        self.assertNotIn(p, self.active_children())

        p.join()

        # XXX sometimes get p.exitcode == 0 on Windows ...
        #self.assertEqual(p.exitcode, -signal.SIGTERM)

    def test_cpu_count(self):
        try:
            cpus = multiprocessing.cpu_count()
        except NotImplementedError:
            cpus = 1
        self.assertTrue(type(cpus) is int)
        self.assertTrue(cpus >= 1)

    def test_active_children(self):
        self.assertEqual(type(self.active_children()), list)

        p = self.Process(target=time.sleep, args=(DELTA,))
        self.assertNotIn(p, self.active_children())

        p.daemon = True
        p.start()
        self.assertIn(p, self.active_children())

        p.join()
        self.assertNotIn(p, self.active_children())

    @classmethod
    def _test_recursion(cls, wconn, id):
        # report our path in the spawn tree, then spawn two children
        # until depth 2 is reached
        wconn.send(id)
        if len(id) < 2:
            for i in range(2):
                p = cls.Process(
                    target=cls._test_recursion, args=(wconn, id+[i])
                    )
                p.start()
                p.join()

    def test_recursion(self):
        rconn, wconn = self.Pipe(duplex=False)
        self._test_recursion(wconn, [])

        time.sleep(DELTA)
        result = []
        while rconn.poll():
            result.append(rconn.recv())

        # depth-first order of the recursive spawn tree
        expected = [
            [],
            [0],
            [0, 0],
            [0, 1],
            [1],
            [1, 0],
            [1, 1]
            ]
        self.assertEqual(result, expected)

    @classmethod
    def _test_sentinel(cls, event):
        event.wait(10.0)

    def test_sentinel(self):
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        event = self.Event()
        p = self.Process(target=self._test_sentinel, args=(event,))
        with self.assertRaises(ValueError):
            # sentinel is only available after start()
            p.sentinel
        p.start()
        self.addCleanup(p.join)
        sentinel = p.sentinel
        self.assertIsInstance(sentinel, int)
        self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
        event.set()
        p.join()
        self.assertTrue(wait_for_handle(sentinel, timeout=1))

    def test_lose_target_ref(self):
        c = DummyCallable()
        wr = weakref.ref(c)
        q = self.Queue()
        p = self.Process(target=c, args=(q, c))
        del c
        p.start()
        p.join()
        # the Process object must not keep the target alive in the parent
        self.assertIs(wr(), None)
        self.assertEqual(q.get(), 5)
        close_queue(q)
@classmethod
def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
for stream_name, action in break_std_streams.items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
setattr(sys, stream_name, None)
evt.set()
    def test_error_on_stdio_flush_1(self):
        # Check that Process works with broken standard streams
        streams = [io.StringIO(), None]
        streams[0].close()
        for stream_name in ('stdout', 'stderr'):
            for stream in streams:
                old_stream = getattr(sys, stream_name)
                setattr(sys, stream_name, stream)
                try:
                    evt = self.Event()
                    proc = self.Process(target=self._test_error_on_stdio_flush,
                                        args=(evt,))
                    proc.start()
                    proc.join()
                    self.assertTrue(evt.is_set())
                    self.assertEqual(proc.exitcode, 0)
                finally:
                    # always restore the real stream
                    setattr(sys, stream_name, old_stream)

    def test_error_on_stdio_flush_2(self):
        # Same as test_error_on_stdio_flush_1(), but standard streams are
        # broken by the child process
        for stream_name in ('stdout', 'stderr'):
            for action in ('close', 'remove'):
                old_stream = getattr(sys, stream_name)
                try:
                    evt = self.Event()
                    proc = self.Process(target=self._test_error_on_stdio_flush,
                                        args=(evt, {stream_name: action}))
                    proc.start()
                    proc.join()
                    self.assertTrue(evt.is_set())
                    self.assertEqual(proc.exitcode, 0)
                finally:
                    setattr(sys, stream_name, old_stream)

    @classmethod
    def _sleep_and_set_event(self, evt, delay=0.0):
        # child helper: wait *delay* seconds, then set the event
        time.sleep(delay)
        evt.set()

    def check_forkserver_death(self, signum):
        # bpo-31308: if the forkserver process has died, we should still
        # be able to create and run new Process instances (the forkserver
        # is implicitly restarted).
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        sm = multiprocessing.get_start_method()
        if sm != 'forkserver':
            # The fork method by design inherits all fds from the parent,
            # trying to go against it is a lost battle
            self.skipTest('test not appropriate for {}'.format(sm))

        from multiprocessing.forkserver import _forkserver
        _forkserver.ensure_running()

        evt = self.Event()
        proc = self.Process(target=self._sleep_and_set_event, args=(evt, 1.0))
        proc.start()

        pid = _forkserver._forkserver_pid
        os.kill(pid, signum)
        time.sleep(1.0)  # give it time to die

        evt2 = self.Event()
        proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
        proc2.start()
        proc2.join()
        self.assertTrue(evt2.is_set())
        self.assertEqual(proc2.exitcode, 0)

        proc.join()
        self.assertTrue(evt.is_set())
        self.assertIn(proc.exitcode, (0, 255))

    def test_forkserver_sigint(self):
        # Catchable signal
        self.check_forkserver_death(signal.SIGINT)

    def test_forkserver_sigkill(self):
        # Uncatchable signal
        if os.name != 'nt':
            self.check_forkserver_death(signal.SIGKILL)
#
#
#
class _UpperCaser(multiprocessing.Process):
    """Child process that upper-cases strings received over a pipe."""

    def __init__(self):
        multiprocessing.Process.__init__(self)
        self.child_conn, self.parent_conn = multiprocessing.Pipe()

    def run(self):
        # child side: close the parent end and serve until None arrives
        self.parent_conn.close()
        for s in iter(self.child_conn.recv, None):
            self.child_conn.send(s.upper())
        self.child_conn.close()

    def submit(self, s):
        # parent side: send a string and wait for the upper-cased reply
        assert type(s) is str
        self.parent_conn.send(s)
        return self.parent_conn.recv()

    def stop(self):
        # parent side: signal shutdown and release both pipe ends
        self.parent_conn.send(None)
        self.parent_conn.close()
        self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
    """Tests for user-defined subclasses of Process."""

    ALLOWED_TYPES = ('processes',)

    def test_subclassing(self):
        uppercaser = _UpperCaser()
        uppercaser.daemon = True
        uppercaser.start()
        self.assertEqual(uppercaser.submit('hello'), 'HELLO')
        self.assertEqual(uppercaser.submit('world'), 'WORLD')
        uppercaser.stop()
        uppercaser.join()

    def test_stderr_flush(self):
        # sys.stderr is flushed at process shutdown (issue #13812)
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        testfn = test.support.TESTFN
        self.addCleanup(test.support.unlink, testfn)
        proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
        proc.start()
        proc.join()
        with open(testfn, 'r') as f:
            err = f.read()
            # The whole traceback was printed
            self.assertIn("ZeroDivisionError", err)
            self.assertIn("test_multiprocessing.py", err)
            self.assertIn("1/0 # MARKER", err)

    @classmethod
    def _test_stderr_flush(cls, testfn):
        # redirect stderr to testfn, then crash so a traceback is written
        fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
        sys.stderr = open(fd, 'w', closefd=False)
        1/0 # MARKER

    @classmethod
    def _test_sys_exit(cls, reason, testfn):
        # redirect stderr to testfn, then exit with the given reason
        fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
        sys.stderr = open(fd, 'w', closefd=False)
        sys.exit(reason)

    def test_sys_exit(self):
        # See Issue 13854
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        testfn = test.support.TESTFN
        self.addCleanup(test.support.unlink, testfn)

        for reason in (
            [1, 2, 3],
            'ignore this',
        ):
            p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
            p.daemon = True
            p.start()
            p.join(5)
            # non-int sys.exit() arguments are printed and give exit code 1
            self.assertEqual(p.exitcode, 1)

            with open(testfn, 'r') as f:
                content = f.read()
            self.assertEqual(content.rstrip(), str(reason))

            os.unlink(testfn)

        for reason in (True, False, 8):
            p = self.Process(target=sys.exit, args=(reason,))
            p.daemon = True
            p.start()
            p.join(5)
            self.assertEqual(p.exitcode, reason)
#
#
#
def queue_empty(q):
    """True if *q* reports empty; falls back to qsize() for proxies."""
    empty = getattr(q, 'empty', None)
    if empty is not None:
        return empty()
    return q.qsize() == 0
def queue_full(q, maxsize):
    """True if *q* reports full; falls back to qsize() for proxies."""
    full = getattr(q, 'full', None)
    if full is not None:
        return full()
    return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
close_queue(queue)
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
    """Exercise blocking, non-blocking and timeout variants of get()."""
    queue = self.Queue()
    child_can_start = self.Event()
    parent_can_continue = self.Event()

    proc = self.Process(
        target=self._test_get,
        args=(queue, child_can_start, parent_can_continue)
        )
    proc.daemon = True
    proc.start()

    self.assertEqual(queue_empty(queue), True)

    child_can_start.set()
    parent_can_continue.wait()

    # The child's puts may still be in its feeder buffer; give them a
    # moment to land in the pipe.
    time.sleep(DELTA)
    self.assertEqual(queue_empty(queue), False)

    # Hangs unexpectedly, remove for now
    #self.assertEqual(queue.get(), 1)
    self.assertEqual(queue.get(True, None), 2)
    self.assertEqual(queue.get(True), 3)
    self.assertEqual(queue.get(timeout=1), 4)
    self.assertEqual(queue.get_nowait(), 5)

    self.assertEqual(queue_empty(queue), True)

    get = TimingWrapper(queue.get)
    get_nowait = TimingWrapper(queue.get_nowait)

    # Non-blocking gets on an empty queue must raise Empty immediately...
    self.assertRaises(pyqueue.Empty, get, False)
    self.assertTimingAlmostEqual(get.elapsed, 0)

    self.assertRaises(pyqueue.Empty, get, False, None)
    self.assertTimingAlmostEqual(get.elapsed, 0)

    self.assertRaises(pyqueue.Empty, get_nowait)
    self.assertTimingAlmostEqual(get_nowait.elapsed, 0)

    # ...while blocking gets wait roughly the requested timeout.
    self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
    self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)

    # block=False ignores the timeout argument entirely.
    self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
    self.assertTimingAlmostEqual(get.elapsed, 0)

    self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
    self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)

    proc.join()
    close_queue(queue)
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shutdown until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
    """A forked child must get its own queue feeder thread."""
    # Old versions of Queue would fail to create a new feeder
    # thread for a forked process if the original process had its
    # own feeder thread.  This test checks that this no longer
    # happens.

    queue = self.Queue()

    # put items on queue so that main process starts a feeder thread
    for i in range(10):
        queue.put(i)

    # wait to make sure thread starts before we fork a new process
    time.sleep(DELTA)

    # fork process
    p = self.Process(target=self._test_fork, args=(queue,))
    p.daemon = True
    p.start()

    # check that all expected items are in the queue: 0-9 from the parent,
    # 10-19 from the child (FIFO order across the fork boundary)
    for i in range(20):
        self.assertEqual(queue.get(), i)
    self.assertRaises(pyqueue.Empty, queue.get, False)

    p.join()
    close_queue(queue)
def test_qsize(self):
    """qsize() tracks puts and gets (skipped where unimplemented, e.g. macOS)."""
    q = self.Queue()
    try:
        self.assertEqual(q.qsize(), 0)
    except NotImplementedError:
        self.skipTest('qsize method not implemented')
    # Two puts grow the count, two gets shrink it back to zero.
    for item, expected in ((1, 1), (5, 2)):
        q.put(item)
        self.assertEqual(q.qsize(), expected)
    for expected in (1, 0):
        q.get()
        self.assertEqual(q.qsize(), expected)
    close_queue(q)
@classmethod
def _test_task_done(cls, q):
    """Worker: mark each received item as done; a None item is the stop
    sentinel and is not marked done."""
    while True:
        item = q.get()
        if item is None:
            break
        time.sleep(DELTA)
        q.task_done()
def test_task_done(self):
    """JoinableQueue.join() returns once every put item is task_done()."""
    queue = self.JoinableQueue()

    # Four workers each consume items and call task_done() per item.
    workers = [self.Process(target=self._test_task_done, args=(queue,))
               for i in range(4)]

    for p in workers:
        p.daemon = True
        p.start()

    for i in range(10):
        queue.put(i)

    # Blocks until all ten items above have been task_done()'d.
    queue.join()

    # One None sentinel per worker so each consumer loop terminates.
    for p in workers:
        queue.put(None)

    for p in workers:
        p.join()
    close_queue(queue)
def test_no_import_lock_contention(self):
    """Using a Queue at import time must not deadlock on the import lock
    (regression test for Issue #22853)."""
    with test.support.temp_cwd():
        module_name = 'imported_by_an_imported_module'
        with open(module_name + '.py', 'w') as f:
            # The module talks to a Queue at import time; the feeder
            # thread must not block behind the import lock.
            f.write("""if 1:
                import multiprocessing

                q = multiprocessing.Queue()
                q.put('knock knock')
                q.get(timeout=3)
                q.close()
                del q
            """)

        with test.support.DirsOnSysPath(os.getcwd()):
            try:
                __import__(module_name)
            except pyqueue.Empty:
                self.fail("Probable regression on import lock contention;"
                          " see Issue #22853")
def test_timeout(self):
    """Queue.get(block=True, timeout=t) must wait roughly t seconds before
    raising Empty.

    Uses time.monotonic() rather than time.time(): the measured interval
    must not be distorted by wall-clock adjustments (NTP steps, DST),
    which could make the assertion fail or pass spuriously.
    """
    q = multiprocessing.Queue()
    start = time.monotonic()
    self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
    delta = time.monotonic() - start
    # Tolerate a delta of 30 ms because of the bad clock resolution on
    # Windows (usually 15.6 ms)
    self.assertGreaterEqual(delta, 0.170)
    close_queue(q)
def test_queue_feeder_donot_stop_onexc(self):
    # bpo-30414: verify feeder handles exceptions correctly
    if self.TYPE != 'processes':
        self.skipTest('test not appropriate for {}'.format(self.TYPE))

    class NotSerializable(object):
        # Pickling this object always fails, which makes the feeder
        # thread hit an exception while serializing it.
        def __reduce__(self):
            raise AttributeError
    with test.support.captured_stderr():
        q = self.Queue()
        q.put(NotSerializable())
        q.put(True)
        # The feeder must survive the pickling failure and still deliver
        # the next item.
        # bpo-30595: use a timeout of 1 second for slow buildbots
        self.assertTrue(q.get(timeout=1.0))
        close_queue(q)
#
#
#
class _TestLock(BaseTestCase):
    """Basic semantics of Lock and RLock."""

    def test_lock(self):
        """Non-recursive lock: a second acquire fails, an extra release errors."""
        mutex = self.Lock()
        self.assertEqual(mutex.acquire(), True)
        self.assertEqual(mutex.acquire(False), False)
        self.assertEqual(mutex.release(), None)
        self.assertRaises((ValueError, threading.ThreadError), mutex.release)

    def test_rlock(self):
        """Recursive lock: acquire/release nest three deep, then over-release errors."""
        mutex = self.RLock()
        for _ in range(3):
            self.assertEqual(mutex.acquire(), True)
        for _ in range(3):
            self.assertEqual(mutex.release(), None)
        self.assertRaises((AssertionError, RuntimeError), mutex.release)

    def test_lock_context(self):
        """Locks support the context-manager protocol."""
        with self.Lock():
            pass
class _TestSemaphore(BaseTestCase):
    """Semantics of Semaphore and BoundedSemaphore."""

    def _test_semaphore(self, sem):
        # Shared checks for a semaphore created with an initial value of 2:
        # acquire decrements, release increments, and a non-blocking acquire
        # at zero fails without changing the counter.
        self.assertReturnsIfImplemented(2, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(0, get_value, sem)
        self.assertEqual(sem.acquire(False), False)
        self.assertReturnsIfImplemented(0, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(2, get_value, sem)

    def test_semaphore(self):
        # A plain Semaphore may be released past its initial value.
        sem = self.Semaphore(2)
        self._test_semaphore(sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(3, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(4, get_value, sem)

    def test_bounded_semaphore(self):
        sem = self.BoundedSemaphore(2)
        self._test_semaphore(sem)
        # Currently fails on OS/X
        #if HAVE_GETVALUE:
        #    self.assertRaises(ValueError, sem.release)
        #    self.assertReturnsIfImplemented(2, get_value, sem)

    def test_timeout(self):
        # acquire() timeout semantics: non-blocking variants return at once
        # (timeout ignored when block=False); a blocking acquire waits
        # roughly the requested timeout before giving up.
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        sem = self.Semaphore(0)
        acquire = TimingWrapper(sem.acquire)

        self.assertEqual(acquire(False), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)

        self.assertEqual(acquire(False, None), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)

        self.assertEqual(acquire(False, TIMEOUT1), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0)

        self.assertEqual(acquire(True, TIMEOUT2), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)

        self.assertEqual(acquire(timeout=TIMEOUT3), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
    """Condition variable semantics across processes and threads."""

    @classmethod
    def f(cls, cond, sleeping, woken, timeout=None):
        # Child: announce we are about to sleep, wait on the condition
        # (possibly timing out), then announce we have woken.
        cond.acquire()
        sleeping.release()
        cond.wait(timeout)
        woken.release()
        cond.release()

    def check_invariant(self, cond):
        # this is only supposed to succeed when there are no sleepers
        if self.TYPE == 'processes':
            try:
                sleepers = (cond._sleeping_count.get_value() -
                            cond._woken_count.get_value())
                self.assertEqual(sleepers, 0)
                self.assertEqual(cond._wait_semaphore.get_value(), 0)
            except NotImplementedError:
                pass

    def test_notify(self):
        """notify() wakes exactly one waiter at a time."""
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)

        # One process waiter and one thread waiter on the same condition.
        p = self.Process(target=self.f, args=(cond, sleeping, woken))
        p.daemon = True
        p.start()
        self.addCleanup(p.join)

        p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
        p.daemon = True
        p.start()
        self.addCleanup(p.join)

        # wait for both children to start sleeping
        sleeping.acquire()
        sleeping.acquire()

        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(0, get_value, woken)

        # wake up one process/thread
        cond.acquire()
        cond.notify()
        cond.release()

        # check one process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(1, get_value, woken)

        # wake up another
        cond.acquire()
        cond.notify()
        cond.release()

        # check other has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(2, get_value, woken)

        # check state is not mucked up
        self.check_invariant(cond)
        p.join()

    def test_notify_all(self):
        """notify_all() wakes every waiter; timed-out waiters leave no residue."""
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)

        # start some threads/processes which will timeout
        for i in range(3):
            p = self.Process(target=self.f,
                             args=(cond, sleeping, woken, TIMEOUT1))
            p.daemon = True
            p.start()
            self.addCleanup(p.join)

            t = threading.Thread(target=self.f,
                                 args=(cond, sleeping, woken, TIMEOUT1))
            t.daemon = True
            t.start()
            self.addCleanup(t.join)

        # wait for them all to sleep
        for i in range(6):
            sleeping.acquire()

        # check they have all timed out
        for i in range(6):
            woken.acquire()
        self.assertReturnsIfImplemented(0, get_value, woken)

        # check state is not mucked up
        self.check_invariant(cond)

        # start some more threads/processes
        for i in range(3):
            p = self.Process(target=self.f, args=(cond, sleeping, woken))
            p.daemon = True
            p.start()
            self.addCleanup(p.join)

            t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
            t.daemon = True
            t.start()
            self.addCleanup(t.join)

        # wait for them to all sleep
        for i in range(6):
            sleeping.acquire()

        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(0, get_value, woken)

        # wake them all up
        cond.acquire()
        cond.notify_all()
        cond.release()

        # check they have all woken; poll up to 10*DELTA for the wakeups
        # to be observed before asserting.
        for i in range(10):
            try:
                if get_value(woken) == 6:
                    break
            except NotImplementedError:
                break
            time.sleep(DELTA)
        self.assertReturnsIfImplemented(6, get_value, woken)

        # check state is not mucked up
        self.check_invariant(cond)

    def test_timeout(self):
        """wait(timeout) on a never-notified condition returns False on time."""
        cond = self.Condition()
        wait = TimingWrapper(cond.wait)
        cond.acquire()
        res = wait(TIMEOUT1)
        cond.release()
        self.assertEqual(res, False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)

    @classmethod
    def _test_waitfor_f(cls, cond, state):
        # Child: signal readiness by setting state to 0, then wait until
        # the parent has incremented state to 4; exit non-zero on failure.
        with cond:
            state.value = 0
            cond.notify()
            result = cond.wait_for(lambda : state.value==4)
            if not result or state.value != 4:
                sys.exit(1)

    @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
    def test_waitfor(self):
        """wait_for() returns once its predicate becomes true."""
        # based on test in test/lock_tests.py
        cond = self.Condition()
        state = self.Value('i', -1)

        p = self.Process(target=self._test_waitfor_f, args=(cond, state))
        p.daemon = True
        p.start()

        with cond:
            result = cond.wait_for(lambda : state.value==0)
            self.assertTrue(result)
            self.assertEqual(state.value, 0)

        for i in range(4):
            time.sleep(0.01)
            with cond:
                state.value += 1
                cond.notify()

        p.join(5)
        self.assertFalse(p.is_alive())
        self.assertEqual(p.exitcode, 0)

    @classmethod
    def _test_waitfor_timeout_f(cls, cond, state, success, sem):
        # Child: the predicate never becomes true, so wait_for(timeout=0.1)
        # must return False after roughly 0.1 s.
        sem.release()
        with cond:
            expected = 0.1
            dt = time.time()
            result = cond.wait_for(lambda : state.value==4, timeout=expected)
            dt = time.time() - dt
            # borrow logic in assertTimeout() from test/lock_tests.py
            if not result and expected * 0.6 < dt < expected * 10.0:
                success.value = True

    @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
    def test_waitfor_timeout(self):
        """wait_for(timeout) gives up when the predicate stays false."""
        # based on test in test/lock_tests.py
        cond = self.Condition()
        state = self.Value('i', 0)
        success = self.Value('i', False)
        sem = self.Semaphore(0)

        p = self.Process(target=self._test_waitfor_timeout_f,
                         args=(cond, state, success, sem))
        p.daemon = True
        p.start()
        self.assertTrue(sem.acquire(timeout=10))

        # Only increment 3 times, so state == 4 is never reached.
        for i in range(3):
            time.sleep(0.01)
            with cond:
                state.value += 1
                cond.notify()

        p.join(5)
        self.assertTrue(success.value)

    @classmethod
    def _test_wait_result(cls, c, pid):
        # Child: notify the waiting parent, then (on POSIX) interrupt its
        # second wait with SIGINT.
        with c:
            c.notify()
        time.sleep(1)
        if pid is not None:
            os.kill(pid, signal.SIGINT)

    def test_wait_result(self):
        """wait() returns True when notified; an interrupted wait raises
        KeyboardInterrupt (POSIX process variant only)."""
        if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
            pid = os.getpid()
        else:
            pid = None

        c = self.Condition()
        with c:
            self.assertFalse(c.wait(0))
            self.assertFalse(c.wait(0.1))

            p = self.Process(target=self._test_wait_result, args=(c, pid))
            p.start()

            self.assertTrue(c.wait(10))
            if pid is not None:
                self.assertRaises(KeyboardInterrupt, c.wait, 10)

        p.join()
class _TestEvent(BaseTestCase):
    """Event set/clear/wait semantics, including timeouts."""

    @classmethod
    def _test_event(cls, event):
        # Child: set the event after a delay so the parent's wait() blocks.
        time.sleep(TIMEOUT2)
        event.set()

    def test_event(self):
        event = self.Event()
        wait = TimingWrapper(event.wait)

        # Removed temporarily, due to API shear, this does not
        # work with threading._Event objects. is_set == isSet
        self.assertEqual(event.is_set(), False)

        # Removed, threading.Event.wait() will return the value of the __flag
        # instead of None. API Shear with the semaphore backed mp.Event
        self.assertEqual(wait(0.0), False)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)

        event.set()

        # Once set, wait() returns True immediately regardless of timeout.
        # See note above on the API differences
        self.assertEqual(event.is_set(), True)
        self.assertEqual(wait(), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        # self.assertEqual(event.is_set(), True)

        event.clear()

        #self.assertEqual(event.is_set(), False)

        # A cleared event blocks again until a child sets it.
        p = self.Process(target=self._test_event, args=(event,))
        p.daemon = True
        p.start()
        self.assertEqual(wait(), True)
        p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
    """List-like shared counter: append() increments a count held in shared
    memory and len() reads it.  Only the count is stored — appended values
    are discarded.  Picklable, so it can be shared with child processes."""

    def __init__(self):
        # One C int's worth of shared memory, guarded by a process lock.
        wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
        lock = multiprocessing.Lock()
        self.__setstate__((wrapper, lock))
        self._lengthbuf[0] = 0

    def __setstate__(self, state):
        (self._wrapper, self._lock) = state
        # View the shared buffer as a single-element int array.
        self._lengthbuf = self._wrapper.create_memoryview().cast('i')

    def __getstate__(self):
        return (self._wrapper, self._lock)

    def append(self, _):
        # The appended value is ignored; only the count matters.
        with self._lock:
            self._lengthbuf[0] += 1

    def __len__(self):
        with self._lock:
            return self._lengthbuf[0]
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
    """
    A bunch of threads.
    """
    def __init__(self, namespace, f, args, n, wait_before_exit=False):
        """
        Construct a bunch of `n` threads running the same function `f`.
        If `wait_before_exit` is True, the threads won't terminate until
        do_finish() is called.
        """
        self.f = f
        self.args = args
        self.n = n
        # Shared atomic counters of pids that have started / finished.
        self.started = namespace.DummyList()
        self.finished = namespace.DummyList()
        self._can_exit = namespace.Event()
        if not wait_before_exit:
            self._can_exit.set()

        threads = []
        for i in range(n):
            p = namespace.Process(target=self.task)
            p.daemon = True
            p.start()
            threads.append(p)

        def finalize(threads):
            for p in threads:
                p.join()

        # Ensure all workers are joined when the Bunch is close()d or
        # garbage-collected.
        self._finalizer = weakref.finalize(self, finalize, threads)

    def task(self):
        pid = os.getpid()
        self.started.append(pid)
        try:
            self.f(*self.args)
        finally:
            self.finished.append(pid)
            # Hold the worker alive until the driver allows exit
            # (30 s safety timeout against deadlocks).
            self._can_exit.wait(30)
            assert self._can_exit.is_set()

    def wait_for_started(self):
        # Busy-wait until all n workers have registered as started.
        while len(self.started) < self.n:
            _wait()

    def wait_for_finished(self):
        while len(self.finished) < self.n:
            _wait()

    def do_finish(self):
        # Release workers blocked at the end of task().
        self._can_exit.set()

    def close(self):
        self._finalizer()
class AppendTrue(object):
    """Picklable callable that appends True to the container it was
    constructed with (used as a Barrier 'action')."""

    def __init__(self, obj):
        self.obj = obj

    def __call__(self):
        target = self.obj
        target.append(True)
class _TestBarrier(BaseTestCase):
    """
    Tests for Barrier objects.
    """
    N = 5
    defaultTimeout = 30.0  # XXX Slow Windows buildbots need generous timeout

    def setUp(self):
        self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)

    def tearDown(self):
        self.barrier.abort()
        self.barrier = None

    def DummyList(self):
        # Return an "atomic counter" list suited to the flavour under test
        # (plain list for threads, manager proxy, or shared-memory counter).
        if self.TYPE == 'threads':
            return []
        elif self.TYPE == 'manager':
            return self.manager.list()
        else:
            return _DummyList()

    def run_threads(self, f, args):
        # Run f in N-1 helper workers plus the current thread/process.
        b = Bunch(self, f, args, self.N-1)
        try:
            f(*args)
            b.wait_for_finished()
        finally:
            b.close()

    @classmethod
    def multipass(cls, barrier, results, n):
        # Cross the barrier n times, verifying lockstep progress: nobody
        # passes a wait() until all parties have appended their marker.
        m = barrier.parties
        assert m == cls.N
        for i in range(n):
            results[0].append(True)
            assert len(results[1]) == i * m
            barrier.wait()
            results[1].append(True)
            assert len(results[0]) == (i + 1) * m
            barrier.wait()
        try:
            assert barrier.n_waiting == 0
        except NotImplementedError:
            pass
        assert not barrier.broken

    def test_barrier(self, passes=1):
        """
        Test that a barrier is passed in lockstep
        """
        results = [self.DummyList(), self.DummyList()]
        self.run_threads(self.multipass, (self.barrier, results, passes))

    def test_barrier_10(self):
        """
        Test that a barrier works for 10 consecutive runs
        """
        return self.test_barrier(10)

    @classmethod
    def _test_wait_return_f(cls, barrier, queue):
        # Each waiter reports the index wait() returned.
        res = barrier.wait()
        queue.put(res)

    def test_wait_return(self):
        """
        test the return value from barrier.wait
        """
        queue = self.Queue()
        self.run_threads(self._test_wait_return_f, (self.barrier, queue))
        results = [queue.get() for i in range(self.N)]
        # Exactly one waiter receives index 0.
        self.assertEqual(results.count(0), 1)
        close_queue(queue)

    @classmethod
    def _test_action_f(cls, barrier, results):
        barrier.wait()
        # The action must have run exactly once before anyone is released.
        if len(results) != 1:
            raise RuntimeError

    def test_action(self):
        """
        Test the 'action' callback
        """
        results = self.DummyList()
        barrier = self.Barrier(self.N, action=AppendTrue(results))
        self.run_threads(self._test_action_f, (barrier, results))
        self.assertEqual(len(results), 1)

    @classmethod
    def _test_abort_f(cls, barrier, results1, results2):
        # One designated waiter raises; its abort() breaks the barrier for
        # everyone else, who record a BrokenBarrierError.
        try:
            i = barrier.wait()
            if i == cls.N//2:
                raise RuntimeError
            barrier.wait()
            results1.append(True)
        except threading.BrokenBarrierError:
            results2.append(True)
        except RuntimeError:
            barrier.abort()

    def test_abort(self):
        """
        Test that an abort will put the barrier in a broken state
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        self.run_threads(self._test_abort_f,
                         (self.barrier, results1, results2))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertTrue(self.barrier.broken)

    @classmethod
    def _test_reset_f(cls, barrier, results1, results2, results3):
        i = barrier.wait()
        if i == cls.N//2:
            # Wait until the other threads are all in the barrier.
            while barrier.n_waiting < cls.N-1:
                time.sleep(0.001)
            barrier.reset()
        else:
            try:
                barrier.wait()
                results1.append(True)
            except threading.BrokenBarrierError:
                results2.append(True)
        # Now, pass the barrier again
        barrier.wait()
        results3.append(True)

    def test_reset(self):
        """
        Test that a 'reset' on a barrier frees the waiting threads
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        results3 = self.DummyList()
        self.run_threads(self._test_reset_f,
                         (self.barrier, results1, results2, results3))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertEqual(len(results3), self.N)

    @classmethod
    def _test_abort_and_reset_f(cls, barrier, barrier2,
                                results1, results2, results3):
        try:
            i = barrier.wait()
            if i == cls.N//2:
                raise RuntimeError
            barrier.wait()
            results1.append(True)
        except threading.BrokenBarrierError:
            results2.append(True)
        except RuntimeError:
            barrier.abort()
        # Synchronize and reset the barrier.  Must synchronize first so
        # that everyone has left it when we reset, and after so that no
        # one enters it before the reset.
        if barrier2.wait() == cls.N//2:
            barrier.reset()
        barrier2.wait()
        barrier.wait()
        results3.append(True)

    def test_abort_and_reset(self):
        """
        Test that a barrier can be reset after being broken.
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        results3 = self.DummyList()
        barrier2 = self.Barrier(self.N)

        self.run_threads(self._test_abort_and_reset_f,
                         (self.barrier, barrier2, results1, results2, results3))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertEqual(len(results3), self.N)

    @classmethod
    def _test_timeout_f(cls, barrier, results):
        i = barrier.wait()
        if i == cls.N//2:
            # One thread is late!
            time.sleep(1.0)
        try:
            barrier.wait(0.5)
        except threading.BrokenBarrierError:
            results.append(True)

    def test_timeout(self):
        """
        Test wait(timeout)
        """
        results = self.DummyList()
        self.run_threads(self._test_timeout_f, (self.barrier, results))
        # Every party (including the late one) sees the broken barrier.
        self.assertEqual(len(results), self.barrier.parties)

    @classmethod
    def _test_default_timeout_f(cls, barrier, results):
        i = barrier.wait(cls.defaultTimeout)
        if i == cls.N//2:
            # One thread is later than the default timeout
            time.sleep(1.0)
        try:
            barrier.wait()
        except threading.BrokenBarrierError:
            results.append(True)

    def test_default_timeout(self):
        """
        Test the barrier's default timeout
        """
        barrier = self.Barrier(self.N, timeout=0.5)
        results = self.DummyList()
        self.run_threads(self._test_default_timeout_f, (barrier, results))
        self.assertEqual(len(results), barrier.parties)

    def test_single_thread(self):
        # A one-party barrier never blocks.
        b = self.Barrier(1)
        b.wait()
        b.wait()

    @classmethod
    def _test_thousand_f(cls, barrier, passes, conn, lock):
        # Each pass, all parties meet at the barrier then report the pass
        # number over a shared pipe (lock serializes the sends).
        for i in range(passes):
            barrier.wait()
            with lock:
                conn.send(i)

    def test_thousand(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        passes = 1000
        lock = self.Lock()
        conn, child_conn = self.Pipe(False)
        for j in range(self.N):
            p = self.Process(target=self._test_thousand_f,
                             args=(self.barrier, passes, child_conn, lock))
            p.start()
            self.addCleanup(p.join)

        # All N messages for pass i must arrive before any for pass i+1.
        for i in range(passes):
            for j in range(self.N):
                self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
    """Shared ctypes Value/RawValue objects."""

    ALLOWED_TYPES = ('processes',)

    # (typecode, initial value, replacement written by the child)
    codes_values = [
        ('i', 4343, 24234),
        ('d', 3.625, -4.25),
        ('h', -232, 234),
        ('c', latin('x'), latin('y'))
        ]

    def setUp(self):
        if not HAS_SHAREDCTYPES:
            self.skipTest("requires multiprocessing.sharedctypes")

    @classmethod
    def _test(cls, values):
        # Child: overwrite each shared value with its replacement.
        for sv, cv in zip(values, cls.codes_values):
            sv.value = cv[2]

    def test_value(self, raw=False):
        """A shared value keeps its initial value and reflects writes made
        in a child process."""
        if raw:
            values = [self.RawValue(code, value)
                      for code, value, _ in self.codes_values]
        else:
            values = [self.Value(code, value)
                      for code, value, _ in self.codes_values]

        for sv, cv in zip(values, self.codes_values):
            self.assertEqual(sv.value, cv[1])

        proc = self.Process(target=self._test, args=(values,))
        proc.daemon = True
        proc.start()
        proc.join()

        for sv, cv in zip(values, self.codes_values):
            self.assertEqual(sv.value, cv[2])

    def test_rawvalue(self):
        self.test_value(raw=True)

    def test_getobj_getlock(self):
        """get_lock()/get_obj() exist iff the value is lock-wrapped; an
        explicit lock is reused; a non-lock argument raises."""
        val1 = self.Value('i', 5)
        lock1 = val1.get_lock()
        obj1 = val1.get_obj()

        val2 = self.Value('i', 5, lock=None)
        lock2 = val2.get_lock()
        obj2 = val2.get_obj()

        lock = self.Lock()
        val3 = self.Value('i', 5, lock=lock)
        lock3 = val3.get_lock()
        obj3 = val3.get_obj()
        self.assertEqual(lock, lock3)

        arr4 = self.Value('i', 5, lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))

        self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')

        arr5 = self.RawValue('i', 5)
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
    """Shared ctypes Array/RawArray objects."""

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def f(cls, seq):
        # In-place cumulative sum (works on lists and shared arrays alike).
        for i in range(1, len(seq)):
            seq[i] += seq[i-1]

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_array(self, raw=False):
        """Shared arrays support len/index/slice access and mutations made
        in a child process are visible in the parent."""
        seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
        if raw:
            arr = self.RawArray('i', seq)
        else:
            arr = self.Array('i', seq)

        self.assertEqual(len(arr), len(seq))
        self.assertEqual(arr[3], seq[3])
        self.assertEqual(list(arr[2:7]), list(seq[2:7]))

        arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])

        self.assertEqual(list(arr[:]), seq)

        # Apply the same mutation to the local list and, in a child, to the
        # shared array; the two must end up identical.
        self.f(seq)

        p = self.Process(target=self.f, args=(arr,))
        p.daemon = True
        p.start()
        p.join()

        self.assertEqual(list(arr[:]), seq)

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_array_from_size(self):
        size = 10
        # Test for zeroing (see issue #11675).
        # The repetition below strengthens the test by increasing the chances
        # of previously allocated non-zero memory being used for the new array
        # on the 2nd and 3rd loops.
        for _ in range(3):
            arr = self.Array('i', size)
            self.assertEqual(len(arr), size)
            self.assertEqual(list(arr), [0] * size)
            arr[:] = range(10)
            self.assertEqual(list(arr), list(range(10)))
            del arr

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_rawarray(self):
        self.test_array(raw=True)

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_getobj_getlock_obj(self):
        """get_lock()/get_obj() exist iff the array is lock-wrapped; an
        explicit lock is reused; a non-lock argument raises."""
        arr1 = self.Array('i', list(range(10)))
        lock1 = arr1.get_lock()
        obj1 = arr1.get_obj()

        arr2 = self.Array('i', list(range(10)), lock=None)
        lock2 = arr2.get_lock()
        obj2 = arr2.get_obj()

        lock = self.Lock()
        arr3 = self.Array('i', list(range(10)), lock=lock)
        lock3 = arr3.get_lock()
        obj3 = arr3.get_obj()
        self.assertEqual(lock, lock3)

        arr4 = self.Array('i', range(10), lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))
        self.assertRaises(AttributeError,
                          self.Array, 'i', range(10), lock='notalock')

        arr5 = self.RawArray('i', range(10))
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
    """Manager proxy containers: list, dict and Namespace."""

    ALLOWED_TYPES = ('manager',)

    def test_list(self):
        """List proxies behave like lists: slicing, extend, *=, +, nesting."""
        a = self.list(list(range(10)))
        self.assertEqual(a[:], list(range(10)))

        b = self.list()
        self.assertEqual(b[:], [])

        b.extend(list(range(5)))
        self.assertEqual(b[:], list(range(5)))

        self.assertEqual(b[2], 2)
        self.assertEqual(b[2:10], [2,3,4])

        b *= 2
        self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])

        self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])

        self.assertEqual(a[:], list(range(10)))

        # A list built from proxies stores live references to them...
        d = [a, b]
        e = self.list(d)
        self.assertEqual(
            [element[:] for element in e],
            [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
            )

        # ...so a later mutation of the inner proxy is visible.
        f = self.list([a])
        a.append('hello')
        self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])

    def test_list_proxy_in_list(self):
        """Nested list proxies can be read and mutated independently."""
        a = self.list([self.list(range(3)) for _i in range(3)])
        self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)

        a[0][-1] = 55
        self.assertEqual(a[0][:], [0, 1, 55])
        for i in range(1, 3):
            self.assertEqual(a[i][:], [0, 1, 2])

        self.assertEqual(a[1].pop(), 2)
        self.assertEqual(len(a[1]), 2)
        for i in range(0, 3, 2):
            self.assertEqual(len(a[i]), 3)

        del a

        # A self-referential proxy must not break garbage collection.
        b = self.list()
        b.append(b)
        del b

    def test_dict(self):
        """Dict proxies support item assignment and the usual views."""
        d = self.dict()
        indices = list(range(65, 70))
        for i in indices:
            d[i] = chr(i)
        self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
        self.assertEqual(sorted(d.keys()), indices)
        self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
        self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])

    def test_dict_proxy_nested(self):
        """Proxies nested inside other proxies stay live and writable."""
        pets = self.dict(ferrets=2, hamsters=4)
        supplies = self.dict(water=10, feed=3)
        d = self.dict(pets=pets, supplies=supplies)

        self.assertEqual(supplies['water'], 10)
        self.assertEqual(d['supplies']['water'], 10)

        d['supplies']['blankets'] = 5
        self.assertEqual(supplies['blankets'], 5)
        self.assertEqual(d['supplies']['blankets'], 5)

        d['supplies']['water'] = 7
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(d['supplies']['water'], 7)

        # Dropping the local proxy references does not drop the data.
        del pets
        del supplies
        self.assertEqual(d['pets']['ferrets'], 2)
        d['supplies']['blankets'] = 11
        self.assertEqual(d['supplies']['blankets'], 11)

        pets = d['pets']
        supplies = d['supplies']
        supplies['water'] = 7
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(d['supplies']['water'], 7)

        # Clearing the outer dict leaves previously fetched proxies usable.
        d.clear()
        self.assertEqual(len(d), 0)
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(pets['hamsters'], 4)

        l = self.list([pets, supplies])
        l[0]['marmots'] = 1
        self.assertEqual(pets['marmots'], 1)
        self.assertEqual(l[0]['marmots'], 1)

        del pets
        del supplies
        self.assertEqual(l[0]['marmots'], 1)

        # A plain nested list is stored by value, not proxied.
        outer = self.list([[88, 99], l])
        self.assertIsInstance(outer[0], list)  # Not a ListProxy
        self.assertEqual(outer[-1][-1]['feed'], 3)

    def test_namespace(self):
        """Namespace proxies expose attributes; _underscored names hidden
        from repr."""
        n = self.Namespace()
        n.name = 'Bob'
        n.job = 'Builder'
        n._hidden = 'hidden'
        self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
        del n.job
        self.assertEqual(str(n), "Namespace(name='Bob')")
        self.assertTrue(hasattr(n, 'name'))
        self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
    """Return x squared after sleeping *wait* seconds (0 by default)."""
    time.sleep(wait)
    return x * x
def mul(x, y):
    """Return the product x*y (helper for the starmap tests)."""
    product = x * y
    return product
def raise_large_valuerror(wait):
    """Sleep *wait* seconds, then raise a ValueError carrying a ~1 MiB
    message (stresses exception transport between processes)."""
    time.sleep(wait)
    message = "x" * 1024**2
    raise ValueError(message)
def identity(x):
    """Return the argument unchanged."""
    return x
class CountedObject(object):
    """Object that keeps a class-level count of currently live instances."""

    n_instances = 0  # number of instances currently alive

    def __new__(cls):
        cls.n_instances += 1
        return super().__new__(cls)

    def __del__(self):
        type(self).n_instances -= 1
# Raised deliberately partway through iteration to test error handling
# in pool map/imap consumers.
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
    """Yield 0..total-1, raising SayWhenError once the index equals *when*.

    A *when* of -1 raises immediately, before anything is yielded; a *when*
    outside the range yields everything without raising.
    """
    if when == -1:
        raise SayWhenError("Somebody said when")
    for index in range(total):
        if index == when:
            raise SayWhenError("Somebody said when")
        yield index
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
    """Create the pool of 4 workers shared by every test in this class."""
    super().setUpClass()
    cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
    """Shut down the shared pool and drop the reference to it."""
    cls.pool.terminate()
    cls.pool.join()
    cls.pool = None
    super().tearDownClass()
def test_apply(self):
    """pool.apply mirrors a direct call, for positional and keyword args."""
    self.assertEqual(self.pool.apply(sqr, (5,)), sqr(5))
    self.assertEqual(self.pool.apply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
    """pool.map matches the builtin map, with and without a chunksize."""
    expected = [sqr(i) for i in range(10)]
    self.assertEqual(self.pool.map(sqr, list(range(10))), expected)
    expected = [sqr(i) for i in range(100)]
    self.assertEqual(self.pool.map(sqr, list(range(100)), chunksize=20),
                     expected)
def test_starmap(self):
    """pool.starmap matches itertools.starmap, with and without chunksize."""
    pairs = list(zip(range(10), range(9, -1, -1)))
    self.assertEqual(self.pool.starmap(mul, pairs),
                     list(itertools.starmap(mul, pairs)))
    pairs = list(zip(range(100), range(99, -1, -1)))
    self.assertEqual(self.pool.starmap(mul, pairs, chunksize=20),
                     list(itertools.starmap(mul, pairs)))
def test_starmap_async(self):
    """starmap_async().get() matches itertools.starmap."""
    pairs = list(zip(range(100), range(99, -1, -1)))
    expected = list(itertools.starmap(mul, pairs))
    self.assertEqual(self.pool.starmap_async(mul, pairs).get(), expected)
def test_map_async(self):
    """map_async().get() matches the builtin map."""
    expected = [sqr(i) for i in range(10)]
    self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
                     expected)
def test_map_async_callbacks(self):
    """On success the callback receives the result list; on failure the
    error_callback receives the exception instance."""
    call_args = self.manager.list() if self.TYPE == 'manager' else []
    self.pool.map_async(int, ['1'],
                        callback=call_args.append,
                        error_callback=call_args.append).wait()
    self.assertEqual(1, len(call_args))
    self.assertEqual([1], call_args[0])
    # int('a') raises ValueError, which is routed to error_callback.
    self.pool.map_async(int, ['a'],
                        callback=call_args.append,
                        error_callback=call_args.append).wait()
    self.assertEqual(2, len(call_args))
    self.assertIsInstance(call_args[1], ValueError)
def test_map_unplicklable(self):
    # Issue #19425 -- failure to pickle should not cause a hang
    if self.TYPE == 'threads':
        self.skipTest('test not appropriate for {}'.format(self.TYPE))
    class A(object):
        # Pickling an A() always fails, on purpose.
        def __reduce__(self):
            raise RuntimeError('cannot pickle')
    # The pickling error must surface as an exception, not a deadlock.
    with self.assertRaises(RuntimeError):
        self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
    """map_async on an empty iterable with an explicit chunksize must not
    stall (result arrives well before TIMEOUT1)."""
    result = self.pool.map_async(sqr, [], chunksize=1)
    try:
        result.get(timeout=TIMEOUT1)
    except multiprocessing.TimeoutError:
        self.fail("pool.map_async with chunksize stalled on null list")
def test_map_handle_iterable_exception(self):
    """An exception raised by the input iterable propagates out of map()
    and leaves the pool reusable."""
    if self.TYPE == 'manager':
        self.skipTest('test not appropriate for {}'.format(self.TYPE))

    # SayWhenError seen at the very first of the iterable
    with self.assertRaises(SayWhenError):
        self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
    # again, make sure it's reentrant
    with self.assertRaises(SayWhenError):
        self.pool.map(sqr, exception_throwing_generator(1, -1), 1)

    # Exception raised partway through the iterable.
    with self.assertRaises(SayWhenError):
        self.pool.map(sqr, exception_throwing_generator(10, 3), 1)

    class SpecialIterable:
        # Claims a length of 1 but raises on the first __next__; exercises
        # map()'s len()-based fast path.
        def __iter__(self):
            return self
        def __next__(self):
            raise SayWhenError
        def __len__(self):
            return 1
    with self.assertRaises(SayWhenError):
        self.pool.map(sqr, SpecialIterable(), 1)
    with self.assertRaises(SayWhenError):
        self.pool.map(sqr, SpecialIterable(), 1)
def test_async(self):
    """apply_async returns at once; get() blocks for the task duration."""
    res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
    get = TimingWrapper(res.get)
    self.assertEqual(get(), 49)
    self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
    """get(timeout) raises TimeoutError if the task is still running."""
    # Task sleeps longer than the get() timeout on purpose.
    res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
    get = TimingWrapper(res.get)
    self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
    self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
    """imap yields results lazily and in order, with and without chunking."""
    self.assertEqual(list(self.pool.imap(sqr, list(range(10)))),
                     [sqr(i) for i in range(10)])

    for size, chunksize in ((10, None), (1000, 100)):
        if chunksize is None:
            it = self.pool.imap(sqr, list(range(size)))
        else:
            it = self.pool.imap(sqr, list(range(size)), chunksize=chunksize)
        for i in range(size):
            self.assertEqual(next(it), i*i)
        self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
    """Input-iterable failures propagate from imap(), whether the error
    occurs at the start, mid-stream, or at a chunk boundary."""
    if self.TYPE == 'manager':
        self.skipTest('test not appropriate for {}'.format(self.TYPE))

    # SayWhenError seen at the very first of the iterable
    it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
    self.assertRaises(SayWhenError, it.__next__)

    # again, make sure it's reentrant
    it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
    self.assertRaises(SayWhenError, it.__next__)

    it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
    for i in range(3):
        self.assertEqual(next(it), i*i)
    self.assertRaises(SayWhenError, it.__next__)

    # SayWhenError seen at start of problematic chunk's results
    it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
    for i in range(6):
        self.assertEqual(next(it), i*i)
    self.assertRaises(SayWhenError, it.__next__)

    it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
    for i in range(4):
        self.assertEqual(next(it), i*i)
    self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
    """imap_unordered() yields every result exactly once; only ordering
    is unspecified, so compare via sorted()."""
    it = self.pool.imap_unordered(sqr, list(range(1000)))
    self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))

    it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=53)
    self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
    """Input-iterable failures propagate from imap_unordered() too;
    results seen before the error must be valid (order unknown)."""
    if self.TYPE == 'manager':
        self.skipTest('test not appropriate for {}'.format(self.TYPE))

    # SayWhenError seen at the very first of the iterable
    it = self.pool.imap_unordered(sqr,
                                  exception_throwing_generator(1, -1),
                                  1)
    self.assertRaises(SayWhenError, it.__next__)

    # again, make sure it's reentrant
    it = self.pool.imap_unordered(sqr,
                                  exception_throwing_generator(1, -1),
                                  1)
    self.assertRaises(SayWhenError, it.__next__)

    it = self.pool.imap_unordered(sqr,
                                  exception_throwing_generator(10, 3),
                                  1)
    expected_values = list(map(sqr, list(range(10))))
    with self.assertRaises(SayWhenError):
        # imap_unordered makes it difficult to anticipate the SayWhenError
        for i in range(10):
            value = next(it)
            self.assertIn(value, expected_values)
            expected_values.remove(value)

    it = self.pool.imap_unordered(sqr,
                                  exception_throwing_generator(20, 7),
                                  2)
    expected_values = list(map(sqr, list(range(20))))
    with self.assertRaises(SayWhenError):
        for i in range(20):
            value = next(it)
            self.assertIn(value, expected_values)
            expected_values.remove(value)
def test_make_pool(self):
    """Pool sizes <= 0 are rejected (wrapped as RemoteError for manager
    pools); a valid size creates exactly that many workers."""
    expected_error = (RemoteError if self.TYPE == 'manager'
                      else ValueError)

    self.assertRaises(expected_error, self.Pool, -1)
    self.assertRaises(expected_error, self.Pool, 0)

    if self.TYPE != 'manager':
        p = self.Pool(3)
        try:
            self.assertEqual(3, len(p._pool))
        finally:
            p.close()
            p.join()
def test_terminate(self):
    """terminate() must abort outstanding tasks instead of draining the
    (deliberately huge) task queue."""
    result = self.pool.map_async(
        time.sleep, [0.1 for i in range(10000)], chunksize=1
        )
    self.pool.terminate()
    join = TimingWrapper(self.pool.join)
    join()
    # Sanity check the pool didn't wait for all tasks to finish
    self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
    """Every map variant must accept an empty iterable and return an
    empty result rather than hanging."""
    # See Issue 12157
    p = self.Pool(1)

    self.assertEqual(p.map(sqr, []), [])
    self.assertEqual(list(p.imap(sqr, [])), [])
    self.assertEqual(list(p.imap_unordered(sqr, [])), [])
    self.assertEqual(p.map_async(sqr, []).get(), [])

    p.close()
    p.join()
def test_context(self):
    """Using the pool as a context manager terminates it on exit, so
    further submissions raise ValueError."""
    if self.TYPE == 'processes':
        L = list(range(10))
        expected = [sqr(i) for i in L]
        with self.Pool(2) as p:
            r = p.map_async(sqr, L)
            self.assertEqual(r.get(), expected)
        self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
    # NOTE: the source text of the next line (including its trailing
    # comment) is asserted on verbatim by test_traceback below;
    # do not change it.
    raise RuntimeError(123) # some comment
def test_traceback(self):
    """The child-process traceback must be embedded (as RemoteTraceback)
    in the exception re-raised in the parent, and appear on stderr."""
    # We want ensure that the traceback from the child process is
    # contained in the traceback raised in the main process.
    if self.TYPE == 'processes':
        with self.Pool(1) as p:
            try:
                p.apply(self._test_traceback)
            except Exception as e:
                exc = e
            else:
                self.fail('expected RuntimeError')
        self.assertIs(type(exc), RuntimeError)
        self.assertEqual(exc.args, (123,))
        cause = exc.__cause__
        self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
        self.assertIn('raise RuntimeError(123) # some comment', cause.tb)

        with test.support.captured_stderr() as f1:
            try:
                raise exc
            except RuntimeError:
                sys.excepthook(*sys.exc_info())
            self.assertIn('raise RuntimeError(123) # some comment',
                          f1.getvalue())
        # _helper_reraises_exception should not make the error
        # a remote exception
        with self.Pool(1) as p:
            try:
                p.map(sqr, exception_throwing_generator(1, -1), 1)
            except Exception as e:
                exc = e
            else:
                self.fail('expected SayWhenError')
            self.assertIs(type(exc), SayWhenError)
            self.assertIs(exc.__cause__, None)
@classmethod
def _test_wrapped_exception(cls):
    # Worker-side helper: always fails, for test_wrapped_exception.
    raise RuntimeError('foo')
def test_wrapped_exception(self):
    """The worker's exception type must be preserved (not wrapped in a
    pool-specific exception) when re-raised by apply()."""
    # Issue #20980: Should not wrap exception when using thread pool
    with self.Pool(1) as p:
        with self.assertRaises(RuntimeError):
            p.apply(self._test_wrapped_exception)
def test_map_no_failfast(self):
    """A failing map() must not deadlock join(); verify by timing that
    all jobs were actually waited for."""
    # Issue #23992: the fail-fast behaviour when an exception is raised
    # during map() would make Pool.join() deadlock, because a worker
    # process would fill the result queue (after the result handler thread
    # terminated, hence not draining it anymore).

    t_start = time.time()

    with self.assertRaises(ValueError):
        with self.Pool(2) as p:
            try:
                p.map(raise_large_valuerror, [0, 1])
            finally:
                time.sleep(0.5)
                p.close()
                p.join()

    # check that we indeed waited for all jobs
    self.assertGreater(time.time() - t_start, 0.9)
def test_release_task_refs(self):
    """Task arguments and results must be released (weakrefs go dead,
    no CountedObject instances survive) once map() completes."""
    # Issue #29861: task arguments and results should not be kept
    # alive after we are done with them.
    objs = [CountedObject() for i in range(10)]
    refs = [weakref.ref(o) for o in objs]
    self.pool.map(identity, objs)

    del objs
    time.sleep(DELTA)  # let threaded cleanup code run
    self.assertEqual(set(wr() for wr in refs), {None})
    # With a process pool, copies of the objects are returned, check
    # they were released too.
    self.assertEqual(CountedObject.n_instances, 0)
def raising():
    """Helper for the pool error-callback tests: always fails with a
    KeyError carrying the message 'key'."""
    error = KeyError("key")
    raise error
def unpickleable_result():
    """Helper for the pool tests: return a lambda, which cannot be
    pickled for transport back from a worker process."""
    result = lambda: 42
    return result
class _TestPoolWorkerErrors(BaseTestCase):
    """Errors raised in — or unpicklable values returned by — pool workers."""
    ALLOWED_TYPES = ('processes', )

    def test_async_error_callback(self):
        """error_callback receives the exception raised in the worker."""
        p = multiprocessing.Pool(2)

        scratchpad = [None]
        def errback(exc):
            scratchpad[0] = exc

        res = p.apply_async(raising, error_callback=errback)
        self.assertRaises(KeyError, res.get)
        self.assertTrue(scratchpad[0])
        self.assertIsInstance(scratchpad[0], KeyError)

        p.close()
        p.join()

    def test_unpickleable_result(self):
        """An unpicklable result surfaces as MaybeEncodingError without
        losing worker processes."""
        from multiprocessing.pool import MaybeEncodingError
        p = multiprocessing.Pool(2)

        # Make sure we don't lose pool processes because of encoding errors.
        for iteration in range(20):

            scratchpad = [None]
            def errback(exc):
                scratchpad[0] = exc

            res = p.apply_async(unpickleable_result, error_callback=errback)
            self.assertRaises(MaybeEncodingError, res.get)
            wrapped = scratchpad[0]
            self.assertTrue(wrapped)
            self.assertIsInstance(scratchpad[0], MaybeEncodingError)
            self.assertIsNotNone(wrapped.exc)
            self.assertIsNotNone(wrapped.value)

        p.close()
        p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
    """Worker-replacement behaviour when maxtasksperchild is set."""
    ALLOWED_TYPES = ('processes', )

    def test_pool_worker_lifetime(self):
        """Workers retire after maxtasksperchild tasks; after refilling,
        the pool holds an entirely new set of pids."""
        p = multiprocessing.Pool(3, maxtasksperchild=10)
        self.assertEqual(3, len(p._pool))
        origworkerpids = [w.pid for w in p._pool]
        # Run many tasks so each worker gets replaced (hopefully)
        results = []
        for i in range(100):
            results.append(p.apply_async(sqr, (i, )))
        # Fetch the results and verify we got the right answers,
        # also ensuring all the tasks have completed.
        for (j, res) in enumerate(results):
            self.assertEqual(res.get(), sqr(j))
        # Refill the pool
        p._repopulate_pool()
        # Wait until all workers are alive
        # (countdown * DELTA = 5 seconds max startup process time)
        countdown = 50
        while countdown and not all(w.is_alive() for w in p._pool):
            countdown -= 1
            time.sleep(DELTA)
        finalworkerpids = [w.pid for w in p._pool]
        # All pids should be assigned.  See issue #7805.
        self.assertNotIn(None, origworkerpids)
        self.assertNotIn(None, finalworkerpids)
        # Finally, check that the worker pids have changed
        self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
        p.close()
        p.join()

    def test_pool_worker_lifetime_early_close(self):
        """Closing before all tasks finish must not hang join()."""
        # Issue #10332: closing a pool whose workers have limited lifetimes
        # before all the tasks completed would make join() hang.
        p = multiprocessing.Pool(3, maxtasksperchild=1)
        results = []
        for i in range(6):
            results.append(p.apply_async(sqr, (i, 0.3)))
        p.close()
        p.join()
        # check the results
        for (j, res) in enumerate(results):
            self.assertEqual(res.get(), sqr(j))
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
    """Target class served by the customized-manager tests.

    Registered with MyManager both with default exposure ('Foo': public
    methods only) and with an explicit exposed tuple ('Bar': f and _h).
    """

    def f(self):
        """Return a fixed marker string identifying this method."""
        return 'f()'

    def g(self):
        """Always fail — used to check remote exception propagation."""
        raise ValueError

    def _h(self):
        """Underscore-prefixed method, hidden unless explicitly exposed."""
        return '_h()'
def baz():
    """Generate the first ten perfect squares: 0, 1, 4, ..., 81."""
    yield from (n * n for n in range(10))
class IteratorProxy(BaseProxy):
    """Proxy exposing only __next__, making a remote object iterable
    from the client side (used for the 'baz' registration below)."""
    _exposed_ = ('__next__',)
    def __iter__(self):
        return self
    def __next__(self):
        # Forward the call to the referent living in the manager process.
        return self._callmethod('__next__')
class MyManager(BaseManager):
    """Custom manager exposing Foo, Bar and baz for _TestMyManager."""
    pass

# 'Foo' exposes public methods only (default exposure); 'Bar' explicitly
# exposes f and the underscore method _h; 'baz' wraps its generator in
# IteratorProxy so the client can iterate it remotely.
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
    """Exercise the custom MyManager through several lifecycle styles."""
    ALLOWED_TYPES = ('manager',)

    def test_mymanager(self):
        """Explicit start()/shutdown() lifecycle."""
        manager = MyManager()
        manager.start()
        self.common(manager)
        manager.shutdown()

        # If the manager process exited cleanly then the exitcode
        # will be zero.  Otherwise (after a short timeout)
        # terminate() is used, resulting in an exitcode of -SIGTERM.
        self.assertEqual(manager._process.exitcode, 0)

    def test_mymanager_context(self):
        """Context manager starts and shuts the manager down."""
        with MyManager() as manager:
            self.common(manager)
        self.assertEqual(manager._process.exitcode, 0)

    def test_mymanager_context_prestarted(self):
        """Entering an already-started manager must not restart it."""
        manager = MyManager()
        manager.start()
        with manager:
            self.common(manager)
        self.assertEqual(manager._process.exitcode, 0)

    def common(self, manager):
        """Shared assertions about proxy method exposure and behaviour."""
        foo = manager.Foo()
        bar = manager.Bar()
        baz = manager.baz()

        foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
        bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]

        # default exposure hides _h; explicit exposure hides g
        self.assertEqual(foo_methods, ['f', 'g'])
        self.assertEqual(bar_methods, ['f', '_h'])

        self.assertEqual(foo.f(), 'f()')
        self.assertRaises(ValueError, foo.g)
        self.assertEqual(foo._callmethod('f'), 'f()')
        self.assertRaises(RemoteError, foo._callmethod, '_h')

        self.assertEqual(bar.f(), 'f()')
        self.assertEqual(bar._h(), '_h()')
        self.assertEqual(bar._callmethod('f'), 'f()')
        self.assertEqual(bar._callmethod('_h'), '_h()')

        self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
# Module-level queue served by QueueManager below; shared with remote clients.
_queue = pyqueue.Queue()
def get_queue():
    """Return the module-level queue (registered with QueueManager)."""
    return _queue
class QueueManager(BaseManager):
    '''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)

class QueueManager2(BaseManager):
    '''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')

# Use xmlrpclib instead of pickle for serialization in the remote-manager
# tests below (restricts the set of transferable types).
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
    """Connect to a manager over a real socket, serializing via xmlrpclib."""
    ALLOWED_TYPES = ('manager',)
    # assorted values, including non-ASCII text and bytes
    values = ['hello world', None, True, 2.25,
              'hall\xe5 v\xe4rlden',
              '\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
              b'hall\xe5 v\xe4rlden',
             ]
    result = values[:]

    @classmethod
    def _putter(cls, address, authkey):
        """Child process: connect to the server manager and put values."""
        manager = QueueManager2(
            address=address, authkey=authkey, serializer=SERIALIZER
            )
        manager.connect()
        queue = manager.get_queue()
        # Note that xmlrpclib will deserialize object as a list not a tuple
        queue.put(tuple(cls.values))

    def test_remote(self):
        authkey = os.urandom(32)

        manager = QueueManager(
            address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
            )
        manager.start()

        p = self.Process(target=self._putter, args=(manager.address, authkey))
        p.daemon = True
        p.start()

        manager2 = QueueManager2(
            address=manager.address, authkey=authkey, serializer=SERIALIZER
            )
        manager2.connect()
        queue = manager2.get_queue()

        self.assertEqual(queue.get(), self.result)

        # Because we are using xmlrpclib for serialization instead of
        # pickle this will cause a serialization error.
        self.assertRaises(Exception, queue.put, time.sleep)

        # Make queue finalizer run before the server is stopped
        del queue
        manager.shutdown()
class _TestManagerRestart(BaseTestCase):
    """Restarting a manager on the same address shortly after shutdown."""

    @classmethod
    def _putter(cls, address, authkey):
        """Child process: connect and enqueue a greeting."""
        manager = QueueManager(
            address=address, authkey=authkey, serializer=SERIALIZER)
        manager.connect()
        queue = manager.get_queue()
        queue.put('hello world')

    def test_rapid_restart(self):
        authkey = os.urandom(32)
        manager = QueueManager(
            address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
        srvr = manager.get_server()
        addr = srvr.address
        # Close the connection.Listener socket which gets opened as a part
        # of manager.get_server(). It's not needed for the test.
        srvr.listener.close()
        manager.start()

        p = self.Process(target=self._putter, args=(manager.address, authkey))
        p.start()
        p.join()
        queue = manager.get_queue()
        self.assertEqual(queue.get(), 'hello world')
        del queue
        manager.shutdown()

        manager = QueueManager(
            address=addr, authkey=authkey, serializer=SERIALIZER)
        try:
            manager.start()
        except OSError as e:
            if e.errno != errno.EADDRINUSE:
                raise
            # Retry after some time, in case the old socket was lingering
            # (sporadic failure on buildbots)
            time.sleep(1.0)
            manager = QueueManager(
                address=addr, authkey=authkey, serializer=SERIALIZER)
        manager.shutdown()
#
#
#
# Sentinel message (empty bytes) telling the echo child to leave its recv loop.
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
    """Connection/Pipe behaviour: send/recv of objects and bytes, polling,
    fd transfer via reduction, and context-manager semantics."""

    ALLOWED_TYPES = ('processes', 'threads')

    @classmethod
    def _echo(cls, conn):
        # Echo every message back until the SENTINEL arrives.
        for msg in iter(conn.recv_bytes, SENTINEL):
            conn.send_bytes(msg)
        conn.close()

    def test_connection(self):
        conn, child_conn = self.Pipe()

        p = self.Process(target=self._echo, args=(child_conn,))
        p.daemon = True
        p.start()

        seq = [1, 2.25, None]
        msg = latin('hello world')
        longmsg = msg * 10
        arr = array.array('i', list(range(4)))

        if self.TYPE == 'processes':
            self.assertEqual(type(conn.fileno()), int)

        self.assertEqual(conn.send(seq), None)
        self.assertEqual(conn.recv(), seq)

        self.assertEqual(conn.send_bytes(msg), None)
        self.assertEqual(conn.recv_bytes(), msg)

        if self.TYPE == 'processes':
            # recv_bytes_into: whole buffer from offset 0
            buffer = array.array('i', [0]*10)
            expected = list(arr) + [0] * (10 - len(arr))
            self.assertEqual(conn.send_bytes(arr), None)
            self.assertEqual(conn.recv_bytes_into(buffer),
                             len(arr) * buffer.itemsize)
            self.assertEqual(list(buffer), expected)

            # recv_bytes_into with a byte offset
            buffer = array.array('i', [0]*10)
            expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
            self.assertEqual(conn.send_bytes(arr), None)
            self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
                             len(arr) * buffer.itemsize)
            self.assertEqual(list(buffer), expected)

            # a too-small buffer raises BufferTooShort carrying the message
            buffer = bytearray(latin(' ' * 40))
            self.assertEqual(conn.send_bytes(longmsg), None)
            try:
                res = conn.recv_bytes_into(buffer)
            except multiprocessing.BufferTooShort as e:
                self.assertEqual(e.args, (longmsg,))
            else:
                self.fail('expected BufferTooShort, got %s' % res)

        poll = TimingWrapper(conn.poll)

        self.assertEqual(poll(), False)
        self.assertTimingAlmostEqual(poll.elapsed, 0)

        self.assertEqual(poll(-1), False)
        self.assertTimingAlmostEqual(poll.elapsed, 0)

        self.assertEqual(poll(TIMEOUT1), False)
        self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)

        conn.send(None)
        time.sleep(.1)

        self.assertEqual(poll(TIMEOUT1), True)
        self.assertTimingAlmostEqual(poll.elapsed, 0)

        self.assertEqual(conn.recv(), None)

        really_big_msg = latin('X') * (1024 * 1024 * 16)  # 16Mb
        conn.send_bytes(really_big_msg)
        self.assertEqual(conn.recv_bytes(), really_big_msg)

        conn.send_bytes(SENTINEL)  # tell child to quit
        child_conn.close()

        if self.TYPE == 'processes':
            self.assertEqual(conn.readable, True)
            self.assertEqual(conn.writable, True)
            self.assertRaises(EOFError, conn.recv)
            self.assertRaises(EOFError, conn.recv_bytes)

        p.join()

    def test_duplex_false(self):
        """A simplex pipe enforces direction: the reader cannot send and
        the writer cannot recv or poll."""
        reader, writer = self.Pipe(duplex=False)
        self.assertEqual(writer.send(1), None)
        self.assertEqual(reader.recv(), 1)
        if self.TYPE == 'processes':
            self.assertEqual(reader.readable, True)
            self.assertEqual(reader.writable, False)
            self.assertEqual(writer.readable, False)
            self.assertEqual(writer.writable, True)
            self.assertRaises(OSError, reader.send, 2)
            self.assertRaises(OSError, writer.recv)
            self.assertRaises(OSError, writer.poll)

    def test_spawn_close(self):
        # We test that a pipe connection can be closed by parent
        # process immediately after child is spawned.  On Windows this
        # would have sometimes failed on old versions because
        # child_conn would be closed before the child got a chance to
        # duplicate it.
        conn, child_conn = self.Pipe()

        p = self.Process(target=self._echo, args=(child_conn,))
        p.daemon = True
        p.start()
        child_conn.close()  # this might complete before child initializes

        msg = latin('hello')
        conn.send_bytes(msg)
        self.assertEqual(conn.recv_bytes(), msg)

        conn.send_bytes(SENTINEL)
        conn.close()
        p.join()

    def test_sendbytes(self):
        """send_bytes() offset/size arguments slice the message; invalid
        combinations raise ValueError."""
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        msg = latin('abcdefghijklmnopqrstuvwxyz')
        a, b = self.Pipe()

        a.send_bytes(msg)
        self.assertEqual(b.recv_bytes(), msg)

        a.send_bytes(msg, 5)
        self.assertEqual(b.recv_bytes(), msg[5:])

        a.send_bytes(msg, 7, 8)
        self.assertEqual(b.recv_bytes(), msg[7:7+8])

        a.send_bytes(msg, 26)
        self.assertEqual(b.recv_bytes(), latin(''))

        a.send_bytes(msg, 26, 0)
        self.assertEqual(b.recv_bytes(), latin(''))

        self.assertRaises(ValueError, a.send_bytes, msg, 27)
        self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
        self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
        self.assertRaises(ValueError, a.send_bytes, msg, -1)
        self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)

    @classmethod
    def _is_fd_assigned(cls, fd):
        # True if *fd* refers to an open file descriptor.
        try:
            os.fstat(fd)
        except OSError as e:
            if e.errno == errno.EBADF:
                return False
            raise
        else:
            return True

    @classmethod
    def _writefd(cls, conn, data, create_dummy_fds=False):
        # Child side of the fd-transfer tests: receive an fd over *conn*
        # and write *data* to it.  create_dummy_fds fills fds < 256 so the
        # received descriptor is forced above 256 (large-fd test).
        if create_dummy_fds:
            for i in range(0, 256):
                if not cls._is_fd_assigned(i):
                    os.dup2(conn.fileno(), i)
        fd = reduction.recv_handle(conn)
        if msvcrt:
            fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
        os.write(fd, data)
        os.close(fd)

    @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
    def test_fd_transfer(self):
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)

        p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
        p.daemon = True
        p.start()
        self.addCleanup(test.support.unlink, test.support.TESTFN)
        with open(test.support.TESTFN, "wb") as f:
            fd = f.fileno()
            if msvcrt:
                fd = msvcrt.get_osfhandle(fd)
            reduction.send_handle(conn, fd, p.pid)
        p.join()
        with open(test.support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"foo")

    @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
    @unittest.skipIf(sys.platform == "win32",
                     "test semantics don't make sense on Windows")
    @unittest.skipIf(MAXFD <= 256,
                     "largest assignable fd number is too small")
    @unittest.skipUnless(hasattr(os, "dup2"),
                         "test needs os.dup2()")
    def test_large_fd_transfer(self):
        # With fd > 256 (issue #11657)
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)

        p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
        p.daemon = True
        p.start()
        self.addCleanup(test.support.unlink, test.support.TESTFN)
        with open(test.support.TESTFN, "wb") as f:
            fd = f.fileno()
            for newfd in range(256, MAXFD):
                if not self._is_fd_assigned(newfd):
                    break
            else:
                self.fail("could not find an unassigned large file descriptor")
            os.dup2(fd, newfd)
            try:
                reduction.send_handle(conn, newfd, p.pid)
            finally:
                os.close(newfd)
        p.join()
        with open(test.support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"bar")

    @classmethod
    def _send_data_without_fd(self, conn):
        # Write a byte with no accompanying ancillary (fd) data.
        os.write(conn.fileno(), b"\0")

    @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
    @unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
    def test_missing_fd_transfer(self):
        # Check that exception is raised when received data is not
        # accompanied by a file descriptor in ancillary data.
        if self.TYPE != 'processes':
            self.skipTest("only makes sense with processes")
        conn, child_conn = self.Pipe(duplex=True)

        p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
        p.daemon = True
        p.start()
        self.assertRaises(RuntimeError, reduction.recv_handle, conn)
        p.join()

    def test_context(self):
        """Connections used as context managers close on exit."""
        a, b = self.Pipe()

        with a, b:
            a.send(1729)
            self.assertEqual(b.recv(), 1729)
            if self.TYPE == 'processes':
                self.assertFalse(a.closed)
                self.assertFalse(b.closed)

        if self.TYPE == 'processes':
            self.assertTrue(a.closed)
            self.assertTrue(b.closed)
            self.assertRaises(OSError, a.recv)
            self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
    """Listener binding rules and context-manager semantics."""
    ALLOWED_TYPES = ('processes',)

    def test_multiple_bind(self):
        """Binding a second Listener to an in-use address must fail."""
        for family in self.connection.families:
            l = self.connection.Listener(family=family)
            self.addCleanup(l.close)
            self.assertRaises(OSError, self.connection.Listener,
                              l.address, family)

    def test_context(self):
        """Listener, Client and accepted connection all support `with`."""
        with self.connection.Listener() as l:
            with self.connection.Client(l.address) as c:
                with l.accept() as d:
                    c.send(1729)
                    self.assertEqual(d.recv(), 1729)

        if self.TYPE == 'processes':
            self.assertRaises(OSError, l.accept)
class _TestListenerClient(BaseTestCase):
    """Listener/Client handshakes across processes or threads."""
    ALLOWED_TYPES = ('processes', 'threads')

    @classmethod
    def _test(cls, address):
        # Child side: connect, send a greeting, disconnect.
        conn = cls.connection.Client(address)
        conn.send('hello')
        conn.close()

    def test_listener_client(self):
        for family in self.connection.families:
            l = self.connection.Listener(family=family)
            p = self.Process(target=self._test, args=(l.address,))
            p.daemon = True
            p.start()
            conn = l.accept()
            self.assertEqual(conn.recv(), 'hello')
            p.join()
            l.close()

    def test_issue14725(self):
        """accept() must still work when the client has already written
        and closed before we accept (Windows ERROR_NO_DATA case)."""
        l = self.connection.Listener()
        p = self.Process(target=self._test, args=(l.address,))
        p.daemon = True
        p.start()
        time.sleep(1)
        # On Windows the client process should by now have connected,
        # written data and closed the pipe handle by now.  This causes
        # ConnectNamdedPipe() to fail with ERROR_NO_DATA.  See Issue
        # 14725.
        conn = l.accept()
        self.assertEqual(conn.recv(), 'hello')
        conn.close()
        p.join()
        l.close()

    def test_issue16955(self):
        """poll() on the client side must see bytes sent by the server."""
        for fam in self.connection.families:
            l = self.connection.Listener(family=fam)
            c = self.connection.Client(l.address)
            a = l.accept()
            a.send_bytes(b"hello")
            self.assertTrue(c.poll(1))
            a.close()
            c.close()
            l.close()
class _TestPoll(BaseTestCase):
    """Connection.poll() semantics, including zero-length messages and
    message-boundary preservation."""
    ALLOWED_TYPES = ('processes', 'threads')

    def test_empty_string(self):
        """An empty byte message still makes poll() return True."""
        a, b = self.Pipe()
        self.assertEqual(a.poll(), False)
        b.send_bytes(b'')
        self.assertEqual(a.poll(), True)
        self.assertEqual(a.poll(), True)

    @classmethod
    def _child_strings(cls, conn, strings):
        # Send each message with a short gap so the parent polls between them.
        for s in strings:
            time.sleep(0.1)
            conn.send_bytes(s)
        conn.close()

    def test_strings(self):
        strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
        a, b = self.Pipe()
        p = self.Process(target=self._child_strings, args=(b, strings))
        p.start()

        for s in strings:
            # up to 200 * 0.01s = 2s for each message to arrive
            for i in range(200):
                if a.poll(0.01):
                    break
            x = a.recv_bytes()
            self.assertEqual(s, x)

        p.join()

    @classmethod
    def _child_boundaries(cls, r):
        # Polling may "pull" a message in to the child process, but we
        # don't want it to pull only part of a message, as that would
        # corrupt the pipe for any other processes which might later
        # read from it.
        r.poll(5)

    def test_boundaries(self):
        r, w = self.Pipe(False)
        p = self.Process(target=self._child_boundaries, args=(r,))
        p.start()
        time.sleep(2)
        L = [b"first", b"second"]
        for obj in L:
            w.send_bytes(obj)
        w.close()
        p.join()
        # whichever message the child did not consume must still be whole
        self.assertIn(r.recv_bytes(), L)

    @classmethod
    def _child_dont_merge(cls, b):
        b.send_bytes(b'a')
        b.send_bytes(b'b')
        b.send_bytes(b'cd')

    def test_dont_merge(self):
        """Consecutive messages must stay distinct — never coalesced."""
        a, b = self.Pipe()
        self.assertEqual(a.poll(0.0), False)
        self.assertEqual(a.poll(0.1), False)

        p = self.Process(target=self._child_dont_merge, args=(b,))
        p.start()

        self.assertEqual(a.recv_bytes(), b'a')
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.recv_bytes(), b'b')
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(0.0), True)
        self.assertEqual(a.recv_bytes(), b'cd')

        p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
    """Sending Connection and socket objects between processes."""

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def tearDownClass(cls):
        from multiprocessing import resource_sharer
        resource_sharer.stop(timeout=5)

    @classmethod
    def _listener(cls, conn, families):
        # Accept one connection per family, forwarding each accepted
        # Connection object back to the parent over *conn*.
        for fam in families:
            l = cls.connection.Listener(family=fam)
            conn.send(l.address)
            new_conn = l.accept()
            conn.send(new_conn)
            new_conn.close()
            l.close()

        # Then do the same once with a raw socket.
        l = socket.socket()
        l.bind((test.support.HOST, 0))
        l.listen()
        conn.send(l.getsockname())
        new_conn, addr = l.accept()
        conn.send(new_conn)
        new_conn.close()
        l.close()

        conn.recv()

    @classmethod
    def _remote(cls, conn):
        # Connect to each (address, msg) pair and echo msg upper-cased.
        for (address, msg) in iter(conn.recv, None):
            client = cls.connection.Client(address)
            client.send(msg.upper())
            client.close()

        # Final round over a raw socket instead of a Connection.
        address, msg = conn.recv()
        client = socket.socket()
        client.connect(address)
        client.sendall(msg.upper())
        client.close()

        conn.close()

    def test_pickling(self):
        families = self.connection.families

        lconn, lconn0 = self.Pipe()
        lp = self.Process(target=self._listener, args=(lconn0, families))
        lp.daemon = True
        lp.start()
        lconn0.close()

        rconn, rconn0 = self.Pipe()
        rp = self.Process(target=self._remote, args=(rconn0,))
        rp.daemon = True
        rp.start()
        rconn0.close()

        for fam in families:
            msg = ('This connection uses family %s' % fam).encode('ascii')
            address = lconn.recv()
            rconn.send((address, msg))
            new_conn = lconn.recv()
            self.assertEqual(new_conn.recv(), msg.upper())

        rconn.send(None)

        msg = latin('This connection uses a normal socket')
        address = lconn.recv()
        rconn.send((address, msg))
        new_conn = lconn.recv()
        buf = []
        while True:
            s = new_conn.recv(100)
            if not s:
                break
            buf.append(s)
        buf = b''.join(buf)
        self.assertEqual(buf, msg.upper())
        new_conn.close()

        lconn.send(None)

        rconn.close()
        lconn.close()

        lp.join()
        rp.join()

    @classmethod
    def child_access(cls, conn):
        # Receive a writer, reply through it; receive a reader, echo
        # its message doubled back over *conn*.
        w = conn.recv()
        w.send('all is well')
        w.close()

        r = conn.recv()
        msg = r.recv()
        conn.send(msg*2)

        conn.close()

    def test_access(self):
        # On Windows, if we do not specify a destination pid when
        # using DupHandle then we need to be careful to use the
        # correct access flags for DuplicateHandle(), or else
        # DupHandle.detach() will raise PermissionError.  For example,
        # for a read only pipe handle we should use
        # access=FILE_GENERIC_READ.  (Unfortunately
        # DUPLICATE_SAME_ACCESS does not work.)
        conn, child_conn = self.Pipe()
        p = self.Process(target=self.child_access, args=(child_conn,))
        p.daemon = True
        p.start()
        child_conn.close()

        r, w = self.Pipe(duplex=False)
        conn.send(w)
        w.close()
        self.assertEqual(r.recv(), 'all is well')
        r.close()

        r, w = self.Pipe(duplex=False)
        conn.send(r)
        r.close()
        w.send('foobar')
        w.close()
        self.assertEqual(conn.recv(), 'foobar'*2)

        p.join()
#
#
#
class _TestHeap(BaseTestCase):
    """Consistency checks for multiprocessing.heap's arena allocator."""
    ALLOWED_TYPES = ('processes',)

    def test_heap(self):
        iterations = 5000
        maxblocks = 50
        blocks = []

        # create and destroy lots of blocks of different sizes
        for i in range(iterations):
            size = int(random.lognormvariate(0, 1) * 1000)
            b = multiprocessing.heap.BufferWrapper(size)
            blocks.append(b)
            if len(blocks) > maxblocks:
                i = random.randrange(maxblocks)
                del blocks[i]

        # get the heap object
        heap = multiprocessing.heap.BufferWrapper._heap

        # verify the state of the heap
        all = []
        occupied = 0
        heap._lock.acquire()
        self.addCleanup(heap._lock.release)
        for L in list(heap._len_to_seq.values()):
            for arena, start, stop in L:
                all.append((heap._arenas.index(arena), start, stop,
                            stop-start, 'free'))
        for arena, start, stop in heap._allocated_blocks:
            all.append((heap._arenas.index(arena), start, stop,
                        stop-start, 'occupied'))
            occupied += (stop-start)

        all.sort()

        for i in range(len(all)-1):
            (arena, start, stop) = all[i][:3]
            (narena, nstart, nstop) = all[i+1][:3]
            # adjacent blocks either touch exactly (no gap, no overlap)
            # or the next block starts a fresh arena at offset 0
            self.assertTrue((arena != narena and nstart == 0) or
                            (stop == nstart))

    def test_free_from_gc(self):
        # Check that freeing of blocks by the garbage collector doesn't deadlock
        # (issue #12352).
        # Make sure the GC is enabled, and set lower collection thresholds to
        # make collections more frequent (and increase the probability of
        # deadlock).
        if not gc.isenabled():
            gc.enable()
            self.addCleanup(gc.disable)
        thresholds = gc.get_threshold()
        self.addCleanup(gc.set_threshold, *thresholds)
        gc.set_threshold(10)

        # perform numerous block allocations, with cyclic references to make
        # sure objects are collected asynchronously by the gc
        for i in range(5000):
            a = multiprocessing.heap.BufferWrapper(1)
            b = multiprocessing.heap.BufferWrapper(1)
            # circular references
            a.buddy = b
            b.buddy = a
#
#
#
class _Foo(Structure):
    # Simple ctypes structure used by the shared-ctypes tests below.
    _fields_ = [
        ('x', c_int),
        ('y', c_double)
        ]
class _TestSharedCTypes(BaseTestCase):
    """Values/Arrays backed by shared ctypes memory, mutated by a child."""
    ALLOWED_TYPES = ('processes',)

    def setUp(self):
        if not HAS_SHAREDCTYPES:
            self.skipTest("requires multiprocessing.sharedctypes")

    @classmethod
    def _double(cls, x, y, foo, arr, string):
        # Child process: double every shared value in place.
        x.value *= 2
        y.value *= 2
        foo.x *= 2
        foo.y *= 2
        string.value *= 2
        for i in range(len(arr)):
            arr[i] *= 2

    def test_sharedctypes(self, lock=False):
        """Mutations made by the child must be visible to the parent,
        with or without a synchronizing lock wrapper."""
        x = Value('i', 7, lock=lock)
        y = Value(c_double, 1.0/3.0, lock=lock)
        foo = Value(_Foo, 3, 2, lock=lock)
        arr = self.Array('d', list(range(10)), lock=lock)
        string = self.Array('c', 20, lock=lock)
        string.value = latin('hello')

        p = self.Process(target=self._double, args=(x, y, foo, arr, string))
        p.daemon = True
        p.start()
        p.join()

        self.assertEqual(x.value, 14)
        self.assertAlmostEqual(y.value, 2.0/3.0)
        self.assertEqual(foo.x, 6)
        self.assertAlmostEqual(foo.y, 4.0)
        for i in range(10):
            self.assertAlmostEqual(arr[i], i*2)
        self.assertEqual(string.value, latin('hellohello'))

    def test_synchronize(self):
        # Same assertions, with the synchronizing wrapper enabled.
        self.test_sharedctypes(lock=True)

    def test_copy(self):
        """copy() of a shared Structure yields an independent snapshot."""
        foo = _Foo(2, 5.0)
        bar = copy(foo)
        foo.x = 0
        foo.y = 0
        self.assertEqual(bar.x, 2)
        self.assertAlmostEqual(bar.y, 5.0)
#
#
#
class _TestFinalize(BaseTestCase):
    """util.Finalize callback ordering and thread-safety."""
    ALLOWED_TYPES = ('processes',)

    def setUp(self):
        # Isolate the global finalizer registry for the duration of each test.
        self.registry_backup = util._finalizer_registry.copy()
        util._finalizer_registry.clear()

    def tearDown(self):
        self.assertFalse(util._finalizer_registry)
        util._finalizer_registry.update(self.registry_backup)

    @classmethod
    def _test_finalize(cls, conn):
        class Foo(object):
            pass

        a = Foo()
        util.Finalize(a, conn.send, args=('a',))
        del a  # triggers callback for a

        b = Foo()
        close_b = util.Finalize(b, conn.send, args=('b',))
        close_b()  # triggers callback for b
        close_b()  # does nothing because callback has already been called
        del b  # does nothing because callback has already been called

        c = Foo()
        util.Finalize(c, conn.send, args=('c',))

        d10 = Foo()
        util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)

        d01 = Foo()
        util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
        d02 = Foo()
        util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
        d03 = Foo()
        util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)

        util.Finalize(None, conn.send, args=('e',), exitpriority=-10)

        util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)

        # call multiprocessing's cleanup function then exit process without
        # garbage collecting locals
        util._exit_function()
        conn.close()
        os._exit(0)

    def test_finalize(self):
        """Finalizers run in exit-priority order (higher first; equal
        priorities in LIFO registration order); 'c' has no exitpriority
        and is not run by _exit_function."""
        conn, child_conn = self.Pipe()

        p = self.Process(target=self._test_finalize, args=(child_conn,))
        p.daemon = True
        p.start()
        p.join()

        result = [obj for obj in iter(conn.recv, 'STOP')]
        self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])

    def test_thread_safety(self):
        # bpo-24484: _run_finalizers() should be thread-safe
        def cb():
            pass

        class Foo(object):
            def __init__(self):
                self.ref = self  # create reference cycle
                # insert finalizer at random key
                util.Finalize(self, cb, exitpriority=random.randint(1, 100))

        finish = False
        exc = None

        def run_finalizers():
            nonlocal exc
            while not finish:
                time.sleep(random.random() * 1e-1)
                try:
                    # A GC run will eventually happen during this,
                    # collecting stale Foo's and mutating the registry
                    util._run_finalizers()
                except Exception as e:
                    exc = e

        def make_finalizers():
            nonlocal exc
            d = {}
            while not finish:
                try:
                    # Old Foo's get gradually replaced and later
                    # collected by the GC (because of the cyclic ref)
                    d[random.getrandbits(5)] = {Foo() for i in range(10)}
                except Exception as e:
                    exc = e
                    d.clear()

        old_interval = sys.getswitchinterval()
        old_threshold = gc.get_threshold()
        try:
            sys.setswitchinterval(1e-6)
            gc.set_threshold(5, 5, 5)
            threads = [threading.Thread(target=run_finalizers),
                       threading.Thread(target=make_finalizers)]
            with test.support.start_threads(threads):
                time.sleep(4.0)  # Wait a bit to trigger race condition
            finish = True
            if exc is not None:
                raise exc
        finally:
            sys.setswitchinterval(old_interval)
            gc.set_threshold(*old_threshold)
            gc.collect()  # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
    """Verify every multiprocessing submodule exports a usable __all__."""

    def get_module_names(self):
        import glob
        folder = os.path.dirname(multiprocessing.__file__)
        sources = glob.glob(os.path.join(folder, '*.py'))
        stems = (os.path.splitext(os.path.basename(path))[0]
                 for path in sources)
        modules = ['multiprocessing.%s' % stem for stem in stems]
        modules.remove('multiprocessing.__init__')
        modules.append('multiprocessing')
        return modules

    def test_import(self):
        modules = self.get_module_names()
        if sys.platform == 'win32':
            # POSIX-only start-method helpers never import on Windows.
            for posix_only in ('multiprocessing.popen_fork',
                               'multiprocessing.popen_forkserver',
                               'multiprocessing.popen_spawn_posix'):
                modules.remove(posix_only)
        else:
            modules.remove('multiprocessing.popen_spawn_win32')
            if not HAS_REDUCTION:
                modules.remove('multiprocessing.popen_forkserver')

        if c_int is None:
            # This module requires _ctypes
            modules.remove('multiprocessing.sharedctypes')

        for name in modules:
            __import__(name)
            mod = sys.modules[name]
            self.assertTrue(hasattr(mod, '__all__'), name)
            for attr in mod.__all__:
                self.assertTrue(
                    hasattr(mod, attr),
                    '%r does not have attribute %r' % (mod, attr)
                )
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
    # Quick check that logging works -- does not inspect the output itself.

    ALLOWED_TYPES = ('processes',)

    def test_enable_logging(self):
        """get_logger() must hand back a logger whose level can be set."""
        logger = multiprocessing.get_logger()
        logger.setLevel(util.SUBWARNING)
        self.assertTrue(logger is not None)
        logger.debug('this will not be printed')
        logger.info('nor will this')
        logger.setLevel(LOG_LEVEL)

    @classmethod
    def _test_level(cls, conn):
        # Child helper: report the effective level seen inside the child.
        logger = multiprocessing.get_logger()
        conn.send(logger.getEffectiveLevel())

    def test_level(self):
        """Children must see the parent's effective logging level."""
        LEVEL1 = 32
        LEVEL2 = 37

        logger = multiprocessing.get_logger()
        root_logger = logging.getLogger()
        root_level = root_logger.level

        reader, writer = multiprocessing.Pipe(duplex=False)

        # An explicit level on the multiprocessing logger wins.
        logger.setLevel(LEVEL1)
        p = self.Process(target=self._test_level, args=(writer,))
        p.start()
        self.assertEqual(LEVEL1, reader.recv())
        p.join()

        # With NOTSET the level is inherited from the root logger.
        logger.setLevel(logging.NOTSET)
        root_logger.setLevel(LEVEL2)
        p = self.Process(target=self._test_level, args=(writer,))
        p.start()
        self.assertEqual(LEVEL2, reader.recv())
        p.join()

        root_logger.setLevel(root_level)
        logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
    """Process.join() must retry if os.waitpid() fails with EINTR."""

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def _killer(cls, pid):
        # Give the parent time to block inside join(), then interrupt it.
        time.sleep(0.1)
        os.kill(pid, signal.SIGUSR1)

    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_poll_eintr(self):
        got_signal = [False]
        def record(*args):
            got_signal[0] = True
        pid = os.getpid()
        oldhandler = signal.signal(signal.SIGUSR1, record)
        try:
            killer = self.Process(target=self._killer, args=(pid,))
            killer.start()
            try:
                p = self.Process(target=time.sleep, args=(2,))
                p.start()
                # join() overlaps the SIGUSR1 delivery; it must complete
                # despite the interrupted waitpid().
                p.join()
            finally:
                killer.join()
            self.assertTrue(got_signal[0])
            self.assertEqual(p.exitcode, 0)
        finally:
            signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
    """Handle verification on Connection objects -- see issue 3321."""

    @unittest.skipIf(WIN32, "skipped on Windows")
    def test_invalid_handles(self):
        # A Connection wrapping a bogus fd must fail cleanly, not crash.
        conn = multiprocessing.connection.Connection(44977608)
        # check that poll() doesn't crash
        try:
            conn.poll()
        except (ValueError, OSError):
            pass
        finally:
            # Hack private attribute _handle to avoid printing an error
            # in conn.__del__
            conn._handle = None
        # A negative handle must be rejected outright.
        self.assertRaises((ValueError, OSError),
                          multiprocessing.connection.Connection, -1)
class OtherTest(unittest.TestCase):
    # TODO: add more tests for deliver/answer challenge.

    def test_deliver_challenge_auth_failure(self):
        """A bogus digest from the peer must raise AuthenticationError."""
        class _FakeConnection(object):
            def recv_bytes(self, size):
                return b'something bogus'
            def send_bytes(self, data):
                pass
        with self.assertRaises(multiprocessing.AuthenticationError):
            multiprocessing.connection.deliver_challenge(
                _FakeConnection(), b'abc')

    def test_answer_challenge_auth_failure(self):
        """A bogus welcome message must raise AuthenticationError."""
        class _FakeConnection(object):
            def __init__(self):
                self.count = 0
            def recv_bytes(self, size):
                # First read: a valid challenge; second: garbage; then EOF.
                self.count += 1
                if self.count == 1:
                    return multiprocessing.connection.CHALLENGE
                if self.count == 2:
                    return b'something bogus'
                return b''
            def send_bytes(self, data):
                pass
        with self.assertRaises(multiprocessing.AuthenticationError):
            multiprocessing.connection.answer_challenge(
                _FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
    """Increment the shared namespace counter (Manager/Pool initializer)."""
    ns.test = ns.test + 1
class TestInitializers(unittest.TestCase):
    """Manager.start()/Pool.__init__() initializer feature -- issue 5585."""

    def setUp(self):
        self.mgr = multiprocessing.Manager()
        self.ns = self.mgr.Namespace()
        self.ns.test = 0

    def tearDown(self):
        self.mgr.shutdown()
        self.mgr.join()

    def test_manager_initializer(self):
        m = multiprocessing.managers.SyncManager()
        # A non-callable initializer must be rejected.
        self.assertRaises(TypeError, m.start, 1)
        m.start(initializer, (self.ns,))
        # The initializer ran exactly once in the manager process.
        self.assertEqual(self.ns.test, 1)
        m.shutdown()
        m.join()

    def test_pool_initializer(self):
        self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
        p = multiprocessing.Pool(1, initializer, (self.ns,))
        p.close()
        p.join()
        # The single worker ran the initializer once.
        self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
    """Attempt one non-blocking get; an empty queue is expected and ignored."""
    try:
        q.get(block=False)
    except pyqueue.Empty:
        pass
def _test_process():
    """Start a daemon child that pokes at a fresh Queue, and wait for it."""
    queue = multiprocessing.Queue()
    worker = multiprocessing.Process(target=_this_sub_process, args=(queue,))
    worker.daemon = True
    worker.start()
    worker.join()
def _afunc(x):
return x*x
def pool_in_process():
    """Creating and using a Pool from inside another process must work."""
    pool = multiprocessing.Pool(processes=4)
    squares = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
    pool.close()
    pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
    """Issues 5155, 5313, 5331: children vs. closed/replaced stdio.

    Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior.
    """

    def test_queue_in_process(self):
        proc = multiprocessing.Process(target=_test_process)
        proc.start()
        proc.join()

    def test_pool_in_process(self):
        p = multiprocessing.Process(target=pool_in_process)
        p.start()
        p.join()

    def test_flushing(self):
        sio = io.StringIO()
        flike = _file_like(sio)
        flike.write('foo')
        # NOTE(review): proc is created but never started -- presumably this
        # only checks that constructing the Process does not disturb flike's
        # buffer; confirm against the referenced issues.
        proc = multiprocessing.Process(target=lambda: flike.flush())
        flike.flush()
        assert sio.getvalue() == 'foo'
class TestWait(unittest.TestCase):
    """Tests for multiprocessing.connection.wait()."""

    @classmethod
    def _child_test_wait(cls, w, slow):
        # Child: send ten (index, pid) tuples, optionally with random delays.
        for i in range(10):
            if slow:
                time.sleep(random.random()*0.1)
            w.send((i, os.getpid()))
        w.close()

    def test_wait(self, slow=False):
        from multiprocessing.connection import wait
        readers = []
        procs = []
        messages = []

        for i in range(4):
            r, w = multiprocessing.Pipe(duplex=False)
            p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
            p.daemon = True
            p.start()
            # Drop the parent's write end so EOF arrives when the child exits.
            w.close()
            readers.append(r)
            procs.append(p)
            self.addCleanup(p.join)

        while readers:
            for r in wait(readers):
                try:
                    msg = r.recv()
                except EOFError:
                    readers.remove(r)
                    r.close()
                else:
                    messages.append(msg)

        messages.sort()
        # Every child must have delivered all ten of its messages.
        expected = sorted((i, p.pid) for i in range(10) for p in procs)
        self.assertEqual(messages, expected)

    @classmethod
    def _child_test_wait_socket(cls, address, slow):
        s = socket.socket()
        s.connect(address)
        for i in range(10):
            if slow:
                time.sleep(random.random()*0.1)
            s.sendall(('%s\n' % i).encode('ascii'))
        s.close()

    def test_wait_socket(self, slow=False):
        from multiprocessing.connection import wait
        l = socket.socket()
        l.bind((test.support.HOST, 0))
        l.listen()
        addr = l.getsockname()
        readers = []
        procs = []
        dic = {}

        for i in range(4):
            p = multiprocessing.Process(target=self._child_test_wait_socket,
                                        args=(addr, slow))
            p.daemon = True
            p.start()
            procs.append(p)
            self.addCleanup(p.join)

        for i in range(4):
            r, _ = l.accept()
            readers.append(r)
            dic[r] = []
        l.close()

        while readers:
            for r in wait(readers):
                msg = r.recv(32)
                if not msg:
                    # Zero-byte read == peer closed; stop watching it.
                    readers.remove(r)
                    r.close()
                else:
                    dic[r].append(msg)

        expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
        for v in dic.values():
            self.assertEqual(b''.join(v), expected)

    def test_wait_slow(self):
        self.test_wait(True)

    def test_wait_socket_slow(self):
        self.test_wait_socket(True)

    def test_wait_timeout(self):
        from multiprocessing.connection import wait
        expected = 5
        a, b = multiprocessing.Pipe()

        start = time.time()
        res = wait([a, b], expected)
        delta = time.time() - start

        self.assertEqual(res, [])
        # Timing bounds are deliberately generous to avoid flakiness.
        self.assertLess(delta, expected * 2)
        self.assertGreater(delta, expected * 0.5)

        b.send(None)

        start = time.time()
        res = wait([a, b], 20)
        delta = time.time() - start

        self.assertEqual(res, [a])
        self.assertLess(delta, 0.4)

    @classmethod
    def signal_and_sleep(cls, sem, period):
        sem.release()
        time.sleep(period)

    def test_wait_integer(self):
        from multiprocessing.connection import wait
        expected = 3
        # Sort by identity so mixed int/Connection result lists compare.
        sorted_ = lambda l: sorted(l, key=lambda x: id(x))
        sem = multiprocessing.Semaphore(0)
        a, b = multiprocessing.Pipe()
        p = multiprocessing.Process(target=self.signal_and_sleep,
                                    args=(sem, expected))

        p.start()
        # A process sentinel is a plain integer and is waitable too.
        self.assertIsInstance(p.sentinel, int)
        self.assertTrue(sem.acquire(timeout=20))

        start = time.time()
        res = wait([a, p.sentinel, b], expected + 20)
        delta = time.time() - start

        self.assertEqual(res, [p.sentinel])
        self.assertLess(delta, expected + 2)
        self.assertGreater(delta, expected - 2)

        a.send(None)

        start = time.time()
        res = wait([a, p.sentinel, b], 20)
        delta = time.time() - start

        self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
        self.assertLess(delta, 0.4)

        b.send(None)

        start = time.time()
        res = wait([a, p.sentinel, b], 20)
        delta = time.time() - start

        self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
        self.assertLess(delta, 0.4)

        p.terminate()
        p.join()

    def test_neg_timeout(self):
        from multiprocessing.connection import wait
        a, b = multiprocessing.Pipe()
        t = time.time()
        # A negative timeout must behave like a zero timeout (poll once).
        res = wait([a], timeout=-1)
        t = time.time() - t
        self.assertEqual(res, [])
        self.assertLess(t, 1)
        a.close()
        b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
    """Issue 14151: Listener must reject addresses from the wrong family."""

    @unittest.skipIf(WIN32, "skipped on Windows")
    def test_invalid_family(self):
        # A Windows named-pipe path is not valid on POSIX.
        self.assertRaises(ValueError,
                          multiprocessing.connection.Listener, r'\\.\test')

    @unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
    def test_invalid_family_win32(self):
        # A POSIX filesystem path is not valid on Windows.
        self.assertRaises(ValueError,
                          multiprocessing.connection.Listener, '/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
    """Issue 12098: sys.flags of a child must match those of its parent."""

    @classmethod
    def run_in_grandchild(cls, conn):
        # Report the grandchild's interpreter flags back to the child.
        conn.send(tuple(sys.flags))

    @classmethod
    def run_in_child(cls):
        import json
        r, w = multiprocessing.Pipe(duplex=False)
        p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
        p.start()
        grandchild_flags = r.recv()
        p.join()
        r.close()
        w.close()
        flags = (tuple(sys.flags), grandchild_flags)
        # BUG FIX: this was a Python 2 print statement
        # ("print json.dumps(flags)"), a SyntaxError in this Python 3 file.
        # The parent parses this stdout as JSON, so it must be printed.
        print(json.dumps(flags))

    def test_flags(self):
        import json, subprocess
        # start child process using unusual flags
        prog = ('from test._test_multiprocessing import TestFlags; ' +
                'TestFlags.run_in_child()')
        data = subprocess.check_output(
            [sys.executable, '-E', '-S', '-O', '-c', prog])
        child_flags, grandchild_flags = json.loads(data.decode('ascii'))
        self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
    """Interaction with socket default timeouts -- see issue #6056."""

    @classmethod
    def _test_timeout(cls, child, address):
        time.sleep(1)
        child.send(123)
        child.close()
        conn = multiprocessing.connection.Client(address)
        conn.send(456)
        conn.close()

    def test_timeout(self):
        old_timeout = socket.getdefaulttimeout()
        try:
            # A short global socket timeout must not break multiprocessing
            # pipes or listeners.
            socket.setdefaulttimeout(0.1)
            parent, child = multiprocessing.Pipe(duplex=True)
            l = multiprocessing.connection.Listener(family='AF_INET')
            p = multiprocessing.Process(target=self._test_timeout,
                                        args=(child, l.address))
            p.start()
            child.close()
            self.assertEqual(parent.recv(), 123)
            parent.close()
            conn = l.accept()
            self.assertEqual(conn.recv(), 456)
            conn.close()
            l.close()
            p.join(10)
        finally:
            socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
    """A script without the __main__ guard must not fork-bomb."""

    def test_noforkbomb(self):
        sm = multiprocessing.get_start_method()
        name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
        if sm != 'fork':
            # spawn/forkserver re-import the main module, so the script
            # must fail with RuntimeError instead of spawning endlessly.
            rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
            self.assertEqual(out, b'')
            self.assertIn(b'RuntimeError', err)
        else:
            # Under fork the script runs fine and prints its marker.
            rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
            self.assertEqual(out.rstrip(), b'123')
            self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
    # We recursively start processes. Issue #17555 meant that the
    # after fork registry would get duplicate entries for the same
    # lock. The size of the registry at generation n was ~2**n.

    @classmethod
    def child(cls, n, conn):
        # Recurse n generations deep, then report the registry size from
        # the deepest descendant.
        if n > 1:
            p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
            p.start()
            conn.close()
            p.join(timeout=5)
        else:
            conn.send(len(util._afterfork_registry))
            conn.close()

    def test_lock(self):
        r, w = multiprocessing.Pipe(False)
        l = util.ForkAwareThreadLock()
        old_size = len(util._afterfork_registry)
        p = multiprocessing.Process(target=self.child, args=(5, w))
        p.start()
        w.close()
        new_size = r.recv()
        p.join(timeout=5)
        # The registry must not grow with each fork generation.
        self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
    """Non-forked children must not inherit unneeded fds/handles."""

    def get_high_socket_fd(self):
        if WIN32:
            # The child process will not have any socket handles, so
            # calling socket.fromfd() should produce WSAENOTSOCK even
            # if there is a handle of the same number.
            return socket.socket().detach()
        else:
            # We want to produce a socket with an fd high enough that a
            # freshly created child process will not have any fds as high.
            fd = socket.socket().detach()
            to_close = []
            while fd < 50:
                to_close.append(fd)
                fd = os.dup(fd)
            for x in to_close:
                os.close(x)
            return fd

    def close(self, fd):
        # On Windows the fd wraps a socket handle; close it as a socket.
        if WIN32:
            socket.socket(fileno=fd).close()
        else:
            os.close(fd)

    @classmethod
    def _test_closefds(cls, conn, fd):
        # Child: report the exception (or None) raised by adopting the fd.
        try:
            s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        except Exception as e:
            conn.send(e)
        else:
            s.close()
            conn.send(None)

    def test_closefd(self):
        if not HAS_REDUCTION:
            raise unittest.SkipTest('requires fd pickling')

        reader, writer = multiprocessing.Pipe()
        fd = self.get_high_socket_fd()
        try:
            p = multiprocessing.Process(target=self._test_closefds,
                                        args=(writer, fd))
            p.start()
            writer.close()
            e = reader.recv()
            p.join(timeout=5)
        finally:
            self.close(fd)
            writer.close()
            reader.close()

        if multiprocessing.get_start_method() == 'fork':
            # fork children share the parent's fd table, so no error.
            self.assertIs(e, None)
        else:
            WSAENOTSOCK = 10038
            self.assertIsInstance(e, OSError)
            self.assertTrue(e.errno == errno.EBADF or
                            e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
    """Issue #17097: EINTR should be ignored by recv(), send(), accept() etc."""

    @classmethod
    def _test_ignore(cls, conn):
        def handler(signum, frame):
            pass
        signal.signal(signal.SIGUSR1, handler)
        conn.send('ready')
        x = conn.recv()
        conn.send(x)
        conn.send_bytes(b'x'*(1024*1024))   # sending 1 MB should block

    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_ignore(self):
        conn, child_conn = multiprocessing.Pipe()
        try:
            p = multiprocessing.Process(target=self._test_ignore,
                                        args=(child_conn,))
            p.daemon = True
            p.start()
            child_conn.close()
            self.assertEqual(conn.recv(), 'ready')
            time.sleep(0.1)
            # Interrupt the child while it blocks in recv(); it must retry.
            os.kill(p.pid, signal.SIGUSR1)
            time.sleep(0.1)
            conn.send(1234)
            self.assertEqual(conn.recv(), 1234)
            time.sleep(0.1)
            # Interrupt the child while it blocks in a large send().
            os.kill(p.pid, signal.SIGUSR1)
            self.assertEqual(conn.recv_bytes(), b'x'*(1024*1024))
            time.sleep(0.1)
            p.join()
        finally:
            conn.close()

    @classmethod
    def _test_ignore_listener(cls, conn):
        def handler(signum, frame):
            pass
        signal.signal(signal.SIGUSR1, handler)
        with multiprocessing.connection.Listener() as l:
            conn.send(l.address)
            a = l.accept()
            a.send('welcome')

    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_ignore_listener(self):
        conn, child_conn = multiprocessing.Pipe()
        try:
            p = multiprocessing.Process(target=self._test_ignore_listener,
                                        args=(child_conn,))
            p.daemon = True
            p.start()
            child_conn.close()
            address = conn.recv()
            time.sleep(0.1)
            # Interrupt the child while it blocks in accept(); it must retry.
            os.kill(p.pid, signal.SIGUSR1)
            time.sleep(0.1)
            client = multiprocessing.connection.Client(address)
            self.assertEqual(client.recv(), 'welcome')
            p.join()
        finally:
            conn.close()
class TestStartMethod(unittest.TestCase):
    """Tests for get/set_start_method() and start-method contexts."""

    @classmethod
    def _check_context(cls, conn):
        # Child helper: report the start method it was launched with.
        conn.send(multiprocessing.get_start_method())

    def check_context(self, ctx):
        # The child must agree with the context about its start method.
        r, w = ctx.Pipe(duplex=False)
        p = ctx.Process(target=self._check_context, args=(w,))
        p.start()
        w.close()
        child_method = r.recv()
        r.close()
        p.join()
        self.assertEqual(child_method, ctx.get_start_method())

    def test_context(self):
        for method in ('fork', 'spawn', 'forkserver'):
            try:
                ctx = multiprocessing.get_context(method)
            except ValueError:
                continue
            self.assertEqual(ctx.get_start_method(), method)
            self.assertIs(ctx.get_context(), ctx)
            # A concrete context's start method is fixed.
            self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
            self.assertRaises(ValueError, ctx.set_start_method, None)
            self.check_context(ctx)

    def test_set_get(self):
        multiprocessing.set_forkserver_preload(PRELOAD)
        count = 0
        old_method = multiprocessing.get_start_method()
        try:
            for method in ('fork', 'spawn', 'forkserver'):
                try:
                    multiprocessing.set_start_method(method, force=True)
                except ValueError:
                    continue
                self.assertEqual(multiprocessing.get_start_method(), method)
                ctx = multiprocessing.get_context()
                self.assertEqual(ctx.get_start_method(), method)
                self.assertTrue(type(ctx).__name__.lower().startswith(method))
                self.assertTrue(
                    ctx.Process.__name__.lower().startswith(method))
                self.check_context(multiprocessing)
                count += 1
        finally:
            multiprocessing.set_start_method(old_method, force=True)
        # At least one start method must be available on every platform.
        self.assertGreaterEqual(count, 1)

    def test_get_all(self):
        methods = multiprocessing.get_all_start_methods()
        if sys.platform == 'win32':
            self.assertEqual(methods, ['spawn'])
        else:
            self.assertTrue(methods == ['fork', 'spawn'] or
                            methods == ['fork', 'spawn', 'forkserver'])

    def test_preload_resources(self):
        if multiprocessing.get_start_method() != 'forkserver':
            self.skipTest("test only relevant for 'forkserver' method")
        name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
        rc, out, err = test.support.script_helper.assert_python_ok(name)
        out = out.decode()
        err = err.decode()
        if out.rstrip() != 'ok' or err != '':
            # BUG FIX: these were Python 2 print statements ("print out" /
            # "print err"), SyntaxErrors in this Python 3 file; dump the
            # child's output before failing so the failure is debuggable.
            print(out)
            print(err)
            self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
                 "test semantics don't make sense on Windows")
class TestSemaphoreTracker(unittest.TestCase):
    """The tracker must clean up named semaphores of killed processes."""

    def test_semaphore_tracker(self):
        #
        # Check that killing process does not leak named semaphores
        #
        import subprocess
        cmd = '''if 1:
            import multiprocessing as mp, time, os
            mp.set_start_method("spawn")
            lock1 = mp.Lock()
            lock2 = mp.Lock()
            os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n")
            os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n")
            time.sleep(10)
        '''
        r, w = os.pipe()
        p = subprocess.Popen([sys.executable,
                              '-E', '-c', cmd % (w, w)],
                             pass_fds=[w],
                             stderr=subprocess.PIPE)
        os.close(w)
        with open(r, 'rb', closefd=True) as f:
            name1 = f.readline().rstrip().decode('ascii')
            name2 = f.readline().rstrip().decode('ascii')
        # Unlink name1 ourselves; the tracker must unlink name2 after the
        # child is killed, so a second unlink of name2 must then fail.
        _multiprocessing.sem_unlink(name1)
        p.terminate()
        p.wait()
        time.sleep(2.0)
        with self.assertRaises(OSError) as ctx:
            _multiprocessing.sem_unlink(name2)
        # docs say it should be ENOENT, but OSX seems to give EINVAL
        self.assertIn(ctx.exception.errno, (errno.ENOENT, errno.EINVAL))
        err = p.stderr.read().decode('utf-8')
        p.stderr.close()
        expected = 'semaphore_tracker: There appear to be 2 leaked semaphores'
        self.assertRegex(err, expected)
        self.assertRegex(err, r'semaphore_tracker: %r: \[Errno' % name1)

    def check_semaphore_tracker_death(self, signum, should_die):
        # bpo-31310: if the semaphore tracker process has died, it should
        # be restarted implicitly.
        from multiprocessing.semaphore_tracker import _semaphore_tracker
        _semaphore_tracker.ensure_running()
        pid = _semaphore_tracker._pid
        os.kill(pid, signum)
        time.sleep(1.0)  # give it time to die
        ctx = multiprocessing.get_context("spawn")
        with contextlib.ExitStack() as stack:
            if should_die:
                stack.enter_context(self.assertWarnsRegex(
                    UserWarning,
                    "semaphore_tracker: process died"))
            sem = ctx.Semaphore()
            sem.acquire()
            sem.release()
            wr = weakref.ref(sem)
            # ensure `sem` gets collected, which triggers communication with
            # the semaphore tracker
            del sem
            gc.collect()
            self.assertIsNone(wr())

    def test_semaphore_tracker_sigint(self):
        # Catchable signal (ignored by semaphore tracker)
        self.check_semaphore_tracker_death(signal.SIGINT, False)

    def test_semaphore_tracker_sigkill(self):
        # Uncatchable signal.
        self.check_semaphore_tracker_death(signal.SIGKILL, True)
class TestSimpleQueue(unittest.TestCase):
    """SimpleQueue.empty() must also work in child processes (issue 30301)."""

    @classmethod
    def _test_empty(cls, queue, child_can_start, parent_can_continue):
        child_can_start.wait()
        # issue 30301, could fail under spawn and forkserver
        try:
            queue.put(queue.empty())
            queue.put(queue.empty())
        finally:
            parent_can_continue.set()

    def test_empty(self):
        queue = multiprocessing.SimpleQueue()
        child_can_start = multiprocessing.Event()
        parent_can_continue = multiprocessing.Event()

        proc = multiprocessing.Process(
            target=self._test_empty,
            args=(queue, child_can_start, parent_can_continue)
        )
        proc.daemon = True
        proc.start()

        self.assertTrue(queue.empty())

        child_can_start.set()
        parent_can_continue.wait()

        # Child saw an empty queue before its first put, non-empty after.
        self.assertFalse(queue.empty())
        self.assertEqual(queue.get(), True)
        self.assertEqual(queue.get(), False)
        self.assertTrue(queue.empty())

        proc.join()
#
# Mixins
#
class BaseMixin(object):
    """Snapshot dangling processes/threads around each test class and warn
    about any that the class leaves behind."""

    @classmethod
    def setUpClass(cls):
        cls.dangling = (multiprocessing.process._dangling.copy(),
                        threading._dangling.copy())

    @classmethod
    def tearDownClass(cls):
        # bpo-26762: Some multiprocessing objects like Pool create reference
        # cycles. Trigger a garbage collection to break these cycles.
        test.support.gc_collect()

        processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
        if processes:
            # BUG FIX: this was a Python 2 "print >>sys.stderr, ..."
            # statement, a SyntaxError in this Python 3 file.
            print('Warning -- Dangling processes: %s' % processes,
                  file=sys.stderr)
        processes = None

        threads = set(threading._dangling) - set(cls.dangling[1])
        if threads:
            # BUG FIX: likewise converted from a Python 2 print statement.
            print('Warning -- Dangling threads: %s' % threads,
                  file=sys.stderr)
        threads = None
class ProcessesMixin(BaseMixin):
    # Maps the generic names used by the shared test cases onto the real
    # process-based multiprocessing API.
    TYPE = 'processes'
    Process = multiprocessing.Process
    connection = multiprocessing.connection
    current_process = staticmethod(multiprocessing.current_process)
    active_children = staticmethod(multiprocessing.active_children)
    Pool = staticmethod(multiprocessing.Pool)
    Pipe = staticmethod(multiprocessing.Pipe)
    Queue = staticmethod(multiprocessing.Queue)
    JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
    Lock = staticmethod(multiprocessing.Lock)
    RLock = staticmethod(multiprocessing.RLock)
    Semaphore = staticmethod(multiprocessing.Semaphore)
    BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
    Condition = staticmethod(multiprocessing.Condition)
    Event = staticmethod(multiprocessing.Event)
    Barrier = staticmethod(multiprocessing.Barrier)
    Value = staticmethod(multiprocessing.Value)
    Array = staticmethod(multiprocessing.Array)
    RawValue = staticmethod(multiprocessing.RawValue)
    RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
    # Maps the generic names used by the shared test cases onto
    # manager-proxied objects; the shared manager is built in setUpClass.
    TYPE = 'manager'
    Process = multiprocessing.Process
    Queue = property(operator.attrgetter('manager.Queue'))
    JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
    Lock = property(operator.attrgetter('manager.Lock'))
    RLock = property(operator.attrgetter('manager.RLock'))
    Semaphore = property(operator.attrgetter('manager.Semaphore'))
    BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
    Condition = property(operator.attrgetter('manager.Condition'))
    Event = property(operator.attrgetter('manager.Event'))
    Barrier = property(operator.attrgetter('manager.Barrier'))
    Value = property(operator.attrgetter('manager.Value'))
    Array = property(operator.attrgetter('manager.Array'))
    list = property(operator.attrgetter('manager.list'))
    dict = property(operator.attrgetter('manager.dict'))
    Namespace = property(operator.attrgetter('manager.Namespace'))

    @classmethod
    def Pool(cls, *args, **kwds):
        return cls.manager.Pool(*args, **kwds)

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.manager = multiprocessing.Manager()

    @classmethod
    def tearDownClass(cls):
        # only the manager process should be returned by active_children()
        # but this can take a bit on slow machines, so wait a few seconds
        # if there are other children too (see #17395)
        start_time = time.monotonic()
        t = 0.01
        while len(multiprocessing.active_children()) > 1:
            time.sleep(t)
            t *= 2
            dt = time.monotonic() - start_time
            if dt >= 5.0:
                # BUG FIX: this was a mangled Python 2 print statement
                # split across lines (would not even parse); rebuilt as a
                # single print() call to stderr.
                print("Warning -- multiprocessing.Manager still has %s "
                      "active children after %s seconds"
                      % (multiprocessing.active_children(), dt),
                      file=sys.stderr)
                break

        gc.collect() # do garbage collection
        if cls.manager._number_of_objects() != 0:
            # This is not really an error since some tests do not
            # ensure that all processes which hold a reference to a
            # managed object have been joined.
            # BUG FIX: also converted from mangled Python 2 print statements.
            print('Warning -- Shared objects which still exist at manager '
                  'shutdown:')
            print(cls.manager._debug_info())
        cls.manager.shutdown()
        cls.manager.join()
        cls.manager = None

        super().tearDownClass()
class ThreadsMixin(BaseMixin):
    # Maps the generic names used by the shared test cases onto the
    # thread-backed multiprocessing.dummy API.
    TYPE = 'threads'
    Process = multiprocessing.dummy.Process
    connection = multiprocessing.dummy.connection
    current_process = staticmethod(multiprocessing.dummy.current_process)
    active_children = staticmethod(multiprocessing.dummy.active_children)
    Pool = staticmethod(multiprocessing.dummy.Pool)
    Pipe = staticmethod(multiprocessing.dummy.Pipe)
    Queue = staticmethod(multiprocessing.dummy.Queue)
    JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
    Lock = staticmethod(multiprocessing.dummy.Lock)
    RLock = staticmethod(multiprocessing.dummy.RLock)
    Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
    BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
    Condition = staticmethod(multiprocessing.dummy.Condition)
    Event = staticmethod(multiprocessing.dummy.Event)
    Barrier = staticmethod(multiprocessing.dummy.Barrier)
    Value = staticmethod(multiprocessing.dummy.Value)
    Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
    """Clone this module's test cases into *remote_globs* for *start_method*.

    Every BaseTestCase subclass is expanded into one concrete TestCase per
    allowed type ('processes'/'threads'/'manager') by mixing in the matching
    *Mixin class; plain TestCase subclasses are copied as-is. Also installs
    setUpModule/tearDownModule that pin the start method and warn about
    dangling processes/threads.
    """
    __module__ = remote_globs['__name__']
    local_globs = globals()
    ALL_TYPES = {'processes', 'threads', 'manager'}

    for name, base in local_globs.items():
        if not isinstance(base, type):
            continue
        if issubclass(base, BaseTestCase):
            if base is BaseTestCase:
                continue
            assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
            for type_ in base.ALLOWED_TYPES:
                newname = 'With' + type_.capitalize() + name[1:]
                Mixin = local_globs[type_.capitalize() + 'Mixin']
                class Temp(base, Mixin, unittest.TestCase):
                    pass
                Temp.__name__ = Temp.__qualname__ = newname
                Temp.__module__ = __module__
                remote_globs[newname] = Temp
        elif issubclass(base, unittest.TestCase):
            class Temp(base, object):
                pass
            Temp.__name__ = Temp.__qualname__ = name
            Temp.__module__ = __module__
            remote_globs[name] = Temp

    dangling = [None, None]
    old_start_method = [None]

    def setUpModule():
        multiprocessing.set_forkserver_preload(PRELOAD)
        multiprocessing.process._cleanup()
        dangling[0] = multiprocessing.process._dangling.copy()
        dangling[1] = threading._dangling.copy()
        old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
        try:
            multiprocessing.set_start_method(start_method, force=True)
        except ValueError:
            raise unittest.SkipTest(start_method +
                                    ' start method not supported')

        if sys.platform.startswith("linux"):
            try:
                lock = multiprocessing.RLock()
            except OSError:
                raise unittest.SkipTest("OSError raises on RLock creation, "
                                        "see issue 3111!")
        check_enough_semaphores()
        util.get_temp_dir()     # creates temp directory
        multiprocessing.get_logger().setLevel(LOG_LEVEL)

    def tearDownModule():
        need_sleep = False

        # bpo-26762: Some multiprocessing objects like Pool create reference
        # cycles. Trigger a garbage collection to break these cycles.
        test.support.gc_collect()

        multiprocessing.set_start_method(old_start_method[0], force=True)
        # pause a bit so we don't get warning about dangling threads/processes
        processes = set(multiprocessing.process._dangling) - set(dangling[0])
        if processes:
            need_sleep = True
            # BUG FIX: this was a Python 2 "print >>sys.stderr, ..."
            # statement, a SyntaxError in this Python 3 file.
            print('Warning -- Dangling processes: %s' % processes,
                  file=sys.stderr)
        processes = None

        threads = set(threading._dangling) - set(dangling[1])
        if threads:
            need_sleep = True
            # BUG FIX: likewise converted from a Python 2 print statement.
            print('Warning -- Dangling threads: %s' % threads,
                  file=sys.stderr)
        threads = None

        # Sleep 500 ms to give time to child processes to complete.
        if need_sleep:
            time.sleep(0.5)
        multiprocessing.process._cleanup()
        test.support.gc_collect()

    remote_globs['setUpModule'] = setUpModule
    remote_globs['tearDownModule'] = tearDownModule
|
headphones.py | from flask import jsonify, render_template, json, send_file
from maraschino import app, logger, WEBROOT, RUNDIR
from maraschino.tools import requires_auth, get_setting_value
from threading import Thread
import StringIO
import urllib
import urllib2
import base64
def headphones_http():
    """Return the URL scheme configured for Headphones."""
    use_https = get_setting_value('headphones_https') == '1'
    return 'https://' if use_https else 'http://'
def headphones_url():
    """Build the Headphones base URL: scheme, host, optional port/webroot."""
    host = get_setting_value('headphones_host')
    port = get_setting_value('headphones_port')
    webroot = get_setting_value('headphones_webroot')

    base = host
    if port:
        base = '%s:%s' % (base, port)
    if webroot:
        base = '%s/%s' % (base, webroot)

    return headphones_http() + base
def headphones_api(command, use_json=True, dev=False):
    """Call the Headphones HTTP API with *command* and return the response.

    Sends HTTP basic auth built from the stored username/password settings.
    When *use_json* is true the body is decoded from JSON; when *dev* is
    true the URL and payload are echoed for debugging (Python 2 prints).
    """
    username = get_setting_value('headphones_user')
    password = get_setting_value('headphones_password')
    apikey = get_setting_value('headphones_api')
    url = '%s/api?apikey=%s&cmd=%s' % (headphones_url(), apikey, command)
    request = urllib2.Request(url)
    # base64.encodestring appends a trailing newline; strip it for the header.
    base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
    request.add_header("Authorization", "Basic %s" % base64string)
    data = urllib2.urlopen(request).read()
    if use_json:
        data = json.JSONDecoder().decode(data)
    if dev:
        print 'DEVELOPER :: %s' % url
        print 'DEVELOPER :: %s' % data
    return data
def convert_track_duration(milliseconds):
    """Convert a duration in milliseconds to "MM:SS" or "HH:MM:SS".

    A None duration (missing metadata) yields "00:00". Sub-second
    remainders are truncated.
    """
    if milliseconds is None:
        return "00:00"
    # Use explicit floor division so the arithmetic is correct on both
    # Python 2 and Python 3 (plain "/" became true division in Python 3,
    # which would feed floats into the integer math below).
    total_seconds = milliseconds // 1000
    minutes, seconds = divmod(total_seconds, 60)
    hours, minutes = divmod(minutes, 60)
    if hours == 0:
        return "%02d:%02d" % (minutes, seconds)
    return "%02d:%02d:%02d" % (hours, minutes, seconds)
def hp_compact():
    """Return True when the compact Headphones layout setting is enabled."""
    return get_setting_value('headphones_compact') == '1'
def headphones_exception(e):
    """Log a Headphones failure and render the base template with the error."""
    logger.log('HEADPHONES :: EXCEPTION -- %s' % e, 'DEBUG')
    return render_template('headphones/base.html', headphones=True, message=e)
def hp_artistart(id):
    """Return the proxied artist-art URL for the given artist id."""
    return '{0}/xhr/headphones/img/artist/{1}'.format(WEBROOT, id)
def hp_albumart(id):
return '%s/xhr/headphones/img/album/%s' % (WEBROOT, id)
@app.route('/xhr/headphones/img/<type>/<id>/')
@requires_auth
def xhr_headphones_image(type, id):
    """Proxy artist/album artwork from Headphones, adding basic auth.

    Falls back to the bundled HeadPhones logo when Headphones has no
    cached thumbnail for the requested id.
    """
    if type == 'artist':
        cache_url = headphones_api('getArtistThumb&id=' + id)
    else:
        cache_url = headphones_api('getAlbumThumb&id=' + id)
    if cache_url:
        url = '%s/%s' % (headphones_url(), cache_url)
    else:
        # No cached thumbnail: serve the bundled placeholder image instead.
        img = RUNDIR + '/static/images/applications/HeadPhones.png'
        return send_file(img, mimetype='image/jpeg')
    username = get_setting_value('headphones_user')
    password = get_setting_value('headphones_password')
    request = urllib2.Request(url)
    # Basic auth header; encodestring appends a newline that must be removed.
    base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
    request.add_header("Authorization", "Basic %s" % base64string)
    img = StringIO.StringIO(urllib2.urlopen(request).read())
    return send_file(img, mimetype='image/jpeg')


@app.route('/xhr/headphones/')
@requires_auth
def xhr_headphones():
    """Default Headphones view: delegate to the upcoming-albums page."""
    return xhr_headphones_upcoming()


@app.route('/xhr/headphones/artists/')
@requires_auth
def xhr_headphones_artists(mobile=False):
    """Fetch the artist index (plus version info) and render/return it.

    When *mobile* is truthy, returns the raw artist list instead of a
    rendered template (an empty list on API failure).
    """
    logger.log('HEADPHONES :: Fetching artists list', 'INFO')
    artists = []
    try:
        headphones = headphones_api('getIndex')
        updates = headphones_api('getVersion')
    except Exception as e:
        if mobile:
            headphones_exception(e)
            return artists
        return headphones_exception(e)
    for artist in headphones:
        # Headphones reports fetch failures inline as pseudo-artists;
        # skip those entries.
        if not 'Fetch failed' in artist['ArtistName']:
            try:
                artist['Percent'] = int(100 * float(artist['HaveTracks']) / float(artist['TotalTracks']))
            except:
                # Missing/zero TotalTracks: report 0% complete.
                artist['Percent'] = 0
            if not hp_compact() and not mobile:
                try:
                    artist['ThumbURL'] = hp_artistart(artist['ArtistID'])
                except:
                    pass
            artists.append(artist)
    if mobile:
        return artists
    return render_template('headphones/artists.html',
        headphones=True,
        app_link=headphones_url(),
        artists=artists,
        updates=updates,
        compact=hp_compact(),
    )


@app.route('/xhr/headphones/artist/<artistid>/')
@requires_auth
def xhr_headphones_artist(artistid, mobile=False):
    """Fetch one artist's album list and render/return it."""
    logger.log('HEADPHONES :: Fetching artist', 'INFO')
    try:
        albums = headphones_api('getArtist&id=%s' % artistid)
    except Exception as e:
        return headphones_exception(e)
    if not hp_compact() and not mobile:
        for album in albums['albums']:
            try:
                album['ThumbURL'] = hp_albumart(album['AlbumID'])
            except:
                pass
    if mobile:
        return albums
    return render_template('headphones/artist.html',
        albums=albums,
        headphones=True,
        compact=hp_compact(),
    )


@app.route('/xhr/headphones/album/<albumid>/')
@requires_auth
def xhr_headphones_album(albumid, mobile=False):
    """Fetch one album with its tracks, computing formatted durations."""
    logger.log('HEADPHONES :: Fetching album', 'INFO')
    try:
        headphones = headphones_api('getAlbum&id=%s' % albumid)
    except Exception as e:
        return headphones_exception(e)
    album = headphones['album'][0]
    try:
        album['ThumbURL'] = hp_albumart(album['AlbumID'])
    except:
        pass
    # Sum track durations (milliseconds) and convert each to display form.
    album['TotalDuration'] = 0
    for track in headphones['tracks']:
        if track['TrackDuration'] == None:
            track['TrackDuration'] = 0
        album['TotalDuration'] = album['TotalDuration'] + int(track['TrackDuration'])
        track['TrackDuration'] = convert_track_duration(track['TrackDuration'])
    album['TotalDuration'] = convert_track_duration(album['TotalDuration'])
    album['Tracks'] = len(headphones['tracks'])
    if mobile:
        return headphones
    return render_template('headphones/album.html',
        album=headphones,
        headphones=True,
        compact=hp_compact(),
    )


@app.route('/xhr/headphones/upcoming/')
@requires_auth
def xhr_headphones_upcoming(mobile=False):
    """Fetch upcoming and wanted albums and render/return both lists.

    Empty API results are replaced by the string 'empty' so the template
    can distinguish "no data" from a failed call.
    """
    logger.log('HEADPHONES :: Fetching upcoming albums', 'INFO')
    try:
        upcoming = headphones_api('getUpcoming')
    except Exception as e:
        return headphones_exception(e)
    if upcoming == []:
        upcoming = 'empty'
    if not mobile:
        for album in upcoming:
            try:
                album['ThumbURL'] = hp_albumart(album['AlbumID'])
            except:
                pass
    try:
        wanted = headphones_api('getWanted')
    except Exception as e:
        return headphones_exception(e)
    if wanted == []:
        wanted = 'empty'
    if not mobile:
        for album in wanted:
            try:
                album['ThumbURL'] = hp_albumart(album['AlbumID'])
            except:
                pass
    if mobile:
        return [upcoming, wanted]
    return render_template('headphones.html',
        upcoming=upcoming,
        wanted=wanted,
        headphones=True,
        compact=hp_compact(),
    )


@app.route('/xhr/headphones/similar/')
@requires_auth
def xhr_headphones_similar():
    """Fetch and render the similar-artists list."""
    logger.log('HEADPHONES :: Fetching similar artists', 'INFO')
    try:
        headphones = headphones_api('getSimilar')
    except Exception as e:
        return headphones_exception(e)
    return render_template('headphones/similar.html',
        similar=headphones,
        headphones=True,
    )


@app.route('/xhr/headphones/history/')
@requires_auth
def xhr_headphones_history(mobile=False):
    """Fetch and render the download history."""
    logger.log('HEADPHONES :: Fetching history', 'INFO')
    try:
        headphones = headphones_api('getHistory')
    except Exception as e:
        return headphones_exception(e)
    if mobile:
        return headphones
    return render_template('headphones/history.html',
        history=headphones,
        headphones=True,
    )
@app.route('/xhr/headphones/search/<type>/<query>/')
@requires_auth
def xhr_headphones_search(type, query, mobile=False):
    """Search Headphones for an artist or album and render the results.

    type  -- 'artist' searches artists; anything else searches albums
    query -- the search text (URL-quoted before being sent to the API)
    """
    if type == 'artist':
        logger.log('HEADPHONES :: Searching for artist', 'INFO')
        command = 'findArtist&name=%s' % urllib.quote(query)
    else:
        logger.log('HEADPHONES :: Searching for album', 'INFO')
        command = 'findAlbum&name=%s' % urllib.quote(query)
    try:
        headphones = headphones_api(command)
    except Exception as e:
        return headphones_exception(e)
    for artist in headphones:
        # Bug fix: str.replace returns a new string; the original discarded
        # the result, leaving escaped slashes in the URL.
        artist['url'] = artist['url'].replace('\/', '/')
    if mobile:
        return headphones
    return render_template('headphones/search_dialog.html',
        headphones=True,
        search=headphones,
        query=query
    )
@app.route('/xhr/headphones/artist/<artistid>/<action>/')
@requires_auth
def xhr_headphones_artist_action(artistid, action, mobile=False):
    """Run an artist-level Headphones action.

    action -- one of 'pause', 'resume', 'refresh', 'remove', 'add'.
    remove/pause/resume run synchronously; the others are fired on a
    background thread. Returns a JSON status either way.
    """
    if action == 'pause':
        logger.log('HEADPHONES :: Pausing artist', 'INFO')
        command = 'pauseArtist&id=%s' % artistid
    elif action == 'resume':
        logger.log('HEADPHONES :: Resuming artist', 'INFO')
        command = 'resumeArtist&id=%s' % artistid
    elif action == 'refresh':
        logger.log('HEADPHONES :: Refreshing artist', 'INFO')
        command = 'refreshArtist&id=%s' % artistid
    elif action == 'remove':
        logger.log('HEADPHONES :: Removing artist', 'INFO')
        command = 'delArtist&id=%s' % artistid
    elif action == 'add':
        logger.log('HEADPHONES :: Adding artist', 'INFO')
        command = 'addArtist&id=%s' % artistid
    try:
        # Bug fix: the original compared the full API command string
        # (e.g. 'delArtist&id=...') against the bare action names, so the
        # synchronous branches never matched and every request fell through
        # to the background thread. Compare the action itself instead.
        if action in ('remove', 'pause', 'resume'):
            headphones_api(command, False)
        else:
            Thread(target=headphones_api, args=(command, False)).start()
    except Exception as e:
        if mobile:
            headphones_exception(e)
            return jsonify(error='failed')
        return headphones_exception(e)
    return jsonify(status='successful')
@app.route('/xhr/headphones/album/<albumid>/<status>/')
@requires_auth
def xhr_headphones_album_status(albumid, status, mobile=False):
    """Mark an album as wanted / wanted-new / skipped via the API.

    NOTE(review): an unrecognized *status* leaves `command` unbound, so the
    Thread() call raises NameError and the request is reported as a generic
    failure — confirm callers only pass the three known statuses.
    """
    if status == 'wanted':
        logger.log('HEADPHONES :: Marking album as wanted', 'INFO')
        command = 'queueAlbum&id=%s' % albumid
    if status == 'wanted_new':
        logger.log('HEADPHONES :: Marking album as wanted (new)', 'INFO')
        command = 'queueAlbum&new=True&id=%s' % albumid
    if status == 'skipped':
        logger.log('HEADPHONES :: Marking album as skipped', 'INFO')
        command = 'unqueueAlbum&id=%s' % albumid
    try:
        # Fire-and-forget: the API call runs on a background thread.
        Thread(target=headphones_api, args=(command, False)).start()
    except Exception as e:
        if mobile:
            headphones_exception(e)
            return jsonify(error='failed')
        return headphones_exception(e)
    return jsonify(status='successful')


@app.route('/xhr/headphones/control/<command>/')
@requires_auth
def xhr_headphones_control(command):
    """Send a management command (shutdown/restart/update/force_*) to Headphones.

    'shutdown', 'restart' and 'update' are passed through verbatim; the
    force_* paths are translated to the API's camelCase command names.
    """
    if command == 'shutdown':
        logger.log('HEADPHONES :: Shutting down', 'INFO')
    elif command == 'restart':
        logger.log('HEADPHONES :: Restarting', 'INFO')
    elif command == 'update':
        logger.log('HEADPHONES :: Updating', 'INFO')
    elif command == 'force_search':
        logger.log('HEADPHONES :: Forcing wanted album search', 'INFO')
        command = 'forceSearch'
    elif command == 'force_process':
        logger.log('HEADPHONES :: Forcing post process', 'INFO')
        command = 'forceProcess'
    try:
        Thread(target=headphones_api, args=(command, False)).start()
    except Exception as e:
        return headphones_exception(e)
    return jsonify(status='successful')
|
utils_test.py | import asyncio
import collections
import gc
from contextlib import contextmanager
import copy
import functools
from glob import glob
import io
import itertools
import logging
import logging.config
import os
import queue
import re
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import textwrap
import threading
from time import sleep
import uuid
import warnings
import weakref
try:
import ssl
except ImportError:
ssl = None
import pytest
import dask
from tlz import merge, memoize, assoc
from tornado.ioloop import IOLoop
from . import system
from .client import default_client, _global_clients, Client
from .compatibility import WINDOWS
from .comm import Comm
from .config import initialize_logging
from .core import connect, rpc, CommClosedError
from .deploy import SpecCluster
from .metrics import time
from .process import _cleanup_dangling
from .proctitle import enable_proctitle_on_children
from .security import Security
from .utils import (
ignoring,
log_errors,
mp_context,
get_ip,
get_ipv6,
DequeHandler,
reset_logger_locks,
sync,
iscoroutinefunction,
thread_state,
_offload_executor,
TimeoutError,
)
from .worker import Worker
from .nanny import Nanny
try:
import dask.array # register config
except ImportError:
pass
logger = logging.getLogger(__name__)

# Snapshot of every configured logger's level at import time, so tests can
# restore logging state after temporarily reconfiguring it.
logging_levels = {
    name: logger.level
    for name, logger in logging.root.manager.loggerDict.items()
    if isinstance(logger, logging.Logger)
}

_offload_executor.submit(lambda: None).result()  # create thread during import
@pytest.fixture(scope="session")
def valid_python_script(tmpdir_factory):
    """Session-scoped path to a small script that runs successfully."""
    path = tmpdir_factory.mktemp("data").join("file.py")
    path.write("print('hello world!')")
    return path


@pytest.fixture(scope="session")
def client_contract_script(tmpdir_factory):
    """Session-scoped path to a script that creates a distributed Client."""
    path = tmpdir_factory.mktemp("data").join("distributed_script.py")
    path.write(
        "\n".join(
            (
                "from distributed import Client",
                "e = Client('127.0.0.1:8989')",
                "print(e)",
            )
        )
    )
    return path


@pytest.fixture(scope="session")
def invalid_python_script(tmpdir_factory):
    """Session-scoped path to a script that raises at import time."""
    path = tmpdir_factory.mktemp("data").join("file.py")
    path.write("a+1")
    return path
async def cleanup_global_workers():
    """Close every tracked Worker instance without reporting to the scheduler."""
    for w in Worker._instances:
        await w.close(report=False, executor_wait=False)
@pytest.fixture
def loop():
    """Pytest fixture: a fresh Tornado IOLoop with post-test cleanup.

    Wraps the loop so that, after the test, leftover workers are closed
    and the loop is stopped even if the test left it running.
    """
    with check_instances():
        with pristine_loop() as loop:
            # Monkey-patch IOLoop.start to wait for loop stop
            orig_start = loop.start
            is_stopped = threading.Event()
            is_stopped.set()

            def start():
                is_stopped.clear()
                try:
                    orig_start()
                finally:
                    is_stopped.set()

            loop.start = start
            yield loop
            # Stop the loop in case it's still running
            try:
                sync(loop, cleanup_global_workers, callback_timeout=0.500)
                loop.add_callback(loop.stop)
            except RuntimeError as e:
                if not re.match("IOLoop is clos(ed|ing)", str(e)):
                    raise
            except TimeoutError:
                pass
            else:
                # Only wait for the stop if the loop was actually running.
                is_stopped.wait()


@pytest.fixture
def loop_in_thread():
    """Pytest fixture: an IOLoop running on a daemon background thread."""
    with pristine_loop() as loop:
        thread = threading.Thread(target=loop.start, name="test IOLoop")
        thread.daemon = True
        thread.start()
        # Block until the loop is actually processing callbacks.
        loop_started = threading.Event()
        loop.add_callback(loop_started.set)
        loop_started.wait()
        yield loop
        loop.add_callback(loop.stop)
        thread.join(timeout=5)


@pytest.fixture
def zmq_ctx():
    """Pytest fixture: a ZeroMQ context, destroyed without lingering."""
    import zmq

    ctx = zmq.Context.instance()
    yield ctx
    ctx.destroy(linger=0)


@contextmanager
def pristine_loop():
    """Yield a brand-new current IOLoop, fully closed again on exit."""
    IOLoop.clear_instance()
    IOLoop.clear_current()
    loop = IOLoop()
    loop.make_current()
    assert IOLoop.current() is loop
    try:
        yield loop
    finally:
        try:
            loop.close(all_fds=True)
        except (KeyError, ValueError):
            pass
        IOLoop.clear_instance()
        IOLoop.clear_current()


@contextmanager
def mock_ipython():
    """Patch IPython lookups with a mock shell; clean client caches on exit."""
    from unittest import mock
    from distributed._ipython_utils import remote_magic

    ip = mock.Mock()
    ip.user_ns = {}
    ip.kernel = None

    def get_ip():
        return ip

    with mock.patch("IPython.get_ipython", get_ip), mock.patch(
        "distributed._ipython_utils.get_ipython", get_ip
    ):
        yield ip
    # cleanup remote_magic client cache
    for kc in remote_magic._clients.values():
        kc.stop_channels()
    remote_magic._clients.clear()


# Pristine copy of the dask config taken at import time; reset_config()
# restores it after tests mutate the live config in place.
original_config = copy.deepcopy(dask.config.config)


def reset_config():
    """Restore dask.config.config to its import-time contents."""
    dask.config.config.clear()
    dask.config.config.update(copy.deepcopy(original_config))
def nodebug(func):
    """Decorator that strips PYTHONASYNCIODEBUG from the environment while
    *func* runs, restoring it afterwards.

    Warning: this doesn't affect already created IOLoops.
    """

    @functools.wraps(func)
    def inner(*args, **kwargs):
        saved = os.environ.pop("PYTHONASYNCIODEBUG", None)
        try:
            return func(*args, **kwargs)
        finally:
            if saved is not None:
                os.environ["PYTHONASYNCIODEBUG"] = saved

    return inner
def nodebug_setup_module(module):
    """
    A setup_module() that you can install in a test module to disable
    debug facilities, remembering the previous setting on the module.
    """
    module._old_asyncio_debug = os.environ.pop("PYTHONASYNCIODEBUG", None)


def nodebug_teardown_module(module):
    """
    A teardown_module() that restores the asyncio debug setting saved by
    nodebug_setup_module().
    """
    if module._old_asyncio_debug is not None:
        os.environ["PYTHONASYNCIODEBUG"] = module._old_asyncio_debug
def inc(x):
    """Return *x* plus one."""
    return x + 1


def dec(x):
    """Return *x* minus one."""
    return x - 1


def mul(x, y):
    """Return the product of *x* and *y*."""
    return x * y


def div(x, y):
    """Return *x* divided by *y* (true division)."""
    return x / y


def deep(n):
    """Recurse *n* levels deep, then return True (builds a deep stack)."""
    if n <= 0:
        return True
    return deep(n - 1)


def throws(x):
    """Always raise RuntimeError, ignoring *x*."""
    raise RuntimeError("hello!")


def double(x):
    """Return twice *x*."""
    return x * 2
def slowinc(x, delay=0.02):
    """Like inc(), but sleeps *delay* seconds first."""
    sleep(delay)
    return x + 1


def slowdec(x, delay=0.02):
    """Like dec(), but sleeps *delay* seconds first."""
    sleep(delay)
    return x - 1


def slowdouble(x, delay=0.02):
    """Like double(), but sleeps *delay* seconds first."""
    sleep(delay)
    return 2 * x


def randominc(x, scale=1):
    """Increment *x* after sleeping a random time in [0, scale)."""
    from random import random

    sleep(random() * scale)
    return x + 1


def slowadd(x, y, delay=0.02):
    """Return x + y after sleeping *delay* seconds."""
    sleep(delay)
    return x + y


def slowsum(seq, delay=0.02):
    """Return sum(seq) after sleeping *delay* seconds."""
    sleep(delay)
    return sum(seq)


def slowidentity(*args, **kwargs):
    """Return the single argument (or the args tuple) after a delay."""
    delay = kwargs.get("delay", 0.02)
    sleep(delay)
    return args[0] if len(args) == 1 else args


def run_for(duration, timer=time):
    """
    Burn CPU for *duration* seconds.
    """
    end = timer() + duration
    while timer() <= end:
        pass
# This dict grows at every varying() invocation
_varying_dict = collections.defaultdict(int)
_varying_key_gen = itertools.count()


class _ModuleSlot:
    """Late-binding handle on a module-level attribute.

    Stores module/attribute *names* and resolves them at call time via
    sys.modules, so cloudpickle serializes the names rather than a copy
    of the global object.
    """

    def __init__(self, modname, slotname):
        self.modname = modname
        self.slotname = slotname

    def get(self):
        # Look up through sys.modules each time to get the live object.
        return getattr(sys.modules[self.modname], self.slotname)


def varying(items):
    """
    Return a function that returns a result (or raises an exception)
    from *items* at each call.
    """
    # cloudpickle would serialize the *values* of all globals
    # used by *func* below, so we can't use `global <something>`.
    # Instead look up the module by name to get the original namespace
    # and not a copy.
    slot = _ModuleSlot(__name__, "_varying_dict")
    key = next(_varying_key_gen)

    def func():
        dct = slot.get()
        i = dct[key]
        if i == len(items):
            # All items consumed: subsequent calls fail.
            raise IndexError
        else:
            x = items[i]
            dct[key] = i + 1
            if isinstance(x, Exception):
                raise x
            else:
                return x

    return func


def map_varying(itemslists):
    """
    Like *varying*, but return the full specification for a map() call
    on multiple items lists.
    """

    def apply(func, *args, **kwargs):
        return func(*args, **kwargs)

    return apply, list(map(varying, itemslists))
async def geninc(x, delay=0.02):
    """Asynchronously sleep *delay* seconds, then return x + 1."""
    await asyncio.sleep(delay)
    return 1 + x
def compile_snippet(code, dedent=True):
    """Compile *code* and exec it into this module's global namespace."""
    if dedent:
        code = textwrap.dedent(code)
    ns = globals()
    exec(compile(code, "<dynamic>", "exec"), ns, ns)


if sys.version_info >= (3, 5):
    # Define asyncinc dynamically; on very old interpreters the async
    # syntax would be a SyntaxError at import time.
    compile_snippet(
        """
    async def asyncinc(x, delay=0.02):
        await asyncio.sleep(delay)
        return x + 1
    """
    )
    assert asyncinc  # noqa: F821
else:
    asyncinc = None
# One asyncio.Queue of pending messages per comm, lazily created by readone().
_readone_queues = {}


async def readone(comm):
    """
    Read one message at a time from a comm that reads lists of
    messages.
    """
    try:
        q = _readone_queues[comm]
    except KeyError:
        # First call for this comm: create its queue and a background
        # reader that splits incoming message lists into single entries.
        q = _readone_queues[comm] = asyncio.Queue()

        async def background_read():
            while True:
                try:
                    messages = await comm.read()
                except CommClosedError:
                    break
                for msg in messages:
                    q.put_nowait(msg)
            # None is the EOF sentinel; drop the queue afterwards.
            q.put_nowait(None)
            del _readone_queues[comm]

        # NOTE(review): background_read() is invoked without await or
        # ensure_future — confirm the coroutine is actually scheduled.
        background_read()

    msg = await q.get()
    if msg is None:
        raise CommClosedError
    else:
        return msg


def run_scheduler(q, nputs, port=0, **kwargs):
    """Run a Scheduler in this process, putting its address on *q* nputs times.

    Intended as a multiprocessing target; blocks until the scheduler finishes.
    """
    from distributed import Scheduler

    # On Python 2.7 and Unix, fork() is used to spawn child processes,
    # so avoid inheriting the parent's IO loop.
    with pristine_loop() as loop:

        async def _():
            scheduler = await Scheduler(
                validate=True, host="127.0.0.1", port=port, **kwargs
            )
            for i in range(nputs):
                q.put(scheduler.address)
            await scheduler.finished()

        try:
            loop.run_sync(_)
        finally:
            loop.close(all_fds=True)


def run_worker(q, scheduler_q, **kwargs):
    """Run a Worker in this process; reads the scheduler address from
    *scheduler_q* and reports the worker's own address on *q*."""
    from distributed import Worker

    reset_logger_locks()
    with log_errors():
        with pristine_loop() as loop:
            scheduler_addr = scheduler_q.get()

            async def _():
                worker = await Worker(scheduler_addr, validate=True, **kwargs)
                q.put(worker.address)
                await worker.finished()

            try:
                loop.run_sync(_)
            finally:
                loop.close(all_fds=True)


def run_nanny(q, scheduler_q, **kwargs):
    """Like run_worker(), but supervises the worker through a Nanny."""
    with log_errors():
        with pristine_loop() as loop:
            scheduler_addr = scheduler_q.get()

            async def _():
                worker = await Nanny(scheduler_addr, validate=True, **kwargs)
                q.put(worker.address)
                await worker.finished()

            try:
                loop.run_sync(_)
            finally:
                loop.close(all_fds=True)


@contextmanager
def check_active_rpc(loop, active_rpc_timeout=1):
    """Fail the test if the wrapped body leaves RPC objects active.

    Allows up to *active_rpc_timeout* seconds for slow peer-close
    notifications before failing.
    """
    active_before = set(rpc.active)
    yield
    # Some streams can take a bit of time to notice their peer
    # has closed, and keep a coroutine (*) waiting for a CommClosedError
    # before calling close_rpc() after a CommClosedError.
    # This would happen especially if a non-localhost address is used,
    # as Nanny does.
    # (*) (example: gather_from_workers())

    def fail():
        pytest.fail(
            "some RPCs left active by test: %s" % (set(rpc.active) - active_before)
        )

    async def wait():
        await async_wait_for(
            lambda: len(set(rpc.active) - active_before) == 0,
            timeout=active_rpc_timeout,
            fail_func=fail,
        )

    loop.run_sync(wait)
@pytest.fixture
def cluster_fixture(loop):
    """Yield (scheduler, workers) from a fresh test cluster."""
    with cluster() as (scheduler, workers):
        yield scheduler, workers


@pytest.fixture
def s(cluster_fixture):
    """The scheduler info dict of the test cluster."""
    return cluster_fixture[0]


@pytest.fixture
def a(cluster_fixture):
    """The first worker of the test cluster."""
    return cluster_fixture[1][0]


@pytest.fixture
def b(cluster_fixture):
    """The second worker of the test cluster."""
    return cluster_fixture[1][1]


@pytest.fixture
def client(loop, cluster_fixture):
    """A Client connected to the test cluster."""
    scheduler, _ = cluster_fixture
    with Client(scheduler["address"], loop=loop) as c:
        yield c


@pytest.fixture
def client_secondary(loop, cluster_fixture):
    """A second, independent Client connected to the test cluster."""
    scheduler, _ = cluster_fixture
    with Client(scheduler["address"], loop=loop) as c:
        yield c


@contextmanager
def tls_cluster_context(
    worker_kwargs=None, scheduler_kwargs=None, security=None, **kwargs
):
    """Like cluster(), but with TLS security applied to every component."""
    security = security or tls_only_security()
    with cluster(
        worker_kwargs=assoc(worker_kwargs or {}, "security", security),
        scheduler_kwargs=assoc(scheduler_kwargs or {}, "security", security),
        **kwargs,
    ) as (s, workers):
        yield s, workers


@pytest.fixture
def tls_cluster(loop, security):
    """Yield (scheduler, workers) from a TLS-secured test cluster."""
    with tls_cluster_context(security=security) as (scheduler, workers):
        yield scheduler, workers


@pytest.fixture
def tls_client(tls_cluster, loop, security):
    """A Client connected to the TLS-secured test cluster."""
    s, _ = tls_cluster
    with Client(s["address"], security=security, loop=loop) as c:
        yield c


@pytest.fixture
def security():
    """A Security object that only allows TLS communication."""
    return tls_only_security()
@contextmanager
def cluster(
    nworkers=2,
    nanny=False,
    worker_kwargs={},
    active_rpc_timeout=1,
    disconnect_timeout=3,
    scheduler_kwargs={},
):
    """Start a scheduler and *nworkers* workers in subprocesses.

    Yields ({"address": scheduler_addr}, [worker info dicts]); tears the
    whole cluster down (processes, queues, scratch directories) on exit.
    """
    ws = weakref.WeakSet()
    enable_proctitle_on_children()
    with clean(timeout=active_rpc_timeout, threads=False) as loop:
        if nanny:
            _run_worker = run_nanny
        else:
            _run_worker = run_worker

        # The scheduler queue will receive the scheduler's address
        scheduler_q = mp_context.Queue()

        # Launch scheduler
        scheduler = mp_context.Process(
            name="Dask cluster test: Scheduler",
            target=run_scheduler,
            # nworkers + 1 copies of the address: one per worker process
            # plus one for this parent process.
            args=(scheduler_q, nworkers + 1),
            kwargs=scheduler_kwargs,
        )
        ws.add(scheduler)
        scheduler.daemon = True
        scheduler.start()

        # Launch workers
        workers = []
        for i in range(nworkers):
            q = mp_context.Queue()
            # Unique scratch directory per worker, cleaned up below.
            fn = "_test_worker-%s" % uuid.uuid4()
            kwargs = merge(
                {
                    "nthreads": 1,
                    "local_directory": fn,
                    "memory_limit": system.MEMORY_LIMIT,
                },
                worker_kwargs,
            )
            proc = mp_context.Process(
                name="Dask cluster test: Worker",
                target=_run_worker,
                args=(q, scheduler_q),
                kwargs=kwargs,
            )
            ws.add(proc)
            workers.append({"proc": proc, "queue": q, "dir": fn})

        for worker in workers:
            worker["proc"].start()
        try:
            for worker in workers:
                worker["address"] = worker["queue"].get(timeout=5)
        except queue.Empty:
            raise pytest.xfail.Exception("Worker failed to start in test")

        saddr = scheduler_q.get()

        start = time()
        try:
            try:
                security = scheduler_kwargs["security"]
                rpc_kwargs = {"connection_args": security.get_connection_args("client")}
            except KeyError:
                rpc_kwargs = {}

            # Poll the scheduler until all workers have registered.
            with rpc(saddr, **rpc_kwargs) as s:
                while True:
                    nthreads = loop.run_sync(s.ncores)
                    if len(nthreads) == nworkers:
                        break
                    if time() - start > 5:
                        raise Exception("Timeout on cluster creation")

            # avoid sending processes down to function
            yield {"address": saddr}, [
                {"address": w["address"], "proc": weakref.ref(w["proc"])}
                for w in workers
            ]
        finally:
            logger.debug("Closing out test cluster")

            # Graceful disconnect first, then hard-terminate anything left.
            loop.run_sync(
                lambda: disconnect_all(
                    [w["address"] for w in workers],
                    timeout=disconnect_timeout,
                    rpc_kwargs=rpc_kwargs,
                )
            )
            loop.run_sync(
                lambda: disconnect(
                    saddr, timeout=disconnect_timeout, rpc_kwargs=rpc_kwargs
                )
            )

            scheduler.terminate()
            scheduler_q.close()
            scheduler_q._reader.close()
            scheduler_q._writer.close()

            for w in workers:
                w["proc"].terminate()
                w["queue"].close()
                w["queue"]._reader.close()
                w["queue"]._writer.close()

            scheduler.join(2)
            del scheduler
            for proc in [w["proc"] for w in workers]:
                proc.join(timeout=2)

            # Drop lingering references so the WeakSet below can empty out.
            with ignoring(UnboundLocalError):
                del worker, w, proc
            del workers[:]

            for fn in glob("_test_worker-*"):
                with ignoring(OSError):
                    shutil.rmtree(fn)

            try:
                client = default_client()
            except ValueError:
                pass
            else:
                client.close()

    start = time()
    while any(proc.is_alive() for proc in ws):
        text = str(list(ws))
        sleep(0.2)
        assert time() < start + 5, ("Workers still around after five seconds", text)
async def disconnect(addr, timeout=3, rpc_kwargs=None):
    """Ask the server at *addr* to terminate, bounded by *timeout* seconds."""
    rpc_kwargs = rpc_kwargs or {}

    async def _terminate():
        with ignoring(EnvironmentError, CommClosedError):
            with rpc(addr, **rpc_kwargs) as w:
                await w.terminate(close=True)

    await asyncio.wait_for(_terminate(), timeout=timeout)


async def disconnect_all(addresses, timeout=3, rpc_kwargs=None):
    """Concurrently disconnect every address in *addresses*."""
    await asyncio.gather(
        *(disconnect(addr, timeout, rpc_kwargs) for addr in addresses)
    )
def gen_test(timeout=10):
    """ Coroutine test

    Decorate an ``async def`` test to run it on a fresh, clean IOLoop::

        @gen_test(timeout=5)
        async def test_foo():
            await ...  # use tornado coroutines
    """

    def decorator(func):
        def test_func():
            with clean() as loop:
                if not iscoroutinefunction(func):
                    raise ValueError("@gen_test should wrap async def functions")
                loop.run_sync(func, timeout=timeout)

        return test_func

    return decorator
from .scheduler import Scheduler
from .worker import Worker
async def start_cluster(
    nthreads,
    scheduler_addr,
    loop,
    security=None,
    Worker=Worker,
    scheduler_kwargs={},
    worker_kwargs={},
):
    """Start an in-process Scheduler plus one Worker per *nthreads* entry.

    Each entry of *nthreads* is (host, nthreads[, extra worker kwargs]).
    Returns (scheduler, [workers]); raises after ~5s if workers fail to
    register, closing everything it started.
    """
    s = await Scheduler(
        loop=loop,
        validate=True,
        security=security,
        port=0,
        host=scheduler_addr,
        **scheduler_kwargs,
    )
    workers = [
        Worker(
            s.address,
            nthreads=ncore[1],
            name=i,
            security=security,
            loop=loop,
            validate=True,
            host=ncore[0],
            # Optional third tuple element holds per-worker kwarg overrides.
            **(merge(worker_kwargs, ncore[2]) if len(ncore) > 2 else worker_kwargs),
        )
        for i, ncore in enumerate(nthreads)
    ]
    # for w in workers:
    #     w.rpc = workers[0].rpc

    await asyncio.gather(*workers)

    start = time()
    # Wait until every worker is registered and its stream comm is live.
    while len(s.workers) < len(nthreads) or any(
        comm.comm is None for comm in s.stream_comms.values()
    ):
        await asyncio.sleep(0.01)
        if time() - start > 5:
            await asyncio.gather(*[w.close(timeout=1) for w in workers])
            await s.close(fast=True)
            raise Exception("Cluster creation timeout")
    return s, workers


async def end_cluster(s, workers):
    """Close all *workers*, then stop scheduler *s* completely."""
    logger.debug("Closing out test cluster")

    async def end_worker(w):
        with ignoring(TimeoutError, CommClosedError, EnvironmentError):
            await w.close(report=False)

    await asyncio.gather(*[end_worker(w) for w in workers])
    await s.close()  # wait until scheduler stops completely
    s.stop()
def gen_cluster(
    nthreads=[("127.0.0.1", 1), ("127.0.0.1", 2)],
    ncores=None,
    scheduler="127.0.0.1",
    timeout=10,
    security=None,
    Worker=Worker,
    client=False,
    scheduler_kwargs={},
    worker_kwargs={},
    client_kwargs={},
    active_rpc_timeout=1,
    config={},
    clean_kwargs={},
    allow_unclosed=False,
):
    from distributed import Client

    """ Coroutine test with small cluster

    @gen_cluster()
    async def test_foo(scheduler, worker1, worker2):
        await ...  # use tornado coroutines

    See also:
        start
        end
    """
    if ncores is not None:
        # Backwards-compatibility shim for the old parameter name.
        warnings.warn("ncores= has moved to nthreads=", stacklevel=2)
        nthreads = ncores

    worker_kwargs = merge(
        {"memory_limit": system.MEMORY_LIMIT, "death_timeout": 10}, worker_kwargs
    )

    def _(func):
        def test_func():
            if not iscoroutinefunction(func):
                raise ValueError("@gen_cluster should wrap async def functions")

            result = None
            workers = []
            with clean(timeout=active_rpc_timeout, **clean_kwargs) as loop:

                async def coro():
                    with dask.config.set(config):
                        s = False
                        # Retry cluster startup up to 5 times before failing.
                        for i in range(5):
                            try:
                                s, ws = await start_cluster(
                                    nthreads,
                                    scheduler,
                                    loop,
                                    security=security,
                                    Worker=Worker,
                                    scheduler_kwargs=scheduler_kwargs,
                                    worker_kwargs=worker_kwargs,
                                )
                            except Exception as e:
                                logger.error(
                                    "Failed to start gen_cluster, retrying",
                                    exc_info=True,
                                )
                                await asyncio.sleep(1)
                            else:
                                workers[:] = ws
                                args = [s] + workers
                                break
                        if s is False:
                            raise Exception("Could not start cluster")
                        if client:
                            # Prepend an asynchronous Client to the test args.
                            c = await Client(
                                s.address,
                                loop=loop,
                                security=security,
                                asynchronous=True,
                                **client_kwargs,
                            )
                            args = [c] + args
                        try:
                            future = func(*args)
                            if timeout:
                                future = asyncio.wait_for(future, timeout)
                            result = await future
                            if s.validate:
                                s.validate_state()
                        finally:
                            # Tear down client, cluster and stray workers
                            # regardless of the test outcome.
                            if client and c.status not in ("closing", "closed"):
                                await c._close(fast=s.status == "closed")
                            await end_cluster(s, workers)
                            await asyncio.wait_for(cleanup_global_workers(), 1)

                        try:
                            c = await default_client()
                        except ValueError:
                            pass
                        else:
                            await c._close(fast=True)

                        def get_unclosed():
                            return [c for c in Comm._instances if not c.closed()] + [
                                c
                                for c in _global_clients.values()
                                if c.status != "closed"
                            ]

                        try:
                            # Give GC up to 5 seconds to release comms/clients.
                            start = time()
                            while time() < start + 5:
                                gc.collect()
                                if not get_unclosed():
                                    break
                                await asyncio.sleep(0.05)
                            else:
                                if allow_unclosed:
                                    print(f"Unclosed Comms: {get_unclosed()}")
                                else:
                                    raise RuntimeError("Unclosed Comms", get_unclosed())
                        finally:
                            Comm._instances.clear()
                            _global_clients.clear()

                        return result

                result = loop.run_sync(
                    coro, timeout=timeout * 2 if timeout else timeout
                )

            for w in workers:
                if getattr(w, "data", None):
                    try:
                        w.data.clear()
                    except EnvironmentError:
                        # zict backends can fail if their storage directory
                        # was already removed
                        pass
                    del w.data

            return result

        return test_func

    return _
def raises(func, exc=Exception):
    """Return True if calling *func* raises *exc*, False if it returns.

    Exceptions not matching *exc* propagate to the caller.
    """
    try:
        func()
    except exc:
        return True
    return False
def terminate_process(proc):
    """Politely interrupt *proc*, then make sure it is really dead.

    Sends SIGINT (CTRL_BREAK_EVENT on Windows), waits up to 10 seconds,
    and finally force-kills the process.
    """
    if proc.poll() is None:
        if sys.platform.startswith("win"):
            proc.send_signal(signal.CTRL_BREAK_EVENT)
        else:
            proc.send_signal(signal.SIGINT)
        try:
            if sys.version_info[0] == 3:
                proc.wait(10)
            else:
                # Python 2's Popen.wait() has no timeout: poll manually.
                start = time()
                while proc.poll() is None and time() < start + 10:
                    sleep(0.02)
        finally:
            # Make sure we don't leave the process lingering around
            with ignoring(OSError):
                proc.kill()


@contextmanager
def popen(args, **kwargs):
    """Run *args* as a subprocess with captured stdout/stderr.

    The process is terminated on exit; its output is dumped to stdout if
    the wrapped body raised.
    """
    kwargs["stdout"] = subprocess.PIPE
    kwargs["stderr"] = subprocess.PIPE
    if sys.platform.startswith("win"):
        # Allow using CTRL_C_EVENT / CTRL_BREAK_EVENT
        kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
    dump_stdout = False

    args = list(args)
    # Resolve the executable inside the active Python environment.
    if sys.platform.startswith("win"):
        args[0] = os.path.join(sys.prefix, "Scripts", args[0])
    else:
        args[0] = os.path.join(
            os.environ.get("DESTDIR", "") + sys.prefix, "bin", args[0]
        )
    proc = subprocess.Popen(args, **kwargs)
    try:
        yield proc
    except Exception:
        dump_stdout = True
        raise
    finally:
        try:
            terminate_process(proc)
        finally:
            # XXX Also dump stdout if return code != 0 ?
            out, err = proc.communicate()
            if dump_stdout:
                print("\n\nPrint from stderr\n %s\n=================\n" % args[0][0])
                print(err.decode())

                print("\n\nPrint from stdout\n=================\n")
                print(out.decode())
def wait_for_port(address, timeout=5):
    """Block until a TCP connection to *address* (host, port) succeeds.

    Raises RuntimeError if the port is not reachable within *timeout*
    seconds.
    """
    assert isinstance(address, tuple)
    deadline = time() + timeout

    while True:
        remaining = deadline - time()
        if remaining < 0:
            raise RuntimeError("Failed to connect to %s" % (address,))
        try:
            sock = socket.create_connection(address, timeout=remaining)
        except EnvironmentError:
            continue
        sock.close()
        return
def wait_for(predicate, timeout, fail_func=None, period=0.001):
    """Poll *predicate* every *period* seconds until it is true.

    After *timeout* seconds, calls *fail_func* (if given) and fails the
    test.
    """
    limit = time() + timeout
    while not predicate():
        sleep(period)
        if time() > limit:
            if fail_func is not None:
                fail_func()
            pytest.fail("condition not reached until %s seconds" % (timeout,))


async def async_wait_for(predicate, timeout, fail_func=None, period=0.001):
    """Async variant of wait_for(): polls with asyncio.sleep()."""
    limit = time() + timeout
    while not predicate():
        await asyncio.sleep(period)
        if time() > limit:
            if fail_func is not None:
                fail_func()
            pytest.fail("condition not reached until %s seconds" % (timeout,))
@memoize
def has_ipv6():
    """
    Return whether IPv6 is locally functional. This doesn't guarantee IPv6
    is properly configured outside of localhost.
    """
    serv = cli = None
    try:
        # Probe by binding an IPv6 listener and connecting to it locally.
        serv = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        serv.bind(("::", 0))
        serv.listen(5)
        cli = socket.create_connection(serv.getsockname()[:2])
    except EnvironmentError:
        return False
    else:
        return True
    finally:
        if cli is not None:
            cli.close()
        if serv is not None:
            serv.close()


if has_ipv6():

    def requires_ipv6(test_func):
        # IPv6 is available locally: run the test unchanged.
        return test_func


else:
    requires_ipv6 = pytest.mark.skip("ipv6 required")
async def assert_can_connect(addr, timeout=0.5, **kwargs):
"""
Check that it is possible to connect to the distributed *addr*
within the given *timeout*.
"""
comm = await connect(addr, timeout=timeout, **kwargs)
comm.abort()
async def assert_cannot_connect(
addr, timeout=0.5, exception_class=EnvironmentError, **kwargs
):
"""
Check that it is impossible to connect to the distributed *addr*
within the given *timeout*.
"""
with pytest.raises(exception_class):
comm = await connect(addr, timeout=timeout, **kwargs)
comm.abort()
async def assert_can_connect_from_everywhere_4_6(port, protocol="tcp", **kwargs):
"""
Check that the local *port* is reachable from all IPv4 and IPv6 addresses.
"""
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs),
]
if has_ipv6():
futures += [
assert_can_connect("%s://[::1]:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_4(
port, protocol="tcp", **kwargs,
):
"""
Check that the local *port* is reachable from all IPv4 addresses.
"""
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs),
]
if has_ipv6():
futures += [
assert_cannot_connect("%s://[::1]:%d" % (protocol, port), **kwargs),
assert_cannot_connect(
"%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs
),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_4(port, **kwargs):
"""
Check that the local *port* is only reachable from local IPv4 addresses.
"""
futures = [assert_can_connect("tcp://127.0.0.1:%d" % port, **kwargs)]
if get_ip() != "127.0.0.1": # No outside IPv4 connectivity?
futures += [assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs)]
if has_ipv6():
futures += [
assert_cannot_connect("tcp://[::1]:%d" % port, **kwargs),
assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_6(port, **kwargs):
"""
Check that the local *port* is reachable from all IPv6 addresses.
"""
assert has_ipv6()
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs),
assert_can_connect("tcp://[::1]:%d" % port, **kwargs),
assert_can_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_6(port, **kwargs):
    """
    Check that the local *port* is only reachable from local IPv6 addresses.
    """
    assert has_ipv6()
    checks = [
        assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs),
        assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs),
        assert_can_connect("tcp://[::1]:%d" % port, **kwargs),
    ]
    if get_ipv6() != "::1":  # No outside IPv6 connectivity?
        checks.append(assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs))
    await asyncio.gather(*checks)
@contextmanager
def captured_logger(logger, level=logging.INFO, propagate=None):
    """Capture output from the given Logger.

    Temporarily replaces the logger's handlers with a single StringIO-backed
    StreamHandler and yields the StringIO; everything is restored on exit.
    """
    if isinstance(logger, str):
        logger = logging.getLogger(logger)
    saved_level = logger.level
    saved_handlers = logger.handlers[:]
    if propagate is not None:
        saved_propagate = logger.propagate
        logger.propagate = propagate
    stream = io.StringIO()
    logger.handlers[:] = [logging.StreamHandler(stream)]
    logger.setLevel(level)
    try:
        yield stream
    finally:
        logger.handlers[:] = saved_handlers
        logger.setLevel(saved_level)
        if propagate is not None:
            logger.propagate = saved_propagate
@contextmanager
def captured_handler(handler):
    """Capture output from the given logging.StreamHandler.

    Swaps the handler's stream for a StringIO and yields it; the original
    stream is put back on exit.
    """
    assert isinstance(handler, logging.StreamHandler)
    original_stream = handler.stream
    handler.stream = io.StringIO()
    try:
        yield handler.stream
    finally:
        handler.stream = original_stream
@contextmanager
def new_config(new_config):
    """
    Temporarily change configuration dictionary.

    The global dask config is reset to the package defaults, overlaid with
    *new_config*, and fully restored (including logging setup) on exit.
    """
    from .config import defaults
    config = dask.config.config
    saved_config = copy.deepcopy(config)
    try:
        config.clear()
        config.update(copy.deepcopy(defaults))
        dask.config.update(config, new_config)
        initialize_logging(config)
        yield
    finally:
        # Restore the exact previous configuration and re-apply logging.
        config.clear()
        config.update(saved_config)
        initialize_logging(config)
@contextmanager
def new_environment(changes):
    """Temporarily apply *changes* to ``os.environ``, restoring it on exit."""
    saved_environ = dict(os.environ)
    os.environ.update(changes)
    try:
        yield
    finally:
        os.environ.clear()
        os.environ.update(saved_environ)
@contextmanager
def new_config_file(c):
    """
    Temporarily change configuration file to match dictionary *c*.

    Writes *c* as YAML to a temp file, points DASK_CONFIG at it, and restores
    the previous DASK_CONFIG value (or removes it) on exit.
    """
    import yaml
    previous = os.environ.get("DASK_CONFIG")
    fd, tmp_path = tempfile.mkstemp(prefix="dask-config")
    try:
        with os.fdopen(fd, "w") as fh:
            fh.write(yaml.dump(c))
        os.environ["DASK_CONFIG"] = tmp_path
        try:
            yield
        finally:
            if previous:
                os.environ["DASK_CONFIG"] = previous
            else:
                del os.environ["DASK_CONFIG"]
    finally:
        os.remove(tmp_path)
# Directory that holds the test TLS certificates (the package's "tests" dir).
certs_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "tests"))
def get_cert(filename):
    """
    Get the path to one of the test TLS certificates.
    """
    full_path = os.path.join(certs_dir, filename)
    # Fail loudly (with the path) if the fixture file is missing.
    assert os.path.exists(full_path), full_path
    return full_path
def tls_config():
    """
    A functional TLS configuration with our test certs.
    """
    ca_file = get_cert("tls-ca-cert.pem")
    keycert = get_cert("tls-key-cert.pem")
    tls = {
        "ca-file": ca_file,
        "client": {"cert": keycert},
        "scheduler": {"cert": keycert},
        "worker": {"cert": keycert},
    }
    return {"distributed": {"comm": {"tls": tls}}}
def tls_only_config():
    """
    A functional TLS configuration with our test certs, disallowing
    plain TCP communications.
    """
    config = tls_config()
    config["distributed"]["comm"]["require-encryption"] = True
    return config
def tls_security():
    """
    A Security object with proper TLS configuration.
    """
    # Build the Security object while the TLS config is active.
    with new_config(tls_config()):
        security = Security()
    return security
def tls_only_security():
    """
    A Security object with proper TLS configuration and disallowing plain
    TCP communications.
    """
    with new_config(tls_only_config()):
        security = Security()
        # Sanity-check that the config actually enforced encryption.
        assert security.require_encryption
    return security
def get_server_ssl_context(
    certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
    """Build an SSLContext for the server side of a TLS test connection.

    Client certificates are required and verified against *ca_file*;
    hostname checking is disabled.
    """
    context = ssl.create_default_context(
        ssl.Purpose.CLIENT_AUTH, cafile=get_cert(ca_file)
    )
    context.check_hostname = False
    context.verify_mode = ssl.CERT_REQUIRED
    context.load_cert_chain(get_cert(certfile), get_cert(keyfile))
    return context
def get_client_ssl_context(
    certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
    """Build an SSLContext for the client side of a TLS test connection.

    Server certificates are required and verified against *ca_file*;
    hostname checking is disabled.
    """
    context = ssl.create_default_context(
        ssl.Purpose.SERVER_AUTH, cafile=get_cert(ca_file)
    )
    context.check_hostname = False
    context.verify_mode = ssl.CERT_REQUIRED
    context.load_cert_chain(get_cert(certfile), get_cert(keyfile))
    return context
def bump_rlimit(limit, desired):
    """Raise the soft value of resource *limit* to at least *desired*.

    Skips the current test (via pytest.skip) when the limit cannot be
    queried or raised.

    Fix: ``soft`` was previously referenced in the except clause before
    assignment — if ``getrlimit`` itself raised, the handler crashed with
    NameError instead of skipping.
    """
    resource = pytest.importorskip("resource")
    soft = None
    try:
        soft, hard = resource.getrlimit(limit)
        if soft < desired:
            # Bump the soft limit; never lower the hard limit.
            resource.setrlimit(limit, (desired, max(hard, desired)))
    except Exception as e:
        pytest.skip("rlimit too low (%s) and can't be increased: %s" % (soft, e))
def gen_tls_cluster(**kwargs):
    """gen_cluster() variant whose scheduler and workers speak TLS only."""
    if "nthreads" not in kwargs:
        kwargs["nthreads"] = [("tls://127.0.0.1", 1), ("tls://127.0.0.1", 2)]
    return gen_cluster(
        scheduler="tls://127.0.0.1", security=tls_only_security(), **kwargs
    )
@contextmanager
def save_sys_modules():
    """Snapshot ``sys.modules`` and ``sys.path``; on exit remove entries that
    were added inside the block.

    Fixes three bugs in the previous version:
    - ``old_modules = sys.modules`` aliased the dict instead of copying it,
      so ``elem not in old_modules`` was always False and nothing was removed;
    - it iterated ``sys.modules.keys()`` while deleting (RuntimeError);
    - it deleted ``sys.path`` entries by index while enumerating, skipping
      elements as indices shifted.
    """
    old_modules = dict(sys.modules)
    old_path = list(sys.path)
    try:
        yield
    finally:
        # Drop path entries added inside the block, preserving original order.
        sys.path[:] = [p for p in sys.path if p in old_path]
        # Drop modules imported inside the block; iterate over a snapshot.
        for name in list(sys.modules):
            if name not in old_modules:
                del sys.modules[name]
@contextmanager
def check_thread_leak():
    """Context manager asserting that the block leaks no (unexpected) threads.

    Known-noisy thread names ("Threaded", "watch message", "TCP-Executor")
    are ignored. Waits up to ~5s for stragglers to exit, then dumps the first
    offender's call stack in the assertion message.
    """
    # NOTE: relies on the private CPython dict threading._active (tid -> Thread).
    active_threads_start = set(threading._active)
    yield
    start = time()
    while True:
        bad = [
            t
            for t, v in threading._active.items()
            if t not in active_threads_start
            and "Threaded" not in v.name
            and "watch message" not in v.name
            and "TCP-Executor" not in v.name
        ]
        if not bad:
            break
        else:
            sleep(0.01)
        if time() > start + 5:
            from distributed import profile
            tid = bad[0]
            thread = threading._active[tid]
            # Capture the leaked thread's current stack for the failure report.
            call_stacks = profile.call_stack(sys._current_frames()[tid])
            assert False, (thread, call_stacks)
@contextmanager
def check_process_leak(check=True):
    """Terminate stray child processes before and after the block.

    When *check* is true, waits up to ~20s for children spawned inside the
    block to exit and asserts that none remain.
    """
    for child in mp_context.active_children():
        child.terminate()
    yield
    if check:
        for _ in range(100):
            if not mp_context.active_children():
                break
            sleep(0.2)
        else:
            # Timed out waiting: fail if anything is still alive.
            assert not mp_context.active_children()
    _cleanup_dangling()
    for child in mp_context.active_children():
        child.terminate()
@contextmanager
def check_instances():
    """Context manager asserting that Client/Worker/Scheduler/Nanny/Comm
    instances created inside the block are properly closed afterwards.

    Instance registries are cleared up front so only objects created inside
    the block are checked.
    """
    Client._instances.clear()
    Worker._instances.clear()
    Scheduler._instances.clear()
    SpecCluster._instances.clear()
    # assert all(n.status == "closed" for n in Nanny._instances), {
    # n: n.status for n in Nanny._instances
    # }
    Nanny._instances.clear()
    _global_clients.clear()
    Comm._instances.clear()
    yield
    start = time()
    # Wait (up to 10s) for all global clients to deregister.
    while set(_global_clients):
        sleep(0.1)
        assert time() < start + 10
    _global_clients.clear()
    for w in Worker._instances:
        with ignoring(RuntimeError):  # closed IOLoop
            w.loop.add_callback(w.close, report=False, executor_wait=False)
            if w.status == "running":
                w.loop.add_callback(w.close)
    Worker._instances.clear()
    # Give Comms a few retries to finish closing before complaining.
    for i in range(5):
        if all(c.closed() for c in Comm._instances):
            break
        else:
            sleep(0.1)
    else:
        L = [c for c in Comm._instances if not c.closed()]
        Comm._instances.clear()
        print("Unclosed Comms", L)
        # raise ValueError("Unclosed Comms", L)
    assert all(n.status == "closed" or n.status == "init" for n in Nanny._instances), {
        n: n.status for n in Nanny._instances
    }
    # assert not list(SpecCluster._instances) # TODO
    assert all(c.status == "closed" for c in SpecCluster._instances), list(
        SpecCluster._instances
    )
    SpecCluster._instances.clear()
    Nanny._instances.clear()
    DequeHandler.clear_all_instances()
@contextmanager
def clean(threads=not WINDOWS, instances=True, timeout=1, processes=True):
    """Run a test body inside the full stack of leak/instance checks.

    Yields a pristine IOLoop; each check can be toggled via the keyword
    arguments. The nesting order of the checks is significant.
    """
    # No-op stand-in for checks that are disabled.
    @contextmanager
    def null():
        yield
    with check_thread_leak() if threads else null():
        with pristine_loop() as loop:
            with check_process_leak(check=processes):
                with check_instances() if instances else null():
                    with check_active_rpc(loop, timeout):
                        reset_config()
                        dask.config.set({"distributed.comm.timeouts.connect": "5s"})
                        # Restore default logging levels
                        # XXX use pytest hooks/fixtures instead?
                        for name, level in logging_levels.items():
                            logging.getLogger(name).setLevel(level)
                        yield loop
                    with ignoring(AttributeError):
                        del thread_state.on_event_loop_thread
@pytest.fixture
def cleanup():
    """pytest fixture: run the test under thread/process/instance leak checks,
    resetting dask config and default logging levels first."""
    with check_thread_leak():
        with check_process_leak():
            with check_instances():
                reset_config()
                dask.config.set({"distributed.comm.timeouts.connect": "5s"})
                for name, level in logging_levels.items():
                    logging.getLogger(name).setLevel(level)
                yield
|
decision_loop.py | from __future__ import annotations
import asyncio
import contextvars
import datetime
import json
import uuid
import random
import logging
import threading
from asyncio.base_futures import CancelledError
from asyncio.events import AbstractEventLoop
from asyncio.futures import Future
from asyncio.tasks import Task
from collections import OrderedDict
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Dict, Optional, Any, Callable
from more_itertools import peekable
from cadence.activity_method import ExecuteActivityParameters
from cadence.cadence_types import PollForDecisionTaskRequest, TaskList, PollForDecisionTaskResponse, \
RespondDecisionTaskCompletedRequest, \
CompleteWorkflowExecutionDecisionAttributes, Decision, DecisionType, RespondDecisionTaskCompletedResponse, \
HistoryEvent, EventType, WorkflowType, ScheduleActivityTaskDecisionAttributes, \
CancelWorkflowExecutionDecisionAttributes, StartTimerDecisionAttributes, TimerFiredEventAttributes
from cadence.conversions import json_to_args
from cadence.decisions import DecisionId, DecisionTarget
from cadence.exceptions import WorkflowTypeNotFound, NonDeterministicWorkflowException, ActivityTaskFailedException, \
ActivityTaskTimeoutException, SignalNotFound
from cadence.state_machines import ActivityDecisionStateMachine, DecisionStateMachine, CompleteWorkflowStateMachine, \
TimerDecisionStateMachine
from cadence.tchannel import TChannelException
from cadence.worker import Worker
from cadence.workflowservice import WorkflowService
logger = logging.getLogger(__name__)
def is_decision_event(event: HistoryEvent) -> bool:
    """Return True if *event* was produced directly by a workflow decision."""
    return event.event_type in (
        EventType.ActivityTaskScheduled,
        EventType.StartChildWorkflowExecutionInitiated,
        EventType.TimerStarted,
        EventType.WorkflowExecutionCompleted,
        EventType.WorkflowExecutionFailed,
        EventType.WorkflowExecutionCanceled,
        EventType.WorkflowExecutionContinuedAsNew,
        EventType.ActivityTaskCancelRequested,
        EventType.RequestCancelActivityTaskFailed,
        EventType.TimerCanceled,
        EventType.CancelTimerFailed,
        EventType.RequestCancelExternalWorkflowExecutionInitiated,
        EventType.MarkerRecorded,
        EventType.SignalExternalWorkflowExecutionInitiated,
    )
def nano_to_milli(nano):
    """Convert a duration from nanoseconds to milliseconds (float division)."""
    return nano / 1_000_000
class HistoryHelper:
    """Walks a workflow's history, yielding one DecisionEvents batch per
    decision task."""

    def __init__(self, events: List[HistoryEvent]):
        # peekable lets next() look ahead without consuming events.
        self.events = peekable(events)

    def has_next(self) -> bool:
        """Whether any history events remain."""
        try:
            self.events.peek()
            return True
        except StopIteration:
            return False

    def next(self) -> Optional[DecisionEvents]:
        """Return the next DecisionEvents batch, or None when exhausted.

        A batch collects events up to a DecisionTaskStarted (whose
        TimedOut/Failed follow-ups are skipped and whose Completed is
        consumed), then the decision events produced by that decision task.
        """
        events = self.events
        if not self.has_next():
            return None
        decision_events: List[HistoryEvent] = []
        new_events: List[HistoryEvent] = []
        replay = True
        next_decision_event_id = -1
        # noinspection PyUnusedLocal
        event: HistoryEvent
        for event in events:
            event_type = event.event_type
            if event_type == EventType.DecisionTaskStarted or not self.has_next():
                replay_current_time_milliseconds = nano_to_milli(event.timestamp)
                if not self.has_next():
                    # Last event in history: this is the live (non-replay) decision.
                    replay = False
                    next_decision_event_id = event.event_id + 2
                    break
                peeked: HistoryEvent = events.peek()
                peeked_type = peeked.event_type
                if peeked_type == EventType.DecisionTaskTimedOut or peeked_type == EventType.DecisionTaskFailed:
                    continue
                elif peeked_type == EventType.DecisionTaskCompleted:
                    next(events)
                    next_decision_event_id = peeked.event_id + 1
                    break
                else:
                    raise Exception(
                        "Unexpected event after DecisionTaskStarted: {}".format(peeked))
            new_events.append(event)
        # Collect the decision events emitted by the decision task just closed.
        while self.has_next():
            if not is_decision_event(events.peek()):
                break
            decision_events.append(next(events))
        # NOTE(review): replay_current_time_milliseconds is only bound when the
        # loop above saw a DecisionTaskStarted (or final) event; a malformed
        # history could raise UnboundLocalError here — confirm upstream invariant.
        result = DecisionEvents(new_events, decision_events, replay,
                                replay_current_time_milliseconds, next_decision_event_id)
        logger.debug("HistoryHelper next=%s", result)
        return result
@dataclass
class DecisionEvents:
    """One batch of history events corresponding to a single decision task."""
    # Events to process before producing new decisions.
    events: List[HistoryEvent]
    # Events produced by the decisions of the matching decision task.
    decision_events: List[HistoryEvent]
    # True while replaying already-recorded history; False for the live decision.
    replay: bool
    replay_current_time_milliseconds: int
    next_decision_event_id: int
class Status(Enum):
    """Lifecycle state of an ITask."""
    CREATED = 1
    RUNNING = 2
    DONE = 3
# Context variable holding the ITask currently executing on the decider's event loop.
current_task = contextvars.ContextVar("current_task")
@dataclass
class ITask:
    """Base class for workflow-side coroutine tasks driven by the decider's
    private event loop."""
    decider: ReplayDecider = None
    task: Task = None
    status: Status = Status.CREATED
    # Future the coroutine is currently parked on, if any (see await_till/unblock).
    awaited: Future = None

    def is_done(self):
        return self.status == Status.DONE

    def destroy(self):
        # Cancel the underlying asyncio task if it is still running.
        if self.status == Status.RUNNING:
            self.status = Status.DONE
            self.task.cancel()

    def start(self):
        # Subclasses that need explicit startup override this.
        pass

    async def await_till(self, c: Callable, timeout_seconds: int = 0) -> bool:
        """Park this task until ``c()`` is truthy or the optional timer fires.

        Returns True when the condition was met, False on timeout. The
        condition is re-checked each time unblock() wakes the task.
        """
        # NOTE(review): TimerCancellationHandler is not imported in this chunk
        # (annotation is unevaluated at runtime) — confirm it exists module-wide.
        timer_cancellation_handler: TimerCancellationHandler = None
        timer_fired = False

        def timer_callback(ex: Exception):
            nonlocal timer_fired
            # A falsy ex means the timer fired normally (not cancelled/failed).
            if not ex:
                timer_fired = True

        if timeout_seconds:
            timer_cancellation_handler = self.decider.decision_context.create_timer(delay_seconds=timeout_seconds, callback=timer_callback)
        while not c() and not timer_fired:
            self.awaited = self.decider.event_loop.create_future()
            await self.awaited
            assert self.awaited.done()
            self.awaited = None
        if timer_fired:
            return False
        if timer_cancellation_handler:
            # Condition met before timeout: cancel the pending timer.
            timer_cancellation_handler.accept(None)
        return True

    def unblock(self):
        # Wake the coroutine parked in await_till (if any) to re-check its condition.
        if self.awaited:
            self.awaited.set_result(None)

    @staticmethod
    def current() -> ITask:
        """Return the ITask bound to the current coroutine context."""
        return current_task.get()
@dataclass
class WorkflowMethodTask(ITask):
    """Task that instantiates the workflow class and runs its main method."""
    task_id: str = None
    workflow_input: List = None
    worker: Worker = None
    workflow_type: WorkflowType = None
    workflow_instance: object = None
    ret_value: object = None
    exception_thrown: BaseException = None

    def __post_init__(self):
        logger.debug(f"[task-{self.task_id}] Created")
        # Instantiation runs as its own task so current_task is set correctly.
        self.task = asyncio.get_event_loop().create_task(self.init_workflow_instance())

    async def init_workflow_instance(self):
        """Construct the workflow object, then schedule workflow_main()."""
        current_task.set(self)
        cls, _ = self.worker.get_workflow_method(self.workflow_type.name)
        try:
            self.workflow_instance = cls()
            self.task = asyncio.get_event_loop().create_task(self.workflow_main())
        except Exception as ex:
            logger.error(
                f"Initialization of Workflow {self.workflow_type.name}({str(self.workflow_input)[1:-1]}) failed", exc_info=1)
            self.exception_thrown = ex
            self.status = Status.DONE

    async def workflow_main(self):
        """Invoke the workflow method and record its result or exception."""
        logger.debug(f"[task-{self.task_id}] Running")
        if self.is_done():
            return
        current_task.set(self)
        if self.workflow_type.name not in self.worker.workflow_methods:
            self.status = Status.DONE
            self.exception_thrown = WorkflowTypeNotFound(self.workflow_type.name)
            logger.error(f"Workflow type not found: {self.workflow_type.name}")
            return
        cls, workflow_proc = self.worker.workflow_methods[self.workflow_type.name]
        self.status = Status.RUNNING
        try:
            logger.info(f"Invoking workflow {self.workflow_type.name}({str(self.workflow_input)[1:-1]})")
            self.ret_value = await workflow_proc(self.workflow_instance, *self.workflow_input)
            logger.info(
                f"Workflow {self.workflow_type.name}({str(self.workflow_input)[1:-1]}) returned {self.ret_value}")
            self.decider.complete_workflow_execution(self.ret_value)
        except CancelledError:
            # destroy() cancels the task on teardown; not an error.
            logger.debug("Coroutine cancelled (expected)")
        except Exception as ex:
            logger.error(
                f"Workflow {self.workflow_type.name}({str(self.workflow_input)[1:-1]}) failed", exc_info=1)
            self.exception_thrown = ex
        finally:
            self.status = Status.DONE

    def get_workflow_instance(self):
        return self.workflow_instance
@dataclass
class SignalMethodTask(ITask):
    """Task that delivers an incoming signal to the workflow instance."""
    task_id: str = None
    workflow_instance: object = None
    signal_name: str = None
    signal_input: List = None
    exception_thrown: BaseException = None
    ret_value: object = None

    def start(self):
        logger.debug(f"[signal-task-{self.task_id}-{self.signal_name}] Created")
        self.task = asyncio.get_event_loop().create_task(self.signal_main())

    async def signal_main(self):
        """Invoke the registered signal method and record its result/exception."""
        logger.debug(f"[signal-task-{self.task_id}-{self.signal_name}] Running")
        current_task.set(self)
        if not self.signal_name in self.workflow_instance._signal_methods:
            self.status = Status.DONE
            self.exception_thrown = SignalNotFound(self.signal_name)
            logger.error(f"Signal not found: {self.signal_name}")
            return
        signal_proc = self.workflow_instance._signal_methods[self.signal_name]
        self.status = Status.RUNNING
        try:
            logger.info(f"Invoking signal {self.signal_name}({str(self.signal_input)[1:-1]})")
            self.ret_value = await signal_proc(self.workflow_instance, *self.signal_input)
            logger.info(
                f"Signal {self.signal_name}({str(self.signal_input)[1:-1]}) returned {self.ret_value}")
            self.decider.complete_signal_execution(self)
        except CancelledError:
            # Teardown cancellation; not an error.
            logger.debug("Coroutine cancelled (expected)")
        except Exception as ex:
            logger.error(
                f"Signal {self.signal_name}({str(self.signal_input)[1:-1]}) failed", exc_info=1)
            self.exception_thrown = ex
        finally:
            self.status = Status.DONE
@dataclass
class EventLoopWrapper:
    """Thin wrapper around the thread's asyncio loop that supports running
    a single "tick" of already-scheduled callbacks."""
    event_loop: AbstractEventLoop = None

    def __post_init__(self):
        # Bind to whatever loop is current for this thread.
        self.event_loop = asyncio.get_event_loop()

    def run_event_loop_once(self):
        """Run callbacks that are currently ready, then return."""
        loop = self.event_loop
        loop.call_soon(loop.stop)
        loop.run_forever()

    def create_future(self) -> Future[Any]:
        """Create a new Future bound to the wrapped loop."""
        return self.event_loop.create_future()
@dataclass
class DecisionContext:
    """Bridges workflow coroutines and the decider: schedules activities and
    timers as decisions, and routes their completion history events back into
    the awaiting futures."""
    decider: ReplayDecider
    # scheduled activity event id -> future resolved with the activity result bytes.
    scheduled_activities: Dict[int, Future[bytes]] = field(default_factory=dict)
    # NOTE(review): ClockDecisionContext is not imported in this chunk —
    # presumably brought in elsewhere in the module; confirm.
    workflow_clock: ClockDecisionContext = None
    current_run_id: str = None

    def __post_init__(self):
        if not self.workflow_clock:
            self.workflow_clock = ClockDecisionContext(self.decider)

    async def schedule_activity_task(self, parameters: ExecuteActivityParameters):
        """Emit a ScheduleActivityTask decision and await the activity result.

        Returns the result decoded from JSON; re-raises any failure/timeout
        exception set by the completion handlers below.
        """
        attr = ScheduleActivityTaskDecisionAttributes()
        attr.activity_type = parameters.activity_type
        attr.input = parameters.input
        if parameters.heartbeat_timeout_seconds > 0:
            attr.heartbeat_timeout_seconds = parameters.heartbeat_timeout_seconds
        attr.schedule_to_close_timeout_seconds = parameters.schedule_to_close_timeout_seconds
        attr.schedule_to_start_timeout_seconds = parameters.schedule_to_start_timeout_seconds
        attr.start_to_close_timeout_seconds = parameters.start_to_close_timeout_seconds
        attr.activity_id = parameters.activity_id
        if not attr.activity_id:
            # Generate a replay-deterministic activity id.
            attr.activity_id = self.decider.get_and_increment_next_id()
        attr.task_list = TaskList()
        attr.task_list.name = parameters.task_list
        # PORT: RetryParameters retryParameters = parameters.getRetryParameters();
        # PORT: if (retryParameters != null) {
        # PORT: attributes.setRetryPolicy(retryParameters.toRetryPolicy());
        # PORT: }
        scheduled_event_id = self.decider.schedule_activity_task(schedule=attr)
        future = self.decider.event_loop.create_future()
        self.scheduled_activities[scheduled_event_id] = future
        await future
        assert future.done()
        exception = future.exception()
        if exception:
            raise exception
        raw_bytes = future.result()
        return json.loads(str(raw_bytes, "utf-8"))

    async def schedule_timer(self, seconds: int):
        """Start a workflow timer and await its firing (or failure)."""
        future = self.decider.event_loop.create_future()

        def callback(ex: Exception):
            nonlocal future
            if ex:
                future.set_exception(ex)
            else:
                future.set_result("time-fired")

        self.decider.decision_context.create_timer(delay_seconds=seconds, callback=callback)
        await future
        assert future.done()
        exception = future.exception()
        if exception:
            raise exception
        return

    def handle_activity_task_completed(self, event: HistoryEvent):
        """Resolve the matching activity future with the completion result."""
        attr = event.activity_task_completed_event_attributes
        if self.decider.handle_activity_task_closed(attr.scheduled_event_id):
            future = self.scheduled_activities.get(attr.scheduled_event_id)
            if future:
                self.scheduled_activities.pop(attr.scheduled_event_id)
                future.set_result(attr.result)
            else:
                raise NonDeterministicWorkflowException(
                    f"Trying to complete activity event {attr.scheduled_event_id} that is not in scheduled_activities")

    def handle_activity_task_failed(self, event: HistoryEvent):
        """Fail the matching activity future with ActivityTaskFailedException."""
        attr = event.activity_task_failed_event_attributes
        if self.decider.handle_activity_task_closed(attr.scheduled_event_id):
            future = self.scheduled_activities.get(attr.scheduled_event_id)
            if future:
                self.scheduled_activities.pop(attr.scheduled_event_id)
                ex = ActivityTaskFailedException(attr.reason, attr.details)
                future.set_exception(ex)
            else:
                raise NonDeterministicWorkflowException(
                    f"Trying to complete activity event {attr.scheduled_event_id} that is not in scheduled_activities")

    def handle_activity_task_timed_out(self, event: HistoryEvent):
        """Fail the matching activity future with ActivityTaskTimeoutException."""
        attr = event.activity_task_timed_out_event_attributes
        if self.decider.handle_activity_task_closed(attr.scheduled_event_id):
            future = self.scheduled_activities.get(attr.scheduled_event_id)
            if future:
                self.scheduled_activities.pop(attr.scheduled_event_id)
                ex = ActivityTaskTimeoutException(event.event_id, attr.timeout_type, attr.details)
                future.set_exception(ex)
            else:
                raise NonDeterministicWorkflowException(
                    f"Trying to complete activity event {attr.scheduled_event_id} that is not in scheduled_activities")

    def create_timer(self, delay_seconds: int, callback: Callable):
        return self.workflow_clock.create_timer(delay_seconds, callback)

    def set_replay_current_time_milliseconds(self, replay_current_time_milliseconds: int):
        # The replay clock must be monotonic.
        if replay_current_time_milliseconds < self.workflow_clock.current_time_millis():
            raise Exception("workflow clock moved back")
        self.workflow_clock.set_replay_current_time_milliseconds(replay_current_time_milliseconds)

    def current_time_millis(self):
        return self.workflow_clock.current_time_millis()

    def set_replaying(self, replaying: bool):
        self.workflow_clock.set_replaying(replaying)

    def is_replaying(self):
        return self.workflow_clock.is_replaying()

    def handle_timer_fired(self, attributes: TimerFiredEventAttributes):
        self.workflow_clock.handle_timer_fired(attributes)

    def handle_timer_canceled(self, event: HistoryEvent):
        self.workflow_clock.handle_timer_canceled(event)

    def set_current_run_id(self, run_id: str):
        self.current_run_id = run_id

    def random_uuid(self) -> uuid.UUID:
        """Replay-deterministic UUID derived from the run id and a counter."""
        return uuid.uuid3(uuid.UUID(self.current_run_id), str(self.decider.get_and_increment_next_id()))

    def new_random(self) -> random.Random:
        """Replay-deterministic Random seeded from random_uuid()."""
        random_uuid = self.random_uuid()
        lsb = random_uuid.bytes[:8]
        generator = random.Random()
        generator.seed(lsb, version=2)
        return generator
@dataclass
class ReplayDecider:
    """Replays one workflow execution's history and produces its next decisions.

    Owns the private event loop the workflow/signal coroutines run on and a
    registry of decision state machines keyed by DecisionId.
    """
    execution_id: str
    workflow_type: WorkflowType
    worker: Worker
    workflow_task: WorkflowMethodTask = None
    tasks: List[ITask] = field(default_factory=list)
    event_loop: EventLoopWrapper = field(default_factory=EventLoopWrapper)
    completed: bool = False
    next_decision_event_id: int = 0
    # Counter backing get_and_increment_next_id (replay-deterministic ids).
    id_counter: int = 0
    decision_events: DecisionEvents = None
    decisions: OrderedDict[DecisionId, DecisionStateMachine] = field(default_factory=OrderedDict)
    decision_context: DecisionContext = None
    activity_id_to_scheduled_event_id: Dict[str, int] = field(default_factory=dict)

    def __post_init__(self):
        self.decision_context = DecisionContext(decider=self)

    def decide(self, events: List[HistoryEvent]):
        """Process the full event history and return the resulting decisions."""
        helper = HistoryHelper(events)
        while helper.has_next():
            decision_events = helper.next()
            self.process_decision_events(decision_events)
        return self.get_decisions()

    def process_decision_events(self, decision_events: DecisionEvents):
        """Process one DecisionEvents batch: replay events, run coroutines,
        then replay the batch's decision events into the state machines."""
        self.decision_context.set_replaying(decision_events.replay)
        self.decision_context.set_replay_current_time_milliseconds(decision_events.replay_current_time_milliseconds)
        self.handle_decision_task_started(decision_events)
        for event in decision_events.events:
            self.process_event(event)
        if self.completed:
            return
        # Wake blocked tasks and give coroutines one tick to make progress.
        self.unblock_all()
        self.event_loop.run_event_loop_once()
        if decision_events.replay:
            self.notify_decision_sent()
        for event in decision_events.decision_events:
            self.process_event(event)

    def unblock_all(self):
        # Wake every task parked in await_till so it re-checks its condition.
        for t in self.tasks:
            t.unblock()

    def process_event(self, event: HistoryEvent):
        """Dispatch *event* through the module-level event_handlers table."""
        event_handler = event_handlers.get(event.event_type)
        if not event_handler:
            raise Exception(f"No event handler for event type {event.event_type.name}")
        event_handler(self, event)

    def handle_workflow_execution_started(self, event: HistoryEvent):
        """Create and start the main workflow task from the start event."""
        start_event_attributes = event.workflow_execution_started_event_attributes
        self.decision_context.set_current_run_id(start_event_attributes.original_execution_run_id)
        if start_event_attributes.input is None:
            workflow_input = []
        else:
            workflow_input = json_to_args(start_event_attributes.input)
        self.workflow_task = WorkflowMethodTask(task_id=self.execution_id, workflow_input=workflow_input,
                                               worker=self.worker, workflow_type=self.workflow_type, decider=self)
        # Run one loop tick so the workflow instance gets constructed.
        self.event_loop.run_event_loop_once()
        assert self.workflow_task.workflow_instance
        self.tasks.append(self.workflow_task)

    def handle_workflow_execution_cancel_requested(self, event: HistoryEvent):
        self.cancel_workflow_execution()

    def notify_decision_sent(self):
        # Tell each state machine with a pending decision that it was delivered.
        for state_machine in self.decisions.values():
            if state_machine.get_decision():
                state_machine.handle_decision_task_started_event()

    def handle_decision_task_started(self, decision_events: DecisionEvents):
        self.decision_events = decision_events
        self.next_decision_event_id = decision_events.next_decision_event_id

    def complete_workflow_execution(self, ret_value):
        """Record a CompleteWorkflowExecution decision carrying *ret_value* as JSON."""
        # PORT: addAllMissingVersionMarker(false, Optional.empty());
        decision = Decision()
        attr = CompleteWorkflowExecutionDecisionAttributes()
        attr.result = json.dumps(ret_value)
        decision.complete_workflow_execution_decision_attributes = attr
        decision.decision_type = DecisionType.CompleteWorkflowExecution
        decision_id = DecisionId(DecisionTarget.SELF, 0)
        self.add_decision(decision_id, CompleteWorkflowStateMachine(decision_id, decision))
        self.completed = True

    def cancel_workflow_execution(self):
        """Record a CancelWorkflowExecution decision and mark the workflow done."""
        logger.info("Canceling workflow: %s", self.execution_id)
        decision = Decision()
        attr = CancelWorkflowExecutionDecisionAttributes()
        attr.details = None
        decision.cancel_workflow_execution_decision_attributes = attr
        decision.decision_type = DecisionType.CancelWorkflowExecution
        decision_id = DecisionId(DecisionTarget.SELF, 0)
        self.add_decision(decision_id, CompleteWorkflowStateMachine(decision_id, decision))
        self.completed = True

    def schedule_activity_task(self, schedule: ScheduleActivityTaskDecisionAttributes) -> int:
        """Register an activity decision; return its scheduled event id."""
        # PORT: addAllMissingVersionMarker(false, Optional.empty());
        next_decision_event_id = self.next_decision_event_id
        decision_id = DecisionId(DecisionTarget.ACTIVITY, next_decision_event_id)
        self.activity_id_to_scheduled_event_id[schedule.activity_id] = next_decision_event_id
        self.add_decision(decision_id, ActivityDecisionStateMachine(decision_id, schedule_attributes=schedule))
        return next_decision_event_id

    def complete_signal_execution(self, task: SignalMethodTask):
        # A finished signal task is removed entirely from the task list.
        task.destroy()
        self.tasks.remove(task)

    def handle_activity_task_closed(self, scheduled_event_id: int) -> bool:
        """Advance the activity state machine; True when it is fully done."""
        decision: DecisionStateMachine = self.get_decision(DecisionId(DecisionTarget.ACTIVITY, scheduled_event_id))
        assert decision
        decision.handle_completion_event()
        return decision.is_done()

    def handle_activity_task_scheduled(self, event: HistoryEvent):
        decision = self.get_decision(DecisionId(DecisionTarget.ACTIVITY, event.event_id))
        decision.handle_initiated_event(event)

    def handle_activity_task_started(self, event: HistoryEvent):
        attr = event.activity_task_started_event_attributes
        decision = self.get_decision(DecisionId(DecisionTarget.ACTIVITY, attr.scheduled_event_id))
        decision.handle_started_event(event)

    def handle_activity_task_completed(self, event: HistoryEvent):
        self.decision_context.handle_activity_task_completed(event)

    def handle_activity_task_failed(self, event: HistoryEvent):
        self.decision_context.handle_activity_task_failed(event)

    def handle_activity_task_timed_out(self, event: HistoryEvent):
        self.decision_context.handle_activity_task_timed_out(event)

    def handle_workflow_execution_signaled(self, event: HistoryEvent):
        """Spawn and start a SignalMethodTask for an incoming signal event."""
        signaled_event_attributes = event.workflow_execution_signaled_event_attributes
        signal_input = signaled_event_attributes.input
        if not signal_input:
            signal_input = []
        else:
            signal_input = json_to_args(signal_input)
        task = SignalMethodTask(task_id=self.execution_id,
                                workflow_instance=self.workflow_task.workflow_instance,
                                signal_name=signaled_event_attributes.signal_name,
                                signal_input=signal_input,
                                decider=self)
        self.tasks.append(task)
        task.start()

    def add_decision(self, decision_id: DecisionId, decision: DecisionStateMachine):
        self.decisions[decision_id] = decision
        # Each recorded decision consumes one future history event id.
        self.next_decision_event_id += 1

    def get_and_increment_next_id(self) -> str:
        """Return a replay-deterministic, monotonically increasing id string."""
        ret_value = str(self.id_counter)
        self.id_counter += 1
        return ret_value

    def get_decision(self, decision_id: DecisionId) -> DecisionStateMachine:
        result: DecisionStateMachine = self.decisions.get(decision_id)
        if not result:
            # An unknown id means the code path diverged from recorded history.
            raise NonDeterministicWorkflowException(f"Unknown {decision_id}.")
        return result

    def get_decisions(self) -> List[Decision]:
        """Collect the pending decision (if any) from every state machine."""
        decisions = []
        for state_machine in self.decisions.values():
            d = state_machine.get_decision()
            if d:
                decisions.append(d)
        # PORT: // Include FORCE_IMMEDIATE_DECISION timer only if there are more then 100 events
        # PORT: int size = result.size();
        # PORT: if (size > MAXIMUM_DECISIONS_PER_COMPLETION &&
        # PORT: !isCompletionEvent(result.get(MAXIMUM_DECISIONS_PER_COMPLETION - 2))) {
        # PORT: result = result.subList(0, MAXIMUM_DECISIONS_PER_COMPLETION - 1);
        # PORT: StartTimerDecisionAttributes attributes = new StartTimerDecisionAttributes();
        # PORT: attributes.setStartToFireTimeoutSeconds(0);
        # PORT: attributes.setTimerId(FORCE_IMMEDIATE_DECISION_TIMER);
        # PORT: Decision d = new Decision();
        # PORT: d.setStartTimerDecisionAttributes(attributes);
        # PORT: d.setDecisionType(DecisionType.StartTimer);
        # PORT: result.add(d);
        # PORT: }
        return decisions

    def destroy(self):
        if self.workflow_task:
            self.workflow_task.destroy()

    def start_timer(self, request: StartTimerDecisionAttributes):
        """Register a StartTimer decision; return its start event id."""
        start_event_id = self.next_decision_event_id
        decision_id = DecisionId(DecisionTarget.TIMER, start_event_id)
        self.add_decision(decision_id, TimerDecisionStateMachine(decision_id, start_timer_attributes=request))
        return start_event_id

    def cancel_timer(self, start_event_id: int, immediate_cancellation_callback: Callable):
        """Cancel the timer identified by *start_event_id*; True when it's done."""
        decision: DecisionStateMachine = self.get_decision(DecisionId(DecisionTarget.TIMER, start_event_id))
        if decision.is_done():
            return True
        if decision.cancel(immediate_cancellation_callback):
            # A cancel decision also consumes a history event id.
            self.next_decision_event_id += 1
        return decision.is_done()

    def handle_timer_closed(self, attributes: TimerFiredEventAttributes) -> bool:
        decision = self.get_decision(DecisionId(DecisionTarget.TIMER, attributes.started_event_id))
        decision.handle_completion_event()
        return decision.is_done()

    def handle_timer_canceled(self, event: HistoryEvent) -> bool:
        attributes = event.timer_canceled_event_attributes
        decision = self.get_decision(DecisionId(DecisionTarget.TIMER, attributes.started_event_id))
        decision.handle_cancellation_event()
        return decision.is_done()

    def handle_cancel_timer_failed(self, event: HistoryEvent) -> bool:
        started_event_id = event.event_id
        decision = self.get_decision(DecisionId(DecisionTarget.TIMER, started_event_id))
        decision.handle_cancellation_failure_event(event)
        return decision.is_done()

    def handle_timer_started(self, event: HistoryEvent):
        decision = self.get_decision(DecisionId(DecisionTarget.TIMER, event.event_id))
        decision.handle_initiated_event(event)

    def handle_timer_fired(self, event: HistoryEvent):
        attributes = event.timer_fired_event_attributes
        self.decision_context.handle_timer_fired(attributes)
# noinspection PyUnusedLocal
def noop(*args):
    """Event handler that deliberately ignores its arguments."""
    return None
def on_timer_canceled(self: ReplayDecider, event: HistoryEvent):
    # Module-level handler bound into event_handlers: forwards TimerCanceled
    # events to the decision context's workflow clock.
    self.decision_context.handle_timer_canceled(event)
# Dispatch table: history event type -> handler invoked as handler(decider, event).
# Values are unbound ReplayDecider methods (or module-level functions).
event_handlers = {
    EventType.WorkflowExecutionStarted: ReplayDecider.handle_workflow_execution_started,
    EventType.WorkflowExecutionCancelRequested: ReplayDecider.handle_workflow_execution_cancel_requested,
    EventType.DecisionTaskScheduled: noop,
    EventType.DecisionTaskStarted: noop,  # Filtered by HistoryHelper
    EventType.DecisionTaskTimedOut: noop,  # TODO: check
    EventType.ActivityTaskScheduled: ReplayDecider.handle_activity_task_scheduled,
    EventType.ActivityTaskStarted: ReplayDecider.handle_activity_task_started,
    EventType.ActivityTaskCompleted: ReplayDecider.handle_activity_task_completed,
    EventType.ActivityTaskFailed: ReplayDecider.handle_activity_task_failed,
    EventType.ActivityTaskTimedOut: ReplayDecider.handle_activity_task_timed_out,
    EventType.WorkflowExecutionSignaled: ReplayDecider.handle_workflow_execution_signaled,
    EventType.TimerFired: ReplayDecider.handle_timer_fired,
    EventType.TimerStarted: ReplayDecider.handle_timer_started,
    EventType.TimerCanceled: on_timer_canceled,
    EventType.CancelTimerFailed: ReplayDecider.handle_cancel_timer_failed
}
@dataclass
class DecisionTaskLoop:
    """Polls the Cadence service for decision tasks and drives a ReplayDecider
    per workflow execution."""
    worker: Worker
    # Lazily created in run(); one WorkflowService connection per loop thread.
    service: WorkflowService = None
    deciders: Dict[str, ReplayDecider] = field(default_factory=dict)

    def __post_init__(self):
        pass
def start(self):
thread = threading.Thread(target=self.run)
thread.start()
def run(self):
try:
logger.info(f"Decision task worker started: {WorkflowService.get_identity()}")
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
self.service = WorkflowService.create(self.worker.host, self.worker.port)
while True:
if self.worker.is_stop_requested():
return
decision_task: PollForDecisionTaskResponse = self.poll()
if not decision_task:
continue
decisions = self.process_task(decision_task)
self.respond_decisions(decision_task.task_token, decisions)
finally:
# noinspection PyPep8,PyBroadException
try:
self.service.close()
except:
logger.warning("service.close() failed", exc_info=1)
self.worker.notify_thread_stopped()
def poll(self) -> Optional[PollForDecisionTaskResponse]:
try:
polling_start = datetime.datetime.now()
poll_decision_request = PollForDecisionTaskRequest()
poll_decision_request.identity = WorkflowService.get_identity()
poll_decision_request.task_list = TaskList()
poll_decision_request.task_list.name = self.worker.task_list
poll_decision_request.domain = self.worker.domain
# noinspection PyUnusedLocal
task: PollForDecisionTaskResponse
task, err = self.service.poll_for_decision_task(poll_decision_request)
polling_end = datetime.datetime.now()
logger.debug("PollForDecisionTask: %dms", (polling_end - polling_start).total_seconds() * 1000)
except TChannelException as ex:
logger.error("PollForDecisionTask error: %s", ex)
return None
if err:
logger.error("PollForDecisionTask failed: %s", err)
return None
if not task.task_token:
logger.debug("PollForActivityTask has no task token (expected): %s", task)
return None
return task
def process_task(self, decision_task: PollForDecisionTaskResponse) -> List[Decision]:
execution_id = str(decision_task.workflow_execution)
decider = ReplayDecider(execution_id, decision_task.workflow_type, self.worker)
decisions: List[Decision] = decider.decide(decision_task.history.events)
decider.destroy()
return decisions
def respond_decisions(self, task_token: bytes, decisions: List[Decision]):
service = self.service
request = RespondDecisionTaskCompletedRequest()
request.task_token = task_token
request.decisions.extend(decisions)
request.identity = WorkflowService.get_identity()
# noinspection PyUnusedLocal
response: RespondDecisionTaskCompletedResponse
response, err = service.respond_decision_task_completed(request)
if err:
logger.error("Error invoking RespondDecisionTaskCompleted: %s", err)
else:
logger.debug("RespondDecisionTaskCompleted: %s", response)
from cadence.clock_decision_context import ClockDecisionContext, TimerCancellationHandler
|
parallel.py | '''
Created on Aug 22, 2014
@author: David Zwicker <dzwicker@seas.harvard.edu>
This module contains convenience functions for scanning multiple
mouse videos in parallel.
'''
import os
import multiprocessing as mp
import time
from .simple import scan_video
from .algorithm.data_handler import load_any_video
from video.io.base import VideoFork
from video.io.parallel import create_video_pipe
from video.filters import FilterCrop
# dictionary defining the four quadrants
# (keys abbreviate Up/Down + Left/Right; values are the location strings
# understood by get_window_pos and by FilterCrop's `region` argument)
QUADRANTS = {'UL': 'upper left',
             'DL': 'lower left',
             'UR': 'upper right',
             'DR': 'lower right'}
def get_window_pos(location, video_size, offset=100):
    """Return the (x, y) position of a debug window for one quadrant.

    The full video area is split into four quadrants; `location` names one
    of them ('upper left', 'lower left', 'upper right', 'lower right') and
    the window is pulled `offset` pixels inwards on the far edges.

    Raises ValueError for an unknown location string.
    """
    half_w = video_size[0] // 2
    half_h = video_size[1] // 2
    positions = {
        'upper left': (0, 0),
        'lower left': (0, half_h - offset),
        'upper right': (half_w - offset, 0),
        'lower right': (half_w - offset, half_h - offset),
    }
    try:
        return positions[location]
    except KeyError:
        raise ValueError('Unknown location `%s`' % location)
def scan_video_quadrants(video=None, parameters=None, **kwargs):
    """ Takes a video and scans all four quadrants in parallel.
    Here, the video is read in one process, split into four video streams
    and analyzed in four separate processes.

    Additional parameters include a dictionary 'parameters'; if `video` is
    None it is loaded from parameters['base_folder'] +
    parameters['video/filename_pattern'].
    """
    if parameters is None:
        parameters = {}

    # load video if it is not supplied
    if video is None:
        video_filename_pattern = os.path.join(parameters['base_folder'],
                                              parameters['video/filename_pattern'])
        video = load_any_video(video_filename_pattern)

    # make sure that scan_video does not crop the video, since we already do
    # it in this process (see below)
    kwargs['crop_video'] = False

    # create a fork, such that the data can be analyzed by multiple consumers
    video_fork = VideoFork(video, synchronized=True, client_count=len(QUADRANTS))

    senders = []
    # .items() instead of the Python-2-only .iteritems() keeps this function
    # working under both Python 2 and Python 3
    for name, crop in QUADRANTS.items():
        # save the cropping rectangle for further analysis later
        parameters['video/cropping_rect'] = crop
        parameters['debug/window_position'] = get_window_pos(crop, video_fork.size)
        kwargs['parameters'] = parameters
        # crop the video to the right region
        video_crop = FilterCrop(video_fork.get_client(), region=crop,
                                color_channel=1)
        # construct the video sender
        sender, receiver = create_video_pipe(video_crop, name=name)
        # launch a new process, where the receiver is going to live
        # (kwargs are pickled at start(), so each child sees the parameter
        # values current at this iteration)
        proc = mp.Process(target=scan_video, args=(name, receiver),
                          kwargs=kwargs)
        proc.start()
        senders.append(sender)

    try:
        # start the main loop where we check all senders periodically
        while any(sender.running for sender in senders):
            # check if any senders are running
            for sender in senders:
                sender.check()
            # let the CPU rest a little
            time.sleep(0.001)
    except (KeyboardInterrupt, SystemExit):
        # try to interrupt the system cleanly
        for sender in senders:
            sender.abort_iteration()
|
sorted.py | import os
from queue import Queue
from threading import Thread
import pandas as pd
import tensorflow as tf
import collections
import args
import tokenization
import modeling
import optimization
# os.environ['CUDA_VISIBLE_DEVICES'] = '1'
class InputExample(object):
    """One (possibly paired) text example for sequence classification.

    Attributes mirror the constructor arguments:
      guid    -- unique example id
      text_a  -- untokenized first sequence (always present)
      text_b  -- untokenized second sequence, or None for single-sequence tasks
      label   -- label string for train/dev examples, None for test examples
    """

    def __init__(self, guid, text_a, text_b=None, label=None):
        self.guid, self.text_a = guid, text_a
        self.text_b, self.label = text_b, label
class InputFeatures(object):
    """Model-ready features for a single example.

    input_ids   -- token ids, zero-padded to the maximum sequence length
    input_mask  -- 1 for real tokens, 0 for padding
    segment_ids -- 0 for the first sequence, 1 for the second
    label_id    -- integer label index
    """

    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        self.input_ids, self.input_mask = input_ids, input_mask
        self.segment_ids, self.label_id = segment_ids, label_id
class DataProcessor(object):
    """Abstract interface for sequence-classification dataset readers.

    Subclasses return lists of `InputExample` for each split and the list
    of possible labels; every method here raises NotImplementedError.
    """

    def get_train_examples(self, data_dir):
        """Return the `InputExample`s of the training split."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Return the `InputExample`s of the dev split."""
        raise NotImplementedError()

    def get_test_examples(self, data_dir):
        """Return the `InputExample`s used for prediction."""
        raise NotImplementedError()

    def get_labels(self):
        """Return the list of label strings for this data set."""
        raise NotImplementedError()
class SimProcessor(DataProcessor):
    """Reads sentence-pair CSVs with columns (text_a, text_b, label).

    Expects train.csv / dev.csv / test.csv inside `data_dir`.
    """

    def _read_examples(self, data_dir, filename, guid_prefix):
        """Load one CSV split and wrap every row as an InputExample.

        Shared by the three split readers below (previously three copies of
        the same loop).
        """
        file_path = os.path.join(data_dir, filename)
        df = pd.read_csv(file_path, encoding='utf-8')
        examples = []
        for index, row in enumerate(df.values):
            guid = '%s-%d' % (guid_prefix, index)
            text_a = tokenization.convert_to_unicode(str(row[0]))
            text_b = tokenization.convert_to_unicode(str(row[1]))
            label = str(row[2])
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples

    def get_train_examples(self, data_dir):
        return self._read_examples(data_dir, 'train.csv', 'train')

    def get_dev_examples(self, data_dir):
        # Bug fix: dev guids were previously labelled 'test-%d' (copy-paste
        # from get_test_examples); they are now 'dev-%d'.
        return self._read_examples(data_dir, 'dev.csv', 'dev')

    def get_test_examples(self, data_dir):
        return self._read_examples(data_dir, 'test.csv', 'test')

    def get_sentence_examples(self, questions):
        """Yield unlabeled InputExamples for ad-hoc (sentence1, sentence2) pairs."""
        for index, data in enumerate(questions):
            guid = 'test-%d' % index
            text_a = tokenization.convert_to_unicode(str(data[0]))
            text_b = tokenization.convert_to_unicode(str(data[1]))
            label = str(0)
            yield InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)

    def get_labels(self):
        """Binary similarity labels."""
        return ['0', '1']
class BertSim:
    """BERT sentence-pair similarity classifier wrapped in a TF 1.x Estimator.

    Supports training, evaluation and online prediction.  In PREDICT mode a
    daemon thread runs Estimator.predict over a generator fed from
    `input_queue`, pushing each result batch to `output_queue`.
    """

    def __init__(self, batch_size=args.batch_size):
        self.mode = None  # a tf.estimator.ModeKeys value; set via set_mode()
        self.max_seq_length = args.max_seq_len
        self.tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=True)
        self.batch_size = batch_size
        self.estimator = None  # built lazily by set_mode()/get_estimator()
        self.processor = SimProcessor()
        tf.logging.set_verbosity(tf.logging.INFO)

    def set_mode(self, mode):
        """Select TRAIN/EVAL/PREDICT mode and rebuild the estimator.

        PREDICT mode additionally creates the request/response queues and
        starts the daemon thread that serves predictions from them.
        """
        self.mode = mode
        self.estimator = self.get_estimator()
        if mode == tf.estimator.ModeKeys.PREDICT:
            self.input_queue = Queue(maxsize=1)
            self.output_queue = Queue(maxsize=1)
            self.predict_thread = Thread(target=self.predict_from_queue, daemon=True)
            self.predict_thread.start()

    # NOTE(review): declared without `self` and without @staticmethod; it is
    # only ever called as BertSim.create_model(...), which works in Python 3.
    def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
                     labels, num_labels, use_one_hot_embeddings):
        """Creates a classification model.

        Returns (loss, per_example_loss, logits, probabilities); loss terms
        are only meaningful when `labels` holds real label ids.
        """
        model = modeling.BertModel(
            config=bert_config,
            is_training=is_training,
            input_ids=input_ids,
            input_mask=input_mask,
            token_type_ids=segment_ids,
            use_one_hot_embeddings=use_one_hot_embeddings)
        # In the demo, we are doing a simple classification task on the entire
        # segment.
        #
        # If you want to use the token-level output, use model.get_sequence_output()
        # instead.
        output_layer = model.get_pooled_output()
        hidden_size = output_layer.shape[-1].value
        output_weights = tf.get_variable(
            "output_weights", [num_labels, hidden_size],
            initializer=tf.truncated_normal_initializer(stddev=0.02))
        output_bias = tf.get_variable(
            "output_bias", [num_labels], initializer=tf.zeros_initializer())
        with tf.variable_scope("loss"):
            if is_training:
                # I.e., 0.1 dropout
                output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
            logits = tf.matmul(output_layer, output_weights, transpose_b=True)
            logits = tf.nn.bias_add(logits, output_bias)
            probabilities = tf.nn.softmax(logits, axis=-1)
            log_probs = tf.nn.log_softmax(logits, axis=-1)
            one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
            per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
            loss = tf.reduce_mean(per_example_loss)
        return (loss, per_example_loss, logits, probabilities)

    def model_fn_builder(self, bert_config, num_labels, init_checkpoint, learning_rate,
                         num_train_steps, num_warmup_steps,
                         use_one_hot_embeddings):
        """Returns a `model_fn` closure for TPUEstimator."""

        def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
            from tensorflow.python.estimator.model_fn import EstimatorSpec
            tf.logging.info("*** Features ***")
            for name in sorted(features.keys()):
                tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
            input_ids = features["input_ids"]
            input_mask = features["input_mask"]
            segment_ids = features["segment_ids"]
            label_ids = features["label_ids"]
            is_training = (mode == tf.estimator.ModeKeys.TRAIN)
            (total_loss, per_example_loss, logits, probabilities) = BertSim.create_model(
                bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
                num_labels, use_one_hot_embeddings)
            tvars = tf.trainable_variables()
            initialized_variable_names = {}
            # Warm-start variables from the checkpoint when one is given.
            if init_checkpoint:
                (assignment_map, initialized_variable_names) \
                    = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
            tf.logging.info("**** Trainable Variables ****")
            for var in tvars:
                init_string = ""
                if var.name in initialized_variable_names:
                    init_string = ", *INIT_FROM_CKPT*"
                tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
                                init_string)
            # Build the mode-appropriate EstimatorSpec.
            if mode == tf.estimator.ModeKeys.TRAIN:
                train_op = optimization.create_optimizer(
                    total_loss, learning_rate, num_train_steps, num_warmup_steps, False)
                output_spec = EstimatorSpec(
                    mode=mode,
                    loss=total_loss,
                    train_op=train_op)
            elif mode == tf.estimator.ModeKeys.EVAL:
                def metric_fn(per_example_loss, label_ids, logits):
                    predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
                    accuracy = tf.metrics.accuracy(label_ids, predictions)
                    auc = tf.metrics.auc(label_ids, predictions)
                    loss = tf.metrics.mean(per_example_loss)
                    return {
                        "eval_accuracy": accuracy,
                        "eval_auc": auc,
                        "eval_loss": loss,
                    }

                eval_metrics = metric_fn(per_example_loss, label_ids, logits)
                output_spec = EstimatorSpec(
                    mode=mode,
                    loss=total_loss,
                    eval_metric_ops=eval_metrics)
            else:
                output_spec = EstimatorSpec(mode=mode, predictions=probabilities)
            return output_spec

        return model_fn

    def get_estimator(self):
        """Build a tf.Estimator for the current mode.

        TRAIN warm-starts from the pretrained checkpoint (args.ckpt_name);
        other modes restore the fine-tuned model from args.output_dir.
        """
        from tensorflow.python.estimator.estimator import Estimator
        from tensorflow.python.estimator.run_config import RunConfig
        bert_config = modeling.BertConfig.from_json_file(args.config_name)
        label_list = self.processor.get_labels()
        # NOTE(review): reads the full training CSV even in EVAL/PREDICT
        # mode, only to size the learning-rate schedule.
        train_examples = self.processor.get_train_examples(args.data_dir)
        num_train_steps = int(
            len(train_examples) / self.batch_size * args.num_train_epochs)
        num_warmup_steps = int(num_train_steps * 0.1)
        if self.mode == tf.estimator.ModeKeys.TRAIN:
            init_checkpoint = args.ckpt_name
        else:
            init_checkpoint = args.output_dir
        model_fn = self.model_fn_builder(
            bert_config=bert_config,
            num_labels=len(label_list),
            init_checkpoint=init_checkpoint,
            learning_rate=args.learning_rate,
            num_train_steps=num_train_steps,
            num_warmup_steps=num_warmup_steps,
            use_one_hot_embeddings=False)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = args.gpu_memory_fraction
        config.log_device_placement = False
        return Estimator(model_fn=model_fn, config=RunConfig(session_config=config), model_dir=args.output_dir,
                         params={'batch_size': self.batch_size})

    def predict_from_queue(self):
        """Daemon-thread loop: stream prediction batches into output_queue."""
        for i in self.estimator.predict(input_fn=self.queue_predict_input_fn, yield_single_examples=False):
            self.output_queue.put(i)

    def queue_predict_input_fn(self):
        """input_fn that pulls prediction batches out of the input queue."""
        return (tf.data.Dataset.from_generator(
            self.generate_from_queue,
            output_types={
                'input_ids': tf.int32,
                'input_mask': tf.int32,
                'segment_ids': tf.int32,
                'label_ids': tf.int32},
            output_shapes={
                'input_ids': (None, self.max_seq_length),
                'input_mask': (None, self.max_seq_length),
                'segment_ids': (None, self.max_seq_length),
                'label_ids': (1,)}).prefetch(10))

    def convert_examples_to_features(self, examples, label_list, max_seq_length, tokenizer):
        """Convert a set of `InputExample`s to a list of `InputFeatures`.

        Generator variant of convert_single_example below; yields one
        InputFeatures per example.
        """
        for (ex_index, example) in enumerate(examples):
            # label_map is rebuilt each iteration; cheap for two labels.
            label_map = {}
            for (i, label) in enumerate(label_list):
                label_map[label] = i
            tokens_a = tokenizer.tokenize(example.text_a)
            tokens_b = None
            if example.text_b:
                tokens_b = tokenizer.tokenize(example.text_b)
            if tokens_b:
                # Modifies `tokens_a` and `tokens_b` in place so that the total
                # length is less than the specified length.
                # Account for [CLS], [SEP], [SEP] with "- 3"
                self._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
            else:
                # Account for [CLS] and [SEP] with "- 2"
                if len(tokens_a) > max_seq_length - 2:
                    tokens_a = tokens_a[0:(max_seq_length - 2)]
            # The convention in BERT is:
            # (a) For sequence pairs:
            # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
            # (b) For single sequences:
            # tokens: [CLS] the dog is hairy . [SEP]
            # type_ids: 0 0 0 0 0 0 0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens = []
            segment_ids = []
            tokens.append("[CLS]")
            segment_ids.append(0)
            for token in tokens_a:
                tokens.append(token)
                segment_ids.append(0)
            tokens.append("[SEP]")
            segment_ids.append(0)
            if tokens_b:
                for token in tokens_b:
                    tokens.append(token)
                    segment_ids.append(1)
                tokens.append("[SEP]")
                segment_ids.append(1)
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1] * len(input_ids)
            # Zero-pad up to the sequence length.
            while len(input_ids) < max_seq_length:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            label_id = label_map[example.label]
            # Log the first five examples for debugging.
            if ex_index < 5:
                tf.logging.info("*** Example ***")
                tf.logging.info("guid: %s" % (example.guid))
                tf.logging.info("tokens: %s" % " ".join(
                    [tokenization.printable_text(x) for x in tokens]))
                tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
                tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
                tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
                tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
            feature = InputFeatures(
                input_ids=input_ids,
                input_mask=input_mask,
                segment_ids=segment_ids,
                label_id=label_id)
            yield feature

    def generate_from_queue(self):
        """Generator feeding the predict Dataset: one feature batch per
        item placed on input_queue (blocks on get())."""
        while True:
            predict_examples = self.processor.get_sentence_examples(self.input_queue.get())
            features = list(self.convert_examples_to_features(predict_examples, self.processor.get_labels(),
                                                              args.max_seq_len, self.tokenizer))
            yield {
                'input_ids': [f.input_ids for f in features],
                'input_mask': [f.input_mask for f in features],
                'segment_ids': [f.segment_ids for f in features],
                'label_ids': [f.label_id for f in features]
            }

    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length."""
        # This is a simple heuristic which will always truncate the longer sequence
        # one token at a time. This makes more sense than truncating an equal percent
        # of tokens from each, since if one sequence is very short then each token
        # that's truncated likely contains more information than a longer sequence.
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()

    def convert_single_example(self, ex_index, example, label_list, max_seq_length, tokenizer):
        """Converts a single `InputExample` into a single `InputFeatures`."""
        label_map = {}
        for (i, label) in enumerate(label_list):
            label_map[label] = i
        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
        if tokens_b:
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            self._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[0:(max_seq_length - 2)]
        # The convention in BERT is:
        # (a) For sequence pairs:
        # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
        # (b) For single sequences:
        # tokens: [CLS] the dog is hairy . [SEP]
        # type_ids: 0 0 0 0 0 0 0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens = []
        segment_ids = []
        tokens.append("[CLS]")
        segment_ids.append(0)
        for token in tokens_a:
            tokens.append(token)
            segment_ids.append(0)
        tokens.append("[SEP]")
        segment_ids.append(0)
        if tokens_b:
            for token in tokens_b:
                tokens.append(token)
                segment_ids.append(1)
            tokens.append("[SEP]")
            segment_ids.append(1)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)
        # Zero-pad up to the sequence length.
        while len(input_ids) < max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        label_id = label_map[example.label]
        # Log the first five examples for debugging.
        if ex_index < 5:
            tf.logging.info("*** Example ***")
            tf.logging.info("guid: %s" % (example.guid))
            tf.logging.info("tokens: %s" % " ".join(
                [tokenization.printable_text(x) for x in tokens]))
            tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
        feature = InputFeatures(
            input_ids=input_ids,
            input_mask=input_mask,
            segment_ids=segment_ids,
            label_id=label_id)
        return feature

    def file_based_convert_examples_to_features(self, examples, label_list, max_seq_length, tokenizer, output_file):
        """Convert a set of `InputExample`s to a TFRecord file."""
        writer = tf.python_io.TFRecordWriter(output_file)
        for (ex_index, example) in enumerate(examples):
            if ex_index % 10000 == 0:
                tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
            feature = self.convert_single_example(ex_index, example, label_list,
                                                  max_seq_length, tokenizer)

            def create_int_feature(values):
                # Wrap a list of ints as a tf.train.Feature.
                f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
                return f

            features = collections.OrderedDict()
            features["input_ids"] = create_int_feature(feature.input_ids)
            features["input_mask"] = create_int_feature(feature.input_mask)
            features["segment_ids"] = create_int_feature(feature.segment_ids)
            features["label_ids"] = create_int_feature([feature.label_id])
            tf_example = tf.train.Example(features=tf.train.Features(feature=features))
            writer.write(tf_example.SerializeToString())
        # NOTE(review): the TFRecordWriter is never explicitly closed —
        # confirm records are flushed before the file is read back.

    def file_based_input_fn_builder(self, input_file, seq_length, is_training, drop_remainder):
        """Creates an `input_fn` closure to be passed to TPUEstimator."""
        name_to_features = {
            "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
            "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
            "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
            "label_ids": tf.FixedLenFeature([], tf.int64),
        }

        def _decode_record(record, name_to_features):
            """Decodes a record to a TensorFlow example."""
            example = tf.parse_single_example(record, name_to_features)
            # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
            # So cast all int64 to int32.
            for name in list(example.keys()):
                t = example[name]
                if t.dtype == tf.int64:
                    t = tf.to_int32(t)
                example[name] = t
            return example

        def input_fn(params):
            """The actual input function."""
            batch_size = params["batch_size"]
            # For training, we want a lot of parallel reading and shuffling.
            # For eval, we want no shuffling and parallel reading doesn't matter.
            d = tf.data.TFRecordDataset(input_file)
            if is_training:
                d = d.repeat()
                d = d.shuffle(buffer_size=100)
            d = d.apply(
                tf.contrib.data.map_and_batch(
                    lambda record: _decode_record(record, name_to_features),
                    batch_size=batch_size,
                    drop_remainder=drop_remainder))
            return d

        return input_fn

    def train(self):
        """Fine-tune on train.csv, writing checkpoints to args.output_dir.

        Raises ValueError when set_mode() was never called or the requested
        sequence length exceeds the model's position embeddings.
        """
        if self.mode is None:
            raise ValueError("Please set the 'mode' parameter")
        bert_config = modeling.BertConfig.from_json_file(args.config_name)
        if args.max_seq_len > bert_config.max_position_embeddings:
            raise ValueError(
                "Cannot use sequence length %d because the BERT model "
                "was only trained up to sequence length %d" %
                (args.max_seq_len, bert_config.max_position_embeddings))
        tf.gfile.MakeDirs(args.output_dir)
        label_list = self.processor.get_labels()
        train_examples = self.processor.get_train_examples(args.data_dir)
        num_train_steps = int(len(train_examples) / args.batch_size * args.num_train_epochs)
        estimator = self.get_estimator()
        train_file = os.path.join(args.output_dir, "train.tf_record")
        self.file_based_convert_examples_to_features(train_examples, label_list, args.max_seq_len, self.tokenizer,
                                                     train_file)
        tf.logging.info("***** Running training *****")
        tf.logging.info(" Num examples = %d", len(train_examples))
        tf.logging.info(" Batch size = %d", args.batch_size)
        tf.logging.info(" Num steps = %d", num_train_steps)
        train_input_fn = self.file_based_input_fn_builder(input_file=train_file, seq_length=args.max_seq_len,
                                                          is_training=True,
                                                          drop_remainder=True)
        # early_stopping = tf.contrib.estimator.stop_if_no_decrease_hook(
        # estimator,
        # metric_name='loss',
        # max_steps_without_decrease=10,
        # min_steps=num_train_steps)
        # estimator.train(input_fn=train_input_fn, hooks=[early_stopping])
        estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)

    def eval(self):
        """Evaluate on dev.csv and write eval_results.txt to args.output_dir.

        (Shadows the builtin eval() only as an attribute name.)
        """
        if self.mode is None:
            raise ValueError("Please set the 'mode' parameter")
        eval_examples = self.processor.get_dev_examples(args.data_dir)
        eval_file = os.path.join(args.output_dir, "eval.tf_record")
        label_list = self.processor.get_labels()
        self.file_based_convert_examples_to_features(
            eval_examples, label_list, args.max_seq_len, self.tokenizer, eval_file)
        tf.logging.info("***** Running evaluation *****")
        tf.logging.info(" Num examples = %d", len(eval_examples))
        tf.logging.info(" Batch size = %d", self.batch_size)
        eval_input_fn = self.file_based_input_fn_builder(
            input_file=eval_file,
            seq_length=args.max_seq_len,
            is_training=False,
            drop_remainder=False)
        estimator = self.get_estimator()
        result = estimator.evaluate(input_fn=eval_input_fn, steps=None)
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with tf.gfile.GFile(output_eval_file, "w") as writer:
            tf.logging.info("***** Eval results *****")
            for key in sorted(result.keys()):
                tf.logging.info(" %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

    def predict(self, sentence1, sentence2):
        """Blocking online prediction for one sentence pair (PREDICT mode only)."""
        if self.mode is None:
            raise ValueError("Please set the 'mode' parameter")
        self.input_queue.put([(sentence1, sentence2)])
        prediction = self.output_queue.get()
        return prediction
if __name__ == '__main__':
    # Run evaluation by default; the commented lines show how to run
    # training or interactive prediction instead.
    sim = BertSim()
    # sim.set_mode(tf.estimator.ModeKeys.TRAIN)
    # sim.train()
    sim.set_mode(tf.estimator.ModeKeys.EVAL)
    sim.eval()
    # sim.set_mode(tf.estimator.ModeKeys.PREDICT)
    # while True:
    #     sentence1 = input('sentence1: ')
    #     sentence2 = input('sentence2: ')
    #     predict = sim.predict(sentence1, sentence2)
    #     print(f'similarity:{predict[0][1]}')
|
MinecraftSkinDownloader_ttkbootstrap.py | '''
作品名:MinecraftSkinDownloader
Github仓库地址:https://github.com/NewbieXvwu/MinecraftSkinDownloader
Gitee仓库地址:https://gitee.com/NewbieXvwu/MinecraftSkinDownloader
关于本程序:这是一个可以简单地下载任何Minecraft正版玩家的皮肤的软件,使用Python编写,由NewbieXvwu维护。
作者:NewbieXvwu
'''
version_int = 2.4  # main program version number
ispreview = False  # whether this build is a preview release
previewversion = "0"  # preview version number (not auto-updated)
if ispreview:  # build the human-readable version string
    version = "v" + str(version_int) + " Preview " + previewversion
else:
    version = "v" + str(version_int)
#导入本地库(有些没用到,屎山懒得翻了)
from copyreg import clear_extension_cache
import ctypes, sys
import imp
import os
import json
import base64
import tkinter
from urllib.request import urlretrieve
import zipfile
import shutil
from tkinter import messagebox
import threading
import platform
import ctypes
import time
import winreg
# Flag polled by the installer watchdog thread to know when to stop.
# (`global` at module level is a no-op but harmless.)
global ThreadShouldStop
ThreadShouldStop = False


def on_closing():
    """Window-close handler shown while runtime libraries are installing.

    Pops an error box (in Chinese): "Error: required runtime libraries are
    being installed! Forcing exit would corrupt them."  The runtime string
    itself is intentionally left untranslated.
    """
    tkinter.messagebox.showerror("错误", "正在安装必要的运行库!\n强制退出会造成运行库损坏!")
#尝试安装并导入第三方库
try:
import requests
except:#没有安装requests
from tkinter import *
import tkinter
from tkinter.ttk import *
def sc_main_():
sc_=Tk()
#窗口居中
scw=sc_.winfo_screenwidth()
sch=sc_.winfo_screenheight()
w=300
h=200
x=(scw-w)/2
y=(sch-h)/2
sc_.title("正在安装运行库")
sc_.geometry("%dx%d+%d+%d"%(w,h,x,y))
sc_.maxsize(w,h)
sc_.minsize(w,h)
try:#从双源尝试下载Logo
sc_.iconbitmap('logo.ico')
except:
try:
urlretrieve("https://gitee.com/NewbieXvwu/MinecraftSkinDownloader/raw/main/logo.ico","logo.ico")
sc_.iconbitmap('logo.ico')
except:
urlretrieve("https://github.com/NewbieXvwu/MinecraftSkinDownloader/raw/main/logo.ico","logo.ico")
sc_.iconbitmap('logo.ico')
try:
ctypes.windll.shcore.SetProcessDpiAwareness(1)#高DPI适配
ScaleFactor=ctypes.windll.shcore.GetScaleFactorForDevice(0)
sc_.tk.call('tk', 'scaling', ScaleFactor/75)
except:
pass
def showmain():
while True:
for i in range(100):
try:
# 每次更新加1
pb_['value'] = i + 1
# 更新画面
sc_.update()
time.sleep(0.05)
except:
exit()
for i in range(100):
try:
# 每次更新减1
pb_['value'] = 100 - i
# 更新画面
sc_.update()
time.sleep(0.05)
except:
exit()
def ThreadStop():
while True:
try:
if ThreadShouldStop:
#sc_.destroy()
os.system("taskkill -f -im python.exe")
os.system("taskkill -f -im pythonw.exe")
os.system("taskkill -f -im py.exe")
os.system("taskkill -f -im pyw.exe")
except:
pass
time.sleep(0.1)
run___=threading.Thread(target=ThreadStop)
run___.daemon=True
run___.start()
def show():#多线程运行主函数,防止主线程GUI卡死
run__=threading.Thread(target=showmain)
run__.start()
lb1_=Label(sc_,text="正在安装程序必要的运行库……",font=("宋体",13))
lb1_.place(x=30,y=30)
lb2_=Label(sc_,text=" 正在安装:requests\n\n安装完毕后请手动重启程序",font=("宋体",10))
lb2_.place(x=150,y=90,anchor="center")
pb_=Progressbar(sc_,length=240,mode='indeterminate',orient=tkinter.HORIZONTAL)
pb_.place(x=30,y=130)
show()
sc_.protocol('WM_DELETE_WINDOW', on_closing)
sc_.mainloop()
run___=threading.Thread(target=sc_main_)
run___.start()
result=os.popen("pip install -i https://pypi.tuna.tsinghua.edu.cn/simple requests").read()
if "Successfully installed" in result:#安装requests成功
import requests
from requests import delete
ThreadShouldStop=True
else:#安装requests失败
if tkinter.messagebox.askyesno("错误","运行库安装失败,程序无法继续运行!\n请把以下内容提交给开发者:\n"+result+"\n是否要提交错误?"):os.startfile("https://github.com/NewbieXvwu/MinecraftSkinDownloader/issues/new?assignees=&labels=bug&template=bug_report.yml&title=%5B%E6%BC%8F%E6%B4%9E%5D+%E6%97%A0%E6%B3%95%E5%AE%89%E8%A3%85%E4%BE%9D%E8%B5%96%E5%BA%93")
exit()
try:
from ttkbootstrap import Style
except:#没有安装ttkbootstrap
result=os.popen("pip install -i https://pypi.tuna.tsinghua.edu.cn/simple ttkbootstrap").read()
if "Successfully installed" in result:#安装ttkbootstrap成功
from ttkbootstrap import Style
ThreadShouldStop=True
else:#安装ttkbootstrap失败
if tkinter.messagebox.askyesno("错误","运行库安装失败,程序无法继续运行!\n请把以下内容提交给开发者:\n"+result+"\n是否要提交错误?"):os.startfile("https://github.com/NewbieXvwu/MinecraftSkinDownloader/issues/new?assignees=&labels=bug&template=bug_report.yml&title=%5B%E6%BC%8F%E6%B4%9E%5D+%E6%97%A0%E6%B3%95%E5%AE%89%E8%A3%85%E4%BE%9D%E8%B5%96%E5%BA%93")
exit()
else:#安装了requests
try:
from ttkbootstrap import Style
except:#没有安装ttkbootstrap
from tkinter import *
import tkinter
from tkinter.ttk import *
def sc_main_():
sc_=Tk()
#窗口居中
scw=sc_.winfo_screenwidth()
sch=sc_.winfo_screenheight()
w=300
h=200
x=(scw-w)/2
y=(sch-h)/2
sc_.title("正在安装运行库")
sc_.geometry("%dx%d+%d+%d"%(w,h,x,y))
sc_.maxsize(w,h)
sc_.minsize(w,h)
try:#从双源尝试下载Logo
sc_.iconbitmap('logo.ico')
except:
try:
urlretrieve("https://gitee.com/NewbieXvwu/MinecraftSkinDownloader/raw/main/logo.ico","logo.ico")
sc_.iconbitmap('logo.ico')
except:
urlretrieve("https://github.com/NewbieXvwu/MinecraftSkinDownloader/raw/main/logo.ico","logo.ico")
sc_.iconbitmap('logo.ico')
try:
ctypes.windll.shcore.SetProcessDpiAwareness(1)#高DPI适配
ScaleFactor=ctypes.windll.shcore.GetScaleFactorForDevice(0)
sc_.tk.call('tk', 'scaling', ScaleFactor/75)
except:
pass
def showmain():
while True:
for i in range(100):
try:
# 每次更新加1
pb_['value'] = i + 1
# 更新画面
sc_.update()
time.sleep(0.05)
except:
exit()
for i in range(100):
try:
# 每次更新减1
pb_['value'] = 100 - i
# 更新画面
sc_.update()
time.sleep(0.05)
except:
exit()
def ThreadStop():
while True:
try:
if ThreadShouldStop:
#sc_.destroy()
os.system("taskkill -f -im python.exe")
os.system("taskkill -f -im pythonw.exe")
os.system("taskkill -f -im py.exe")
os.system("taskkill -f -im pyw.exe")
except:
pass
time.sleep(0.1)
run___=threading.Thread(target=ThreadStop)
run___.start()
def show():#多线程运行主函数,防止主线程GUI卡死
run__=threading.Thread(target=showmain)
run__.start()
lb1_=Label(sc_,text="正在安装程序必要的运行库……",font=("宋体",13))
lb1_.place(x=30,y=30)
lb2_=Label(sc_,text=" 正在安装:ttkbootstrap\n\n安装完毕后请手动重启程序",font=("宋体",10))
lb2_.place(x=150,y=90,anchor="center")
pb_=Progressbar(sc_,length=240,mode='indeterminate',orient=tkinter.HORIZONTAL)
pb_.place(x=30,y=130)
show()
sc_.protocol('WM_DELETE_WINDOW', on_closing)
sc_.mainloop()
run___=threading.Thread(target=sc_main_)
run___.daemon=True
run___.start()
result=os.popen("pip install -i https://pypi.tuna.tsinghua.edu.cn/simple ttkbootstrap").read()
if "Successfully installed" in result:#安装ttkbootstrap成功
from ttkbootstrap import Style
ThreadShouldStop=True
else:#安装ttkbootstrap失败
if tkinter.messagebox.askyesno("错误","运行库安装失败,程序无法继续运行!\n请把以下内容提交给开发者:\n"+result+"\n是否要提交错误?"):os.startfile("https://github.com/NewbieXvwu/MinecraftSkinDownloader/issues/new?assignees=&labels=bug&template=bug_report.yml&title=%5B%E6%BC%8F%E6%B4%9E%5D+%E6%97%A0%E6%B3%95%E5%AE%89%E8%A3%85%E4%BE%9D%E8%B5%96%E5%BA%93")
exit()
#定义函数
def getzbmain():
    """Worker for the download action: resolve the player's UUID via the
    Mojang API, download their skin (and cape, if present), then optionally
    pack the skin into a Minecraft resource pack.

    Reads the player name from the module-level Entry `e` and reports
    progress through the StringVar `zt`. Runs on a worker thread (see
    getzb), so failed lookups must return early instead of falling through.

    Fixes over the original:
      * error paths (bad status code / missing UUID) previously fell
        through and crashed the worker thread with a NameError later on;
        they now return immediately after setting the status label.
      * removed the dead `mcmeta` local, which was accidentally a tuple
        (comma-joined strings) and never used.
    """
    id_=e.get()
    if id_=="":
        tkinter.messagebox.showerror(title='错误', message='请填写内容!')
    else:
        zt.set("状态:正在向Mojang请求玩家的UUID……")
        url1="https://api.mojang.com/users/profiles/minecraft/"+id_
        r = requests.get(url1)
        del url1
        if r.status_code!=200:
            zt.set("状态:Bugjump出现错误,请检查你的输入!")
            return  # fix: the original fell through and crashed later
        zt.set("状态:读取UUID中……")
        r=json.loads(r.text)
        try:
            uuid=r['id']
        except KeyError:
            zt.set("状态:Bugjump出现错误,请检查你的输入!")
            return  # fix: same fall-through bug as above
        del r
        zt.set("状态:向Mojang请求下载皮肤的地址中……")
        url2="https://sessionserver.mojang.com/session/minecraft/profile/"+uuid
        r = requests.get(url2)
        del url2
        if r.status_code!=200:
            zt.set("状态:Bugjump出现错误,请检查你的输入!")
            return  # fix: same fall-through bug as above
        zt.set("状态:读取皮肤下载地址中……")
        r=json.loads(r.text)
        # The texture payload is a base64-encoded JSON document stored in
        # the first element of "properties".
        properties=r["properties"][0]["value"]
        del r
        zt.set("状态:解码皮肤下载地址中……")
        properties=json.loads(base64.b64decode(properties).decode())
        url3=properties["textures"]
        del properties
        havecape=False
        try:
            # "CAPE" only exists for players that own a cape; a missing key
            # or a failed download is best-effort, not fatal.
            cape=url3["CAPE"]["url"]
            filename=id_+'_cape.png'
            zt.set("状态:成功获取披风下载直链,正在尝试下载……")
            urlretrieve(cape,filename)
            havecape=True
        except:
            pass
        url3=url3["SKIN"]
        try:
            # "metadata"/"model" is "slim" for Alex-type (3px arm) skins.
            isalex=url3["metadata"]["model"]
        except KeyError:
            isalex=""
        url3=url3["url"]
        filename=id_+'.png'
        zt.set("状态:成功获取皮肤下载直链,正在尝试下载……")
        urlretrieve(url3,filename)
        del url3
        del filename
        if havecape:
            tkinter.messagebox.showwarning(title="下载完毕", message="下载完毕!此玩家还拥有披风,已同时下载!")
        else:
            tkinter.messagebox.showwarning(title="下载完毕", message="下载完毕!")
        zt.set("状态:待命")
        lb2.config(textvariable=zt)
        exit_=tkinter.messagebox.askyesno(title="下载完毕", message="下载完毕!按“确认”打包皮肤成材质包,或者按“取消”打开文件!")
        if exit_:
            # Build a resource pack: <id_>/pack.mcmeta, pack.png and the
            # skin copied to assets/minecraft/textures/entity/{steve|alex}.png.
            try:
                shutil.rmtree(".\\"+id_)  # drop any stale temp directory
            except:
                zt.set("状态:正在删除旧的临时目录……")
            zt.set("状态:正在创建新的临时目录……")
            lb2.config(textvariable=zt)
            os.mkdir("./"+id_)
            zt.set("状态:正在创建材质包说明文件……")
            lb2.config(textvariable=zt)
            filename = './'+id_+"/pack.mcmeta"
            with open(filename, 'w') as file_object:
                file_object.write("{\"pack\":{\"pack_format\":4,\"description\":\"§cSkin Resourcepack\"}}")
            del filename
            zt.set("状态:正在下载材质包Logo……")
            lb2.config(textvariable=zt)
            urlretrieve("https://pic.downk.cc/item/5ff174673ffa7d37b35bb165.png","./"+id_+"/pack.png")
            zt.set("状态:正在创建皮肤目录……")
            lb2.config(textvariable=zt)
            os.mkdir("./"+id_+"/assets")
            os.mkdir("./"+id_+"/assets/minecraft")
            os.mkdir("./"+id_+"/assets/minecraft/textures")
            os.mkdir("./"+id_+"/assets/minecraft/textures/entity")
            zt.set("状态:正在复制皮肤文件……")
            lb2.config(textvariable=zt)
            if isalex=="slim":
                cmd="copy "+id_+".png .\\"+id_+"\\assets\\minecraft\\textures\\entity\\alex.png"
            else:
                cmd="copy "+id_+".png .\\"+id_+"\\assets\\minecraft\\textures\\entity\\steve.png"
            os.system(cmd)
            del cmd
            zt.set("状态:正在压缩材质包……")
            lb2.config(textvariable=zt)
            shutil.make_archive("Skin_"+id_,'zip',id_)
            zt.set("状态:正在删除临时目录……")
            lb2.config(textvariable=zt)
            shutil.rmtree(".\\"+id_)
            zt.set("状态:待命")
            if os.path.exists(".\\.minecraft\\resourcepacks"):
                exit_=tkinter.messagebox.askyesno(title="创建材质包成功", message="成功创建材质包!\n注意:材质包会将游戏内的所有玩家的皮肤都替换成你想要的皮肤,可能会导致一些小问题!\n检测到程序目录下有Minecraft安装,如果要直接导入Minecraft,请按下“确认”,否则请按下“取消”打开材质包。")
                if exit_:
                    os.system("copy Skin_"+id_+".zip .\\.minecraft\\resourcepacks\\"+id_+".zip")
                    exit_=tkinter.messagebox.askyesno(title="导入成功", message="导入成功!\n是否要打开材质包文件夹?")
                    if exit_:
                        os.system("start \"\" .\\.minecraft\\resourcepacks\\")
                else:
                    os.system("start \"\" "+"\""+id_+'.zip'+"\"")
            else:
                exit_=tkinter.messagebox.askyesno(title="创建材质包成功", message="创建材质包成功!注意:材质包会将游戏内的所有玩家的皮肤都替换成你想要的皮肤,可能会导致一些小问题!\n是否要打开材质包?")
                if exit_:
                    os.system("start \"\" "+"\"Skin_"+id_+'.zip'+"\"")
        else:
            os.startfile(id_+'.png')
def getzb(ev=None):
    """Kick off getzbmain on a worker thread so the Tk main loop stays
    responsive (bound both to the button command and the Return key)."""
    worker = threading.Thread(target=getzbmain)
    worker.start()
def info():  # About dialog
    """Open the "about" Toplevel window with project/author links."""
    def opengithub():
        os.startfile("https://github.com/NewbieXvwu/MinecraftSkinDownloader")
    def opengitee():
        os.startfile("https://gitee.com/NewbieXvwu/MinecraftSkinDownloader")
    def openbilibili():
        os.startfile("https://space.bilibili.com/505201154")
    about=Toplevel()
    about.title("关于本程序")
    # Center the window on the screen.
    aboutscw=about.winfo_screenwidth()
    aboutsch=about.winfo_screenheight()
    aboutw=300
    abouth=210
    aboutx=(aboutscw-aboutw)/2
    abouty=(aboutsch-abouth)/2
    about.geometry("%dx%d+%d+%d"%(aboutw,abouth,aboutx,abouty))
    about.iconbitmap('logo.ico')
    lb4=Label(about,text="关于本程序",font=("宋体",15))
    lb4.place(x=100,y=30)
    lb5=Label(about,text="一个简单的Minecraft\n\n 正版皮肤下载器。",font=("宋体",15))
    lb5.place(x=150,y=100,anchor=CENTER)
    # bootstyle constants (SUCCESS, OUTLINE) come from ttkbootstrap.constants.
    btn3=Button(about,text="Github",command=opengithub,bootstyle=(SUCCESS, OUTLINE))
    btn3.place(x=220,y=155)
    btn4=Button(about,text="Gitee",command=opengitee,bootstyle=(SUCCESS, OUTLINE))
    btn4.place(x=122.5,y=155)
    btn5=Button(about,text="Bilibili",command=openbilibili,bootstyle=(SUCCESS, OUTLINE))
    btn5.place(x=25,y=155)
def TryUpdate(update_url):  # Try to self-update
    """Query *update_url* (a Gitee/GitHub "latest release" API endpoint)
    and, if a newer release exists, offer to download it and stage a
    self-update via a generated Update.bat.

    Relies on module-level globals: `version_int` (current version as a
    number), GUI widgets `btn1`/`zt`, plus `requests` and `urlretrieve`.
    """
    update=requests.get(update_url)
    update=update.text
    update=json.loads(update)
    # Releases are tagged with a float-parsable version number.
    if float(update["tag_name"])>version_int:
        assets=update["assets"]
        browser_download_url_list=assets[0]
        browser_download_url=browser_download_url_list["browser_download_url"]
        is_update=tkinter.messagebox.askyesno(title="检测到新版本", message="本程序有新版本!是否要下载?")
        if is_update==True:
            def autoupdate():
                btn1.config(state=DISABLED)
                btn1.config(text="更新中……")
                zt.set("状态:更新中,请稍候……")
                # Name of the currently running script/exe; the batch file
                # kills it, swaps in the downloaded binary and restarts it.
                fn=os.path.splitext(os.path.basename(__file__))[0]+os.path.splitext(os.path.basename(__file__))[1]
                with open("Update.bat", 'w') as file_object:
                    file_object.write("@echo off\ntaskkill -f -im python.exe\ntaskkill -f -im pythonw.exe\ntaskkill -f -im "+fn+"\ndel /s /q /f "+fn+"\nren New_MinecraftSkinDownloader.exe "+fn+"\nstart "+fn)
                urlretrieve(browser_download_url,"New_MinecraftSkinDownloader.exe")
                os.startfile("Update.bat")
                exit()
            # Download on a worker thread so the GUI stays responsive.
            run_1=threading.Thread(target=autoupdate)
            run_1.start()
    del update
from tkinter import *
import ttkbootstrap as ttk
from ttkbootstrap.constants import *
from tkinter.ttk import *
#sc=ThemedTk(theme="equilux", toplevel=True, themebg=True)
sc=Tk()
# Center the main window on the screen.
scw=sc.winfo_screenwidth()
sch=sc.winfo_screenheight()
w=500
h=300
x=(scw-w)/2
y=(sch-h)/2
sc.title("Minecraft正版皮肤下载器"+version+" By 萌新欻無")
sc.geometry("%dx%d+%d+%d"%(w,h,x,y))
sc.maxsize(w,h)
sc.minsize(w,h)
try:  # Try to load the window icon, downloading it from two mirrors if missing.
    sc.iconbitmap('logo.ico')
except:
    try:
        urlretrieve("https://gitee.com/NewbieXvwu/MinecraftSkinDownloader/raw/main/logo.ico","logo.ico")
        sc.iconbitmap('logo.ico')
    except:
        urlretrieve("https://github.com/NewbieXvwu/MinecraftSkinDownloader/raw/main/logo.ico","logo.ico")
        sc.iconbitmap('logo.ico')
try:
    ctypes.windll.shcore.SetProcessDpiAwareness(1)  # High-DPI support (Windows only)
    ScaleFactor=ctypes.windll.shcore.GetScaleFactorForDevice(0)
    sc.tk.call('tk', 'scaling', ScaleFactor/75)
except:
    pass
# Build the main screen widgets.
lb1=Label(sc,text="请输入Minecraft正版账号名称",font=("宋体",15))
lb1.place(x=110,y=50)
e=Entry(sc,width=20)
e.place(x=170,y=120)
e.bind("<Return>",getzb)
btn1=Button(sc,text="点击获取",command=getzb,bootstyle=(SUCCESS, OUTLINE))
btn1.place(x=210,y=190)
zt=tkinter.StringVar()
zt.set("状态:待命")
lb2=Label(sc,textvariable=zt,font=("宋体",15))
lb2.place(x=10,y=270)
btn2=Button(sc,text="关于",command=info,bootstyle=(SUCCESS, OUTLINE))
btn2.place(x=440,y=260)
lb3=Label(sc,text=version,font=("宋体",10))
lb3.place(x=5,y=5)
# Light/dark theme selector.
cmb = Combobox(sc,width=7)
cmb.place(x=420,y=5)
ms=("浅色模式","深色模式")
cmb["value"]=ms
cmb.current(0)
def func(event):
    # Apply the selected ttkbootstrap theme (index 0 = light, 1 = dark).
    if cmb.get()==ms[0]:
        style = ttk.Style("cosmo")
    elif cmb.get()==ms[1]:
        style = ttk.Style("superhero")
cmb.bind("<<ComboboxSelected>>",func)
try:  # Read the Windows 10 dark-mode preference from the registry.
    key = winreg.OpenKey(winreg.HKEY_CURRENT_USER,r"Software\Microsoft\Windows\CurrentVersion\Themes\Personalize")
    try:
        i = 0
        while True:
            # EnumValue enumerates registry values, EnumKey enumerates subkeys.
            name,value,type=winreg.EnumValue(key,i)
            if str(name)=="AppsUseLightTheme":
                break
            i +=1
        if value==0:
            style = ttk.Style("superhero")
            cmb.current(1)
        else:
            style = ttk.Style("cosmo")
            cmb.current(0)
    except WindowsError:
        pass
except:
    pass
# Warn when a Windows-7-compatible Python (<= 3.8) runs on Windows 10+.
if float(str(platform.version().split(".")[0])+"."+str(platform.version().split(".")[1]))>6.3 and int(platform.python_version().split(".")[1])<=8:
    if tkinter.messagebox.askyesno(title="您正在使用过旧的Python", message="您的操作系统为Windows "+str(platform.version().split(".")[0])+",\n但本程序正运行在版本为"+platform.python_version()+"的Python上。\n这可能是因为您下载了本程序的Windows 7兼容版。\n使用兼容版将会导致程序的稳定性无法得到保证,因为本程序的开发使用了更新的Python版本。\n您是否要下载一个稳定性更好的版本?"):
        os.startfile("https://github.com/NewbieXvwu/MinecraftSkinDownloader/releases")
        exit()
if ispreview:  # Preview-build warning
    if not tkinter.messagebox.askyesno(title="您正在使用预览版", message="您正在使用的版本为"+version+",这是一个预览版。\n使用预览版可能会带来一些不可预知的问题!\n您是否要继续?"):
        os.startfile("https://github.com/NewbieXvwu/MinecraftSkinDownloader/releases")
        exit()
# Check both mirrors for a newer release; update-check failures are non-fatal.
try:
    TryUpdate("https://gitee.com/api/v5/repos/NewbieXvwu/MinecraftSkinDownloader/releases/latest")
except:
    try:
        # Fix: the GitHub endpoint was misspelled "releases/lates", so the
        # fallback update check always failed with a 404.
        TryUpdate("https://api.github.com/repos/NewbieXvwu/MinecraftSkinDownloader/releases/latest")
    except:
        pass
sc.mainloop() |
hello.py | import os
from threading import Thread
from flask import Flask, render_template, session, redirect, url_for
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_mail import Mail, Message
from flask_script import Manager
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.config['SECRET_KEY'] = 'hard to guess string'
app.config['SQLALCHEMY_DATABASE_URI'] =\
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['MAIL_SERVER'] = 'smtp.googlemail.com'
app.config['MAIL_PORT'] = 587
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USERNAME'] = os.environ.get('MAIL_USERNAME')
app.config['MAIL_PASSWORD'] = os.environ.get('MAIL_PASSWORD')
app.config['FLASKY_MAIL_SUBJECT_PREFIX'] = '[Flasky]'
app.config['FLASKY_MAIL_SENDER'] = 'Flasky Admin <flasky@example.com>'
app.config['FLASKY_ADMIN'] = os.environ.get('FLASKY_ADMIN')
bootstrap = Bootstrap(app)
moment = Moment(app)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
mail = Mail(app)
manager = Manager(app)
class Role(db.Model):
    """Database model for a user role; one role is shared by many users."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    # One-to-many link: role.users is a query object (lazy='dynamic') and
    # each User gets a `role` attribute through the backref.
    users = db.relationship('User', backref='role', lazy='dynamic')

    def __repr__(self):
        """Readable representation for debugging and shell sessions."""
        return '<Role %r>' % self.name
class User(db.Model):
    """Database model for an application user, linked to one Role."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), unique=True, index=True)
    # Foreign key to roles.id; the `role` attribute is added by Role's backref.
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))

    def __repr__(self):
        """Readable representation for debugging and shell sessions."""
        return '<User %r>' % self.username
def send_async_email(app, msg):
    """Send *msg* via Flask-Mail from a worker thread.

    An application context is pushed manually because background threads
    do not inherit one from the request that spawned them.
    """
    with app.app_context():
        mail.send(msg)
def send_email(to, subject, template, **kwargs):
    """Render *template* ('.txt' and '.html' variants) with **kwargs and
    send it to *to* on a background thread.

    Returns the started Thread so callers may join() it if they need to.
    """
    prefixed_subject = app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject
    message = Message(prefixed_subject,
                      sender=app.config['FLASKY_MAIL_SENDER'],
                      recipients=[to])
    message.body = render_template(template + '.txt', **kwargs)
    message.html = render_template(template + '.html', **kwargs)
    sender_thread = Thread(target=send_async_email, args=[app, message])
    sender_thread.start()
    return sender_thread
class NameForm(FlaskForm):
    """Single-field form asking for the visitor's name (name is required)."""
    name = StringField('What is your name?', validators=[DataRequired()])
    submit = SubmitField('Submit')
@app.shell_context_processor
def make_shell_context():
    """Pre-import the db handle and the models into `flask shell` sessions."""
    return {'db': db, 'User': User, 'Role': Role}
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page with the matching HTTP status code."""
    return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
    """Render the custom 500 page with the matching HTTP status code."""
    return render_template('500.html'), 500
@app.route('/', methods=['GET', 'POST'])
def index():
    """Landing page: greet the visitor by name and remember (in the session
    and the database) whether the name has been seen before."""
    form = NameForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.name.data).first()
        if user is None:
            # First time this name is submitted: persist it and, if an
            # admin address is configured, notify the admin by email.
            user = User(username=form.name.data)
            db.session.add(user)
            db.session.commit()
            session['known'] = False
            if app.config['FLASKY_ADMIN']:
                send_email(
                    app.config['FLASKY_ADMIN'],
                    'New User',
                    'mail/new_user',
                    user=user)
        else:
            session['known'] = True
        session['name'] = form.name.data
        # Post/Redirect/Get pattern: avoids re-submission on refresh.
        return redirect(url_for('index'))
    return render_template(
        'index.html',
        form=form,
        name=session.get('name'),
        known=session.get('known', False))
if __name__ == '__main__':
manager.run() |
parameter_server.py | """
Utility functions to retrieve information about available services and setting up security for the Hops platform.
These utils facilitates development by hiding complexity for programs interacting with Hops services.
"""
import os
from hops import hdfs as hopshdfs
from hops import tensorboard
from hops import devices
from hops import util
import pydoop.hdfs
import threading
import datetime
import socket
import json
from . import parameter_server_reservation
run_id = 0
def _launch(sc, map_fun, local_logdir=False, name="no-name"):
    """Run *map_fun* as a distributed parameter-server experiment on Spark.

    Args:
        sc: SparkContext.
        map_fun: the user's training function, executed on every executor.
        local_logdir: whether TensorBoard logs are written locally instead
            of to HDFS (passed through to the wrapper).
        name: experiment name shown in the Spark UI job group.

    Returns:
        (metric, logdir) tuple — metric is the float read back from the
        run's HDFS metric file, or None when no metric file was written.
    """
    global run_id
    app_id = str(sc.applicationId)
    num_executions = int(sc._conf.get("spark.executor.instances"))
    #Each TF task should be run on 1 executor
    nodeRDD = sc.parallelize(range(num_executions), num_executions)
    #Make SparkUI intuitive by grouping jobs
    sc.setJobGroup("ParameterServerStrategy", "{} | Distributed Training".format(name))
    # Reservation server coordinates the cluster-spec exchange between executors.
    server = parameter_server_reservation.Server(num_executions)
    server_addr = server.start()
    num_ps = util.num_param_servers()
    #Force execution on executor, since GPU is located on executor
    nodeRDD.foreachPartition(_prepare_func(app_id, run_id, map_fun, local_logdir, server_addr, num_ps))
    logdir = _get_logdir(app_id)
    path_to_metric = logdir + '/metric'
    if pydoop.hdfs.path.exists(path_to_metric):
        with pydoop.hdfs.open(path_to_metric, "r") as fi:
            metric = float(fi.read())
            fi.close()  # redundant: the with-block already closes fi
        return metric, logdir
    print('Finished Experiment \n')
    return None, logdir
def _get_logdir(app_id):
    """Build the HDFS log directory path for the current parameter-server run.

    Args:
        app_id: Spark/YARN application id.

    Returns:
        "<experiments_dir>/<app_id>/parameter_server/run.<run_id>"
    """
    global run_id
    experiments_dir = hopshdfs._get_experiments_dir()
    return "{0}/{1}/parameter_server/run.{2}".format(experiments_dir, app_id, run_id)
def _prepare_func(app_id, run_id, map_fun, local_logdir, server_addr, num_ps):
    """Build the per-partition function run on every Spark executor.

    Args:
        app_id: Spark application id (used for the HDFS log dir layout).
        run_id: experiment run number.
        map_fun: the user's training function.
        local_logdir: whether TensorBoard logs go to the local log dir.
        server_addr: address of the reservation server used to exchange
            the TF cluster spec between executors.
        num_ps: number of parameter servers; the first `num_ps` executor
            numbers register as "ps" tasks, the rest as "worker".

    Returns:
        The wrapper function to hand to RDD.foreachPartition.
    """
    def _wrapper_fun(iter):
        """Executor-side body: negotiate the cluster spec, export
        TF_CONFIG, run map_fun for this role, and clean up.

        Args:
            iter: partition iterator; each partition carries exactly one
                executor number (see the parallelize call in _launch).
        """
        for i in iter:
            executor_num = i
        tb_hdfs_path = ''
        hdfs_exec_logdir = ''
        # Periodically prints GPU utilization while the task runs.
        t = threading.Thread(target=devices._print_periodic_gpu_utilization)
        if devices.get_num_gpus() > 0:
            t.start()
        role = None
        client = parameter_server_reservation.Client(server_addr)
        try:
            host = util._get_ip_address()
            # Bind to port 0 so the OS picks a free port; the socket only
            # holds the port until the cluster spec has been distributed.
            tmp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            tmp_socket.bind(('', 0))
            port = tmp_socket.getsockname()[1]
            host_port = host + ":" + str(port)
            exec_spec = {}
            # The first num_ps executors act as parameter servers.
            if executor_num < num_ps:
                exec_spec["task_type"] = "ps"
            else:
                exec_spec["task_type"] = "worker"
            exec_spec["host_port"] = host_port
            exec_spec["gpus_present"] = devices.get_num_gpus() > 0
            # Register and block until every executor has registered, then
            # receive the assembled cluster spec.
            client.register(exec_spec)
            cluster = client.await_reservations()
            tmp_socket.close()
            role, index = _find_task_and_index(host_port, cluster)
            cluster_spec = {}
            cluster_spec["cluster"] = cluster
            cluster_spec["task"] = {"type": role, "index": index}
            print(cluster_spec)
            # TensorFlow reads the cluster layout from TF_CONFIG.
            os.environ["TF_CONFIG"] = json.dumps(cluster_spec)
            # Only the chief creates HDFS log dirs and registers TensorBoard.
            if role == "chief":
                hdfs_exec_logdir, hdfs_appid_logdir = hopshdfs._create_directories(app_id, run_id, None, 'parameter_server')
                pydoop.hdfs.dump('', os.environ['EXEC_LOGFILE'], user=hopshdfs.project_user())
                hopshdfs._init_logger()
                tb_hdfs_path, tb_pid = tensorboard._register(hdfs_exec_logdir, hdfs_appid_logdir, executor_num, local_logdir=local_logdir)
            gpu_str = '\nChecking for GPUs in the environment' + devices._get_gpu_info()
            if role == "chief":
                hopshdfs.log(gpu_str)
            print(gpu_str)
            print('-------------------------------------------------------')
            print('Started running task \n')
            if role == "chief":
                hopshdfs.log('Started running task')
            task_start = datetime.datetime.now()
            retval=None
            if role == "ps":
                # Parameter servers run map_fun on a side thread and wait
                # until all workers have reported completion.
                ps_thread = threading.Thread(target=lambda: map_fun())
                ps_thread.start()
                print("waiting for workers")
                client.await_all_workers_finished()
                print("waiting finished")
            else:
                retval = map_fun()
            if role == "chief":
                if retval:
                    _handle_return(retval, hdfs_exec_logdir)
            task_end = datetime.datetime.now()
            time_str = 'Finished task - took ' + util._time_diff(task_start, task_end)
            print('\n' + time_str)
            print('-------------------------------------------------------')
            if role == "chief":
                hopshdfs.log(time_str)
        except:
            # Drop the TensorBoard registration and stop the GPU monitor
            # before re-raising so the Spark task fails visibly.
            _cleanup(tb_hdfs_path)
            if devices.get_num_gpus() > 0:
                t.do_run = False
                t.join()
            raise
        finally:
            if role == "chief":
                if local_logdir:
                    local_tb = tensorboard.local_logdir_path
                    util._store_local_tensorboard(local_tb, hdfs_exec_logdir)
        # Tell the reservation server this worker/chief finished, then close.
        try:
            if role == "worker" or role == "chief":
                client.register_worker_finished()
            client.close()
        except:
            pass
        _cleanup(tb_hdfs_path)
        if devices.get_num_gpus() > 0:
            t.do_run = False
            t.join()
    return _wrapper_fun
def _cleanup(tb_hdfs_path):
    """Delete the TensorBoard endpoint file from HDFS (if any) and stop
    the HDFS logger.

    Args:
        tb_hdfs_path: HDFS path of the registered TensorBoard endpoint
            file; may be None or '' when nothing was registered.
    """
    handle = hopshdfs.get()
    # Fix: replace `not x == None` comparisons with the `is not None`
    # identity check (PEP 8); behavior is unchanged.
    if tb_hdfs_path is not None and tb_hdfs_path != '' and handle.exists(tb_hdfs_path):
        handle.delete(tb_hdfs_path)
    hopshdfs._kill_logger()
def _find_task_and_index(host_port, cluster_spec):
"""
Args:
host_port:
cluster_spec:
Returns:
"""
index = 0
for entry in cluster_spec["worker"]:
if entry == host_port:
return "worker", index
index = index + 1
index = 0
for entry in cluster_spec["ps"]:
if entry == host_port:
return "ps", index
index = index + 1
if cluster_spec["chief"][0] == host_port:
return "chief", 0
def _handle_return(val, hdfs_exec_logdir):
    """Validate that *val* is numeric and persist it as the run's metric
    file in HDFS.

    Args:
        val: the value returned by the user's training function; must be
            convertible to a number.
        hdfs_exec_logdir: the run's HDFS log directory.

    Raises:
        ValueError: if *val* is not convertible to a number.
    """
    try:
        int(val)
    except (ValueError, TypeError):
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; only conversion failures should map to this error.
        raise ValueError('Your function needs to return a metric (number) which should be maximized or minimized')
    metric_file = hdfs_exec_logdir + '/metric'
    fs_handle = hopshdfs.get_fs()
    try:
        # pydoop changed the keyword for the open mode between versions;
        # fall back to the alternate spelling if this one is rejected.
        fd = fs_handle.open_file(metric_file, mode='w')
    except Exception:
        fd = fs_handle.open_file(metric_file, flags='w')
    fd.write(str(float(val)).encode())
    fd.flush()
    fd.close()
|
__main__.py | import argparse
import logging
import multiprocessing
import sys
from saq.worker import check_health, start
def main():
    """CLI entry point: parse arguments, configure logging, then either run
    a health check or start one or more saq worker processes."""
    parser = argparse.ArgumentParser(description="Start Simple Async Queue Worker")
    parser.add_argument(
        "settings",
        type=str,
        help="Namespaced variable containing worker settings eg: eg module_a.settings",
    )
    parser.add_argument(
        "--workers", type=int, help="Number of worker processes", default=1
    )
    parser.add_argument(
        "--verbose",
        "-v",
        action="count",
        help="Logging level: 0: ERROR, 1: INFO, 2: DEBUG",
        default=0,
    )
    parser.add_argument(
        "--web",
        action="store_true",
        help="Start web app",
    )
    parser.add_argument(
        "--port",
        type=str,
        # NOTE(review): default here is None; the 8080 fallback mentioned in
        # the help text presumably happens inside saq.worker.start — confirm.
        help="Web app port, defaults to 8080",
    )
    parser.add_argument(
        "--check",
        action="store_true",
        help="Perform a health check",
    )
    args = parser.parse_args()
    # Map the -v count onto a logging level (0 -> ERROR, 1 -> INFO, >=2 -> DEBUG).
    level = args.verbose
    if level == 0:
        level = logging.ERROR
    elif level == 1:
        level = logging.INFO
    else:
        level = logging.DEBUG
    settings = args.settings
    logging.basicConfig(level=level)
    if args.check:
        # Exit with whatever status check_health reports.
        sys.exit(check_health(settings))
    else:
        workers = args.workers
        if workers > 1:
            # Spawn workers-1 child processes; this process runs the last
            # worker itself (together with the web app when requested).
            for _ in range(workers - 1):
                p = multiprocessing.Process(target=start, args=(settings,))
                p.start()
        start(settings, web=args.web, port=args.port)
if __name__ == "__main__":
main()
|
scheduler.py | from datetime import datetime
from multiprocessing import Pipe, Process
from multiprocessing.connection import Connection
from threading import Thread
from typing import Optional
import schedule
from schedule import default_scheduler
class OneTimeJob(schedule.Job):
    """A schedule.Job that fires exactly once at a fixed datetime."""

    # Override schedule.Job._schedule_next_run to avoid periodic job generation.
    def _schedule_next_run(self):
        pass

    def set_next_run(self, next_time: datetime):
        """Arm the job to run at *next_time* (an absolute datetime)."""
        if not isinstance(next_time, datetime):
            raise AssertionError("The next_time parameter should be a datetime object.")
        self.at_time = next_time
        self.next_run = next_time
        self.should_run = True

    @property
    def should_run(self):
        # Gate the base class's time-based check with the one-shot latch.
        return self._keep_running and super().should_run

    @should_run.setter
    def should_run(self, value):
        # Backing flag for the one-shot latch; cleared once run() fires.
        self._keep_running = value

    def run(self):
        # This prevents the job from running more than once
        self.should_run = False
        super().run()
        # Returning CancelJob tells the scheduler to drop this job.
        return schedule.CancelJob()
def _default_scheduler_once(self, trigger_time: datetime):
    """Create a OneTimeJob on this scheduler, armed for *trigger_time*.

    Attached to default_scheduler as `.once` by the monkey-patching at the
    bottom of this module.
    """
    # interval=0 is a dummy value; OneTimeJob never reschedules itself.
    job = OneTimeJob(0, self)
    job.set_next_run(trigger_time)
    return job
def _run_job(self, job):
    """Overrides default_scheduler._run_job to support running the jobs in a separate
    process.

    Either way, this waits for the result in a dedicated thread to prevent blocking the
    event loop.
    """
    def launch_and_wait():
        # Launch job in a dedicated process and send the result through a pipe.
        if "subprocess" in job.tags:
            def wrapped_run(pipe: Connection):
                result = job.run()
                pipe.send(result)
            pipe, child_pipe = Pipe()
            p = Process(target=wrapped_run, args=(child_pipe,))
            p.start()
            # Blocks until the child sends its result — but only on this
            # worker thread, so the scheduler loop itself is not blocked.
            # NOTE(review): in the subprocess case job.run() mutates the
            # child's copy of the job, so state changes (e.g. OneTimeJob's
            # latch) presumably do not propagate back — confirm.
            result = pipe.recv()
        else:
            # Or simply run the job in this thread
            result = job.run()
        # schedule signals removal with either the CancelJob class itself
        # or an instance of it; honor both.
        if isinstance(result, schedule.CancelJob) or result is schedule.CancelJob:
            self.cancel_job(job)
    Thread(target=launch_and_wait).start()
def _once(trigger_time: Optional[datetime] = None):
    """Adds support for scheduling one-time jobs to the default_scheduler.

    Defaults to "run now" when no trigger_time is given.
    """
    if trigger_time is None:
        trigger_time = datetime.now()
    if not isinstance(trigger_time, datetime):
        raise AssertionError("The trigger_time parameter should be a datetime object.")
    # `once` is stored as a plain attribute on the scheduler instance (not a
    # bound method), so `self` has to be passed explicitly.
    return default_scheduler.once(self=default_scheduler, trigger_time=trigger_time)
# Monkey-Patching
# `once` is attached to the default_scheduler *instance*, so it remains a
# plain function (no automatic binding); _once passes self explicitly.
default_scheduler.once = _default_scheduler_once
schedule.Scheduler._run_job = _run_job
schedule.once = _once
|
my_library.py | #!/usr/bin/env python2.7
# my_library.py - This Module is for the most used classes and methods
# URL: https://github.com/engdan77/otis_service
# Author: Daniel Engvall (daniel@engvalls.eu)
__version__ = "$Revision: 20190123.1251 $"
import SocketServer
import sys
import threading
import smbus
import time
def get_datetime():
    """Return the current local time as a 'YYYY-MM-DD HH:MM:SS' string."""
    from time import strftime
    return strftime("%Y-%m-%d %H:%M:%S")
def log_stderr(message, level='ERROR', log_object=None):
    """Log *message* through *log_object* if one is given, otherwise fall
    back to stderr for ERROR/CRITICAL messages.

    Bug fix: the original branched to ``log_object.log(...)`` whenever the
    level was not ERROR/CRITICAL, even with ``log_object=None``, raising
    AttributeError. Now messages without a logger and below ERROR are
    simply dropped.

    Args:
        message: the text (any object; str()-ed when written to stderr).
        level: one of INFO, WARNING, ERROR, CRITICAL, DEBUG.
        log_object: optional logger exposing .log(message, level).
    """
    if log_object is not None:
        log_object.log(message, level)
    elif level == 'ERROR' or level == 'CRITICAL':
        sys.stderr.write(str(message) + "\n")
class ClassSyslogLogger:
    """
    Logger object that forwards messages to a syslog server.

    address: IP or hostname of the syslog server
    port: UDP port, default 514
    defaultlevel: minimum level — INFO, WARNING, ERROR or CRITICAL
        (any other value falls back to INFO)
    """
    def __init__(self, address, port=514, defaultlevel='INFO'):
        import logging
        from logging.handlers import SysLogHandler
        self.oLogger = logging.getLogger(__name__)
        # Map the level name to a logging constant; unknown names fall
        # back to INFO, exactly like the original if/elif chain did.
        threshold = {
            'INFO': logging.INFO,
            'WARNING': logging.WARNING,
            'ERROR': logging.ERROR,
            'CRITICAL': logging.CRITICAL,
        }.get(defaultlevel, logging.INFO)
        self.oLogger.setLevel(threshold)
        # Handler + format, then wire them into the logger.
        self.oHandler = SysLogHandler(address=(address, port))
        self.oFormatter = logging.Formatter(
            '%(asctime)s -[ %(pathname)s] - [%(levelname)s] - %(message)s')
        self.oHandler.setFormatter(self.oFormatter)
        self.oLogger.addHandler(self.oHandler)

    def log(self, message, level='INFO'):
        """
        Log message to syslog.

        message: the message to be logged
        level: one of INFO, WARNING, ERROR, CRITICAL (default INFO)
        """
        # Dispatch table replaces the if/elif chain; unknown levels log
        # at INFO, matching the original's final else branch.
        emit = {
            'INFO': self.oLogger.info,
            'WARNING': self.oLogger.warning,
            'ERROR': self.oLogger.error,
            'CRITICAL': self.oLogger.critical,
        }.get(level, self.oLogger.info)
        emit(message)
# Get verbose stack-information
class ClassFileLogger:
    """
    Rotating file logger.

    logfile: path of the log file to write to
    maxsize: maximum size in megabytes before the log rotates
    count: number of rotated backup files to keep
    defaultlevel: INFO/WARNING/ERROR/CRITICAL, or DEBUG/VERBOSE to enable
        debug records (VERBOSE additionally logs caller stack frames)
    """
    def __init__(self, logfile, maxsize, count, defaultlevel='INFO'):
        import logging.handlers
        self.logfile = logfile
        self.maxsize = maxsize
        self.count = count
        self.oLogger = logging.getLogger(__name__)
        self.defaultlevel = defaultlevel
        # DEBUG and VERBOSE both enable debug-level records; any unknown
        # name falls back to INFO, matching the original if/elif chain.
        level_map = {
            'DEBUG': logging.DEBUG,
            'VERBOSE': logging.DEBUG,
            'INFO': logging.INFO,
            'WARNING': logging.WARNING,
            'ERROR': logging.ERROR,
            'CRITICAL': logging.CRITICAL,
        }
        self.oLogger.setLevel(level_map.get(defaultlevel, logging.INFO))
        self.oFormatter = logging.Formatter(
            '%(asctime)s -[ %(name)s] - [%(levelname)s] - %(message)s')
        # Rotate once the file exceeds maxsize megabytes, keeping `count` backups.
        self.oHandler = logging.handlers.RotatingFileHandler(
            self.logfile,
            maxBytes=self.maxsize * 1000000,
            backupCount=self.count)
        self.oHandler.setFormatter(self.oFormatter)
        self.oLogger.addHandler(self.oHandler)
        # Record that the logger has been created.
        self.oLogger.info('Logging object initiated for ' + logfile)

    def log(self, message, level='INFO'):
        """
        Log message to file.

        message: the message to be logged
        level: one of INFO, WARNING, ERROR, CRITICAL, DEBUG (default INFO)
        """
        if level == 'INFO':
            self.oLogger.info(message)
        elif level == 'WARNING':
            self.oLogger.warning(message)
        elif level == 'ERROR':
            # exc_info=True dumps the active traceback to the logger
            self.oLogger.error(message, exc_info=True)
        elif level == 'CRITICAL':
            self.oLogger.critical(message, exc_info=True)
        elif level == 'DEBUG' and self.defaultlevel == 'VERBOSE':
            self.oLogger.debug(message)
            # Also emit caller information for outer stack frames 1-4.
            import inspect
            outer = inspect.getouterframes(inspect.currentframe())
            try:
                # Fix: the original logged "Outerframe[3]" with stale data
                # from frame [2] because index 3 was never fetched.
                for depth in range(1, 5):
                    frame, filename, line_number, function_name, lines, index = outer[depth]
                    self.oLogger.debug(
                        "Outerframe[" + str(depth) + "] " + filename + ":" +
                        str(line_number) + " " + str(lines))
            except IndexError:
                self.oLogger.debug("Debug, stack index out of range")
        elif level == 'DEBUG' and self.defaultlevel == 'DEBUG':
            self.oLogger.debug(message)
# noinspection Annotator,Annotator,Annotator,Annotator,PyUnresolvedReferences
class ClassDB:
"""
This is a class to create database object
dbtype: either 'sqlite' or 'mysql'
dbconnect: Filename of the sqlite file or list for mysql ('host', 'port', 'user','password','database')
"""
def __init__(self, dbtype, dbconnect, logger_object=None):
self.dbtype = dbtype
self.dbconnect = dbconnect
self.oLogger = logger_object
def create(self, table_columns):
"""
Method - Creates the database defined
Args:
table_colums (array/tuple):
([table1, column1, type], [table1,column2, type])
([table1, column1, type],)
:param table_columns:
"""
if self.dbtype == 'sqlite':
import sqlite3
connection = sqlite3.connect(self.dbconnect)
# noinspection PyPep8
if self.oLogger: self.oLogger.log('Connected to sqlite ' + str(self.dbconnect), 'INFO')
auto_keyword = 'AUTOINCREMENT'
elif self.dbtype == 'mysql':
import MySQLdb
host, port, user, passwd, db = self.dbconnect
# noinspection PyPep8
connection = MySQLdb.connect(host=host, port=int(port), user=user, passwd=passwd, db=db,
connection_timeout=120, buffered=True)
# noinspection PyPep8
if self.oLogger: self.oLogger.log('Connected to mysql ' + str(self.dbconnect), 'INFO')
auto_keyword = 'AUTO_INCREMENT'
else:
raise ValueError('Wrong dbtype used')
cursor = connection.cursor()
# Function to create table
def create_table(name, idcolumn, tabledef):
try:
sql = "CREATE TABLE " + name + \
"(" + idcolumn + " " + tabledef + ")"
cursor.execute(sql)
except Exception as value:
# noinspection PyPep8
if self.oLogger: self.oLogger.log(value, 'INFO')
finally:
# noinspection PyPep8
if self.oLogger: self.oLogger.log(sql, 'DEBUG')
# Function to create column
def create_column(name, columnname, columndef):
try:
sql = "ALTER TABLE " + name + " ADD COLUMN " + \
columnname + " " + columndef
cursor.execute(sql)
except Exception as value:
# noinspection PyPep8
if self.oLogger: self.oLogger.log(value, 'INFO')
finally:
# noinspection PyPep8
if self.oLogger: self.oLogger.log(sql, 'DEBUG')
# Create lits of unique tables
all_tables = set()
for element in table_columns:
if element[0] not in table_columns:
all_tables.add(element[0])
all_tables = sorted(all_tables)
# Create all tables
for table in all_tables:
create_table(table, 'id', 'INTEGER PRIMARY KEY ' + auto_keyword)
# noinspection PyPep8
if self.oLogger: self.oLogger.log('Creating table ' + table, 'INFO')
# Create all columns
for table_column in table_columns:
create_column(table_column[0], table_column[1], table_column[2])
# Correct encoding for column if necessary for sqlite
if self.dbtype == 'sqlite':
connection.create_function('FIXENCODING', 1, lambda s: str(s).decode('latin-1'))
# noinspection PyPep8
connection.execute(
"UPDATE " + table_column[0] + " SET " + table_column[1] + "=FIXENCODING(CAST(" + table_column[
1] + " AS BLOB))")
connection.close()
# noinspection PyPep8
if self.oLogger: self.oLogger.log('Closing database connection')
def insert(self, table, values):
"""
Method - add data to column(s)
Args:
table: String
values: List or Dict
Example: object.insert('table1', ('value1',))
Example: object.insert('table1', {col1: 'value1', col2: 'value2')
"""
db_error = False
if self.dbtype == 'sqlite':
import sqlite3
connection = sqlite3.connect(self.dbconnect)
# noinspection PyPep8
if self.oLogger: self.oLogger.log('Connected to sqlite ' + str(self.dbconnect), 'INFO')
elif self.dbtype == 'mysql':
import MySQLdb
host, port, user, passwd, db = self.dbconnect
try:
connection = MySQLdb.connect(host=host, port=int(port), user=user, passwd=passwd, db=db)
except MySQLdb.OperationalError as e:
print "Error connecting to database - insert"
print e
db_error = True
else:
# noinspection PyPep8
if self.oLogger: self.oLogger.log('Connected to mysql ' + str(self.dbconnect), 'INFO')
# noinspection PyPep8
if not db_error is True:
cursor = connection.cursor()
# If the values is list
if type(values) is list:
# Create sql_statement
if len(values) > 0:
sql_statement = "?"
for i in range(len(values) - 1):
sql_statement = sql_statement + ",?"
sql_statement = "INSERT INTO " + table + " VALUES (NULL, " + sql_statement + ");"
else:
sys.stderr.write("Missing values for insert into SQL")
# if the values is dict
if type(values) is dict:
# Iterate through the rest of values
for i, k in enumerate(values.keys(), start=0):
if i == 0:
columns = k
vals = "'" + str(values[k]) + "'"
else:
columns = columns + ", " + k
vals = vals + ", '" + str(values[k]) + "'"
sql_statement = "INSERT INTO " + table + "(" + columns + ") VALUES (" + vals + ");"
# noinspection PyPep8
if self.oLogger: self.oLogger.log(sql_statement, 'DEBUG')
cursor.execute(sql_statement)
connection.commit()
connection.close()
# noinspection PyPep8
if self.oLogger: self.oLogger.log('Closing database connection')
def update(self, table, condition, values):
"""
Method - update column(s)
Args:
table: String
condition: rowid or dict
values: Dict
Example: object.update('table1', {col1: 'value1'}, {col2: 'value'})
Example: object.update('table1', 10, {col2: 'value'})
"""
import time
if self.dbtype == 'sqlite':
import sqlite3
connection = sqlite3.connect(self.dbconnect)
# noinspection PyPep8
if self.oLogger: self.oLogger.log('Connected to sqlite ' + str(self.dbconnect), 'INFO')
elif self.dbtype == 'mysql':
import MySQLdb
host, port, user, passwd, db = self.dbconnect
# noinspection PyPep8
try:
connection = MySQLdb.connect(host=host, port=int(port), user=user, passwd=passwd, db=db)
except:
if self.oLogger:
self.oLogger.log('Could not store data to database, wating 30 sec to re-attempt ', 'ERROR')
else:
print "Could not store data to database, waiting 30 sec for re-attempt"
time.sleep(30)
# noinspection PyPep8
try:
connection = MySQLdb.connect(host=host, port=int(port), user=user, passwd=passwd, db=db)
except:
if self.oLogger:
self.oLogger.log('Could not store data to database, skipping', 'ERROR')
else:
print "Could not store data to database, skipping"
return "Fail"
# Successfully connected
# noinspection PyPep8
if self.oLogger: self.oLogger.log('Connected to mysql ' + str(self.dbconnect), 'INFO')
cursor = connection.cursor()
# if the condition is dict
if type(condition) is dict:
# Iterate through the rest of values
for i, k in enumerate(condition.keys(), start=0):
if i == 0:
sql_where = k + "='" + str(condition[k]) + "'"
else:
sql_where = sql_where + " AND " + k + "='" + str(condition[k]) + "'"
elif type(condition) is int:
sql_where = "id=" + str(condition)
else:
# noinspection PyPep8
if self.oLogger: self.oLogger.log('Wrong condition, require rowid or dict', 'ERROR')
raise
# if the values is dict
if type(values) is dict:
# Iterate through the rest of values
for i, k in enumerate(values.keys(), start=0):
if i == 0:
sql_set = k + "='" + str(values[k]) + "'"
else:
sql_set = sql_set + "," + k + "='" + str(values[k]) + "'"
else:
# noinspection PyPep8
if self.oLogger: self.oLogger.log('Wrong values, require dict', 'ERROR')
raise
sql_condition = "UPDATE " + table + " SET " + sql_set + " WHERE " + sql_where + ";"
# noinspection PyPep8
if self.oLogger: self.oLogger.log(sql_condition, 'DEBUG')
cursor.execute(sql_condition)
connection.commit()
connection.close()
# noinspection PyPep8
if self.oLogger: self.oLogger.log('Closing database connection')
def select(self, table, condition):
"""
Method - select and return dict
Args:
table: String
condition: dict or string
Example: object.select('table1', {col1: 'value1', col2: 'value2'})
"""
if self.dbtype == 'sqlite':
import sqlite3
connection = sqlite3.connect(self.dbconnect)
# noinspection PyPep8
if self.oLogger: self.oLogger.log('Connected to sqlite ' + str(self.dbconnect), 'INFO')
elif self.dbtype == 'mysql':
import MySQLdb
host, port, user, passwd, db = self.dbconnect
try:
connection = MySQLdb.connect(host=host, port=int(port), user=user, passwd=passwd, db=db)
except MySQLdb.OperationalError as e:
print "Could not access database - select"
print e
return None
else:
# noinspection PyPep8
if self.oLogger: self.oLogger.log('Connected to mysql ' + str(self.dbconnect), 'INFO')
cursor = connection.cursor()
sql_where = ""
# if the condition is dict
if type(condition) is dict:
# Iterate through the rest of values
for i, k in enumerate(condition.keys(), start=0):
if i == 0:
sql_where = k + "='" + str(condition[k]) + "'"
elif i > 0:
sql_where = sql_where + " AND " + k + "='" + str(condition[k]) + "'"
elif type(condition) is str:
sql_where = condition
else:
# noinspection PyPep8
if self.oLogger: self.oLogger.log('Wrong condition, require dict or string', 'ERROR')
raise
sql_condition = "SELECT * FROM " + table + " WHERE " + sql_where + ";"
# noinspection PyPep8
if self.oLogger: self.oLogger.log(sql_condition, 'DEBUG')
cursor.execute(sql_condition)
data = cursor.fetchall()
connection.close()
# noinspection PyPep8
if self.oLogger: self.oLogger.log('Closing database connection')
if len(data) <= 0:
return None
else:
return data
def sql(self, condition):
"""
Method - select and return dict
Args:
condition: str
Example: object.('SELECT * FROM tabel WHERE X = Y'})
"""
if self.dbtype == 'sqlite':
import sqlite3
connection = sqlite3.connect(self.dbconnect)
# noinspection PyPep8
if self.oLogger: self.oLogger.log('Connected to sqlite ' + str(self.dbconnect), 'INFO')
elif self.dbtype == 'mysql':
import MySQLdb
host, port, user, passwd, db = self.dbconnect
connection = MySQLdb.connect(host=host, port=int(port), user=user, passwd=passwd, db=db)
# noinspection PyPep8
if self.oLogger: self.oLogger.log('Connected to mysql ' + str(self.dbconnect), 'INFO')
cursor = connection.cursor()
sql_condition = condition
# noinspection PyPep8
if self.oLogger: self.oLogger.log(sql_condition, 'DEBUG')
cursor.execute(sql_condition)
data = cursor.fetchall()
connection.close()
# noinspection PyPep8
if self.oLogger: self.oLogger.log('Closing database connection')
if len(data) <= 0:
return None
else:
return data
def delete(self, table, condition):
"""
Method - delete
Args:
table: String
condition: dict or string
Example: object.delete('table1', {col1: 'value1', col2: 'value2'})
Example: object.delete('table1', 'col1 > 10')
"""
if self.dbtype == 'sqlite':
import sqlite3
connection = sqlite3.connect(self.dbconnect)
# noinspection PyPep8
if self.oLogger: self.oLogger.log('Connected to sqlite ' + str(self.dbconnect), 'INFO')
elif self.dbtype == 'mysql':
import MySQLdb
host, port, user, passwd, db = self.dbconnect
connection = MySQLdb.connect(host=host, port=int(port), user=user, passwd=passwd, db=db)
# noinspection PyPep8
if self.oLogger: self.oLogger.log('Connected to mysql ' + str(self.dbconnect), 'INFO')
cursor = connection.cursor()
sql_where = ""
# if the condition is dict
if type(condition) is dict:
# Iterate through the rest of values
for i, k in enumerate(condition.keys(), start=0):
if i == 0:
sql_where = k + "='" + str(condition[k]) + "'"
elif i > 0 and type(condition[k]) is str:
sql_where = sql_where + " AND " + k + "='" + str(condition[k]) + "'"
elif type(condition) is str:
sql_where = condition
else:
# noinspection PyPep8
if self.oLogger: self.oLogger.log('Wrong condition, require dict or string', 'ERROR')
raise
sql_condition = "DELETE FROM " + table + " WHERE " + sql_where + ";"
# noinspection PyPep8
if self.oLogger: self.oLogger.log(sql_condition, 'DEBUG')
cursor.execute(sql_condition)
connection.commit()
connection.close()
# noinspection PyPep8
if self.oLogger: self.oLogger.log('Closing database connection')
    def sync(self, dst_db, src_table, sync_col, *src_cols):
        """
        Copy unsynced rows from this database into another database object.

        Rows are "unsynced" while sync_col still contains the text 'null'.
        Each matching row is inserted into dst_db with sync_col stamped with
        the current datetime; the source row's sync_col is then updated.

        Args:
            dst_db: object - target database (same class as self)
            src_table: name of source table
            sync_col: name of column that will either be 'null' or DateTime
            src_cols: array of colums to sync
        Example: object.sync(objTargetDB, 'table', 'sync_col', 'col1', 'col2', 'col3')
        """
        # Rows are addressed by the auto-increment 'id' primary-key column.
        id_col = 'id'
        if self.dbtype == 'sqlite':
            import sqlite3
            connection = sqlite3.connect(self.dbconnect)
            # noinspection PyPep8
            if self.oLogger: self.oLogger.log('Connected to sqlite ' + str(self.dbconnect), 'INFO')
        elif self.dbtype == 'mysql':
            import MySQLdb
            host, port, user, passwd, db = self.dbconnect
            connection = MySQLdb.connect(host=host, port=int(port), user=user, passwd=passwd, db=db)
            # noinspection PyPep8
            if self.oLogger: self.oLogger.log('Connected to mysql ' + str(self.dbconnect), 'INFO')
        cursor = connection.cursor()
        # Fetch id + the requested columns for every not-yet-synced row.
        all_cols = ','.join(src_cols)
        sql_where = sync_col + " LIKE '%null%'"
        sql_condition = "SELECT " + id_col + "," + all_cols + " FROM " + src_table + " WHERE " + sql_where + ";"
        # noinspection PyPep8
        if self.oLogger: self.oLogger.log(sql_condition, 'DEBUG')
        cursor.execute(sql_condition)
        data = cursor.fetchall()
        connection.close()
        # noinspection PyPep8
        if self.oLogger: self.oLogger.log('Closing database connection')
        target_data = {}
        row_no = 1
        # Iterate through rows
        for row in data:
            # noinspection PyPep8
            if self.oLogger: self.oLogger.log('Syncing Row ' + str(row_no) + '/' + str(len(data)), 'INFO')
            # Get the ID of the row in source database to update sync column
            # Convert tuple to list to remove element
            row = list(row)
            row_id = row.pop(0)
            i = 0
            # iterate columns to create record
            for col in src_cols:
                target_data[col] = row[i]
                i = i + 1
            # Creating record in the sync database
            target_data[sync_col] = get_datetime()
            dst_db.insert(src_table, target_data)
            row_no = row_no + 1
        # Update source database sync column
        # NOTE(review): this runs AFTER the loop, so only the LAST row's
        # sync column is stamped in the source database, and it raises
        # NameError when no rows matched ('row_id' is then undefined) —
        # confirm whether these two lines belong inside the loop.
        target_data = {sync_col: get_datetime()}
        self.update(src_table, row_id, target_data)
# noinspection Annotator
class ClassConfig:
    """ Manage an INI-style config file, creating it when missing. """
    def __init__(self, configfile, logger_object=None):
        """
        Args:
            configfile: path to the INI file; created empty when unreadable
            logger_object: optional logger with a .log(msg, level) method
        """
        # Py2/py3 compatibility: the module was renamed in Python 3.
        try:
            import ConfigParser
        except ImportError:
            import configparser as ConfigParser
        self.configfile = configfile
        self.oLogger = logger_object
        self.config = ConfigParser.RawConfigParser()
        try:
            self.oConfigFile = open(self.configfile, 'r')
        except (IOError, OSError) as e:
            # BUG FIX: this previously caught ValueError, which open() does
            # not raise for a missing/unreadable file.
            if self.oLogger: self.oLogger.log('Could not load config file ' + configfile, 'INFO')
            if self.oLogger: self.oLogger.log(e, 'INFO')
            try:
                # Create a fresh (empty) config file instead.
                self.oConfigFile = open(self.configfile, 'w')
                self.config.write(self.oConfigFile)
            except (IOError, OSError) as e:
                # BUG FIX: this previously called self.oLogger(...) instead of
                # self.oLogger.log(...) and concatenated str + exception.
                if self.oLogger: self.oLogger.log('Could not load/create config file, ' + str(e), 'ERROR')
                raise
        else:
            # readfp() was removed in Python 3.12; prefer read_file().
            if hasattr(self.config, 'read_file'):
                self.config.read_file(self.oConfigFile)
            else:
                self.config.readfp(self.oConfigFile)
    def add_update(self, section, parameters):
        """
        Create/extend a section with the given options and persist the file.

        Args:
            section: the name of the section
            parameters: dict of option name -> value
        Raises:
            TypeError: if parameters is not a dict.
        """
        # Create the section on first use.
        if not self.config.has_section(section):
            if self.oLogger: self.oLogger.log('Creating section ' + section, 'INFO')
            self.config.add_section(section)
        if type(parameters) is dict:
            for k in parameters.keys():
                # BUG FIX: the old code gated the set() on
                # has_option(section, parameters[k]) — testing the VALUE as an
                # option name — which could silently skip an update.
                if self.oLogger: self.oLogger.log('Adding or updating ' + section +
                                                  ' with value ' + k + '=' + parameters[k], 'INFO')
                self.config.set(section, k, parameters[k])
            # Persist once after all options are set (the old per-option
            # rewrite leaked one file handle per option).
            self.oConfigFile = open(self.configfile, 'w')
            self.config.write(self.oConfigFile)
            self.oConfigFile.close()
        else:
            if self.oLogger: self.oLogger.log('The parameters has to be a dict', 'ERROR')
            # BUG FIX: a bare `raise` with no active exception is itself an
            # error; raise an explicit TypeError instead.
            raise TypeError('The parameters has to be a dict')
    def get_all(self, section):
        """
        Args:
            section: the name of the section
        Return: dict with all parameters, or None if the section is missing
        """
        if self.config.has_section(section):
            return dict(self.config.items(section))
        if self.oLogger: self.oLogger.log('Could not read parameters from ' + section, 'INFO')
        return None
    def get(self, section, parameter):
        """
        Args:
            section: the name of the section
            parameter: option name
        Return: string value, or None if the section is missing
        """
        if self.config.has_section(section):
            return str(self.config.get(section, parameter))
        if self.oLogger: self.oLogger.log('Could not read parameters from ' + section, 'INFO')
        return None
    def sections(self):
        """ Get all sections """
        return self.config.sections()
class MyExpect:
    """ Thin wrapper around pexpect.spawn with optional debug logging. """
    def __init__(self, cmd, timeout=30, logger_object=None):
        """
        Args:
            cmd: command string, or a [command, args_list] pair
            timeout: pexpect timeout in seconds
            logger_object: optional logger with a .log(msg, level) method
        """
        import pexpect
        self.cmd = cmd
        self.timeout = timeout
        self.oLogger = logger_object
        if type(cmd) is list:
            if self.oLogger: self.oLogger.log('Spawn command: ' + str(cmd[0]) + " args=" + str(cmd[1]), 'DEBUG')
            self.child = pexpect.spawn(cmd[0], args=cmd[1], timeout=self.timeout)
        else:
            if self.oLogger: self.oLogger.log('Spawn command: ' + cmd, 'DEBUG')
            self.child = pexpect.spawn(cmd, timeout=self.timeout)
    def kill(self, return_code):
        """ Kill the spawned child with the given signal number. """
        if self.oLogger: self.oLogger.log('Expect killing thread', 'DEBUG')
        self.child.kill(return_code)
    def log(self, file):
        """ Send the child's output to 'stdout' or to the named file. """
        import sys
        if file == 'stdout':
            self.child.logfile = sys.stdout
        else:
            self.child.logfile = open(file, 'w')
        if self.oLogger: self.oLogger.log('Expect output to: ' + file, 'INFO')
    def expect(self, response):
        r""" Usage: result = oExp.expect(['.+\[\d+\]>', pexpect.TIMEOUT, pexpect.EOF]) """
        outcome = (self.child.expect(response), self.child.before, self.child.after)
        if self.oLogger:
            self.oLogger.log('Expecting: ' + str(response), 'DEBUG')
            self.oLogger.log('Received: ' + str(outcome[0]), 'DEBUG')
        return outcome
    def send(self, cmd):
        """ Send raw characters to the child. """
        if self.oLogger: self.oLogger.log('Sendline: ' + cmd, 'DEBUG')
        self.child.send(cmd)
    def sendline(self, cmd):
        """ Send a line (newline appended) to the child. """
        if self.oLogger: self.oLogger.log('Sendline: ' + cmd, 'DEBUG')
        self.child.sendline(cmd)
    def isalive(self):
        """ True while the spawned child is still running. """
        return self.child.isalive()
def test_socket(host, port, logger_object=None):
""" Function to test host:port for connection and return 0 or 1 """
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# noinspection PyPep8
try:
s.connect((host, port))
s.shutdown(2)
if logger_object:
logger_object.log('Connection success, socket ' + host + ':' + str(port), 'INFO')
return 0
except:
if logger_object:
logger_object.log('Connection faile socket ' + host + ':' + str(port), 'INFO')
return 1
class Process:
    """
    Class to start a subprocess with piped stdin/stdout/stderr.
    example: command = Process("/bin/sh", "-c", "while true; do omxplayer " + self.song + " ; done")
    """
    def __init__(self, *args):
        """
        Args:
            args: the command and its arguments, one token each
        """
        import subprocess
        self.commands = args
        # Pipes are exposed so callers can stream the child's output.
        self.process = subprocess.Popen(tuple(args), stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE)
        self.pid = self.process.pid
        self.stdout = self.process.stdout
        self.stderr = self.process.stderr
    def kill(self):
        """ Kill the whole process group the child was started in. """
        import subprocess
        # Look up the child's process-group id from ps output (second column).
        get_gpid = subprocess.Popen(['/bin/sh', '-c', 'ps x -o "%p %r" | egrep "\s*' + str(self.pid) + '"'],
                                    stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        gpid = str(get_gpid.stdout.read().split()[1])
        # print("gpid: " + gpid)
        kill_cmd = subprocess.Popen(['pkill', '-g', gpid], stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
        # BUG FIX: use the print() function form (already used elsewhere in
        # this file) so the module also parses under Python 3.
        print(kill_cmd.stdout.read())
class Lcd(threading.Thread):
    """
    Class object to display text on a PCD8544 lcd-display.

    Texts are queued with text() and shown one at a time by the run() loop;
    an optional default text is restored whenever the queue is empty.
    lcd = Lcd(sleep_time=0.5)
    """
    def __init__(self, logger_object=None, **kwargs):
        # logger_object: optional logger with a .log(msg, level) method
        # kwargs: 'default' (text shown when idle), 'sleep_time' (poll interval)
        threading.Thread.__init__(self)
        import Queue
        self.counter = 0
        self.objLog = logger_object
        self.queue = Queue.Queue()
        if 'default' in kwargs:
            self.default = kwargs['default']
        else:
            self.default = None
        self.running = False
        if 'sleep_time' in kwargs:
            self.sleep_time = kwargs['sleep_time']
        else:
            self.sleep_time = 0.5
    def text(self, arg_string, arg_time, arg_light=True):
        """ Add text to display to queue (shown arg_time seconds, backlight per arg_light) """
        self.queue.put([arg_string, arg_time, arg_light])
    def run(self):
        """ Display loop: pop queued texts and render them; requires root for GPIO access. """
        # import pcd8544.lcd as lcd
        import time
        import os
        import pcd8544.lcd as lcd
        if not os.geteuid() == 0:
            if self.objLog:
                self.objLog.log('Lcd - has to be run as root', 'CRITICAL')
            else:
                print('Lcd - has to be run as root')
            return 1
        on, off = [1, 0]
        lcd.init()
        lcd.cls()
        lcd.text('')
        if self.default is not None:
            lcd.text(self.default)
        self.running = True
        while self.running is True:
            if self.queue.qsize() > 0:
                # Get next text and time
                lcd_text, lcd_time, lcd_light = self.queue.get()
                try:
                    # lcd.init()
                    lcd.cls()
                    if lcd_light:
                        lcd.backlight(on)
                    else:
                        lcd.backlight(off)
                    # Handle linebreak
                    # NOTE(review): str.find() returns -1 (truthy) when '\n'
                    # is absent, so this branch is taken for nearly every
                    # text; it still renders because split() then yields a
                    # single line — the test was likely meant to be
                    # `find('\n') != -1`.
                    if lcd_text.find('\n'):
                        tmp = lcd_text.split('\n')
                        line_no = 0
                        for line in tmp:
                            lcd.gotorc(line_no, 0)
                            lcd.text(line)
                            line_no += 1
                    else:
                        lcd.text(lcd_text)
                    # Wait
                    time.sleep(lcd_time)
                    # Increase counter
                    self.counter += 1
                finally:
                    lcd.cls()
            if self.queue.qsize() == 0:
                # Queue drained: backlight off and restore the default text.
                lcd.backlight(off)
                if self.default is not None:
                    # Handle linebreak
                    if self.default.find('\n'):
                        tmp = self.default.split('\n')
                        line_no = 0
                        for line in tmp:
                            lcd.gotorc(line_no, 0)
                            lcd.text(line)
                            line_no += 1
                    else:
                        lcd.text(self.default)
            # Pause for next poll
            time.sleep(self.sleep_time)
            # Reset LCD
            '''
            if self.counter > 10:
                lcd.spi.close()
                time.sleep(1)
                lcd.init()
                lcd.cls()
                lcd.text('')
                if self.default is not None:
                    lcd.text(self.default)
                self.counter = 0
                print "RESET LCD"
            '''
    def change_default(self, arg_string):
        """ Change and update default """
        import pcd8544.lcd as lcd
        self.default = arg_string
        lcd.cls()
        lcd.text(self.default)
    def blink(self, times=3):
        """ Blink - period in seconds """
        # Queue alternating backlight-on/off blanks; run() does the timing.
        for i in range(times):
            self.queue.put(['', self.sleep_time, True])
            self.queue.put(['', self.sleep_time, False])
    def blank(self):
        """ Empty queue """
        while self.queue.qsize() > 0:
            lcd_text, lcd_time, lcd_light = self.queue.get()
    def stop(self):
        # Stop the run() loop and clear the display.
        import pcd8544.lcd as lcd
        self.running = False
        lcd.cls()
class PirMotion(threading.Thread):
    """
    Class object to handle a PIR motion sensor.

    Queues an epoch timestamp each time motion starts; read them with get().
    object = PirMotion(pin=4, check_int=0.5, log_object)
    """
    def __init__(self, logger_object=None, **kwargs):
        # logger_object: optional logger with a .log(msg, level) method
        # kwargs: 'pin' (BCM input pin, default 4), 'check_int' (poll interval)
        threading.Thread.__init__(self)
        self.all_motions = []
        import Queue
        import RPi.GPIO as io
        self.objLog = logger_object
        self.queue = Queue.Queue()
        self.running = False
        self.motion = False
        if 'pin' in kwargs:
            self.pin = kwargs['pin']
        else:
            self.pin = 4
        if 'check_int' in kwargs:
            self.check_int = kwargs['check_int']
        else:
            self.check_int = 0.5
        io.setmode(io.BCM)
        io.setup(self.pin, io.IN)
    def run(self):
        """ Poll loop: queue an epoch timestamp on each rising motion edge; requires root. """
        import time
        import os
        import RPi.GPIO as io
        if not os.geteuid() == 0:
            if self.objLog:
                self.objLog.log('PirMotion - has to be run as root', 'CRITICAL')
            else:
                print('PirMotion - has to be run as root')
            return 1
        self.running = True
        while self.running:
            # Get pir alarm
            if io.input(self.pin) and self.motion is not True:
                if self.objLog:
                    self.objLog.log('PirMotion - Motion Detected', 'INFO')
                else:
                    print('PirMotion - Motion Detected')
                self.motion = True
                epoch = int(time.time())
                self.queue.put(epoch)
            elif not io.input(self.pin) and self.motion is True:
                # Falling edge: re-arm for the next motion event.
                self.motion = False
            # Pause for next poll
            time.sleep(self.check_int)
    def stop(self):
        # Ask the poll loop to exit.
        self.running = False
    def get(self, past_seconds=0):
        """ Get the motions within the past seconds (0 = all); drains the queue. """
        import time
        import Queue
        while True:
            try:
                motion_time = self.queue.get(block=False)
            except Queue.Empty:
                break
            else:
                now = time.time()
                if past_seconds > 0:
                    if motion_time >= now - past_seconds:
                        self.all_motions.append(motion_time)
                else:
                    self.all_motions.append(motion_time)
        r = self.all_motions
        self.all_motions = []
        return r
class Switch(threading.Thread):
    """
    Class object to handle a door switch.

    Queues (epoch, "Open"/"Close") tuples on every state change; read them
    with get().
    object = Switch(pin=18, check_int=0.5, log_object)
    """
    def __init__(self, logger_object=None, **kwargs):
        # logger_object: optional logger with a .log(msg, level) method
        # kwargs: 'pin' (BCM input pin, default 17), 'check_int' (poll interval)
        threading.Thread.__init__(self)
        self.all_status = []
        import Queue
        import RPi.GPIO as io
        self.objLog = logger_object
        self.queue = Queue.Queue()
        self.running = False
        self.status = None
        if 'pin' in kwargs:
            self.pin = kwargs['pin']
        else:
            self.pin = 17
        if 'check_int' in kwargs:
            self.check_int = kwargs['check_int']
        else:
            self.check_int = 0.5
        io.setmode(io.BCM)
        # Activate input with PullUp
        io.setup(self.pin, io.IN, pull_up_down=io.PUD_UP)
    def run(self):
        """ Poll loop: queue (epoch, status) on every open/close transition; requires root. """
        import time
        import os
        import RPi.GPIO as io
        if not os.geteuid() == 0:
            if self.objLog:
                self.objLog.log('Switch - has to be run as root', 'CRITICAL')
            else:
                print('Switch - has to be run as root')
            return 1
        self.running = True
        # Get initial status and supply to queue
        if io.input(self.pin):
            self.status = "Open"
        else:
            self.status = "Close"
        epoch = int(time.time())
        self.queue.put((epoch, self.status))
        while self.running:
            # Get current door status
            if io.input(self.pin) and self.status == "Close":
                if self.objLog:
                    self.objLog.log('Switch - Open', 'INFO')
                else:
                    print('Switch - Open')
                self.status = "Open"
                epoch = int(time.time())
                self.queue.put((epoch, self.status))
            elif not io.input(self.pin) and self.status == "Open":
                if self.objLog:
                    self.objLog.log('Switch - Close', 'INFO')
                else:
                    print('Switch - Close')
                self.status = "Close"
                epoch = int(time.time())
                self.queue.put((epoch, self.status))
            # Pause for next poll
            time.sleep(self.check_int)
    def stop(self):
        # Ask the poll loop to exit.
        self.running = False
    def get(self, past_seconds=0):
        """ Get the status changes within the past seconds (0 = all); drains the queue. """
        import time
        import Queue
        while True:
            try:
                switch_status = self.queue.get(block=False)
            except Queue.Empty:
                break
            else:
                now = time.time()
                if past_seconds > 0:
                    if switch_status[0] >= now - past_seconds:
                        self.all_status.append(switch_status)
                else:
                    self.all_status.append(switch_status)
        r = self.all_status
        self.all_status = []
        return r
class DHT(threading.Thread):
    """
    Thread that polls a DHT11/DHT22 sensor and queues (epoch, value) tuples
    whenever the reading moves beyond `limit`.
    object = DHT_humid(pin=4, limit=1, check_int=10, type=0(humid)/1(temp), log_object)
    """
    def __init__(self, logger_object=None, **kwargs):
        """
        Args:
            logger_object: optional logger with a .log(msg, level) method
            kwargs: pin (default 4), check_int (default 10), limit (default
                    0.5), type (0=humidity, 1=temperature), sensor
                    (Adafruit_DHT sensor constant, default DHT11)
        """
        threading.Thread.__init__(self)
        self.all_status = []
        import Queue
        import Adafruit_DHT
        self.objLog = logger_object
        self.queue = Queue.Queue()
        self.running = False
        self.value = 0
        # Index into Adafruit_DHT.read_retry(): 0 = humidity, 1 = temperature.
        self.type = kwargs.get('type', 1)
        self.limit = float(kwargs.get('limit', 0.5))
        # Number of confirmation reads used to reject one-off misreads.
        self.verify_times = 3
        self.sensor = kwargs.get('sensor', Adafruit_DHT.DHT11)
        self.pin = kwargs.get('pin', 4)
        self.check_int = kwargs.get('check_int', 10)
    def run(self):
        """ Poll loop: queue (epoch, value) whenever the reading moves past limit; requires root. """
        import Adafruit_DHT
        import time
        import os
        if not os.geteuid() == 0:
            if self.objLog:
                self.objLog.log('DHT - has to be run as root', 'CRITICAL')
            else:
                print('DHT - has to be run as root')
            return 1

        def changed(old, new, limit):
            # True when `new` differs from `old` by more than `limit`.
            if new > old + limit:
                return True
            elif new < old - limit:
                return True
            else:
                return False
        self.running = True
        # Get initial status and supply to queue
        self.value = Adafruit_DHT.read_retry(self.sensor, self.pin)[self.type]
        epoch = int(time.time())
        self.queue.put((epoch, self.value))
        while self.running:
            # Get new value
            new_value = Adafruit_DHT.read_retry(self.sensor, self.pin)[self.type]
            # Re-read several times so a single misread does not register.
            verified = [changed(self.value, Adafruit_DHT.read_retry(self.sensor, self.pin)[self.type], self.limit)
                        for i in range(1, self.verify_times)]
            # BUG FIX: removed a dead `condition = ...` assignment that
            # duplicated the test below but was never used.
            if ((new_value > self.value + self.limit) or (new_value < self.value - self.limit)) and all(verified):
                if self.objLog:
                    self.objLog.log(
                        'DHT Type %s exceeds limit of %s, new value %s' % (self.type, self.limit, new_value))
                else:
                    print('DHT Type %s exceeds limit of %s, new value %s' % (self.type, self.limit, new_value))
                self.value = new_value
                epoch = int(time.time())
                self.queue.put((epoch, self.value))
            # Pause for next poll
            time.sleep(self.check_int)
    def stop(self):
        """ Ask the poll loop to exit. """
        self.running = False
    def get(self, past_seconds=0):
        """ Get the readings queued within the past seconds (0 = all); drains the queue. """
        import time
        import Queue
        while True:
            try:
                switch_status = self.queue.get(block=False)
            except Queue.Empty:
                break
            else:
                now = time.time()
                if past_seconds > 0:
                    if switch_status[0] >= now - past_seconds:
                        self.all_status.append(switch_status)
                else:
                    self.all_status.append(switch_status)
        r = self.all_status
        self.all_status = []
        return r
def readadc(adcnum, clockpin=18, mosipin=24, misopin=23, cspin=25):
    # read SPI data from MCP3008 chip, 8 possible adc's (0 thru 7)
    # Bit-bangs the SPI protocol on the given BCM pins and returns the
    # 10-bit conversion result (0-1023), or -1 for an invalid channel.
    import RPi.GPIO as GPIO
    GPIO.setmode(GPIO.BCM)
    GPIO.setwarnings(False)
    # set up the SPI interface pins
    GPIO.setup(mosipin, GPIO.OUT)
    GPIO.setup(misopin, GPIO.IN)
    GPIO.setup(clockpin, GPIO.OUT)
    GPIO.setup(cspin, GPIO.OUT)
    if (adcnum > 7) or (adcnum < 0):
        return -1
    GPIO.output(cspin, True)
    GPIO.output(clockpin, False)  # start clock low
    GPIO.output(cspin, False)  # bring CS low
    commandout = adcnum
    commandout |= 0x18  # start bit + single-ended bit
    commandout <<= 3  # we only need to send 5 bits here
    # Shift the command out MSB first, clocking each bit manually.
    for i in range(5):
        if commandout & 0x80:
            GPIO.output(mosipin, True)
        else:
            GPIO.output(mosipin, False)
        commandout <<= 1
        GPIO.output(clockpin, True)
        GPIO.output(clockpin, False)
    adcout = 0
    # read in one empty bit, one null bit and 10 ADC bits
    for i in range(12):
        GPIO.output(clockpin, True)
        GPIO.output(clockpin, False)
        adcout <<= 1
        if GPIO.input(misopin):
            adcout |= 0x1
    GPIO.output(cspin, True)
    adcout >>= 1  # first bit is 'null' so drop it
    return adcout
class McpValue(threading.Thread):
    # noinspection PyPep8
    """
    Class object to read MCP3008
    object = MCPvalue(minref, adc_in, **kwargs)
    object = MCPvalue(510, 0, clockpin=18, mosipin=24, misopin=23, cspin=25, check_int=1, sleep_int=0.000001, check_times=100, logger_object=logger_object)
    """
    def __init__(self, minref=510, adc_in=0, **kwargs):
        # minref: ADC reading treated as the zero point of the usable range
        # adc_in: MCP3008 channel to sample
        # kwargs: SPI pin numbers, polling intervals, sample count, logger
        threading.Thread.__init__(self)
        import Queue
        import RPi.GPIO as io
        self.minref = minref
        self.adc_in = adc_in
        self.clockpin = kwargs.get('clockpin', 18)
        self.mosipin = kwargs.get('mosipin', 24)
        self.misopin = kwargs.get('misopin', 23)
        self.cspin = kwargs.get('cspin', 25)
        self.check_int = kwargs.get('check_int', 1)
        self.sleep_int = kwargs.get('sleep_int', 0.000001)
        self.check_times = kwargs.get('check_times', 100)
        self.logger_object = kwargs.get('logger_object', None)
        self.objLog = kwargs.get('logger_object', None)
        self.queue = Queue.Queue()
        self.running = False
        self.status = None
        io.setmode(io.BCM)
        io.setwarnings(False)
    def poll_value(self, minref=0, adc_in=0, debug=False, sleep_int=0.000001, max_retry=10):
        """ Poll value

        Samples the ADC self.check_times times per attempt and reduces the
        readings above `minref` to percentages of the usable range
        (1023 - minref).

        Returns:
            (avg_percent, max_percent, sample_count) tuple, or None when
            max_retry attempts produced no reading above minref.

        NOTE(review): the `adc_in` and `sleep_int` parameters are ignored —
        the loop reads self.adc_in and sleeps self.sleep_int instead;
        confirm whether the parameters should be honored.
        """
        import time
        count = 0
        retry = 0
        result = 0
        peak = 0
        while retry < max_retry:
            for x in range(0, self.check_times):
                value_in = readadc(self.adc_in, self.clockpin, self.mosipin, self.misopin, self.cspin)
                # Only count readings above the zero point.
                if minref < value_in <= 1023 and value_in > 0:
                    count += 1
                    result += value_in - minref
                    if value_in > peak:
                        peak = value_in
                time.sleep(self.sleep_int)
            if count > 0:
                try:
                    # avg_val = int(round(float(result) / float(count) / minref * 100))
                    avg_val = int(round(float(result) / float(count) / (1023 - minref) * 100))
                    max_val = int(round(float(peak - minref) / (1023 - minref) * 100))
                except Exception:
                    # Guard against division problems (e.g. minref == 1023).
                    avg_val = 0
                    max_val = 0
                count_val = count
                if debug is True:
                    # noinspection PyPep8
                    print 'MIN_REF: %s, AVG: (%s/%s)/1023=%s%%, MAX: (%s-%s)/1023=%s%%, COUNT: %s' % (
                        minref, result, count, avg_val, peak, minref, max_val, count_val)
                # Return value
                return avg_val, max_val, count
            else:
                # No usable reading this attempt: wait and retry.
                time.sleep(self.check_int)
                result = 0
                peak = 0
                count = 0
                retry += 1
        # Restart next attempt
        # NOTE(review): falling out of the retry loop returns None, which
        # callers index with [0]/[1]; confirm the intended behavior when the
        # sensor never reads above minref.
class PowerMeter(McpValue):
    # noinspection PyPep8
    """
    Non-intrusive current meter read through an MCP3008 ADC.

    Polls the ADC in a thread and queues (epoch, max_value) tuples whenever
    the peak reading moves past `limit` percent.
    object = PowerMeter(minref, adc_in, **kwargs)
    object = PowerMeter(510, 0, clockpin=18, mosipin=24, misopin=23, cspin=25, check_int=1, sleep_int=0.000001, logger_object=logger_object)
    """
    def __init__(self, minref, adc_in, **kwargs):
        """
        Args:
            minref: ADC reading treated as the zero point
            adc_in: MCP3008 channel to read
            kwargs: pin numbers, polling intervals, limit, debug,
                    logger_object (see class docstring)
        """
        self.all_status = []
        import Queue
        McpValue.__init__(self)
        self.minref = minref
        self.adc_in = adc_in
        self.clockpin = kwargs.get('clockpin', 18)
        self.mosipin = kwargs.get('mosipin', 24)
        self.misopin = kwargs.get('misopin', 23)
        self.cspin = kwargs.get('cspin', 25)
        self.check_int = kwargs.get('check_int', 1)
        self.sleep_int = kwargs.get('sleep_int', 0.000001)
        self.objLog = kwargs.get('logger_object', None)
        self.limit = kwargs.get('limit', 5)
        self.debug = kwargs.get('debug', False)
        self.queue = Queue.Queue()
        self.running = False
        self.avg_val = 0
        self.max_val = 0
        self.count = 0
        # Number of confirmation reads used to reject one-off misreads.
        self.verify_times = 3
    def run(self):
        """ Poll loop: queue (epoch, max_val) whenever the peak changes; requires root. """
        import time
        import os

        def changed(old, new, limit):
            # Near the 0/100 saturation points only the inward direction can
            # trigger; otherwise any move beyond +/- limit counts.
            if old + limit > 100:
                if new < old - limit:
                    return True
            if old - limit < 0:
                if new > old + limit:
                    return True
            if new > old + limit or new < old - limit:
                return True
            else:
                return False
        device_type = 'PowerMeter'
        if not os.geteuid() == 0:
            if self.objLog:
                self.objLog.log('%s - has to be run as root' % device_type, 'CRITICAL')
            else:
                print('%s - has to be run as root' % device_type)
            return 1
        self.running = True
        # Get initial status and supply to queue
        self.avg_val, self.max_val, self.count = self.poll_value(self.minref, self.adc_in, debug=self.debug,
                                                                 sleep_int=0.000001, max_retry=10)
        epoch = int(time.time())
        self.queue.put((epoch, self.avg_val))
        while self.running:
            # Peak value of the current polling window.
            current_power = \
                self.poll_value(self.minref, self.adc_in, debug=self.debug, sleep_int=0.000001, max_retry=10)[1]
            # Re-read several times so a single misread does not register.
            verified = [changed(self.max_val,
                                self.poll_value(self.minref, self.adc_in, debug=self.debug, sleep_int=0.000001,
                                                max_retry=10)[1], self.limit) for i in range(1, self.verify_times)]
            if changed(self.max_val, current_power, self.limit) and all(verified):
                self.max_val = current_power
                if self.objLog:
                    self.objLog.log('PowerMeter: ' + str(self.max_val), 'INFO')
                else:
                    print('PowerMeter: ' + str(self.max_val))
                epoch = int(time.time())
                self.queue.put((epoch, self.max_val))
    def stop(self):
        """ Ask the poll loop to exit. """
        self.running = False
    def reset(self):
        """ Reset current status to force update to Queue """
        self.avg_val = 0
        self.max_val = 0
        self.count = 0
    def get(self, past_seconds=0):
        """ Get the power changes within the past seconds (0 = all); drains the queue. """
        import time
        import Queue
        while True:
            try:
                power_status = self.queue.get(block=False)
            except Queue.Empty:
                break
            else:
                now = time.time()
                if past_seconds > 0:
                    if power_status[0] >= now - past_seconds:
                        self.all_status.append(power_status)
                else:
                    self.all_status.append(power_status)
        # BUG FIX: clear the accumulator after returning it, matching the
        # sibling sensor classes (PirMotion/Switch/DHT); previously entries
        # accumulated forever and were returned again on every call.
        r = self.all_status
        self.all_status = []
        return r
class AdcMeter(McpValue):
    # noinspection PyPep8
    """
    Percentage meter read through an MCP3008 ADC.

    Polls the ADC in a thread and queues (epoch, avg_value) tuples whenever
    the average reading moves past `limit` percent.
    object = ADCmeter(adc_in, **kwargs)
    object = ADCmeter(0, clockpin=18, mosipin=24, misopin=23, cspin=25, check_int=1, sleep_int=0.000001, pause_int=1, logger_object=logger_object)
    """
    def __init__(self, adc_in=0, **kwargs):
        """ adc_in: MCP3008 channel; kwargs: pins, intervals, limit, debug, logger_object. """
        self.all_status = []
        import Queue
        McpValue.__init__(self)
        self.minref = 0
        self.adc_in = adc_in
        self.clockpin = kwargs.get('clockpin', 18)
        self.mosipin = kwargs.get('mosipin', 24)
        self.misopin = kwargs.get('misopin', 23)
        self.cspin = kwargs.get('cspin', 25)
        self.check_int = kwargs.get('check_int', 1)
        self.sleep_int = kwargs.get('sleep_int', 1)
        self.objLog = kwargs.get('logger_object', None)
        self.limit = kwargs.get('limit', 5)
        self.check_times = kwargs.get('check_times', 1)
        self.pause_int = kwargs.get('pause_int', 1)
        self.debug = kwargs.get('debug', False)
        self.queue = Queue.Queue()
        self.running = False
        self.avg_val = 0
        self.max_val = 0
        self.count = 0
        self.verify_times = 1
    def run(self):
        """ Poll loop: queue (epoch, avg) whenever the average moves past the limit; requires root. """
        import time
        import os

        def changed(prev, cur, lim):
            # Near the 0/100 saturation points only the inward move counts;
            # otherwise any move beyond +/- lim triggers.
            if prev + lim > 100 and cur < prev - lim:
                return True
            if prev - lim < 0 and cur > prev + lim:
                return True
            return cur > prev + lim or cur < prev - lim
        device_type = 'ADCpercantage'
        if os.geteuid() != 0:
            if self.objLog:
                self.objLog.log('%s - has to be run as root' % device_type, 'CRITICAL')
            else:
                print('%s - has to be run as root' % device_type)
            return 1
        self.running = True
        # Seed the queue with the first reading.
        self.avg_val, self.max_val, self.count = self.poll_value(
            self.minref, self.adc_in, debug=self.debug, sleep_int=1, max_retry=1)
        self.queue.put((int(time.time()), self.avg_val))
        while self.running:
            reading = self.poll_value(self.minref, self.adc_in, debug=self.debug, sleep_int=1, max_retry=1)[0]
            if changed(self.avg_val, reading, self.limit):
                self.avg_val = reading
                if self.objLog:
                    self.objLog.log('ValueMeter: ' + str(self.avg_val), 'INFO')
                else:
                    print('ValueMeter: ' + str(self.avg_val))
                self.queue.put((int(time.time()), self.avg_val))
            # Pause the time
            time.sleep(self.pause_int)
    def stop(self):
        """ Ask the poll loop to exit. """
        self.running = False
    def reset(self):
        """ Zero the cached readings so the next poll re-queues a value. """
        self.avg_val = 0
        self.max_val = 0
        self.count = 0
    def get(self, past_seconds=0):
        """ Drain queued (epoch, value) readings; keep only recent ones when past_seconds > 0. """
        import time
        import Queue
        while True:
            try:
                entry = self.queue.get(block=False)
            except Queue.Empty:
                break
            if past_seconds <= 0 or entry[0] >= time.time() - past_seconds:
                self.all_status.append(entry)
        drained = self.all_status
        self.all_status = []
        return drained
def epoch_to_date(arg_epoch):
    """Convert a Unix epoch timestamp to a 'YYYY-MM-DD HH:MM:SS' local-time string."""
    import time
    local_time = time.localtime(int(arg_epoch))
    return time.strftime('%Y-%m-%d %H:%M:%S', local_time)
def get_local_ip():
    """ Returns the local IP-address as a string, or None on failure. """
    import socket
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # A UDP connect sends no packets; it only selects the outgoing
        # interface, whose address getsockname() then reports.
        s.connect(("gmail.com", 80))
        ip = s.getsockname()[0]
    except socket.error:
        # Bug fix: socket operations raise socket.error/OSError, never
        # ValueError, so failures previously propagated uncaught.
        ip = None
    finally:
        # Bug fix: always close the socket (it previously leaked).
        s.close()
    return ip
class Button(threading.Thread):
    """
    Class object to handle button
    object = Button(3, {1: [function1,args], 2: [function2,args]}, pin=22, check_int=0.1, log_object)

    Counts presses within `timeout` seconds, then dispatches the callable
    registered for that press count (functions maps count -> [callable, args]).
    """
    def __init__(self, timeout=1, functions=None, logger_object=None, **kwargs):
        threading.Thread.__init__(self)
        import RPi.GPIO as io
        self.objLog = logger_object
        self.running = False
        self.status = None
        self.timeout = timeout  # seconds to wait for further presses
        self.count = 0          # presses seen in the current window
        self.timer = 0          # seconds elapsed since the last press
        self.functions = functions
        if 'pin' in kwargs:
            self.pin = kwargs['pin']
        else:
            self.pin = 22
        if 'check_int' in kwargs:
            self.check_int = kwargs['check_int']
        else:
            self.check_int = 0.01
        io.setmode(io.BCM)
        # Activate input with PullUp
        io.setup(self.pin, io.IN, pull_up_down=io.PUD_UP)
    def run(self):
        import time
        import os
        import RPi.GPIO as io
        # GPIO access requires root on the Raspberry Pi.
        if not os.geteuid() == 0:
            if self.objLog:
                self.objLog.log('Button - has to be run as root', 'CRITICAL')
            else:
                print('Button - has to be run as root')
            return 1
        self.running = True
        while self.running:
            # Get current door status
            # GPIO.add_event_detect(22, GPIO.RISING, callback=printFunction, bouncetime=300)
            # Pull-up wiring: input reads low while the button is pressed.
            if not io.input(self.pin):
                io.wait_for_edge(self.pin, io.RISING)
                self.count += 1
                # Collect further presses; each press restarts the window.
                while self.timer < self.timeout and self.running:
                    time.sleep(self.check_int)
                    self.timer += self.check_int
                    if not io.input(self.pin):
                        io.wait_for_edge(self.pin, io.RISING)
                        self.count += 1
                        self.timer = 0
                # NOTE(review): the loop above exits once timer >= timeout,
                # but this checks timer > timeout — with check_int values
                # that sum exactly to timeout (or when stop() ends the loop
                # early) the collected presses are silently dropped. Confirm
                # whether >= was intended.
                if self.timer > self.timeout:
                    # Check what to do, if one click e.g {1: function1}
                    print "Button pressed " + str(self.count) + " times"
                    if self.functions is not None:
                        if self.count in self.functions.keys():
                            # Get func and args from dict
                            func, args = self.functions[self.count]
                            # Run function and depack args
                            func(*args)
                    self.count = 0
                    self.timer = 0
            # Pause for next poll
            time.sleep(self.check_int)
    def stop(self):
        # Signal run() to exit its polling loop.
        self.running = False
class Led(threading.Thread):
    """
    Class object to blink led in background
    object = Led(pin=25, check_int=0.1, log_object)

    The LED is driven active-low: io.LOW turns it on, io.HIGH turns it off.
    """
    def __init__(self, blink_type=None, logger_object=None, **kwargs):
        threading.Thread.__init__(self)
        import RPi.GPIO as io
        self.objLog = logger_object
        self.running = False
        self.status = None
        self.blink_type = blink_type  # None/'off', 'on' or 'blink'
        self.kwargs = kwargs
        # Blink durations (seconds); see also on_time/off_time below.
        if 'on' in kwargs:
            self.on = kwargs['on']
        else:
            self.on = 1
        if 'off' in kwargs:
            self.off = kwargs['off']
        else:
            self.off = 1
        if 'pin' in kwargs:
            self.pin = kwargs['pin']
        else:
            self.pin = 25
        if 'check_int' in kwargs:
            self.check_int = kwargs['check_int']
        else:
            self.check_int = 0.1
        self.off_time = 1
        self.on_time = 1
        # Disable warnings
        io.setwarnings(False)
        io.setmode(io.BCM)
        io.setup(self.pin, io.OUT)
        io.output(self.pin, io.HIGH)  # start with the LED off (active-low)
    def run(self):
        """Drive the LED according to blink_type until stop() is called."""
        import time
        import os
        import RPi.GPIO as io
        if not os.geteuid() == 0:
            if self.objLog:
                self.objLog.log('Led - has to be run as root', 'CRITICAL')
            else:
                print('Led - has to be run as root')
            return 1
        self.running = True
        while self.running:
            # Get current sort of led
            if self.blink_type == 'blink':
                if self.status == 'on':
                    io.output(self.pin, io.HIGH)
                    self.status = 'off'
                    time.sleep(self.off_time)
                if self.status == 'off':
                    io.output(self.pin, io.LOW)
                    self.status = 'on'
                    time.sleep(self.on_time)
            if self.blink_type == 'on':
                io.output(self.pin, io.LOW)
                self.status = 'on'
            if self.blink_type == 'off':
                io.output(self.pin, io.HIGH)
                self.status = 'off'
            # Wait for next check
            time.sleep(self.check_int)
        # Stop led
        io.output(self.pin, io.HIGH)
    def blink(self, on=1, off=1):
        """ Start blinking; on/off are the lit/dark durations in seconds """
        self.blink_type = 'blink'
        self.on_time = on
        self.off_time = off
    def led_on(self, period=0):
        """ Turn on the LED; with period > 0, turn it off again after that delay """
        import time
        self.blink_type = 'on'
        if period > 0:
            time.sleep(period)
            self.status = 'off'
            # Bug fix: reset blink_type, not just status — run() immediately
            # overwrote status while blink_type stayed 'on', so the LED never
            # turned off again. Mirrors Buzzer.buzz_on().
            self.blink_type = 'off'
    def led_off(self):
        """ Turn off led """
        self.blink_type = 'off'
    def stop(self):
        """ Turn the LED off and end the run() loop """
        self.blink_type = 'off'
        self.running = False
class Buzzer(threading.Thread):
    """
    Drive a piezo speaker on a GPIO pin from a background thread.

    object = Buzzer(pin=27, check_int=0.1, log_object)
    """
    def __init__(self, buzz_type=None, logger_object=None, **kwargs):
        threading.Thread.__init__(self)
        import RPi.GPIO as io
        self.objLog = logger_object
        self.running = False
        self.status = None
        self.buzz_type = buzz_type  # None/'off', 'on' or 'sos'
        self.kwargs = kwargs
        self.pin = kwargs.get('pin', 27)
        self.check_int = kwargs.get('check_int', 0.1)
        io.setwarnings(False)
        io.setmode(io.BCM)
        io.setup(self.pin, io.OUT)
    def run(self):
        import time
        import os
        import RPi.GPIO as io
        # GPIO access requires root on the Raspberry Pi.
        if os.geteuid() != 0:
            if self.objLog:
                self.objLog.log('Buzzer - has to be run as root', 'CRITICAL')
            else:
                print('Buzzer - has to be run as root')
            return 1
        self.running = True
        while self.running:
            # Act on the currently requested buzz mode.
            if self.buzz_type == 'on':
                io.output(self.pin, io.HIGH)
                self.status = 'on'
            if self.buzz_type == 'off':
                io.output(self.pin, io.LOW)
                self.status = 'off'
            if self.buzz_type == 'sos':
                # Three short, four long, three short pulses, each followed
                # by a half-second pause (identical timing to the original
                # unrolled sequence).
                for pulse in (0.5, 0.5, 0.5, 1, 1, 1, 1, 0.5, 0.5, 0.5):
                    io.output(self.pin, io.HIGH)
                    time.sleep(pulse)
                    io.output(self.pin, io.LOW)
                    time.sleep(0.5)
                self.buzz_type = 'off'
            # Wait for next check
            time.sleep(self.check_int)
        # Complete turn off buzzer
        io.output(self.pin, io.LOW)
    def buzz_on(self, period=0):
        """Turn the buzzer on; with period > 0, turn it off after that delay."""
        import time
        self.buzz_type = 'on'
        if period > 0:
            time.sleep(period)
            self.buzz_type = 'off'
    def buzz_off(self):
        """Silence the buzzer."""
        self.buzz_type = 'off'
    def buzz_sos(self):
        """Request an SOS pulse pattern from the run() loop."""
        self.buzz_type = 'sos'
    def stop(self):
        """End the run() loop."""
        self.running = False
def send_gmail(arg_server, arg_port, arg_user, arg_pass, arg_to, arg_from, arg_subject, arg_body, **kwargs):
    """ Function to send a mail with attachments through gmail """
    # Python 2 email API locations (email.MIMEMultipart etc.).
    from email.MIMEMultipart import MIMEMultipart
    from email.MIMEText import MIMEText
    from email.MIMEImage import MIMEImage
    from email.mime.application import MIMEApplication
    import smtplib
    import re
    import os
    # Create the root message and fill in the from, to, and subject headers
    msg_root = MIMEMultipart('related')
    msg_root['Subject'] = arg_subject
    msg_root['From'] = arg_from
    msg_root['To'] = arg_to
    msg_root.preamble = 'This is a multi-part message in MIME format.'
    # Encapsulate the plain and HTML versions of the message body in an
    # 'alternative' part, so message agents can decide which they want to display.
    msg_alternative = MIMEMultipart('alternative')
    msg_root.attach(msg_alternative)
    msg_text = MIMEText('Your mail-client doesnt support HTML')
    msg_alternative.attach(msg_text)
    # We reference the image in the IMG SRC attribute by the ID we give it below
    var_body_text = arg_body
    # Create list of images
    if 'Files' in kwargs.keys():
        arg_files = kwargs['Files']
        # Split attachments: jpg/jpeg/png become inline images, the rest
        # become regular file attachments.
        list_images = [x for x in arg_files if re.match(".+\.(jpg|jpeg|png)$", x, re.IGNORECASE)]
        list_non_images = [x for x in arg_files if not re.match(".+\.(jpg|jpeg|png)$", x, re.IGNORECASE)]
        # Reference each inline image in the HTML body via its Content-ID.
        for i, varImage in enumerate(list_images, start=1):
            var_body_text = var_body_text + '<img src="cid:image' + str(i) + '"><br>'
        msg_text = MIMEText(var_body_text, 'html')
        msg_alternative.attach(msg_text)
        # Attach Images
        for i, varImage in enumerate(list_images, start=1):
            print "Attaching {} to mail".format(varImage)
            fp = open(varImage, 'rb')
            msg_image = MIMEImage(fp.read())
            fp.close()
            # Define the image's ID as referenced above
            msg_image.add_header('Content-ID', '<image' + str(i) + '>')
            msg_root.attach(msg_image)
        # Attach other files
        for i, varFile in enumerate(list_non_images, start=1):
            print "Attaching {} to mail".format(varFile)
            fp = open(varFile, 'rb')
            msg_attach = MIMEApplication(fp.read())
            fp.close()
            msg_attach.add_header('Content-Disposition', 'attachment', filename=os.path.basename(varFile))
            msg_root.attach(msg_attach)
    # NOTE(review): when Files are present this attaches the HTML body a
    # second time (it was already attached above) — presumably harmless since
    # mail clients render the last alternative part, but confirm intended.
    msg_text = MIMEText(var_body_text, 'html')
    msg_alternative.attach(msg_text)
    gmail_user = arg_user
    gmail_password = arg_pass
    # Fall back to Gmail's SMTP endpoint when no server/port is supplied.
    # noinspection PyPep8
    if arg_server is None: arg_server = "smtp.gmail.com"
    # noinspection PyPep8
    if arg_port is None: arg_port = 587
    print "Start sending mail"
    mail_server = smtplib.SMTP(arg_server, arg_port)
    mail_server.ehlo()
    mail_server.starttls()
    mail_server.ehlo()
    mail_server.login(gmail_user, gmail_password)
    mail_server.sendmail(gmail_user, arg_to, msg_root.as_string())
    mail_server.close()
    print "Done sending mail"
class Gmail:
    """
    Class object to queue and send gmail

    send() fires worker() on a background thread so callers never block on
    SMTP; worker() builds and submits the MIME message.
    """
    def __init__(self, arg_user, arg_pass, arg_from=None, arg_server=None, arg_port=None, logger_object=None):
        self.objLog = logger_object
        self.running = False
        self.user = arg_user
        self.password = arg_pass
        # From-address defaults to the login user.
        # noinspection PyPep8
        if arg_from is None: arg_from = arg_user
        self.mail_from = arg_from
        # Server/port default to Gmail's SMTP endpoint.
        # noinspection PyPep8
        if arg_server is None: arg_server = "smtp.gmail.com"
        # noinspection PyPep8
        if arg_port is None: arg_port = 587
        self.server = arg_server
        self.port = arg_port
    def send(*func_arg):
        # No explicit self: called as instance.send(to, subject, body[, files]),
        # so func_arg already starts with the instance, which becomes worker's
        # `self` when passed through as Thread args.
        import threading
        t = threading.Thread(target=Gmail.worker, args=func_arg)
        t.start()
    def worker(self, arg_to, arg_subject, arg_body, arg_file=None):
        # Python 2 email API locations.
        from email.MIMEMultipart import MIMEMultipart
        from email.MIMEText import MIMEText
        from email.MIMEImage import MIMEImage
        from email.mime.application import MIMEApplication
        import smtplib
        import re
        import os
        # Create the root message and fill in the from, to, and subject headers
        msgRoot = MIMEMultipart('related')
        msgRoot['Subject'] = arg_subject
        msgRoot['From'] = self.mail_from
        msgRoot['To'] = arg_to
        msgRoot.preamble = 'This is a multi-part message in MIME format.'
        # Encapsulate the plain and HTML versions of the message body in an
        # 'alternative' part, so message agents can decide which they want to display.
        msg_alternative = MIMEMultipart('alternative')
        msgRoot.attach(msg_alternative)
        msg_text = MIMEText('Your mail-client doesnt support HTML')
        msg_alternative.attach(msg_text)
        # We reference the image in the IMG SRC attribute by the ID we give it below
        var_body_text = arg_body
        # Create list of images
        if arg_file is not None:
            # jpg/jpeg/png become inline images; everything else a plain attachment.
            list_images = [x for x in arg_file if re.match(".+\.(jpg|jpeg|png)$", x, re.IGNORECASE)]
            list_non_images = [x for x in arg_file if not re.match(".+\.(jpg|jpeg|png)$", x, re.IGNORECASE)]
            for i, varImage in enumerate(list_images, start=1):
                var_body_text = var_body_text + '<img src="cid:image' + str(i) + '"><br>'
            msg_text = MIMEText(var_body_text, 'html')
            msg_alternative.attach(msg_text)
            # Attach Images
            for i, varImage in enumerate(list_images, start=1):
                fp = open(varImage, 'rb')
                msg_image = MIMEImage(fp.read())
                fp.close()
                # Define the image's ID as referenced above
                msg_image.add_header('Content-ID', '<image' + str(i) + '>')
                msgRoot.attach(msg_image)
            # Attach other files
            for i, varFile in enumerate(list_non_images, start=1):
                fp = open(varFile, 'rb')
                msg_attach = MIMEApplication(fp.read())
                fp.close()
                msg_attach.add_header('Content-Disposition', 'attachment', filename=os.path.basename(varFile))
                msgRoot.attach(msg_attach)
        # NOTE(review): with attachments this adds the HTML body a second
        # time (already attached above) — presumably harmless, confirm.
        msg_text = MIMEText(var_body_text, 'html')
        msg_alternative.attach(msg_text)
        gmail_user = self.user
        gmail_password = self.password
        mail_server = smtplib.SMTP(self.server, self.port)
        mail_server.ehlo()
        mail_server.starttls()
        mail_server.ehlo()
        mail_server.login(gmail_user, gmail_password)
        mail_server.sendmail(gmail_user, arg_to, msgRoot.as_string())
        mail_server.close()
# noinspection PyMethodMayBeStatic
class FTP(threading.Thread):
    """ Background FTP worker: queue uploads/downloads, processed in run(). """
    def __init__(self, ftp_server, ftp_user, ftp_pass, logger_object=None, **kwargs):
        threading.Thread.__init__(self)
        import ftplib
        # Bug fix: the connection object was created twice; one is enough.
        self.ftp = ftplib.FTP()
        import Queue
        self.queue = Queue.Queue()  # pending [command, file_list, file_dir] jobs
        self.objLog = logger_object
        self.running = False
        self.server = ftp_server
        self.user = ftp_user
        self.ftpPass = ftp_pass
        self.logger_object = logger_object
        self.status = None  # None when idle, "transfer" while working
        if not 'port' in kwargs:
            self.port = 21
        else:
            self.port = kwargs['port']
    def chdir(self, ftp_path, ftp_conn):
        """ Change into ftp_path on the server, creating missing directories. """
        def check_dir(dirname, conn):
            """ Create dirname if absent, then enter it. """
            filelist = []
            conn.retrlines('LIST', filelist.append)
            found = False
            for f in filelist:
                # Directory entries start with 'd' in a LIST response.
                if f.split()[-1] == dirname and f.lower().startswith('d'):
                    found = True
            if not found:
                conn.mkd(dirname)
            conn.cwd(dirname)
        dirs = [d for d in ftp_path.split('/') if d != '']
        for p in dirs:
            check_dir(p, ftp_conn)
    def run(self):
        import os
        import time
        self.running = True
        while self.running:
            if self.queue.qsize() > 0:
                self.status = "transfer"
                command, next_file_list, next_file_dir = self.queue.get()
                # Initiate FTP object
                try:
                    self.ftp.connect(self.server, self.port)
                    self.ftp.login(self.user, self.ftpPass)
                    if command == 'upload':
                        for ftpFile in next_file_list:
                            if next_file_dir is not None:
                                self.chdir(next_file_dir, self.ftp)
                            # `with` closes the handle even if storbinary
                            # raises (previously leaked on error).
                            with open(ftpFile, 'rb') as fobj:
                                self.ftp.storbinary('STOR ' + os.path.basename(ftpFile), fobj)
                    if command == 'download':
                        for ftpFile in next_file_list:
                            if next_file_dir is not None:
                                self.chdir(next_file_dir, self.ftp)
                                local_path = os.path.dirname(next_file_dir) + "/" + os.path.basename(ftpFile)
                            else:
                                local_path = os.path.basename(ftpFile)
                            with open(local_path, 'wb') as fobj:
                                self.ftp.retrbinary("RETR " + ftpFile, fobj.write)
                    msg = "FTP command: " + command + ", Files: " + str(next_file_list)
                    if self.objLog:
                        self.objLog.log(msg, 'INFO')
                    else:
                        print(msg)
                    self.ftp.quit()
                except Exception as e:
                    # Bug fix: was `except ValueError`, which FTP/socket
                    # errors never raise, and it logged the exception class
                    # (str(ValueError)) while crashing when objLog was None.
                    if self.objLog:
                        self.objLog.log("FTP fail " + str(e), "ERROR")
                    else:
                        print("FTP fail " + str(e))
            time.sleep(3)
            self.status = None
    def upload(self, file_list, file_dir=None):
        """ Add files for upload to queue """
        self.queue.put(['upload', file_list, file_dir])
    def download(self, file_list, file_dir=None):
        """ Add files for download to queue """
        self.queue.put(['download', file_list, file_dir])
    def dir(self, ftp_dir='.'):
        """ List directory """
        self.ftp.connect(self.server, self.port)
        self.ftp.login(self.user, self.ftpPass)
        dir_list = self.ftp.dir(ftp_dir)
        self.ftp.quit()
        return dir_list
    def stop(self):
        """ End the run() loop after the current job. """
        self.running = False
def PiCamera(filename, res=None):
    """Capture a still image to `filename` with the Raspberry Pi camera.

    res: optional (width, height) tuple; when None the camera's default
    resolution is used.
    """
    import picamera
    import time
    with picamera.PiCamera() as camera:
        if res is not None:
            # Bug fix: use the caller's resolution — it was previously
            # hard-coded to (800, 600) and the `res` argument was ignored.
            camera.resolution = res
        camera.start_preview()
        # Camera warm-up time
        time.sleep(2)
        camera.capture(filename)
        camera.stop_preview()
def read_lux_meter():
    """Read the full-spectrum light channel from an I2C sensor at 0x39.

    Returns the raw 16-bit full-spectrum (IR + visible) channel value.
    Relies on a module-level `smbus` import — presumably python-smbus;
    register layout suggests a TSL2561 — TODO confirm.
    """
    # Get I2C bus
    bus = smbus.SMBus(1)
    # Power the device up (control register) and configure timing/gain.
    bus.write_byte_data(0x39, 0x00 | 0x80, 0x03)
    bus.write_byte_data(0x39, 0x01 | 0x80, 0x02)
    # Allow an integration cycle to complete before reading.
    time.sleep(0.5)
    data = bus.read_i2c_block_data(0x39, 0x0C | 0x80, 2)
    data1 = bus.read_i2c_block_data(0x39, 0x0E | 0x80, 2)
    # Convert the data (little-endian low/high byte pairs).
    ch0 = data[1] * 256 + data[0]
    ch1 = data1[1] * 256 + data1[0]
    # Output data to screen
    # print "Full Spectrum(IR + Visible) :%d lux" % ch0
    # print "Infrared Value :%d lux" % ch1
    # print "Visible Value :%d lux" % (ch0 - ch1)
    return ch0
class Luxmeter:
    """Read an I2C light sensor (presumably a TSL2561 — confirm) and compute lux."""
    i2c = None
    def __init__(self, address=0x39, debug=0, pause=0.8):
        # from Adafruit_I2C import Adafruit_I2C
        import Adafruit_GPIO.I2C as I2C
        self.i2c = I2C.Device(address, 1)
        self.address = address
        self.pause = pause  # settle time after a gain change, in seconds
        self.debug = debug
        self.gain = 0  # no gain preselected
        self.i2c.write8(0x80, 0x03)  # enable the device
    def set_gain(self, gain=1):
        """ Set the gain (1 = 1X, anything else = 16X) and wait for it to settle """
        import time
        if gain != self.gain:
            if gain == 1:
                self.i2c.write8(0x81, 0x02)  # set gain = 1X and timing
                if self.debug:
                    print "Setting low gain"
            else:
                self.i2c.write8(0x81, 0x12)  # set gain = 16X and timing
                if self.debug:
                    print "Setting high gain"
            self.gain = gain  # safe gain for calculation
            time.sleep(self.pause)
    def read_word(self, reg):
        """Reads a word from the I2C device"""
        try:
            wordval = self.i2c.readU16(reg)
            # Device returns low byte first; swap into native order.
            newval = self.i2c.reverseByteOrder(wordval)
            if self.debug:
                print("I2C: Device 0x%02X returned 0x%04X from reg 0x%02X" % (self.address, wordval & 0xFFFF, reg))
            return newval
        except IOError:
            print("Error accessing 0x%02X: Check your I2C address" % self.address)
            return -1
    def read_full(self, reg=0x8C):
        """Reads visible+IR diode from the I2C device"""
        return self.read_word(reg)
    def read_ir(self, reg=0x8E):
        """Reads IR only diode from the I2C device"""
        return self.read_word(reg)
    def get_lux(self, gain=1):
        """Grabs a lux reading either with autoranging (gain=0) or with a specified gain (1, 16)"""
        if gain == 1 or gain == 16:
            self.set_gain(gain)  # low/highGain
            ambient = self.read_full()
            ir = self.read_ir()
        elif gain == 0:  # auto gain
            self.set_gain(16)  # first try highGain
            ambient = self.read_full()
            if ambient < 65535:
                ir = self.read_ir()
            if ambient >= 65535 or ir >= 65535:  # value(s) exeed(s) data
                self.set_gain(1)  # set lowGain
                ambient = self.read_full()
                ir = self.read_ir()
        # NOTE(review): any gain other than 0, 1 or 16 leaves ambient/ir
        # unbound and the code below raises UnboundLocalError — confirm
        # callers only pass those values.
        if self.gain == 1:
            ambient *= 16  # scale 1x to 16x
            ir *= 16  # scale 1x to 16x
        try:
            ratio = (ir / float(ambient))  # changed to make it run under python
        except ZeroDivisionError:
            ratio = 0
        if self.debug:
            print "IR Result", ir
            print "Ambient Result", ambient
        # Piecewise lux approximation; coefficients presumably from the
        # sensor datasheet — TODO confirm against TSL2561 docs.
        if (ratio >= 0) & (ratio <= 0.52):
            lux = (0.0315 * ambient) - (0.0593 * ambient * (ratio ** 1.4))
        elif ratio <= 0.65:
            lux = (0.0229 * ambient) - (0.0291 * ir)
        elif ratio <= 0.80:
            lux = (0.0157 * ambient) - (0.018 * ir)
        elif ratio <= 1.3:
            lux = (0.00338 * ambient) - (0.0026 * ir)
        elif ratio > 1.3:
            lux = 0
        return lux
class LuxMeter(threading.Thread):
    """
    Class object to read lux from a TSL2561
    object = LuxMeter(limit=1, check_int=10, log_object)

    Polls read_lux_meter() every check_int seconds and queues (epoch, value)
    pairs whenever the reading moves by more than `limit`. Values are
    clamped to a maximum of 50.
    """
    def __init__(self, logger_object=None, **kwargs):
        threading.Thread.__init__(self)
        self.all_status = []
        import Queue
        self.objLog = logger_object
        self.queue = Queue.Queue()  # (epoch, value) change events
        self.running = False
        self.value = 0
        self.limit = kwargs.get('limit', 5)  # change threshold before reporting
        if 'check_int' in kwargs:
            self.check_int = kwargs['check_int']
        else:
            self.check_int = 10
        self.luxmeter = Luxmeter()
    def run(self):
        import time
        import os
        # I2C access requires root on the Raspberry Pi.
        if not os.geteuid() == 0:
            if self.objLog:
                self.objLog.log('Lux - has to be run as root', 'CRITICAL')
            else:
                print('Lux - has to be run as root')
            return 1
        self.running = True
        # Get initial status and supply to queue
        self.value = int(read_lux_meter())
        if self.value > 50:
            self.value = 50
        epoch = int(time.time())
        self.queue.put((epoch, self.value))
        while self.running:
            # Get new value
            new_value = int(read_lux_meter())
            if new_value > 50:
                new_value = 50
            if (new_value > self.value + self.limit) or (new_value < self.value - self.limit):
                # NOTE(review): this log call passes no level, unlike every
                # other objLog.log call in the file — confirm the logger's
                # default level is acceptable here.
                if self.objLog:
                    self.objLog.log('Luxmeter exceeds limit of %s, new value %s' % (self.limit, new_value))
                else:
                    print 'Luxmeter exceeds limit of %s, new value %s' % (self.limit, new_value)
                self.value = new_value
                epoch = int(time.time())
                self.queue.put((epoch, self.value))
            # Pause for next poll
            time.sleep(self.check_int)
    def stop(self):
        # Signal run() to exit its polling loop.
        self.running = False
    def get(self, past_seconds=0):
        """ Get the motions within the past seconds """
        import time
        import Queue
        # Drain queued change events, optionally filtering to the window.
        while True:
            try:
                switch_status = self.queue.get(block=False)
            except Queue.Empty:
                break
            else:
                now = time.time()
                if past_seconds > 0:
                    if switch_status[0] >= now - past_seconds:
                        self.all_status.append(switch_status)
                else:
                    self.all_status.append(switch_status)
        # Return a snapshot and clear the buffer for the next call.
        r = self.all_status
        self.all_status = []
        return r
class ModemDongle(threading.Thread):
    """
    Class object to handle 3G Dongle
    object = ModemDongle(log_object, tty='/dev/ttyUSB0')
    object = ModemDongle(incoming_cmd={'search_for_word_in_sms': 'function_or_external_script_with_rest_as_args'})

    run() polls the modem for incoming SMS, matches them against
    incoming_cmd rules (keyword -> callable or external script), replies
    with the result, and sends queued outgoing messages in 160-char parts.
    """
    # noinspection PyProtectedMember
    def __init__(self, logger_object=None, **kwargs):
        threading.Thread.__init__(self)
        import Queue
        import sms
        import ast
        import time
        # Fixed 60s startup delay — presumably to let the dongle register
        # on the network after boot; confirm whether this is still needed.
        time.sleep(60)
        self.objLog = logger_object
        self.queue = Queue.Queue()  # outgoing (number, message) pairs
        self.running = False
        self.status = None
        # self.tty = kwargs.get('tty', '/dev/tty.HUAWEIMobile-Modem')
        self.tty = kwargs.get('tty', '/dev/ttyUSB0')
        self.check_int = int(kwargs.get('check_int', 10))
        self.incoming_cmd = kwargs.get('incoming_cmd', {})
        self.functions = kwargs.get('functions', None)
        # Change string to dict if required
        if type(self.incoming_cmd) is str:
            self.incoming_cmd = ast.literal_eval(self.incoming_cmd)
        # Initiate modem
        self.m = sms.Modem(self.tty)
        # Change SMS mode (text mode)
        self.m._command('AT+CMGF=1')
        # Initiate memcache if it exists
        # NOTE(review): module `m` is presumably a memcache client imported
        # under an unusual name — confirm which package provides it.
        # noinspection PyPep8
        try:
            import m
        except:
            print "Please install memcache to support reading status"
        else:
            self.shared = m.Client(['127.0.0.1:11211'], debug=1)
    def run(self):
        import time
        import re
        import subprocess
        from datetime import datetime
        self.running = True
        # Bugfix
        datetime.strptime('2012-01-01', '%Y-%m-%d')
        while self.running:
            # Check if memcache exists and with sms in queue
            if 'shared' in dir(self):
                sms_memcache = self.shared.get('sms')
                if sms_memcache:
                    number, message = sms_memcache
                    if self.objLog:
                        print "Found sms in memcache queue for %s with body %s" % (number, message[:30])
                    self.send(number, message)
                    self.shared.set('sms', None)
            # Check if any new incoming SMS
            # NOTE(review): SerialException is not imported in this module,
            # so if messages() fails the except clause itself raises
            # NameError; also `msgs` is unbound after the except path runs —
            # confirm both.
            try:
                msgs = self.m.messages()
            except SerialException:
                self.objLog.log('SMS DONGLE ERROR - REBOOTING in 3 hours', 'ERROR')
                time.sleep(10800)
                import subprocess
                command = "/usr/bin/sudo /sbin/shutdown -r now"
                process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
                output = process.communicate()[0]
                print output
            if len(msgs) > 0:
                for sms in msgs:
                    if self.objLog:
                        self.objLog.log('Incoming SMS from %s with body: %s' % (sms.number, sms.text), 'INFO')
                    else:
                        print 'Incoming SMS from %s with body: %s' % (sms.number, sms.text)
                    # Handle incoming sms
                    if self.objLog:
                        self.objLog.log("Checking incoming SMS against rules %s" % (str(self.incoming_cmd),), 'INFO')
                    print "Checking incoming SMS against rules %s" % (str(self.incoming_cmd),)
                    for key in self.incoming_cmd.keys():
                        cmd = self.incoming_cmd[key]
                        # Rule matches when the SMS starts with the keyword;
                        # the remainder becomes the arguments.
                        if re.search("^%s\s*(.*)" % (key,), sms.text):
                            args = re.search("^%s\s*(.*)" % (key,), sms.text).groups()[0]
                            if self.objLog:
                                self.objLog.log('Found string "%s" in SMS' % (key,), 'INFO')
                            print 'Found string "%s" in SMS' % (key,)
                            # Check if function
                            # NOTE(review): crashes if self.functions is None
                            # (its default) — confirm callers always pass it.
                            cmd_func = cmd
                            if cmd_func in self.functions.keys():
                                print "Found %s in list of passed functions" % (cmd_func,)
                                cmd_func = self.functions[cmd_func]
                            if callable(cmd_func):
                                args = args.split()
                                print "Command is existing function, calling %s with args: %s" % (cmd, str(args))
                                # Might add arguments in the future
                                result = cmd_func(*args)
                                if self.objLog:
                                    # noinspection PyPep8
                                    self.objLog.log('Sending message to %s with body: %s' % (sms.number, str(result)),
                                                    'INFO')
                                else:
                                    print 'Sending SMS to %s with body: %s' % (sms.number, str(result)[:50])
                                self.send(sms.number, str(result))
                            else:
                                print "No function, trying to call external script %s" % (cmd,)
                                # noinspection PyPep8
                                try:
                                    result = subprocess.Popen('%s' % (cmd,), stdout=subprocess.PIPE).stdout.read()
                                    self.send(sms.number, str(result).encode('ascii', 'replace')[:160])
                                except:
                                    print "Could not find function nor external script - skip"
                    if self.objLog:
                        self.objLog.log('Deleting message', 'INFO')
                    else:
                        print 'Deleting message'
                    sms.delete()
            # Send any messages in queue, split into 160-character SMS parts.
            if self.queue.qsize() > 0:
                number, message = self.queue.get()
                # self.m.send(number, message)
                if self.objLog:
                    self.objLog.log('Sending message to %s with message: %s' % (number, str(message)[:160]), 'INFO')
                self.m.send(number, str(message)[:160])
                if len(message) > 160:
                    if self.objLog:
                        # noinspection PyPep8
                        self.objLog.log('Sending parial message 160-320 to %s: %s' % (number, str(message)[160:][:160]),
                                        'INFO')
                    time.sleep(10)
                    self.m.send(number, str(message)[160:][:160])
                if len(message) > 320:
                    if self.objLog:
                        # noinspection PyPep8
                        self.objLog.log(
                            'Sending parial message 320-480 to %s: %s' % (number, str(message)[320:][:160]), 'INFO')
                    time.sleep(10)
                    self.m.send(number, str(message)[320:][:160])
            # Pause for next poll
            time.sleep(self.check_int)
    def stop(self):
        # Signal run() to exit its polling loop.
        self.running = False
    def send(self, number, message):
        """ Add sms message to send """
        self.queue.put((number, message))
class DongleTCPRequestHandler(SocketServer.BaseRequestHandler):
    """ BaseRequestHandler uses TCPserver and does the actual work """
    def handle(self):
        import json
        # Read one request (up to 1 KiB) from the client.
        data = self.request.recv(1024)
        # Check if json
        try:
            data = json.loads(data)
        except Exception:
            # Non-JSON payloads are silently ignored.
            pass
        else:
            # NOTE(review): ModemDongle.send takes (number, message) but a
            # single decoded object is passed here — confirm the expected
            # payload shape. `get_datetime` is also not defined in this
            # module; presumably provided elsewhere — confirm.
            self.server.dongle.send(data)
            print(get_datetime() + ": Sending SMS - " + str(data))
        # Always acknowledge, even for ignored payloads (Python 2: str is bytes).
        response = "ok"
        self.request.sendall(response)
class DongleTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    # Threaded TCP server that exposes a ModemDongle to its request handlers.
    def __init__(self, server_address, request_handler_class, dongle_object):
        """ Extend init to handle Dongle object """
        # Handlers reach the dongle via self.server.dongle (see
        # DongleTCPRequestHandler.handle).
        self.dongle = dongle_object
        SocketServer.TCPServer.__init__(self, server_address, request_handler_class)
|
silksuit.py | #!/usr/bin/python3
"""A basic threading example | rzfeeser@alta3.com"""
# Make a thread that simulates a NASA count down
# waits a 1 seconds at the bottom of each loop
## Python standard library
import threading
## py standard library
import time
def groundcontrol(start=10, delay=1):
    """Print a NASA-style countdown, then the launch message.

    start: first number of the countdown (default 10, the original value).
    delay: seconds to sleep between numbers (default 1, the original value).
    Defaults keep the existing threading.Thread(target=groundcontrol) call
    working unchanged; the parameters generalize the hard-coded constants.
    """
    for i in range(start, -1, -1):
        print(i)
        time.sleep(delay)
    print("Orion you are primed for launch. Count down begins...")
## Create a thread object (target is the function to call)
mythread = threading.Thread(target=groundcontrol)
## begin the thread; it is non-daemon, so the process stays alive until
## the countdown finishes
mythread.start()
|
sensor.py | """Sensor to monitor incoming/outgoing phone calls on a Fritz!Box router."""
from __future__ import annotations
from datetime import datetime, timedelta
import logging
import queue
from threading import Event as ThreadingEvent, Thread
from time import sleep
from fritzconnection.core.fritzmonitor import FritzMonitor
import voluptuous as vol
from homeassistant.backports.enum import StrEnum
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import Event, HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from .const import (
ATTR_PREFIXES,
CONF_PHONEBOOK,
CONF_PREFIXES,
DEFAULT_HOST,
DEFAULT_NAME,
DEFAULT_PHONEBOOK,
DEFAULT_PORT,
DEFAULT_USERNAME,
DOMAIN,
FRITZ_STATE_CALL,
FRITZ_STATE_CONNECT,
FRITZ_STATE_DISCONNECT,
FRITZ_STATE_RING,
FRITZBOX_PHONEBOOK,
ICON_PHONE,
MANUFACTURER,
SERIAL_NUMBER,
UNKNOWN_NAME,
)
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(hours=3)
class CallState(StrEnum):
    """Fritz sensor call states.

    String values are exposed directly as the sensor's state; RINGING and
    DIALING presumably distinguish incoming vs outgoing calls — confirm
    against the FRITZ_STATE_* event mapping.
    """
    RINGING = "ringing"
    DIALING = "dialing"
    TALKING = "talking"
    IDLE = "idle"
# Deprecated in Home Assistant 2022.3
# YAML schema retained only so existing configurations can be imported into
# a config entry by async_setup_platform below.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
        vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
        vol.Optional(CONF_PASSWORD): cv.string,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_PHONEBOOK, default=DEFAULT_PHONEBOOK): cv.positive_int,
        vol.Optional(CONF_PREFIXES): vol.All(cv.ensure_list, [cv.string]),
    }
)
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Import a deprecated YAML configuration into a config entry."""
    _LOGGER.warning(
        "Configuration of the AVM FRITZ!Box Call Monitor sensor platform in YAML "
        "is deprecated and will be removed in Home Assistant 2022.5; "
        "Your existing configuration has been imported into the UI automatically "
        "and can be safely removed from your configuration.yaml file"
    )
    # Kick off the import flow; the config-entry setup takes over from here.
    import_flow = hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_IMPORT}, data=config
    )
    hass.async_create_task(import_flow)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up the fritzbox_callmonitor sensor from config_entry."""
    entry_data = config_entry.data
    fritzbox_phonebook = hass.data[DOMAIN][config_entry.entry_id][FRITZBOX_PHONEBOOK]
    # Entity name combines the router model with the phonebook title; the
    # unique id combines the router serial with the phonebook id.
    sensor = FritzBoxCallSensor(
        name=f"{fritzbox_phonebook.fph.modelname} Call Monitor {config_entry.title}",
        unique_id=f"{entry_data[SERIAL_NUMBER]}-{entry_data[CONF_PHONEBOOK]}",
        fritzbox_phonebook=fritzbox_phonebook,
        prefixes=config_entry.options.get(CONF_PREFIXES),
        host=entry_data[CONF_HOST],
        port=entry_data[CONF_PORT],
    )
    async_add_entities([sensor])
class FritzBoxCallSensor(SensorEntity):
"""Implementation of a Fritz!Box call monitor."""
    def __init__(self, name, unique_id, fritzbox_phonebook, prefixes, host, port):
        """Initialize the sensor."""
        self._state: CallState = CallState.IDLE
        self._attributes = {}  # extra state attributes of the latest call event
        self._name = name.title()
        self._unique_id = unique_id
        self._fritzbox_phonebook = fritzbox_phonebook  # may be None; see should_poll
        self._prefixes = prefixes  # optional dial prefixes, surfaced as an attribute
        self._host = host  # FRITZ!Box call-monitor endpoint
        self._port = port
        self._monitor = None  # created lazily in _start_call_monitor
    async def async_added_to_hass(self) -> None:
        """Connect to FRITZ!Box to monitor its call state."""
        await super().async_added_to_hass()
        # Connecting is blocking I/O, so run it in the executor.
        await self.hass.async_add_executor_job(self._start_call_monitor)
        # Make sure the monitor thread is torn down on shutdown.
        self.async_on_remove(
            self.hass.bus.async_listen_once(
                EVENT_HOMEASSISTANT_STOP, self._stop_call_monitor
            )
        )
    async def async_will_remove_from_hass(self) -> None:
        """Disconnect from FRITZ!Box by stopping monitor."""
        await super().async_will_remove_from_hass()
        # Stopping touches the monitor's socket/thread, so keep it off the event loop.
        await self.hass.async_add_executor_job(self._stop_call_monitor)
def _start_call_monitor(self) -> None:
"""Check connection and start callmonitor thread."""
_LOGGER.debug("Starting monitor for: %s", self.entity_id)
self._monitor = FritzBoxCallMonitor(
host=self._host,
port=self._port,
sensor=self,
)
self._monitor.connect()
def _stop_call_monitor(self, event: Event | None = None) -> None:
"""Stop callmonitor thread."""
if (
self._monitor
and self._monitor.stopped
and not self._monitor.stopped.is_set()
and self._monitor.connection
and self._monitor.connection.is_alive
):
self._monitor.stopped.set()
self._monitor.connection.stop()
_LOGGER.debug("Stopped monitor for: %s", self.entity_id)
def set_state(self, state: CallState) -> None:
"""Set the state."""
self._state = state
def set_attributes(self, attributes):
"""Set the state attributes."""
self._attributes = attributes
@property
def name(self):
"""Return name of this sensor."""
return self._name
@property
def should_poll(self):
"""Only poll to update phonebook, if defined."""
return self._fritzbox_phonebook is not None
@property
def native_value(self):
"""Return the state of the device."""
return self._state
@property
def icon(self):
"""Return the icon of the sensor."""
return ICON_PHONE
@property
def extra_state_attributes(self):
"""Return the state attributes."""
if self._prefixes:
self._attributes[ATTR_PREFIXES] = self._prefixes
return self._attributes
@property
def device_info(self) -> DeviceInfo:
"""Return device specific attributes."""
return DeviceInfo(
identifiers={(DOMAIN, self._unique_id)},
manufacturer=MANUFACTURER,
model=self._fritzbox_phonebook.fph.modelname,
name=self._fritzbox_phonebook.fph.modelname,
sw_version=self._fritzbox_phonebook.fph.fc.system_version,
)
@property
def unique_id(self):
"""Return the unique ID of the device."""
return self._unique_id
def number_to_name(self, number):
"""Return a name for a given phone number."""
if self._fritzbox_phonebook is None:
return UNKNOWN_NAME
return self._fritzbox_phonebook.get_name(number)
def update(self):
"""Update the phonebook if it is defined."""
if self._fritzbox_phonebook is not None:
self._fritzbox_phonebook.update_phonebook()
class FritzBoxCallMonitor:
    """Event listener to monitor calls on the Fritz!Box."""

    def __init__(self, host, port, sensor):
        """Initialize Fritz!Box monitor instance."""
        self.host = host
        self.port = port
        # FritzMonitor instance; None until connect() succeeds (or after failure).
        self.connection = None
        # Signals the event-processing loop in _process_events to exit.
        self.stopped = ThreadingEvent()
        self._sensor = sensor

    def connect(self):
        """Connect to the Fritz!Box."""
        _LOGGER.debug("Setting up socket connection")
        try:
            self.connection = FritzMonitor(address=self.host, port=self.port)
            kwargs = {"event_queue": self.connection.start()}
            # Consume events on a separate thread so the caller isn't blocked.
            Thread(target=self._process_events, kwargs=kwargs).start()
        except OSError as err:
            self.connection = None
            _LOGGER.error(
                "Cannot connect to %s on port %s: %s", self.host, self.port, err
            )

    def _process_events(self, event_queue):
        """Listen to incoming or outgoing calls."""
        _LOGGER.debug("Connection established, waiting for events")
        while not self.stopped.is_set():
            try:
                event = event_queue.get(timeout=10)
            except queue.Empty:
                # No event within the timeout; detect a dead socket.
                # NOTE(review): `is_alive` is read without parentheses —
                # presumably a property on FritzMonitor; confirm.
                if not self.connection.is_alive and not self.stopped.is_set():
                    _LOGGER.error("Connection has abruptly ended")
                _LOGGER.debug("Empty event queue")
                continue
            else:
                _LOGGER.debug("Received event: %s", event)
                self._parse(event)
                sleep(1)

    def _parse(self, line):
        """Parse the call information and set the sensor states."""
        # Event lines are semicolon-separated; field positions differ per
        # event type (see the indices used in each branch below).
        line = line.split(";")
        df_in = "%d.%m.%y %H:%M:%S"  # timestamp format sent by the box
        df_out = "%Y-%m-%dT%H:%M:%S"  # ISO-like format stored in attributes
        isotime = datetime.strptime(line[0], df_in).strftime(df_out)
        if line[1] == FRITZ_STATE_RING:
            self._sensor.set_state(CallState.RINGING)
            att = {
                "type": "incoming",
                "from": line[3],
                "to": line[4],
                "device": line[5],
                "initiated": isotime,
                "from_name": self._sensor.number_to_name(line[3]),
            }
            self._sensor.set_attributes(att)
        elif line[1] == FRITZ_STATE_CALL:
            self._sensor.set_state(CallState.DIALING)
            att = {
                "type": "outgoing",
                "from": line[4],
                "to": line[5],
                "device": line[6],
                "initiated": isotime,
                "to_name": self._sensor.number_to_name(line[5]),
            }
            self._sensor.set_attributes(att)
        elif line[1] == FRITZ_STATE_CONNECT:
            self._sensor.set_state(CallState.TALKING)
            att = {
                "with": line[4],
                "device": line[3],
                "accepted": isotime,
                "with_name": self._sensor.number_to_name(line[4]),
            }
            self._sensor.set_attributes(att)
        elif line[1] == FRITZ_STATE_DISCONNECT:
            self._sensor.set_state(CallState.IDLE)
            att = {"duration": line[3], "closed": isotime}
            self._sensor.set_attributes(att)
        # Notify Home Assistant of the new state/attributes.
        self._sensor.schedule_update_ha_state()
|
presubmit_support.py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Enables directory-specific presubmit checks to run at upload and/or commit.
"""
from __future__ import print_function
__version__ = '2.0.0'
# TODO(joi) Add caching where appropriate/needed. The API is designed to allow
# caching (between all different invocations of presubmit scripts for a given
# change). We should add it as our presubmit scripts start feeling slow.
import argparse
import ast # Exposed through the API.
import contextlib
import cpplint
import fnmatch # Exposed through the API.
import glob
import inspect
import itertools
import json # Exposed through the API.
import logging
import multiprocessing
import os # Somewhat exposed through the API.
import random
import re # Exposed through the API.
import signal
import sys # Parts exposed through API.
import tempfile # Exposed through the API.
import threading
import time
import traceback
import unittest # Exposed through the API.
from warnings import warn
# Local imports.
import fix_encoding
import gclient_paths # Exposed through the API
import gclient_utils
import git_footers
import gerrit_util
import owners as owners_db
import owners_client
import owners_finder
import presubmit_canned_checks
import rdb_wrapper
import scm
import subprocess2 as subprocess # Exposed through the API.
if sys.version_info.major == 2:
# TODO(1009814): Expose urllib2 only through urllib_request and urllib_error
import urllib2 # Exposed through the API.
import urlparse
import urllib2 as urllib_request
import urllib2 as urllib_error
else:
import urllib.parse as urlparse
import urllib.request as urllib_request
import urllib.error as urllib_error
# Ask for feedback only once in program lifetime.
_ASKED_FOR_FEEDBACK = False
def time_time():
    """Return the current time in seconds since the epoch.

    Thin wrapper around time.time() so tests can replace this single function
    instead of interfering with the Python system machinery itself.
    """
    return time.time()
class PresubmitFailure(Exception):
    """Raised when a presubmit check cannot run or hits a hard error."""
class CommandData(object):
    """Describes one external command for the presubmit ThreadPool to run.

    The caller-supplied stdin payload is remembered separately, while the
    subprocess keyword arguments are rewritten so all three standard streams
    are piped (stderr merged into stdout).
    """

    def __init__(self, name, cmd, kwargs, message, python3=False):
        self.name = name
        self.cmd = cmd
        # What will be fed to the process; the 'stdin' kwarg itself is
        # replaced by a pipe below.
        self.stdin = kwargs.get('stdin', None)
        piped_kwargs = dict(kwargs)
        piped_kwargs.update(
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            stdin=subprocess.PIPE,
        )
        self.kwargs = piped_kwargs
        self.message = message
        # Optional "informational" result class; set by InputApi.RunTests
        # when running verbosely.
        self.info = None
        self.python3 = python3
# Adapted from
# https://github.com/google/gtest-parallel/blob/master/gtest_parallel.py#L37
#
# An object that catches SIGINT sent to the Python process and notices
# if processes passed to wait() die by SIGINT (we need to look for
# both of those cases, because pressing Ctrl+C can result in either
# the main process or one of the subprocesses getting the signal).
#
# Before a SIGINT is seen, wait(p) will simply call p.wait() and
# return the result. Once a SIGINT has been seen (in the main process
# or a subprocess, including the one the current call is waiting for),
# wait(p) will call p.terminate().
class SigintHandler(object):
    # Return codes that indicate a process was killed by SIGINT: Unix reports
    # the negated signal number; Windows uses STATUS_CONTROL_C_EXIT
    # (0xC000013A as a signed 32-bit int).
    sigint_returncodes = {-signal.SIGINT,  # Unix
                          -1073741510,  # Windows
                          }

    def __init__(self):
        # Guards __processes and __got_sigint, which are touched both from the
        # signal handler and from threads calling wait().
        self.__lock = threading.Lock()
        self.__processes = set()
        self.__got_sigint = False
        # Install ourselves as the SIGINT handler, remembering the previous
        # handler so it can be chained from interrupt().
        self.__previous_signal = signal.signal(signal.SIGINT, self.interrupt)

    def __on_sigint(self):
        # Must be called with __lock held: records the SIGINT and terminates
        # every registered child process.
        self.__got_sigint = True
        while self.__processes:
            try:
                self.__processes.pop().terminate()
            except OSError:
                # The process may already be gone; nothing to do.
                pass

    def interrupt(self, signal_num, frame):
        with self.__lock:
            self.__on_sigint()
        # Chain to the handler that was installed before us.
        self.__previous_signal(signal_num, frame)

    def got_sigint(self):
        with self.__lock:
            return self.__got_sigint

    def wait(self, p, stdin):
        # Register the process so a later SIGINT can terminate it; if SIGINT
        # already happened, terminate immediately.
        with self.__lock:
            if self.__got_sigint:
                p.terminate()
            self.__processes.add(p)
        stdout, stderr = p.communicate(stdin)
        code = p.returncode
        with self.__lock:
            self.__processes.discard(p)
            # A child dying from Ctrl+C counts the same as a SIGINT to us.
            if code in self.sigint_returncodes:
                self.__on_sigint()
        return stdout, stderr


# Module-level singleton; constructing it installs the SIGINT handler.
sigint_handler = SigintHandler()
class Timer(object):
    """Context manager that fires a callback if its body runs too long.

    A falsy timeout disables the timer entirely. `completed` becomes True
    once the callback has fired, i.e. the timeout elapsed before __exit__.
    """

    def __init__(self, timeout, fn):
        self.completed = False
        self._fn = fn
        if timeout:
            self._timer = threading.Timer(timeout, self._onTimer)
        else:
            self._timer = None

    def __enter__(self):
        if self._timer is not None:
            self._timer.start()
        return self

    def __exit__(self, _type, _value, _traceback):
        if self._timer is not None:
            self._timer.cancel()

    def _onTimer(self):
        self._fn()
        self.completed = True
class ThreadPool(object):
    """Runs CommandData tests, optionally across daemon worker threads."""

    def __init__(self, pool_size=None, timeout=None):
        # Per-test timeout in seconds; None means never time out.
        self.timeout = timeout
        self._pool_size = pool_size or multiprocessing.cpu_count()
        # Result messages collected from tests; guarded by _messages_lock.
        self._messages = []
        self._messages_lock = threading.Lock()
        # Tests to be run in parallel; guarded by _tests_lock.
        self._tests = []
        self._tests_lock = threading.Lock()
        # Tests that must run serially, before the parallel ones.
        self._nonparallel_tests = []

    def _GetCommand(self, test):
        # Rewrite 'python ...' and '*.py ...' invocations to use vpython
        # (vpython3 for python3 tests, '.bat' wrapper on Windows).
        vpython = 'vpython'
        if test.python3:
            vpython += '3'
        if sys.platform == 'win32':
            vpython += '.bat'

        cmd = test.cmd
        if cmd[0] == 'python':
            cmd = list(cmd)
            cmd[0] = vpython
        elif cmd[0].endswith('.py'):
            cmd = [vpython] + cmd

        # On Windows, scripts on the current directory take precedence over PATH, so
        # that when testing depot_tools on Windows, calling `vpython.bat` will
        # execute the copy of vpython of the depot_tools under test instead of the
        # one in the bot.
        # As a workaround, we run the tests from the parent directory instead.
        if (cmd[0] == vpython and
            'cwd' in test.kwargs and
            os.path.basename(test.kwargs['cwd']) == 'depot_tools'):
            test.kwargs['cwd'] = os.path.dirname(test.kwargs['cwd'])
            cmd[1] = os.path.join('depot_tools', cmd[1])

        return cmd

    def _RunWithTimeout(self, cmd, stdin, kwargs):
        # Run the command; if the Timer fires first, the process is terminated
        # and the output is prefixed with a timeout notice.
        p = subprocess.Popen(cmd, **kwargs)
        with Timer(self.timeout, p.terminate) as timer:
            stdout, _ = sigint_handler.wait(p, stdin)
            if timer.completed:
                stdout = 'Process timed out after %ss\n%s' % (self.timeout, stdout)
            return p.returncode, stdout

    def CallCommand(self, test):
        """Runs an external program.

        This function converts invocation of .py files and invocations of 'python'
        to vpython invocations.
        """
        cmd = self._GetCommand(test)
        try:
            start = time_time()
            returncode, stdout = self._RunWithTimeout(cmd, test.stdin, test.kwargs)
            duration = time_time() - start
        except Exception:
            duration = time_time() - start
            # Any failure to even execute the command is reported via the
            # test's message class (usually a fatal result).
            return test.message(
                '%s\n%s exec failure (%4.2fs)\n%s' % (
                    test.name, ' '.join(cmd), duration, traceback.format_exc()))

        if returncode != 0:
            return test.message(
                '%s\n%s (%4.2fs) failed\n%s' % (
                    test.name, ' '.join(cmd), duration, stdout))

        # On success, only report anything if the test carries an info class
        # (set when running verbosely). Returns None otherwise.
        if test.info:
            return test.info('%s\n%s (%4.2fs)' % (test.name, ' '.join(cmd), duration))

    def AddTests(self, tests, parallel=True):
        if parallel:
            self._tests.extend(tests)
        else:
            self._nonparallel_tests.extend(tests)

    def RunAsync(self):
        self._messages = []

        def _WorkerFn():
            # Each worker pops tests until the shared queue is empty.
            while True:
                test = None
                with self._tests_lock:
                    if not self._tests:
                        break
                    test = self._tests.pop()
                result = self.CallCommand(test)
                if result:
                    with self._messages_lock:
                        self._messages.append(result)

        def _StartDaemon():
            t = threading.Thread(target=_WorkerFn)
            t.daemon = True
            t.start()
            return t

        # Serial tests run first, on the calling thread.
        while self._nonparallel_tests:
            test = self._nonparallel_tests.pop()
            result = self.CallCommand(test)
            if result:
                self._messages.append(result)

        if self._tests:
            threads = [_StartDaemon() for _ in range(self._pool_size)]
            for worker in threads:
                worker.join()

        return self._messages
def normpath(path):
    """os.path.normpath variant that first maps the OS separator to '/'.

    This is safe to do unconditionally: on Windows os.path.normpath turns
    the forward slashes straight back into backslashes.
    """
    return os.path.normpath(path.replace(os.sep, '/'))
def _RightHandSideLinesImpl(affected_files):
    """Implements RightHandSideLines for InputApi and GclChange."""
    for affected_file in affected_files:
        for changed in affected_file.ChangedContents():
            # ChangedContents() entries carry (line number, line text) in
            # their first two positions.
            yield (affected_file, changed[0], changed[1])
def prompt_should_continue(prompt_string):
    """Show *prompt_string* and read one line; True iff the user agreed.

    Only 'y' or 'yes' (any case, surrounding whitespace ignored) count as
    agreement.
    """
    sys.stdout.write(prompt_string)
    answer = sys.stdin.readline()
    return answer.strip().lower() in ('y', 'yes')
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitResult(object):
    """Base class for result objects."""
    fatal = False
    should_prompt = False

    def __init__(self, message, items=None, long_text=''):
        """
        message: A short one-line message to indicate errors.
        items: A list of short strings to indicate where errors occurred.
        long_text: multi-line text output, e.g. from another tool
        """
        self._message = message
        self._items = items or []
        self._long_text = long_text.rstrip()

    def handle(self):
        out = sys.stdout
        out.write(self._message)
        out.write('\n')
        last_index = len(self._items) - 1
        for index, item in enumerate(self._items):
            out.write(' ')
            # Write separately in case it's unicode.
            out.write(str(item))
            if index < last_index:
                out.write(' \\')
            out.write('\n')
        if self._long_text:
            out.write('\n***************\n')
            # Write separately in case it's unicode.
            out.write(self._long_text)
            out.write('\n***************\n')

    def json_format(self):
        return {
            'message': self._message,
            'items': [str(item) for item in self._items],
            'long_text': self._long_text,
            'fatal': self.fatal
        }
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitError(_PresubmitResult):
    """A hard presubmit error."""
    # Fatal results abort the presubmit run.
    fatal = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitPromptWarning(_PresubmitResult):
    """A warning that prompts the user if they want to continue."""
    # Non-fatal, but the user is asked before proceeding.
    should_prompt = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitNotifyResult(_PresubmitResult):
    """Just print something to the screen -- but it's not even a warning."""
    # Neither fatal nor prompting; purely informational output.
    pass
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _MailTextResult(_PresubmitResult):
    """A warning that should be included in the review request email.

    This result type is not supported: instantiating it always raises
    NotImplementedError.
    """
    def __init__(self, *args, **kwargs):
        # Forward the arguments so the base constructor (which requires a
        # 'message' argument) doesn't fail with a TypeError before the
        # intended NotImplementedError below is raised.
        super(_MailTextResult, self).__init__(*args, **kwargs)
        raise NotImplementedError()
class GerritAccessor(object):
    """Limited Gerrit functionality for canned presubmit checks to work.

    To avoid excessive Gerrit calls, caches the results.
    """

    def __init__(self, url=None, project=None, branch=None):
        if url:
            self.host = urlparse.urlparse(url).netloc
        else:
            self.host = None
        self.project = project
        self.branch = branch
        # Maps int(issue) -> change-detail dict fetched from Gerrit.
        self.cache = {}
        self.code_owners_enabled = None

    def _FetchChangeDetail(self, issue):
        # Separate function to be easily mocked in tests.
        try:
            return gerrit_util.GetChangeDetail(
                self.host, str(issue),
                ['ALL_REVISIONS', 'DETAILED_LABELS', 'ALL_COMMITS'])
        except gerrit_util.GerritError as e:
            if e.http_status == 404:
                raise Exception('Either Gerrit issue %s doesn\'t exist, or '
                                'no credentials to fetch issue details' % issue)
            raise

    def GetChangeInfo(self, issue):
        """Returns labels and all revisions (patchsets) for this issue.

        The result is a dictionary according to the Gerrit REST Api.
        https://gerrit-review.googlesource.com/Documentation/rest-api.html

        However, the API isn't very clear what's inside, so see tests for
        an example.
        """
        assert issue
        cache_key = int(issue)
        if cache_key not in self.cache:
            self.cache[cache_key] = self._FetchChangeDetail(issue)
        return self.cache[cache_key]

    def GetChangeDescription(self, issue, patchset=None):
        """If patchset is none, fetches current patchset."""
        info = self.GetChangeInfo(issue)
        if patchset is None:
            rev_info = info['revisions'][info['current_revision']]
        else:
            # Find the revision info for the patchset we want.
            for rev_info in info['revisions'].values():
                if str(rev_info['_number']) == str(patchset):
                    break
            else:
                raise Exception('patchset %s doesn\'t exist in issue %s' % (
                    patchset, issue))
        return rev_info['commit']['message']

    def GetDestRef(self, issue):
        ref = self.GetChangeInfo(issue)['branch']
        if ref.startswith('refs/'):
            return ref
        # NOTE: it is possible to create 'refs/x' branch,
        # aka 'refs/heads/refs/x'. However, this is ill-advised.
        return 'refs/heads/%s' % ref

    def _GetApproversForLabel(self, issue, label):
        # Return every vote on `label` that carries the label's maximum value.
        label_info = self.GetChangeInfo(issue).get('labels', {}).get(label, {})
        values = label_info.get('values', {}).keys()
        if not values:
            return []
        max_value = max(int(v) for v in values)
        return [vote for vote in label_info.get('all', [])
                if vote.get('value', 0) == max_value]

    def IsBotCommitApproved(self, issue):
        return bool(self._GetApproversForLabel(issue, 'Bot-Commit'))

    def IsOwnersOverrideApproved(self, issue):
        return bool(self._GetApproversForLabel(issue, 'Owners-Override'))

    def GetChangeOwner(self, issue):
        return self.GetChangeInfo(issue)['owner']['email']

    def GetChangeReviewers(self, issue, approving_only=True):
        if approving_only:
            reviewers = self._GetApproversForLabel(issue, 'Code-Review')
        else:
            reviewers = self.GetChangeInfo(issue).get('reviewers', {}).get(
                'REVIEWER', [])
        return [r.get('email') for r in reviewers]

    def UpdateDescription(self, description, issue):
        gerrit_util.SetCommitMessage(self.host, issue, description, notify='NONE')

    def IsCodeOwnersEnabledOnRepo(self):
        # Lazily computed and cached; the Gerrit host is only asked once.
        if self.code_owners_enabled is None:
            self.code_owners_enabled = gerrit_util.IsCodeOwnersEnabledOnRepo(
                self.host, self.project)
        return self.code_owners_enabled
class OutputApi(object):
    """An instance of OutputApi gets passed to presubmit scripts so that they
    can output various types of results.
    """
    # Aliases so presubmit scripts can write e.g. output_api.PresubmitError
    # without importing the module-private result classes.
    PresubmitResult = _PresubmitResult
    PresubmitError = _PresubmitError
    PresubmitPromptWarning = _PresubmitPromptWarning
    PresubmitNotifyResult = _PresubmitNotifyResult
    MailTextResult = _MailTextResult

    def __init__(self, is_committing):
        self.is_committing = is_committing
        # Extra addresses to CC on the change; presubmits append to this.
        self.more_cc = []

    def AppendCC(self, cc):
        """Appends a user to cc for this change."""
        self.more_cc.append(cc)

    def PresubmitPromptOrNotify(self, *args, **kwargs):
        """Warn the user when uploading, but only notify if committing."""
        if self.is_committing:
            return self.PresubmitNotifyResult(*args, **kwargs)
        return self.PresubmitPromptWarning(*args, **kwargs)
class InputApi(object):
    """An instance of this object is passed to presubmit scripts so they can
    know stuff about the change they're looking at.
    """
    # Method could be a function
    # pylint: disable=no-self-use

    # File extensions that are considered source files from a style guide
    # perspective. Don't modify this list from a presubmit script!
    #
    # Files without an extension aren't included in the list. If you want to
    # filter them as source files, add r'(^|.*?[\\\/])[^.]+$' to the allow list.
    # Note that ALL CAPS files are skipped in DEFAULT_FILES_TO_SKIP below.
    DEFAULT_FILES_TO_CHECK = (
        # C++ and friends
        r'.+\.c$', r'.+\.cc$', r'.+\.cpp$', r'.+\.h$', r'.+\.m$', r'.+\.mm$',
        r'.+\.inl$', r'.+\.asm$', r'.+\.hxx$', r'.+\.hpp$', r'.+\.s$', r'.+\.S$',
        # Scripts
        r'.+\.js$', r'.+\.py$', r'.+\.sh$', r'.+\.rb$', r'.+\.pl$', r'.+\.pm$',
        # Other
        r'.+\.java$', r'.+\.mk$', r'.+\.am$', r'.+\.css$', r'.+\.mojom$',
        r'.+\.fidl$'
    )

    # Path regexp that should be excluded from being considered containing source
    # files. Don't modify this list from a presubmit script!
    DEFAULT_FILES_TO_SKIP = (
        r'testing_support[\\\/]google_appengine[\\\/].*',
        r'.*\bexperimental[\\\/].*',
        # Exclude third_party/.* but NOT third_party/{WebKit,blink}
        # (crbug.com/539768 and crbug.com/836555).
        r'.*\bthird_party[\\\/](?!(WebKit|blink)[\\\/]).*',
        # Output directories (just in case)
        r'.*\bDebug[\\\/].*',
        r'.*\bRelease[\\\/].*',
        r'.*\bxcodebuild[\\\/].*',
        r'.*\bout[\\\/].*',
        # All caps files like README and LICENCE.
        r'.*\b[A-Z0-9_]{2,}$',
        # SCM (can happen in dual SCM configuration). (Slightly over aggressive)
        r'(|.*[\\\/])\.git[\\\/].*',
        r'(|.*[\\\/])\.svn[\\\/].*',
        # There is no point in processing a patch file.
        r'.+\.diff$',
        r'.+\.patch$',
    )

    # Deprecated aliases for the check/skip lists above, kept so older
    # presubmit scripts keep working.
    # TODO(https://crbug.com/1098562): Remove once no longer used
    @property
    def DEFAULT_WHITE_LIST(self):
        return self.DEFAULT_FILES_TO_CHECK

    # TODO(https://crbug.com/1098562): Remove once no longer used
    @DEFAULT_WHITE_LIST.setter
    def DEFAULT_WHITE_LIST(self, value):
        self.DEFAULT_FILES_TO_CHECK = value

    # TODO(https://crbug.com/1098562): Remove once no longer used
    @property
    def DEFAULT_ALLOW_LIST(self):
        return self.DEFAULT_FILES_TO_CHECK

    # TODO(https://crbug.com/1098562): Remove once no longer used
    @DEFAULT_ALLOW_LIST.setter
    def DEFAULT_ALLOW_LIST(self, value):
        self.DEFAULT_FILES_TO_CHECK = value

    # TODO(https://crbug.com/1098562): Remove once no longer used
    @property
    def DEFAULT_BLACK_LIST(self):
        return self.DEFAULT_FILES_TO_SKIP

    # TODO(https://crbug.com/1098562): Remove once no longer used
    @DEFAULT_BLACK_LIST.setter
    def DEFAULT_BLACK_LIST(self, value):
        self.DEFAULT_FILES_TO_SKIP = value

    # TODO(https://crbug.com/1098562): Remove once no longer used
    @property
    def DEFAULT_BLOCK_LIST(self):
        return self.DEFAULT_FILES_TO_SKIP

    # TODO(https://crbug.com/1098562): Remove once no longer used
    @DEFAULT_BLOCK_LIST.setter
    def DEFAULT_BLOCK_LIST(self, value):
        self.DEFAULT_FILES_TO_SKIP = value

    def __init__(self, change, presubmit_path, is_committing,
                 verbose, gerrit_obj, dry_run=None, thread_pool=None,
                 parallel=False):
        """Builds an InputApi object.

        Args:
          change: A presubmit.Change object.
          presubmit_path: The path to the presubmit script being processed.
          is_committing: True if the change is about to be committed.
          gerrit_obj: provides basic Gerrit codereview functionality.
          dry_run: if true, some Checks will be skipped.
          parallel: if true, all tests reported via input_api.RunTests for all
                    PRESUBMIT files will be run in parallel.
        """
        # Version number of the presubmit_support script.
        self.version = [int(x) for x in __version__.split('.')]
        self.change = change
        self.is_committing = is_committing
        self.gerrit = gerrit_obj
        self.dry_run = dry_run

        self.parallel = parallel
        self.thread_pool = thread_pool or ThreadPool()

        # We expose various modules and functions as attributes of the input_api
        # so that presubmit scripts don't have to import them.
        self.ast = ast
        self.basename = os.path.basename
        self.cpplint = cpplint
        self.fnmatch = fnmatch
        self.gclient_paths = gclient_paths
        # TODO(yyanagisawa): stop exposing this when python3 become default.
        # Since python3's tempfile has TemporaryDirectory, we do not need this.
        self.temporary_directory = gclient_utils.temporary_directory
        self.glob = glob.glob
        self.json = json
        self.logging = logging.getLogger('PRESUBMIT')
        self.os_listdir = os.listdir
        self.os_path = os.path
        self.os_stat = os.stat
        self.os_walk = os.walk
        self.re = re
        self.subprocess = subprocess
        self.sys = sys
        self.tempfile = tempfile
        self.time = time
        self.unittest = unittest
        # urllib2 only exists on Python 2; the request/error aliases work on
        # both major versions (see the conditional imports at the file top).
        if sys.version_info.major == 2:
            self.urllib2 = urllib2
        self.urllib_request = urllib_request
        self.urllib_error = urllib_error

        self.is_windows = sys.platform == 'win32'

        # Set python_executable to 'vpython' in order to allow scripts in other
        # repos (e.g. src.git) to automatically pick up that repo's .vpython file,
        # instead of inheriting the one in depot_tools.
        self.python_executable = 'vpython'
        self.environ = os.environ

        # InputApi.platform is the platform you're currently running on.
        self.platform = sys.platform

        self.cpu_count = multiprocessing.cpu_count()

        # The local path of the currently-being-processed presubmit script.
        self._current_presubmit_path = os.path.dirname(presubmit_path)

        # We carry the canned checks so presubmit scripts can easily use them.
        self.canned_checks = presubmit_canned_checks

        # Temporary files we must manually remove at the end of a run.
        self._named_temporary_files = []

        # Code-owners support is only available when a Gerrit connection is.
        self.owners_client = None
        if self.gerrit:
            self.owners_client = owners_client.GetCodeOwnersClient(
                root=change.RepositoryRoot(),
                upstream=change.UpstreamBranch(),
                host=self.gerrit.host,
                project=self.gerrit.project,
                branch=self.gerrit.branch)
        self.owners_db = owners_db.Database(
            change.RepositoryRoot(), fopen=open, os_path=self.os_path)
        self.owners_finder = owners_finder.OwnersFinder
        self.verbose = verbose
        self.Command = CommandData

        # Replace <hash_map> and <hash_set> as headers that need to be included
        # with 'base/containers/hash_tables.h' instead.
        # Access to a protected member _XX of a client class
        # pylint: disable=protected-access
        self.cpplint._re_pattern_templates = [
            (a, b, 'base/containers/hash_tables.h')
            if header in ('<hash_map>', '<hash_set>') else (a, b, header)
            for (a, b, header) in cpplint._re_pattern_templates
        ]

    def SetTimeout(self, timeout):
        # Per-test timeout (seconds) forwarded to the shared ThreadPool.
        self.thread_pool.timeout = timeout

    def PresubmitLocalPath(self):
        """Returns the local path of the presubmit script currently being run.

        This is useful if you don't want to hard-code absolute paths in the
        presubmit script. For example, It can be used to find another file
        relative to the PRESUBMIT.py script, so the whole tree can be branched and
        the presubmit script still works, without editing its content.
        """
        return self._current_presubmit_path

    def AffectedFiles(self, include_deletes=True, file_filter=None):
        """Same as input_api.change.AffectedFiles() except only lists files
        (and optionally directories) in the same directory as the current presubmit
        script, or subdirectories thereof. Note that files are listed using the OS
        path separator, so backslashes are used as separators on Windows.
        """
        # The trailing separator ensures the prefix test below matches whole
        # directory names only.
        dir_with_slash = normpath('%s/' % self.PresubmitLocalPath())
        # A single-character result means the presubmit script sits at the
        # root; keep every affected file in that case.
        if len(dir_with_slash) == 1:
            dir_with_slash = ''

        return list(filter(
            lambda x: normpath(x.AbsoluteLocalPath()).startswith(dir_with_slash),
            self.change.AffectedFiles(include_deletes, file_filter)))

    def LocalPaths(self):
        """Returns local paths of input_api.AffectedFiles()."""
        paths = [af.LocalPath() for af in self.AffectedFiles()]
        logging.debug('LocalPaths: %s', paths)
        return paths

    def AbsoluteLocalPaths(self):
        """Returns absolute local paths of input_api.AffectedFiles()."""
        return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]

    def AffectedTestableFiles(self, include_deletes=None, **kwargs):
        """Same as input_api.change.AffectedTestableFiles() except only lists files
        in the same directory as the current presubmit script, or subdirectories
        thereof.
        """
        # include_deletes is ignored: deleted files are never testable.
        if include_deletes is not None:
            warn('AffectedTestableFiles(include_deletes=%s)'
                 ' is deprecated and ignored' % str(include_deletes),
                 category=DeprecationWarning,
                 stacklevel=2)
        return list(filter(
            lambda x: x.IsTestableFile(),
            self.AffectedFiles(include_deletes=False, **kwargs)))

    def AffectedTextFiles(self, include_deletes=None):
        """An alias to AffectedTestableFiles for backwards compatibility."""
        return self.AffectedTestableFiles(include_deletes=include_deletes)

    def FilterSourceFile(self,
                         affected_file,
                         files_to_check=None,
                         files_to_skip=None,
                         allow_list=None,
                         block_list=None):
        """Filters out files that aren't considered 'source file'.

        If files_to_check or files_to_skip is None, InputApi.DEFAULT_FILES_TO_CHECK
        and InputApi.DEFAULT_FILES_TO_SKIP is used respectively.

        The lists will be compiled as regular expression and
        AffectedFile.LocalPath() needs to pass both list.

        Note: Copy-paste this function to suit your needs or use a lambda function.
        """
        if files_to_check is None:
            files_to_check = self.DEFAULT_FILES_TO_CHECK
        if files_to_skip is None:
            files_to_skip = self.DEFAULT_FILES_TO_SKIP

        def Find(affected_file, items):
            # True if the file's local path matches any regex in `items`.
            local_path = affected_file.LocalPath()
            for item in items:
                if self.re.match(item, local_path):
                    return True
            return False
        return (Find(affected_file, files_to_check) and
                not Find(affected_file, files_to_skip))

    def AffectedSourceFiles(self, source_file):
        """Filter the list of AffectedTestableFiles by the function source_file.

        If source_file is None, InputApi.FilterSourceFile() is used.
        """
        if not source_file:
            source_file = self.FilterSourceFile
        return list(filter(source_file, self.AffectedTestableFiles()))

    def RightHandSideLines(self, source_file_filter=None):
        """An iterator over all text lines in 'new' version of changed files.

        Only lists lines from new or modified text files in the change that are
        contained by the directory of the currently executing presubmit script.

        This is useful for doing line-by-line regex checks, like checking for
        trailing whitespace.

        Yields:
          a 3 tuple:
            the AffectedFile instance of the current file;
            integer line number (1-based); and
            the contents of the line as a string.

        Note: The carriage return (LF or CR) is stripped off.
        """
        files = self.AffectedSourceFiles(source_file_filter)
        return _RightHandSideLinesImpl(files)

    def ReadFile(self, file_item, mode='r'):
        """Reads an arbitrary file.

        Deny reading anything outside the repository.
        """
        if isinstance(file_item, AffectedFile):
            file_item = file_item.AbsoluteLocalPath()
        if not file_item.startswith(self.change.RepositoryRoot()):
            raise IOError('Access outside the repository root is denied.')
        return gclient_utils.FileRead(file_item, mode)

    def CreateTemporaryFile(self, **kwargs):
        """Returns a named temporary file that must be removed with a call to
        RemoveTemporaryFiles().

        All keyword arguments are forwarded to tempfile.NamedTemporaryFile(),
        except for |delete|, which is always set to False.

        Presubmit checks that need to create a temporary file and pass it for
        reading should use this function instead of NamedTemporaryFile(), as
        Windows fails to open a file that is already open for writing.

          with input_api.CreateTemporaryFile() as f:
            f.write('xyz')
            f.close()
            input_api.subprocess.check_output(['script-that', '--reads-from',
                                               f.name])

        Note that callers of CreateTemporaryFile() should not worry about removing
        any temporary file; this is done transparently by the presubmit handling
        code.
        """
        if 'delete' in kwargs:
            # Prevent users from passing |delete|; we take care of file deletion
            # ourselves and this prevents unintuitive error messages when we pass
            # delete=False and 'delete' is also in kwargs.
            raise TypeError('CreateTemporaryFile() does not take a "delete" '
                            'argument, file deletion is handled automatically by '
                            'the same presubmit_support code that creates InputApi '
                            'objects.')
        temp_file = self.tempfile.NamedTemporaryFile(delete=False, **kwargs)
        # Remember the file so the presubmit driver can clean it up afterwards.
        self._named_temporary_files.append(temp_file.name)
        return temp_file

    @property
    def tbr(self):
        """Returns if a change is TBR'ed."""
        return 'TBR' in self.change.tags or self.change.TBRsFromDescription()

    def RunTests(self, tests_mix, parallel=True):
        # tests_mix may contain already-computed results alongside CommandData
        # tests; the results pass straight through to the output.
        tests = []
        msgs = []
        for t in tests_mix:
            if isinstance(t, OutputApi.PresubmitResult) and t:
                msgs.append(t)
            else:
                assert issubclass(t.message, _PresubmitResult)
                tests.append(t)
                if self.verbose:
                    t.info = _PresubmitNotifyResult
                if not t.kwargs.get('cwd'):
                    t.kwargs['cwd'] = self.PresubmitLocalPath()
        self.thread_pool.AddTests(tests, parallel)
        # When self.parallel is True (i.e. --parallel is passed as an option)
        # RunTests doesn't actually run tests. It adds them to a ThreadPool that
        # will run all tests once all PRESUBMIT files are processed.
        # Otherwise, it will run them and return the results.
        if not self.parallel:
            msgs.extend(self.thread_pool.RunAsync())
        return msgs
class _DiffCache(object):
    """Caches diffs retrieved from a particular SCM.

    Abstract interface; see _GitDiffCache for the git implementation.
    """

    def __init__(self, upstream=None):
        """Stores the upstream revision against which all diffs will be computed."""
        self._upstream = upstream

    def GetDiff(self, path, local_root):
        """Get the diff for a particular path."""
        raise NotImplementedError()

    def GetOldContents(self, path, local_root):
        """Get the old version for a particular path."""
        raise NotImplementedError()
class _GitDiffCache(_DiffCache):
    """DiffCache implementation for git; gets all file diffs at once."""

    def __init__(self, upstream):
        super(_GitDiffCache, self).__init__(upstream=upstream)
        # Lazily-built map of normalized path -> unified diff text.
        self._diffs_by_file = None

    def GetDiff(self, path, local_root):
        if not self._diffs_by_file:
            # Compute a single diff for all files and parse the output; should
            # with git this is much faster than computing one diff for each file.
            diffs = {}

            # Don't specify any filenames below, because there are command line length
            # limits on some platforms and GenerateDiff would fail.
            unified_diff = scm.GIT.GenerateDiff(local_root, files=[], full_move=True,
                                                branch=self._upstream)

            # This regex matches the path twice, separated by a space. Note that
            # filename itself may contain spaces.
            file_marker = re.compile('^diff --git (?P<filename>.*) (?P=filename)$')
            current_diff = []
            keep_line_endings = True
            for x in unified_diff.splitlines(keep_line_endings):
                match = file_marker.match(x)
                if match:
                    # Marks the start of a new per-file section.
                    diffs[match.group('filename')] = current_diff = [x]
                elif x.startswith('diff --git'):
                    # A 'diff --git' line that didn't match the marker regex
                    # (e.g. a rename) would silently corrupt the split.
                    raise PresubmitFailure('Unexpected diff line: %s' % x)
                else:
                    current_diff.append(x)

            self._diffs_by_file = dict(
                (normpath(path), ''.join(diff)) for path, diff in diffs.items())

        if path not in self._diffs_by_file:
            raise PresubmitFailure(
                'Unified diff did not contain entry for file %s' % path)

        return self._diffs_by_file[path]

    def GetOldContents(self, path, local_root):
        return scm.GIT.GetOldContents(local_root, path, branch=self._upstream)
class AffectedFile(object):
  """Representation of a file in a change."""

  DIFF_CACHE = _DiffCache

  # Method could be a function
  # pylint: disable=no-self-use

  def __init__(self, path, action, repository_root, diff_cache):
    self._path = path
    self._action = action
    self._local_root = repository_root
    self._is_directory = None
    self._cached_changed_contents = None
    self._cached_new_contents = None
    self._diff_cache = diff_cache
    logging.debug('%s(%s)', self.__class__.__name__, self._path)

  def LocalPath(self):
    """Path of this file on the local disk, relative to the client root.

    Intended for error messages rather than file access: presubmit checks run
    with CWD=PresubmitLocalPath(), which is often not the client root.
    """
    return normpath(self._path)

  def AbsoluteLocalPath(self):
    """Absolute path of this file on the local disk."""
    return os.path.abspath(os.path.join(self._local_root, self.LocalPath()))

  def Action(self):
    """The action on this opened file, e.g. A, M, D, etc."""
    return self._action

  def IsTestableFile(self):
    """True for existing text files; deleted files are never testable."""
    raise NotImplementedError()  # Implement when needed

  def IsTextFile(self):
    """An alias to IsTestableFile for backwards compatibility."""
    return self.IsTestableFile()

  def OldContents(self):
    """Lines of the old ('left hand side') version of the file.

    Empty if the file is a directory or does not exist.  Line endings (LF or
    CR) are stripped.
    """
    old_text = self._diff_cache.GetOldContents(
        self.LocalPath(), self._local_root)
    return old_text.splitlines()

  def NewContents(self):
    """Lines of the new ('right hand side') version of the file.

    Empty if the file is a directory or does not exist.  Line endings (LF or
    CR) are stripped.  Cached after the first read.
    """
    if self._cached_new_contents is None:
      self._cached_new_contents = []
      try:
        self._cached_new_contents = gclient_utils.FileRead(
            self.AbsoluteLocalPath(), 'rU').splitlines()
      except IOError:
        pass  # File not found?  That's fine; maybe it was deleted.
    return self._cached_new_contents[:]

  def ChangedContents(self):
    """List of (line number, line text) tuples for every added line.

    Relies on the scm diff output describing each changed code section with a
    line of the form

        ^@@ <old line num>,<old size> <new line num>,<new size> @@$
    """
    if self._cached_changed_contents is not None:
      return self._cached_changed_contents[:]
    self._cached_changed_contents = []

    hunk_header = re.compile(r'^@@ [0-9\,\+\-]+ \+([0-9]+)\,[0-9]+ @@')
    line_num = 0
    for diff_line in self.GenerateScmDiff().splitlines():
      header = hunk_header.match(diff_line)
      if header:
        # A new hunk; line numbering restarts at the '+' side's first line.
        line_num = int(header.groups(1)[0])
        continue
      if diff_line.startswith('+') and not diff_line.startswith('++'):
        self._cached_changed_contents.append((line_num, diff_line[1:]))
      if not diff_line.startswith('-'):
        line_num += 1
    return self._cached_changed_contents[:]

  def __str__(self):
    return self.LocalPath()

  def GenerateScmDiff(self):
    return self._diff_cache.GetDiff(self.LocalPath(), self._local_root)
class GitAffectedFile(AffectedFile):
  """Representation of a file in a change out of a git checkout."""
  # Method 'NNN' is abstract in class 'NNN' but is not overridden
  # pylint: disable=abstract-method
  DIFF_CACHE = _GitDiffCache

  def __init__(self, *args, **kwargs):
    AffectedFile.__init__(self, *args, **kwargs)
    self._server_path = None
    self._is_testable_file = None

  def IsTestableFile(self):
    if self._is_testable_file is None:
      # A deleted file is never testable; anything else must be a regular
      # file on disk.  The answer is cached for subsequent calls.
      self._is_testable_file = (
          self.Action() != 'D' and os.path.isfile(self.AbsoluteLocalPath()))
    return self._is_testable_file
class Change(object):
  """Describe a change.

  Used directly by the presubmit scripts to query the current change being
  tested.

  Instance members:
    tags: Dictionary of KEY=VALUE pairs found in the change description.
    self.KEY: equivalent to tags['KEY']
  """

  _AFFECTED_FILES = AffectedFile

  # Matches key/value (or 'tag') lines in changelist descriptions.
  TAG_LINE_RE = re.compile(
      '^[ \t]*(?P<key>[A-Z][A-Z_0-9]*)[ \t]*=[ \t]*(?P<value>.*?)[ \t]*$')
  scm = ''

  def __init__(
      self, name, description, local_root, files, issue, patchset, author,
      upstream=None):
    """
    Args:
      name: The change name.
      description: Full changelist description, including any tag lines.
      local_root: The repository (checkout) root; stored as an absolute path.
      files: A list of (action, path) pairs, or None for no files.
      issue: The codereview issue number.
      patchset: The codereview patchset number.
      author: The author's email address.
      upstream: Optional upstream branch/revision to diff against.
    """
    if files is None:
      files = []
    self._name = name
    # Convert root into an absolute path.
    self._local_root = os.path.abspath(local_root)
    self._upstream = upstream
    self.issue = issue
    self.patchset = patchset
    self.author_email = author

    self._full_description = ''
    self.tags = {}
    self._description_without_tags = ''
    self.SetDescriptionText(description)

    assert all(
        (isinstance(f, (list, tuple)) and len(f) == 2) for f in files), files

    # All affected files share one diff cache so the SCM is queried once.
    diff_cache = self._AFFECTED_FILES.DIFF_CACHE(self._upstream)
    self._affected_files = [
        self._AFFECTED_FILES(path, action.strip(), self._local_root, diff_cache)
        for action, path in files
    ]

  def UpstreamBranch(self):
    """Returns the upstream branch for the change."""
    return self._upstream

  def Name(self):
    """Returns the change name."""
    return self._name

  def DescriptionText(self):
    """Returns the user-entered changelist description, minus tags.

    Any line in the user-provided description starting with e.g. 'FOO='
    (whitespace permitted before and around) is considered a tag line.  Such
    lines are stripped out of the description this function returns.
    """
    return self._description_without_tags

  def FullDescriptionText(self):
    """Returns the complete changelist description including tags."""
    return self._full_description

  def SetDescriptionText(self, description):
    """Sets the full description text (including tags) to |description|.

    Also updates the list of tags."""
    self._full_description = description

    # From the description text, build up a dictionary of key/value pairs
    # plus the description minus all key/value or 'tag' lines.
    description_without_tags = []
    self.tags = {}
    for line in self._full_description.splitlines():
      m = self.TAG_LINE_RE.match(line)
      if m:
        self.tags[m.group('key')] = m.group('value')
      else:
        description_without_tags.append(line)

    # Change back to text and remove whitespace at end.
    self._description_without_tags = (
        '\n'.join(description_without_tags).rstrip())

  def AddDescriptionFooter(self, key, value):
    """Adds the given footer to the change description.

    Args:
      key: A string with the key for the git footer. It must conform to
        the git footers format (i.e. 'List-Of-Tokens') and will be case
        normalized so that each token is title-cased.
      value: A string with the value for the git footer.
    """
    description = git_footers.add_footer(
        self.FullDescriptionText(), git_footers.normalize_name(key), value)
    self.SetDescriptionText(description)

  def RepositoryRoot(self):
    """Returns the repository (checkout) root directory for this change,
    as an absolute path.
    """
    return self._local_root

  def __getattr__(self, attr):
    """Return tags directly as attributes on the object."""
    # Only ALL_CAPS names are treated as tag lookups; everything else is a
    # genuine missing attribute.
    if not re.match(r'^[A-Z_]*$', attr):
      raise AttributeError(self, attr)
    return self.tags.get(attr)

  def GitFootersFromDescription(self):
    """Return the git footers present in the description.

    Returns:
      footers: A dict of {footer: [values]} containing a multimap of the
        footers in the change description.
    """
    return git_footers.parse_footers(self.FullDescriptionText())

  def BugsFromDescription(self):
    """Returns all bugs referenced in the commit description."""
    tags = [b.strip() for b in self.tags.get('BUG', '').split(',') if b.strip()]
    footers = []
    parsed = self.GitFootersFromDescription()
    unsplit_footers = parsed.get('Bug', []) + parsed.get('Fixed', [])
    for unsplit_footer in unsplit_footers:
      footers += [b.strip() for b in unsplit_footer.split(',')]
    return sorted(set(tags + footers))

  def ReviewersFromDescription(self):
    """Returns all reviewers listed in the commit description."""
    # We don't support a 'R:' git-footer for reviewers; that is in metadata.
    tags = [r.strip() for r in self.tags.get('R', '').split(',') if r.strip()]
    return sorted(set(tags))

  def TBRsFromDescription(self):
    """Returns all TBR reviewers listed in the commit description."""
    tags = [r.strip() for r in self.tags.get('TBR', '').split(',') if r.strip()]
    # TODO(crbug.com/839208): Remove support for 'Tbr:' when TBRs are
    # programmatically determined by self-CR+1s.
    footers = self.GitFootersFromDescription().get('Tbr', [])
    return sorted(set(tags + footers))

  # TODO(crbug.com/753425): Delete these once we're sure they're unused.
  @property
  def BUG(self):
    """Comma-joined form of BugsFromDescription() (deprecated)."""
    return ','.join(self.BugsFromDescription())

  @property
  def R(self):
    """Comma-joined form of ReviewersFromDescription() (deprecated)."""
    return ','.join(self.ReviewersFromDescription())

  @property
  def TBR(self):
    """Comma-joined form of TBRsFromDescription() (deprecated)."""
    return ','.join(self.TBRsFromDescription())

  def AllFiles(self, root=None):
    """List all files under source control in the repo."""
    raise NotImplementedError()

  def AffectedFiles(self, include_deletes=True, file_filter=None):
    """Returns a list of AffectedFile instances for all files in the change.

    Args:
      include_deletes: If false, deleted files will be filtered out.
      file_filter: An additional filter to apply.

    Returns:
      [AffectedFile(path, action), AffectedFile(path, action)]
    """
    affected = list(filter(file_filter, self._affected_files))

    if include_deletes:
      return affected
    return list(filter(lambda x: x.Action() != 'D', affected))

  def AffectedTestableFiles(self, include_deletes=None, **kwargs):
    """Return a list of the existing text files in a change."""
    if include_deletes is not None:
      warn('AffectedTestableFiles(include_deletes=%s)'
           ' is deprecated and ignored' % str(include_deletes),
           category=DeprecationWarning,
           stacklevel=2)
    return list(filter(
        lambda x: x.IsTestableFile(),
        self.AffectedFiles(include_deletes=False, **kwargs)))

  def AffectedTextFiles(self, include_deletes=None):
    """An alias to AffectedTestableFiles for backwards compatibility."""
    return self.AffectedTestableFiles(include_deletes=include_deletes)

  def LocalPaths(self):
    """Convenience function."""
    return [af.LocalPath() for af in self.AffectedFiles()]

  def AbsoluteLocalPaths(self):
    """Convenience function."""
    return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]

  def RightHandSideLines(self):
    """An iterator over all text lines in 'new' version of changed files.

    Lists lines from new or modified text files in the change.

    This is useful for doing line-by-line regex checks, like checking for
    trailing whitespace.

    Yields:
      a 3 tuple:
        the AffectedFile instance of the current file;
        integer line number (1-based); and
        the contents of the line as a string.
    """
    return _RightHandSideLinesImpl(
        x for x in self.AffectedFiles(include_deletes=False)
        if x.IsTestableFile())

  def OriginalOwnersFiles(self):
    """A map from path names of affected OWNERS files to their old content."""
    def owners_file_filter(f):
      return 'OWNERS' in os.path.split(f.LocalPath())[1]
    files = self.AffectedFiles(file_filter=owners_file_filter)
    return dict([(f.LocalPath(), f.OldContents()) for f in files])
class GitChange(Change):
  """A Change backed by a git checkout."""
  _AFFECTED_FILES = GitAffectedFile
  scm = 'git'

  def AllFiles(self, root=None):
    """List all files under source control in the repo."""
    root = root or self.RepositoryRoot()
    # core.quotePath=false keeps non-ASCII paths unescaped in the output.
    output = subprocess.check_output(
        ['git', '-c', 'core.quotePath=false', 'ls-files', '--', '.'],
        cwd=root)
    return output.splitlines()
def ListRelevantPresubmitFiles(files, root):
  """Finds all presubmit files that apply to a given set of source files.

  If inherit-review-settings-ok is present right under root, looks for
  PRESUBMIT.py in directories enclosing root.

  Args:
    files: An iterable container containing file paths.
    root: Path where to stop searching.

  Return:
    List of absolute paths of the existing PRESUBMIT.py scripts.
  """
  files = [normpath(os.path.join(root, f)) for f in files]

  # List all the individual directories containing files.
  directories = set(os.path.dirname(f) for f in files)

  # Ignore root if inherit-review-settings-ok is present.
  if os.path.isfile(os.path.join(root, 'inherit-review-settings-ok')):
    root = None

  # Walk upwards from each file's directory, collecting every unique
  # directory that may contain a PRESUBMIT.py, stopping at |root| (or at the
  # filesystem root when |root| was cleared above).
  candidates = set()
  for directory in directories:
    while directory not in candidates:
      candidates.add(directory)
      if directory == root:
        break
      parent_dir = os.path.dirname(directory)
      if parent_dir == directory:
        # We hit the system root directory.
        break
      directory = parent_dir

  # Look for PRESUBMIT.py in all candidate directories.
  results = []
  for directory in sorted(candidates):
    try:
      entries = os.listdir(directory)
    except OSError:
      continue
    for entry in entries:
      full_path = os.path.join(directory, entry)
      if (os.path.isfile(full_path)
          and re.match(r'PRESUBMIT.*\.py$', entry)
          and not entry.startswith('PRESUBMIT_test')):
        results.append(full_path)

  logging.debug('Presubmit files: %s', ','.join(results))
  return results
class GetTryMastersExecuter(object):
  @staticmethod
  def ExecPresubmitScript(script_text, presubmit_path, project, change):
    """Executes GetPreferredTryMasters() from a single presubmit script.

    Args:
      script_text: The text of the presubmit script.
      presubmit_path: Project script to run.
      project: Project name to pass to presubmit script for bot selection.
      change: The Change object describing the change under review.

    Return:
      A map of try masters to map of builders to set of tests, or {} when the
      script does not define GetPreferredTryMasters.
    """
    context = {}
    try:
      exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
           context)
    except Exception as e:
      raise PresubmitFailure('"%s" had an exception.\n%s'
                             % (presubmit_path, e))

    function_name = 'GetPreferredTryMasters'
    if function_name not in context:
      return {}
    get_preferred_try_masters = context[function_name]
    # inspect.getargspec() was removed in Python 3.11; getfullargspec() is the
    # compatible replacement (index 0 is still the positional args list).
    if not len(inspect.getfullargspec(get_preferred_try_masters)[0]) == 2:
      raise PresubmitFailure(
          'Expected function "GetPreferredTryMasters" to take two arguments.')
    return get_preferred_try_masters(project, change)
class GetPostUploadExecuter(object):
  @staticmethod
  def ExecPresubmitScript(script_text, presubmit_path, gerrit_obj, change):
    """Executes PostUploadHook() from a single presubmit script.

    Args:
      script_text: The text of the presubmit script.
      presubmit_path: Project script to run.
      gerrit_obj: The GerritAccessor object.
      change: The Change object.

    Return:
      A list of results objects (an empty dict when the script does not
      define PostUploadHook, preserving historical behavior).
    """
    context = {}
    try:
      exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
           context)
    except Exception as e:
      raise PresubmitFailure('"%s" had an exception.\n%s'
                             % (presubmit_path, e))

    function_name = 'PostUploadHook'
    if function_name not in context:
      return {}
    post_upload_hook = context[function_name]
    # inspect.getargspec() was removed in Python 3.11; getfullargspec() is the
    # compatible replacement (index 0 is still the positional args list).
    if not len(inspect.getfullargspec(post_upload_hook)[0]) == 3:
      raise PresubmitFailure(
          'Expected function "PostUploadHook" to take three arguments.')
    return post_upload_hook(gerrit_obj, change, OutputApi(False))
def _MergeMasters(masters1, masters2):
"""Merges two master maps. Merges also the tests of each builder."""
result = {}
for (master, builders) in itertools.chain(masters1.items(),
masters2.items()):
new_builders = result.setdefault(master, {})
for (builder, tests) in builders.items():
new_builders.setdefault(builder, set([])).update(tests)
return result
def DoGetTryMasters(change,
                    changed_files,
                    repository_root,
                    default_presubmit,
                    project,
                    verbose,
                    output_stream):
  """Get the list of try masters from the presubmit scripts.

  Args:
    changed_files: List of modified files.
    repository_root: The repository root.
    default_presubmit: A default presubmit script to execute in any case.
    project: Optional name of a project used in selecting trybots.
    verbose: Prints debug info.
    output_stream: A stream to write debug output to.

  Return:
    Map of try masters to map of builders to set of tests.
  """
  presubmit_files = ListRelevantPresubmitFiles(changed_files, repository_root)
  if verbose and not presubmit_files:
    output_stream.write('Warning, no PRESUBMIT.py found.\n')

  executer = GetTryMastersExecuter()
  results = {}

  if default_presubmit:
    if verbose:
      output_stream.write('Running default presubmit script.\n')
    fake_path = os.path.join(repository_root, 'PRESUBMIT.py')
    default_results = executer.ExecPresubmitScript(
        default_presubmit, fake_path, project, change)
    results = _MergeMasters(results, default_results)

  for filename in presubmit_files:
    filename = os.path.abspath(filename)
    if verbose:
      output_stream.write('Running %s\n' % filename)
    # Accept CRLF presubmit script.
    script_text = gclient_utils.FileRead(filename, 'rU')
    results = _MergeMasters(results, executer.ExecPresubmitScript(
        script_text, filename, project, change))

  # Convert the sets of tests to lists so that the result can be serialized
  # to JSON later.
  for builders in results.values():
    for builder in builders:
      builders[builder] = list(builders[builder])

  if results and verbose:
    output_stream.write('%s\n' % str(results))
  return results
def DoPostUploadExecuter(change,
                         gerrit_obj,
                         verbose):
  """Execute the post upload hook.

  Args:
    change: The Change object.
    gerrit_obj: The GerritAccessor object.
    verbose: Prints debug info.
  """
  presubmit_files = ListRelevantPresubmitFiles(
      change.LocalPaths(), change.RepositoryRoot())
  if not presubmit_files and verbose:
    sys.stdout.write('Warning, no PRESUBMIT.py found.\n')

  executer = GetPostUploadExecuter()
  results = []
  # Run the specific (deeper) hooks before the general ones:
  # ListRelevantPresubmitFiles puts the root presubmit file first, so iterate
  # it in reverse.
  for filename in reversed(presubmit_files):
    filename = os.path.abspath(filename)
    if verbose:
      sys.stdout.write('Running %s\n' % filename)
    # Accept CRLF presubmit script.
    presubmit_script = gclient_utils.FileRead(filename, 'rU')
    results.extend(executer.ExecPresubmitScript(
        presubmit_script, filename, gerrit_obj, change))

  if not results:
    return 0

  sys.stdout.write('\n')
  sys.stdout.write('** Post Upload Hook Messages **\n')

  exit_code = 0
  for result in results:
    if result.fatal:
      exit_code = 1
    result.handle()
    sys.stdout.write('\n')

  return exit_code
class PresubmitExecuter(object):
  """Loads a PRESUBMIT.py script and runs the check functions it defines."""

  def __init__(self, change, committing, verbose, gerrit_obj, dry_run=None,
               thread_pool=None, parallel=False):
    """
    Args:
      change: The Change object.
      committing: True if 'git cl land' is running, False if 'git cl upload' is.
      gerrit_obj: provides basic Gerrit codereview functionality.
      dry_run: if true, some Checks will be skipped.
      thread_pool: shared ThreadPool that input_api.RunTests feeds into.
      parallel: if true, all tests reported via input_api.RunTests for all
        PRESUBMIT files will be run in parallel.
    """
    self.change = change
    self.committing = committing
    self.gerrit = gerrit_obj
    self.verbose = verbose
    self.dry_run = dry_run
    # CC email addresses accumulated from every executed script's output API.
    self.more_cc = []
    self.thread_pool = thread_pool
    self.parallel = parallel

  def ExecPresubmitScript(self, script_text, presubmit_path):
    """Executes a single presubmit script.

    Args:
      script_text: The text of the presubmit script.
      presubmit_path: The path to the presubmit file (this will be reported via
        input_api.PresubmitLocalPath()).

    Return:
      A list of result objects, empty if no problems.
    """
    # Change to the presubmit file's directory to support local imports.
    main_path = os.getcwd()
    presubmit_dir = os.path.dirname(presubmit_path)
    os.chdir(presubmit_dir)

    # Load the presubmit script into context.
    input_api = InputApi(self.change, presubmit_path, self.committing,
                         self.verbose, gerrit_obj=self.gerrit,
                         dry_run=self.dry_run, thread_pool=self.thread_pool,
                         parallel=self.parallel)
    output_api = OutputApi(self.committing)
    context = {}
    try:
      exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
           context)
    except Exception as e:
      raise PresubmitFailure('"%s" had an exception.\n%s' % (presubmit_path, e))

    # The check functions are later invoked by name via eval() in
    # _run_check_function; stash the API objects in the script's globals so
    # they can be passed as arguments there.
    context['__args'] = (input_api, output_api)

    # Get path of presubmit directory relative to repository root.
    # Always use forward slashes, so that path is same in *nix and Windows
    root = input_api.change.RepositoryRoot()
    rel_path = os.path.relpath(presubmit_dir, root)
    rel_path = rel_path.replace(os.path.sep, '/')

    # Get the URL of git remote origin and use it to identify host and project
    host = project = ''
    if self.gerrit:
      host = self.gerrit.host or ''
      project = self.gerrit.project or ''

    # Prefix for test names
    prefix = 'presubmit:%s/%s:%s/' % (host, project, rel_path)

    # Perform all the desired presubmit checks.
    results = []
    try:
      # PRESUBMIT_VERSION >= 2.0.0 selects the new-style check discovery.
      version = [
        int(x) for x in context.get('PRESUBMIT_VERSION', '0.0.0').split('.')
      ]

      with rdb_wrapper.client(prefix) as sink:
        if version >= [2, 0, 0]:
          # New format: any number of Check* functions; a *Commit or *Upload
          # suffix restricts the function to the matching phase.
          for function_name in context:
            if not function_name.startswith('Check'):
              continue
            if function_name.endswith('Commit') and not self.committing:
              continue
            if function_name.endswith('Upload') and self.committing:
              continue
            logging.debug('Running %s in %s', function_name, presubmit_path)
            results.extend(
                self._run_check_function(function_name, context, sink))
            logging.debug('Running %s done.', function_name)
            self.more_cc.extend(output_api.more_cc)

        else:  # Old format
          if self.committing:
            function_name = 'CheckChangeOnCommit'
          else:
            function_name = 'CheckChangeOnUpload'
          if function_name in context:
            logging.debug('Running %s in %s', function_name, presubmit_path)
            results.extend(
                self._run_check_function(function_name, context, sink))
            logging.debug('Running %s done.', function_name)
            self.more_cc.extend(output_api.more_cc)

    finally:
      # Clean up temporary files created via input_api.CreateTemporaryFile();
      # they are only valid while this script's checks run.
      for f in input_api._named_temporary_files:
        os.remove(f)

    # Return the process to the original working directory.
    os.chdir(main_path)
    return results

  def _run_check_function(self, function_name, context, sink=None):
    """Evaluates and returns the result of a given presubmit function.

    If sink is given, the result of the presubmit function will be reported
    to the ResultSink.

    Args:
      function_name: the name of the presubmit function to evaluate
      context: a context dictionary in which the function will be evaluated
      sink: an instance of ResultSink. None, by default.

    Returns:
      the result of the presubmit function call.
    """
    start_time = time_time()
    try:
      # __args is the (input_api, output_api) tuple installed by
      # ExecPresubmitScript into the script's global namespace.
      result = eval(function_name + '(*__args)', context)
      self._check_result_type(result)
    except Exception as e:
      if sink:
        elapsed_time = time_time() - start_time
        sink.report(function_name, rdb_wrapper.STATUS_FAIL, elapsed_time)
      # Re-raise as the same exception type, with the function name attached.
      raise type(e)('Evaluation of %s failed: %s' % (function_name, e))

    if sink:
      elapsed_time = time_time() - start_time
      # Any fatal result marks the whole function as failed in ResultSink.
      status = rdb_wrapper.STATUS_PASS
      if any(r.fatal for r in result):
        status = rdb_wrapper.STATUS_FAIL
      sink.report(function_name, status, elapsed_time)

    return result

  def _check_result_type(self, result):
    """Helper function which ensures result is a list, and all elements are
    instances of OutputApi.PresubmitResult"""
    if not isinstance(result, (tuple, list)):
      raise PresubmitFailure('Presubmit functions must return a tuple or list')
    if not all(isinstance(res, OutputApi.PresubmitResult) for res in result):
      raise PresubmitFailure(
          'All presubmit results must be of types derived from '
          'output_api.PresubmitResult')
def DoPresubmitChecks(change,
                      committing,
                      verbose,
                      default_presubmit,
                      may_prompt,
                      gerrit_obj,
                      dry_run=None,
                      parallel=False,
                      json_output=None):
  """Runs all presubmit checks that apply to the files in the change.

  This finds all PRESUBMIT.py files in directories enclosing the files in the
  change (up to the repository root) and calls the relevant entrypoint function
  depending on whether the change is being committed or uploaded.

  Prints errors, warnings and notifications.  Prompts the user for warnings
  when needed.

  Args:
    change: The Change object.
    committing: True if 'git cl land' is running, False if 'git cl upload' is.
    verbose: Prints debug info.
    default_presubmit: A default presubmit script to execute in any case.
    may_prompt: Enable (y/n) questions on warning or error. If False,
                any questions are answered with yes by default.
    gerrit_obj: provides basic Gerrit codereview functionality.
    dry_run: if true, some Checks will be skipped.
    parallel: if true, all tests specified by input_api.RunTests in all
              PRESUBMIT files will be run in parallel.
    json_output: Optional path to write a machine-readable summary of the
                 results to.

  Return:
    1 if presubmit checks failed or 0 otherwise.
  """
  old_environ = os.environ
  try:
    # Make sure python subprocesses won't generate .pyc files.
    os.environ = os.environ.copy()
    os.environ['PYTHONDONTWRITEBYTECODE'] = '1'

    if committing:
      sys.stdout.write('Running presubmit commit checks ...\n')
    else:
      sys.stdout.write('Running presubmit upload checks ...\n')
    start_time = time_time()
    presubmit_files = ListRelevantPresubmitFiles(
        change.AbsoluteLocalPaths(), change.RepositoryRoot())
    if not presubmit_files and verbose:
      sys.stdout.write('Warning, no PRESUBMIT.py found.\n')
    results = []
    # One shared thread pool so tests queued by input_api.RunTests in every
    # script can all be run together below.
    thread_pool = ThreadPool()
    executer = PresubmitExecuter(change, committing, verbose, gerrit_obj,
                                 dry_run, thread_pool, parallel)
    if default_presubmit:
      if verbose:
        sys.stdout.write('Running default presubmit script.\n')
      fake_path = os.path.join(change.RepositoryRoot(), 'PRESUBMIT.py')
      results += executer.ExecPresubmitScript(default_presubmit, fake_path)
    for filename in presubmit_files:
      filename = os.path.abspath(filename)
      if verbose:
        sys.stdout.write('Running %s\n' % filename)
      # Accept CRLF presubmit script.
      presubmit_script = gclient_utils.FileRead(filename, 'rU')
      results += executer.ExecPresubmitScript(presubmit_script, filename)

    # Run any tests that were only queued (--parallel mode).
    results += thread_pool.RunAsync()

    # Bucket results by severity for display and for the JSON summary.
    messages = {}
    should_prompt = False
    presubmits_failed = False
    for result in results:
      if result.fatal:
        presubmits_failed = True
        messages.setdefault('ERRORS', []).append(result)
      elif result.should_prompt:
        should_prompt = True
        messages.setdefault('Warnings', []).append(result)
      else:
        messages.setdefault('Messages', []).append(result)

    sys.stdout.write('\n')
    for name, items in messages.items():
      sys.stdout.write('** Presubmit %s **\n' % name)
      for item in items:
        item.handle()
        sys.stdout.write('\n')

    total_time = time_time() - start_time
    if total_time > 1.0:
      sys.stdout.write(
          'Presubmit checks took %.1fs to calculate.\n\n' % total_time)

    if not should_prompt and not presubmits_failed:
      sys.stdout.write('Presubmit checks passed.\n')
    elif should_prompt:
      # Warnings fail the run only if the user declines to continue (or
      # cannot be asked and may_prompt is set).
      sys.stdout.write('There were presubmit warnings. ')
      if may_prompt:
        presubmits_failed = not prompt_should_continue(
            'Are you sure you wish to continue? (y/N): ')
      else:
        sys.stdout.write('\n')

    if json_output:
      # Write the presubmit results to json output
      presubmit_results = {
        'errors': [
            error.json_format()
            for error in messages.get('ERRORS', [])
        ],
        'notifications': [
            notification.json_format()
            for notification in messages.get('Messages', [])
        ],
        'warnings': [
            warning.json_format()
            for warning in messages.get('Warnings', [])
        ],
        'more_cc': executer.more_cc,
      }

      gclient_utils.FileWrite(
          json_output, json.dumps(presubmit_results, sort_keys=True))

    global _ASKED_FOR_FEEDBACK
    # Ask for feedback one time out of 5.
    if (len(results) and random.randint(0, 4) == 0 and not _ASKED_FOR_FEEDBACK):
      sys.stdout.write(
          'Was the presubmit check useful? If not, run "git cl presubmit -v"\n'
          'to figure out which PRESUBMIT.py was run, then run git blame\n'
          'on the file to figure out who to ask for help.\n')
      _ASKED_FOR_FEEDBACK = True

    return 1 if presubmits_failed else 0
  finally:
    # Restore the environment exactly as it was, even on error.
    os.environ = old_environ
def _scan_sub_dirs(mask, recursive):
if not recursive:
return [x for x in glob.glob(mask) if x not in ('.svn', '.git')]
results = []
for root, dirs, files in os.walk('.'):
if '.svn' in dirs:
dirs.remove('.svn')
if '.git' in dirs:
dirs.remove('.git')
for name in files:
if fnmatch.fnmatch(name, mask):
results.append(os.path.join(root, name))
return results
def _parse_files(args, recursive):
  """Expands each mask in |args| into ('M', path) pairs via _scan_sub_dirs."""
  logging.debug('Searching for %s', args)
  files = []
  for mask in args:
    files.extend(('M', matched) for matched in _scan_sub_dirs(mask, recursive))
  return files
def _parse_change(parser, options):
  """Process change options.

  Args:
    parser: The parser used to parse the arguments from command line.
    options: The arguments parsed from command line.

  Returns:
    A GitChange if the change root is a git repository, or a Change otherwise.
  """
  if options.files and options.all_files:
    parser.error('<files> cannot be specified when --all-files is set.')

  change_scm = scm.determine_scm(options.root)
  if change_scm != 'git' and not options.files:
    parser.error('<files> is not optional for unversioned directories.')

  # Build the (action, path) list from whichever source the flags selected:
  # explicit masks, the full repo, or the SCM's modified-file status.
  if options.files:
    change_files = _parse_files(options.files, options.recursive)
  elif options.all_files:
    change_files = [('M', f) for f in scm.GIT.GetAllFiles(options.root)]
  else:
    change_files = scm.GIT.CaptureStatus(
        options.root, options.upstream or None)

  logging.info('Found %d file(s).', len(change_files))

  change_class = GitChange if change_scm == 'git' else Change
  return change_class(
      options.name, options.description, options.root, change_files,
      options.issue, options.patchset, options.author,
      upstream=options.upstream)
def _parse_gerrit_options(parser, options):
  """Process gerrit options.

  SIDE EFFECTS: Modifies options.author and options.description from Gerrit if
  options.gerrit_fetch is set.

  Args:
    parser: The parser used to parse the arguments from command line.
    options: The arguments parsed from command line.

  Returns:
    A GerritAccessor object if options.gerrit_url is set, or None otherwise.
  """
  gerrit_obj = None
  if options.gerrit_url:
    gerrit_obj = GerritAccessor(
        url=options.gerrit_url,
        project=options.gerrit_project,
        branch=options.gerrit_branch)

  if not options.gerrit_fetch:
    return gerrit_obj

  # Fetching change metadata needs a full Gerrit reference.
  if not (options.gerrit_url and options.issue and options.patchset):
    parser.error(
        '--gerrit_fetch requires --gerrit_url, --issue and --patchset.')

  options.author = gerrit_obj.GetChangeOwner(options.issue)
  options.description = gerrit_obj.GetChangeDescription(
      options.issue, options.patchset)

  logging.info('Got author: "%s"', options.author)
  logging.info('Got description: """\n%s\n"""', options.description)

  return gerrit_obj
@contextlib.contextmanager
def canned_check_filter(method_names):
  """Temporarily replaces the named canned checks with no-ops.

  The original functions are restored when the context exits, even on error.
  Unknown names are logged and skipped.
  """
  saved = {}
  try:
    for name in method_names:
      if not hasattr(presubmit_canned_checks, name):
        logging.warning('Skipping unknown "canned" check %s' % name)
        continue
      saved[name] = getattr(presubmit_canned_checks, name)
      setattr(presubmit_canned_checks, name, lambda *_a, **_kw: [])
    yield
  finally:
    for name, original in saved.items():
      setattr(presubmit_canned_checks, name, original)
def main(argv=None):
    """Run presubmit checks (or post-upload hooks) over a set of files.

    Parses the command line, builds the Change object, then dispatches to
    DoPostUploadExecuter or DoPresubmitChecks.

    Args:
        argv: argument list, or None to use sys.argv.
    Returns:
        The executer's exit code, or 2 on PresubmitFailure.
    """
    parser = argparse.ArgumentParser(usage='%(prog)s [options] <files...>')
    # -c/--commit and -u/--upload are two views of the same boolean flag.
    hooks = parser.add_mutually_exclusive_group()
    hooks.add_argument('-c', '--commit', action='store_true',
                       help='Use commit instead of upload checks.')
    hooks.add_argument('-u', '--upload', action='store_false', dest='commit',
                       help='Use upload instead of commit checks.')
    hooks.add_argument('--post_upload', action='store_true',
                       help='Run post-upload commit hooks.')
    parser.add_argument('-r', '--recursive', action='store_true',
                        help='Act recursively.')
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        help='Use 2 times for more debug info.')
    parser.add_argument('--name', default='no name')
    parser.add_argument('--author')
    # Description may come inline or from a file, but not both.
    desc = parser.add_mutually_exclusive_group()
    desc.add_argument('--description', default='', help='The change description.')
    desc.add_argument('--description_file',
                      help='File to read change description from.')
    parser.add_argument('--issue', type=int, default=0)
    parser.add_argument('--patchset', type=int, default=0)
    parser.add_argument('--root', default=os.getcwd(),
                        help='Search for PRESUBMIT.py up to this directory. '
                             'If inherit-review-settings-ok is present in this '
                             'directory, parent directories up to the root file '
                             'system directories will also be searched.')
    parser.add_argument('--upstream',
                        help='Git only: the base ref or upstream branch against '
                             'which the diff should be computed.')
    parser.add_argument('--default_presubmit')
    parser.add_argument('--may_prompt', action='store_true', default=False)
    # NOTE: default=[] is shared across parses of the same parser instance
    # (argparse appends to the default list) -- harmless here since the
    # parser is built fresh on every call.
    parser.add_argument('--skip_canned', action='append', default=[],
                        help='A list of checks to skip which appear in '
                             'presubmit_canned_checks. Can be provided multiple times '
                             'to skip multiple canned checks.')
    # Hidden flags used by the gerrit integration.
    parser.add_argument('--dry_run', action='store_true', help=argparse.SUPPRESS)
    parser.add_argument('--gerrit_url', help=argparse.SUPPRESS)
    parser.add_argument('--gerrit_project', help=argparse.SUPPRESS)
    parser.add_argument('--gerrit_branch', help=argparse.SUPPRESS)
    parser.add_argument('--gerrit_fetch', action='store_true',
                        help=argparse.SUPPRESS)
    parser.add_argument('--parallel', action='store_true',
                        help='Run all tests specified by input_api.RunTests in '
                             'all PRESUBMIT files in parallel.')
    parser.add_argument('--json_output',
                        help='Write presubmit errors to json output.')
    parser.add_argument('--all_files', action='store_true',
                        help='Mark all files under source control as modified.')
    parser.add_argument('files', nargs='*',
                        help='List of files to be marked as modified when '
                             'executing presubmit or post-upload hooks. fnmatch '
                             'wildcards can also be used.')
    options = parser.parse_args(argv)

    # Map -v count to log verbosity: 0 -> ERROR, 1 -> INFO, 2+ -> DEBUG.
    log_level = logging.ERROR
    if options.verbose >= 2:
        log_level = logging.DEBUG
    elif options.verbose:
        log_level = logging.INFO
    log_format = ('[%(levelname).1s%(asctime)s %(process)d %(thread)d '
                  '%(filename)s] %(message)s')
    logging.basicConfig(format=log_format, level=log_level)

    if options.description_file:
        options.description = gclient_utils.FileRead(options.description_file)
    gerrit_obj = _parse_gerrit_options(parser, options)
    change = _parse_change(parser, options)

    try:
        if options.post_upload:
            return DoPostUploadExecuter(
                change,
                gerrit_obj,
                options.verbose)
        # Temporarily no-op any canned checks the caller asked to skip.
        with canned_check_filter(options.skip_canned):
            return DoPresubmitChecks(
                change,
                options.commit,
                options.verbose,
                options.default_presubmit,
                options.may_prompt,
                gerrit_obj,
                options.dry_run,
                options.parallel,
                options.json_output)
    except PresubmitFailure as e:
        print(e, file=sys.stderr)
        print('Maybe your depot_tools is out of date?', file=sys.stderr)
        return 2
if __name__ == '__main__':
    # Normalize stdout/stderr encoding before anything prints.
    fix_encoding.fix_encoding()
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        # Ctrl-C exits with the conventional "failure" code instead of a
        # traceback.
        sys.stderr.write('interrupted\n')
        sys.exit(2)
|
catalogizer.py | from collections import defaultdict
from datetime import datetime
from os import path, unlink
import logging
import threading
from dal.configuration.tags import AUDIO_TAG_PATTERNS, IMAGE_TAG_PATTERNS, SUBTITLE_TAG_PATTERNS, TAG_ANY, \
TAG_ANY_PATTERN, TAG_END_SEPARATOR, TAG_START_SEPARATOR, VIDEO_TAG_PATTERNS
from bll.mediacatalog.audiocollector import AudioCollector
from bll.mediacatalog.audiofilterfactory import AudioFilterFactory
from bll.mediacatalog.imagecollector import ImageCollector
from bll.mediacatalog.imagefilterfactory import ImageFilterFactory
from bll.mediacatalog.videocollector import VideoCollector
from bll.mediacatalog.videofilterfactory import VideoFilterFactory
from indexing.collectible import Collectible
from indexing.indexer import Indexer
from indexing.indexerpolicy import IndexerPolicy
from indexing.pathpatternanalyzer import PathPatternAnalyzer
from indexing.tagconfig import TagConfig
class Catalogizer:
    """
    Responsible for refreshing the media database either by synchronizing or
    completely rebuilding it.
    """

    ####################################################################################################################
    # Public constants.
    ####################################################################################################################

    STATUS_COMPLETED = 0
    STATUS_IN_PROGRESS = 1
    STATUS_NOT_RUNNING = 2
    STATUS_STARTED = 3

    ####################################################################################################################
    # Constructor.
    ####################################################################################################################

    def __init__(self, context):
        """
        Initializes the catalogizer.

        :param context: Carries the database configuration, the indexing
            configuration and the media DAL with its audio / image / video
            data handlers.  All of these must be non-None.
        :raises Exception: If the context or any required attribute is None.
        """
        ### Validate parameters.
        if context is None:
            raise Exception('context cannot be None.')
        if context.database_config is None:
            raise Exception('database_config cannot be None.')
        if context.indexing_config is None:
            raise Exception('indexing_config cannot be None.')
        if context.media_dal is None:
            raise Exception('media_dal cannot be None.')
        if context.media_dal.audio_data_handler is None:
            raise Exception('audio_data_handler cannot be None.')
        if context.media_dal.image_data_handler is None:
            raise Exception('image_data_handler cannot be None.')
        if context.media_dal.video_data_handler is None:
            raise Exception('video_data_handler cannot be None.')

        ### Attributes from outside.
        self._database_config = context.database_config
        self._indexing_config = context.indexing_config
        self._audio_dal = context.media_dal.audio_data_handler
        self._image_dal = context.media_dal.image_data_handler
        self._video_dal = context.media_dal.video_data_handler

        ### Private attributes.
        # A boolean value that indicates whether a synchronization process is running currently.
        self._is_process_running = False
        # Serializes database maintenance operations.  This must be a
        # *reentrant* lock: renew_database() calls rebuild_database() while
        # already holding it, which would deadlock with a plain Lock.
        self._synchronization_lock_object = threading.RLock()

    ####################################################################################################################
    # Properties.
    ####################################################################################################################

    @property
    def status(self):
        """Current state: STATUS_IN_PROGRESS while indexing, else STATUS_NOT_RUNNING."""
        if self._is_process_running:
            return Catalogizer.STATUS_IN_PROGRESS
        return Catalogizer.STATUS_NOT_RUNNING

    ####################################################################################################################
    # Public methods.
    ####################################################################################################################

    def rebuild_database(self):
        """
        Deletes and recreates the media database, then reindexes everything.

        :return: A STATUS_* code, or None when the rebuild failed (the error
            is logged, not re-raised -- best-effort semantics).
        """
        with self._synchronization_lock_object:
            try:
                return self._rebuild_database()
            except Exception as exception:
                logging.error('Failed to rebuild media database. %s', exception)

    def rebuild_database_async(self, callback=None):
        """Runs rebuild_database() on a worker thread. See _start_async_process()."""
        return self._start_async_process(self.rebuild_database, callback)

    def renew_database(self):
        """
        Rebuilds the media database when it is missing or older than the
        configured lifetime (in seconds).

        :return: True.
        """
        with self._synchronization_lock_object:
            # Check if database exists and check whether it is recent ...
            is_rebuild_needed = False
            if not path.exists(self._database_config.path_media):
                is_rebuild_needed = True
            else:
                database_age = datetime.fromtimestamp(path.getmtime(self._database_config.path_media))
                time_since_last_update = datetime.now() - database_age
                seconds_since_last_update = int(time_since_last_update.total_seconds())
                if seconds_since_last_update > self._database_config.lifetime:
                    is_rebuild_needed = True
            # ... and if not, recreate it.  Re-entering rebuild_database() here
            # is safe because the synchronization lock is reentrant.
            if is_rebuild_needed is True:
                self.rebuild_database()
            return True

    def synchronize_database(self):
        """
        Incrementally synchronizes the media database with the file system.

        :return: A STATUS_* code, or None when synchronization failed (the
            error is logged, not re-raised).
        """
        with self._synchronization_lock_object:
            try:
                return self._synchronize_database()
            except Exception as exception:
                logging.error('Failed to synchronize media database. %s', exception)

    def synchronize_database_async(self, callback=None):
        """Runs synchronize_database() on a worker thread. See _start_async_process()."""
        return self._start_async_process(self.synchronize_database, callback)

    ####################################################################################################################
    # Private methods -- Indexing.
    ####################################################################################################################

    def _configure_indexer(self, indexer, collector, filter_factory, tag_config, rules, collectible_tag=None):
        """Registers one IndexerPolicy per configured directory on the indexer."""
        collectibles = []
        path_pattern_analyzer = PathPatternAnalyzer()
        for directory, rules_for_dir in self._group_rules_by_directory(rules).items():
            for rule in rules_for_dir:
                pattern = path_pattern_analyzer.parse(tag_config, rule.pattern)
                collectible = Collectible(rule.extensions, pattern, collectible_tag)
                collectibles.append(collectible)
            indexer_policy = IndexerPolicy(collector, collectibles, filter_factory)
            indexer_policy.tag_any = TAG_ANY
            indexer.add_rule(directory, indexer_policy)

    def _index_all_files(self):
        """Indexes audio, image and video files, each inside its own DB connection scope."""
        with self._audio_dal.db_context.get_connection_provider():
            self._index_audio_files()
        with self._image_dal.db_context.get_connection_provider():
            self._index_image_files()
        with self._video_dal.db_context.get_connection_provider():
            self._index_video_files()

    def _index_audio_files(self, sync_only=False):
        """Indexes audio files; no-op when no audio indexing is configured."""
        config = self._indexing_config.audio
        if config is None:
            return
        audio_collector = AudioCollector(self._audio_dal)
        audio_filter_factory = AudioFilterFactory(self._audio_dal, sync_only)
        tag_config = TagConfig(TAG_START_SEPARATOR, TAG_END_SEPARATOR, (TAG_ANY, TAG_ANY_PATTERN), AUDIO_TAG_PATTERNS)
        indexer = Indexer()
        self._configure_indexer(indexer, audio_collector, audio_filter_factory, tag_config, config.rules)
        indexer.index()

    def _index_image_files(self, sync_only=False):
        """Indexes image files; no-op when no image indexing is configured."""
        config = self._indexing_config.image
        if config is None:
            return
        image_collector = ImageCollector(self._image_dal)
        image_filter_factory = ImageFilterFactory(self._image_dal, sync_only)
        tag_config = TagConfig(TAG_START_SEPARATOR, TAG_END_SEPARATOR, (TAG_ANY, TAG_ANY_PATTERN), IMAGE_TAG_PATTERNS)
        indexer = Indexer()
        self._configure_indexer(indexer, image_collector, image_filter_factory, tag_config, config.rules)
        indexer.index()

    def _index_video_files(self, sync_only=False):
        """Indexes video and subtitle files; no-op when no video indexing is configured."""
        config = self._indexing_config.video
        if config is None:
            return
        video_collector = VideoCollector(self._video_dal)
        video_filter_factory = VideoFilterFactory(self._video_dal, config.ignore_revisions, sync_only)
        video_tag_config = TagConfig(
            TAG_START_SEPARATOR, TAG_END_SEPARATOR, (TAG_ANY, TAG_ANY_PATTERN), VIDEO_TAG_PATTERNS)
        subtitle_tag_config = TagConfig(
            TAG_START_SEPARATOR, TAG_END_SEPARATOR, (TAG_ANY, TAG_ANY_PATTERN), SUBTITLE_TAG_PATTERNS)
        indexer = Indexer()
        self._configure_indexer(
            indexer,
            video_collector, video_filter_factory, video_tag_config,
            config.video_rules, 'video')
        self._configure_indexer(
            indexer,
            video_collector, video_filter_factory, subtitle_tag_config,
            config.subtitle_rules, 'subtitle')
        indexer.index()

    def _group_rules_by_directory(self, rules):
        """Returns a dict mapping each rule directory to its list of rules."""
        result = defaultdict(list)
        for rule in rules:
            result[rule.directory].append(rule)
        return result

    ####################################################################################################################
    # Private methods -- Database manipulation.
    ####################################################################################################################

    def _clear_caches(self):
        """Clears every DAL cache."""
        self._audio_dal.clear_cache()
        self._image_dal.clear_cache()
        self._video_dal.clear_cache()

    def _create_database(self):
        """Creates the database schema for every media type."""
        self._audio_dal.creator.create_db()
        self._image_dal.creator.create_db()
        self._video_dal.creator.create_db()

    def _delete_database(self):
        """Removes the media database file if it exists."""
        if path.exists(self._database_config.path_media) is True:
            unlink(self._database_config.path_media)

    def _rebuild_database(self):
        """Deletes, recreates and fully reindexes the database."""
        if self._is_process_running:
            return Catalogizer.STATUS_IN_PROGRESS
        self._is_process_running = True
        try:
            self._delete_database()
            self._clear_caches()
            self._create_database()
            self._index_all_files()
        finally:
            # Always clear the flag, even when indexing raised.
            self._is_process_running = False
        return Catalogizer.STATUS_COMPLETED

    def _synchronize_database(self):
        """Incrementally reindexes every media type (sync_only mode)."""
        if self._is_process_running:
            return Catalogizer.STATUS_IN_PROGRESS
        self._is_process_running = True
        try:
            self._index_audio_files(True)
            self._index_image_files(True)
            self._index_video_files(True)
        finally:
            self._is_process_running = False
        return Catalogizer.STATUS_COMPLETED

    ####################################################################################################################
    # Private methods -- Asynchronous operations.
    ####################################################################################################################

    def _execute_function(self, func, callback):
        """Worker-thread body: runs func and hands its result to the callback."""
        result = func()
        if callback is not None:
            callback(result)

    def _start_async_process(self, func, callback=None):
        """
        Runs func on a daemon-less worker thread.

        NOTE(review): the running-flag check and the thread start are not
        atomic, so two near-simultaneous callers may both get STATUS_STARTED;
        the flag inside func still prevents concurrent indexing.

        :return: STATUS_IN_PROGRESS if a process is already running, otherwise
            STATUS_STARTED (even when thread creation failed; failure is logged).
        """
        if self._is_process_running:
            return Catalogizer.STATUS_IN_PROGRESS
        try:
            thread = threading.Thread(target=self._execute_function, args=(func, callback))
            thread.start()
        except Exception as exception:
            logging.error('Failed to execute asynchronous operation. %s', exception)
        return Catalogizer.STATUS_STARTED
|
save_tiles_hdf5_dense.py | #!/usr/bin/env python
import multiprocessing
from subprocess import call
import csv
try:
import mapnik2 as mapnik
except:
import mapnik
import sys, os, random as rd
import tensorflow as tf, cv2
import numpy as np
import h5py
# Define some parameters
# layers = ['complete','amenity', 'barriers','bridge','buildings','landcover','landuse','natural','others','roads','text','water']
layers = ['s2v']  # mapnik style names; one image per layer is rendered
save_dir = '/images'  # directory holding the output HDF5 file
initial_row = 0 # The first row to process
dataset_name = "routes"  # NOTE(review): appears unused in this script -- confirm before removing
num_threads = 4  # number of renderer worker processes
zoom_levels = [17,18,19,20]  # one tile rendered per zoom level
num_items = len(zoom_levels)  # images per location (one per zoom level)
size = 0.0005  # NOTE(review): declared global in render_location but not otherwise used -- confirm
hdf5_filename = save_dir + '/' + 'london_center.hdf5'  # output HDF5 path
def _int64_feature(value):
    """Wrap a single int in a tf.train.Feature holding an Int64List."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _floats_feature(value):
    """Wrap a single float in a tf.train.Feature holding a FloatList."""
    return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _bytes_feature(value):
    """Wrap bytes in a tf.train.Feature holding a BytesList.

    NOTE(review): unlike the int/float helpers above, *value* is passed
    through unwrapped, so callers must already pass a list of byte strings --
    confirm against call sites.
    """
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def save_data(data, layers, location):
    """Write the rendered images of one location into the HDF5 file.

    Data will have this structure
        class
        Node (A numpy array with the size [item, layer, jpg_string])

    Args:
        data: dict mapping (item, layer) -> encoded PNG bytes for this location.
        layers: list of layer names, defining the column order in the dataset.
        location: csv row dict; its 'loc_id' names the HDF5 dataset.
    """
    global num_items
    global header  # NOTE(review): header is not read in this function -- confirm before removing
    # Variable-length uint8 cells let each (item, layer) slot hold a whole
    # encoded image of arbitrary size.
    dt = h5py.special_dtype(vlen=np.dtype('uint8'))
    path = '/' + location['loc_id']
    # 'a' keeps previously written locations; one dataset per location.
    with h5py.File(hdf5_filename, 'a') as f:
        dset = f.create_dataset(path, (num_items,len(layers),), dtype=dt)
        for item in range(0,num_items):
            for l, layer in enumerate(layers):
                img = data[(item, layer)]
                dset[item, l] = np.frombuffer(img, dtype='uint8')
class RenderThread:
    """Worker that renders single map tiles with mapnik.

    Despite the name, instances are run inside multiprocessing.Process
    workers (see render_location): each worker consumes
    (centre, data, item, layer, teta, zoom) requests from a JoinableQueue
    until it receives a None sentinel.
    """

    def __init__(self, q, printLock):
        self.q = q                # JoinableQueue of render requests
        self.maxZoom = 1          # NOTE(review): never read -- confirm before removing
        self.printLock = printLock
        self.width = 256          # tile width in pixels
        self.height = 256         # tile height in pixels

    def rendertiles(self, cpoint, data, item, layer, teta, zoom):
        """Render one width x height tile centred on cpoint into data.

        Args:
            cpoint: [lon, lat] centre in WGS84 degrees.
            data: shared dict; the encoded PNG is stored under (item, layer).
            item: index of the zoom level being rendered.
            layer: style name; resolves to /map_data/styles/<layer>.xml.
            teta: rotation angle in degrees, applied by using it as lon_0 of
                the azimuthal projection below.
            zoom: slippy-map style zoom level used to size the bounding box.
        """
        # target projection
        merc = mapnik.Projection('+proj=aeqd +ellps=WGS84 +lat_0=90 +lon_0='+str(teta))
        #merc = mapnik.Projection('+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs +over')
        # WGS lat/long source projection of centrel
        longlat = mapnik.Projection('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
        # make a new Map object for the given mapfile
        m = mapnik.Map(self.width, self.height)
        #mapfile = "/map_data/styles/bs_" + layer + ".xml"
        mapfile = "/map_data/styles/" + layer + ".xml"
        mapnik.load_map(m, mapfile)
        # ensure the target map projection is mercator
        m.srs = merc.params()
        # transform the centre point into the target coord sys
        centre = mapnik.Coord(cpoint[0], cpoint[1])
        transform = mapnik.ProjTransform(longlat, merc)
        merc_centre = transform.forward(centre)
        # 360/(2**zoom) degrees = 256 px
        # so in merc 1px = (20037508.34*2) / (256 * 2**zoom)
        # hence to find the bounds of our rectangle in projected coordinates + and - half the image width worth of projected coord units
        dx = ((20037508.34*2*(self.width/2)))/(256*(2 ** (zoom)))
        minx = merc_centre.x - dx
        maxx = merc_centre.x + dx
        # grow the height bbox, as we only accurately set the width bbox
        m.aspect_fix_mode = mapnik.aspect_fix_mode.ADJUST_BBOX_HEIGHT
        bounds = mapnik.Box2d(minx, merc_centre.y-10, maxx, merc_centre.y+10) # the y bounds will be fixed by mapnik due to ADJUST_BBOX_HEIGHT
        m.zoom_to_box(bounds)
        # render the map image to a file
        # mapnik.render_to_file(m, output)
        #render the map to an image
        im = mapnik.Image(self.width,self.height)
        mapnik.render(m, im)
        img = im.tostring('png256')
        #img = cv2.imdecode(np.fromstring(img, dtype=np.uint8), 1)
        #img =np.asarray(img)
        data[(item, layer)]= img

    def loop(self):
        """Consume render requests from the queue until a None sentinel arrives."""
        while True:
            #Fetch a tile from the queue and render it
            r = self.q.get()
            if (r == None):
                self.q.task_done()
                break
            else:
                (bounds, data, item, layer, teta, zoom) = r
                self.rendertiles(bounds, data, item, layer, teta, zoom)
                # Lock acquire/release pair kept from the original tile-server
                # code; nothing is printed between them.
                self.printLock.acquire()
                self.printLock.release()
                self.q.task_done()
def render_location(locations):
    """Render map tiles for every location and store them via save_data().

    For each location a pool of renderer processes draws one image per zoom
    level and per layer; results are collected in a manager dict keyed by
    (item, layer) and then written to the HDF5 file.

    Args:
        locations: list of csv rows with at least 'lat', 'lon', 'gsv_yaw'
            and 'loc_id' keys.
    """
    global size
    global num_items
    global num_threads
    global layers
    global initial_row
    # BUG FIX: the original computed this slice but then iterated the full
    # `locations` list, silently ignoring initial_row.
    pending = locations[initial_row:]
    for x, location in enumerate(pending):
        # BUG FIX: end='\r' was previously passed to str.format (where it is
        # ignored) instead of print.
        print('saving node {}/{}'.format(x, len(pending)), end='\r')
        lat = float(location['lat'])
        lon = float(location['lon'])
        cpoint = [lon, lat]
        with multiprocessing.Manager() as manager:
            data = manager.dict()  # shared between renderer processes
            queue = multiprocessing.JoinableQueue(32)
            printLock = multiprocessing.Lock()
            renderers = {}
            for i in range(num_threads):
                renderer = RenderThread(queue, printLock)
                render_process = multiprocessing.Process(target=renderer.loop)
                render_process.start()
                renderers[i] = render_process
            # Generate num_items images (one per zoom level), rotated so that
            # "up" matches the street-view heading (gsv_yaw).
            for item in range(0, len(zoom_levels)):
                shift_lat = 0  # No shift
                shift_lon = 0  # No shift
                gsv_yaw = float(location['gsv_yaw'])
                teta = -1 * gsv_yaw
                zoom = zoom_levels[item]
                for layer in layers:
                    new_cpoint = [cpoint[0] + shift_lon, cpoint[1] + shift_lat]
                    queue.put((new_cpoint, data, item, layer, teta, zoom))
            # Signal render processes to exit by sending empty requests.
            for i in range(num_threads):
                queue.put(None)
            # Wait for pending rendering jobs to complete.
            queue.join()
            for i in range(num_threads):
                renderers[i].join()
            save_data(data, layers, location)
# Open the csv with the location information, then render every location.
locations = []
with open('/map_data/london_new_locations_not_none.csv') as csvfile:
    reader = csv.DictReader(csvfile)
    header = reader.fieldnames  # read by save_data via the module-global `header`
    for row in reader:
        locations.append(row)
# BUG FIX: corrected "Pointes" typo in the progress message.
print("{} Points were found".format(len(locations)))
print(locations[0])
render_location(locations)
sys.stdout.flush()
|
word2vec_optimized.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-threaded word2vec unbatched skip-gram model.
Trains the model described in:
(Mikolov, et. al.) Efficient Estimation of Word Representations in Vector Space
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does true SGD (i.e. no minibatching). To do this efficiently, custom
ops are used to sequentially process data within a 'batch'.
The key ops used are:
* skipgram custom op that does input processing.
* neg_train custom op that efficiently calculates and applies the gradient using
true SGD.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
from tensorflow.models.embedding import gen_word2vec as word2vec
# Command-line flags controlling data locations, model hyper-parameters and
# the training schedule (see the module docstring).
flags = tf.app.flags

flags.DEFINE_string("save_path", None, "Directory to write the model.")
flags.DEFINE_string(
    "train_data", None,
    "Training data. E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
    "eval_data", None, "Analogy questions. "
    "See README.md for how to get 'questions-words.txt'.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
    "epochs_to_train", 15,
    "Number of epochs to train. Each epoch processes the training data once "
    "completely.")
flags.DEFINE_float("learning_rate", 0.025, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 25,
                     "Negative samples per training example.")
flags.DEFINE_integer("batch_size", 500,
                     "Numbers of training examples each step processes "
                     "(no minibatching).")
flags.DEFINE_integer("concurrent_steps", 12,
                     "The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
                     "The number of words to predict to the left and right "
                     "of the target word.")
flags.DEFINE_integer("min_count", 5,
                     "The minimum number of word occurrences for it to be "
                     "included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
                   "Subsample threshold for word occurrence. Words that appear "
                   "with higher frequency will be randomly down-sampled. Set "
                   "to 0 to disable.")
flags.DEFINE_boolean(
    "interactive", False,
    "If true, enters an IPython interactive session to play with the trained "
    "model. E.g., try model.analogy(b'france', b'paris', b'russia') and "
    "model.nearby([b'proton', b'elephant', b'maxwell'])")

FLAGS = flags.FLAGS
class Options(object):
  """Options used by our word2vec model.

  A plain snapshot of the command-line FLAGS taken at construction time, so
  the model code never reads FLAGS directly.
  """

  def __init__(self):
    # Model options.

    # Embedding dimension.
    self.emb_dim = FLAGS.embedding_size

    # Training options.

    # The training text file.
    self.train_data = FLAGS.train_data

    # Number of negative samples per example.
    self.num_samples = FLAGS.num_neg_samples

    # The initial learning rate.
    self.learning_rate = FLAGS.learning_rate

    # Number of epochs to train. After these many epochs, the learning
    # rate decays linearly to zero and the training stops.
    self.epochs_to_train = FLAGS.epochs_to_train

    # Concurrent training steps.
    self.concurrent_steps = FLAGS.concurrent_steps

    # Number of examples for one training step.
    self.batch_size = FLAGS.batch_size

    # The number of words to predict to the left and right of the target word.
    self.window_size = FLAGS.window_size

    # The minimum number of word occurrences for it to be included in the
    # vocabulary.
    self.min_count = FLAGS.min_count

    # Subsampling threshold for word occurrence.
    self.subsample = FLAGS.subsample

    # Where to write out summaries.
    self.save_path = FLAGS.save_path

    # Eval options.

    # The text file for eval.
    self.eval_data = FLAGS.eval_data
class Word2Vec(object):
  """Word2Vec model (Skipgram).

  Builds the training graph (custom skipgram/neg_train ops), an analogy/nearby
  eval graph, and writes the vocabulary to save_path at construction time.
  """

  def __init__(self, options, session):
    """Args:
      options: an Options instance with the model hyper-parameters.
      session: the tf.Session used for all graph executions.
    """
    self._options = options
    self._session = session
    self._word2id = {}   # word (bytes) -> vocabulary id
    self._id2word = []   # vocabulary id -> word (bytes)
    self.build_graph()
    self.build_eval_graph()
    self.save_vocab()

  def read_analogies(self):
    """Reads through the analogy question file.

    Returns:
      questions: a [n, 4] numpy array containing the analogy question's
      word ids.
      questions_skipped: questions skipped due to unknown words.
    """
    questions = []
    questions_skipped = 0
    with open(self._options.eval_data, "rb") as analogy_f:
      for line in analogy_f:
        if line.startswith(b":"):  # Skip comments.
          continue
        words = line.strip().lower().split(b" ")
        ids = [self._word2id.get(w.strip()) for w in words]
        # A question is usable only when all four words are in the vocabulary.
        if None in ids or len(ids) != 4:
          questions_skipped += 1
        else:
          questions.append(np.array(ids))
    print("Eval analogy file: ", self._options.eval_data)
    print("Questions: ", len(questions))
    print("Skipped: ", questions_skipped)
    self._analogy_questions = np.array(questions, dtype=np.int32)

  def build_graph(self):
    """Build the model graph."""
    opts = self._options
    # The training data. A text file.
    (words, counts, words_per_epoch, current_epoch, total_words_processed,
     examples, labels) = word2vec.skipgram(filename=opts.train_data,
                                           batch_size=opts.batch_size,
                                           window_size=opts.window_size,
                                           min_count=opts.min_count,
                                           subsample=opts.subsample)
    # Materialize the vocabulary tensors once so the Python-side word<->id
    # maps can be built below.
    (opts.vocab_words, opts.vocab_counts,
     opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
    opts.vocab_size = len(opts.vocab_words)
    print("Data file: ", opts.train_data)
    print("Vocab size: ", opts.vocab_size - 1, " + UNK")
    print("Words per epoch: ", opts.words_per_epoch)

    self._id2word = opts.vocab_words
    for i, w in enumerate(self._id2word):
      self._word2id[w] = i

    # Declare all variables we need.
    # Input words embedding: [vocab_size, emb_dim]
    w_in = tf.Variable(
        tf.random_uniform(
            [opts.vocab_size,
             opts.emb_dim], -0.5 / opts.emb_dim, 0.5 / opts.emb_dim),
        name="w_in")

    # Output weights: [vocab_size, emb_dim], zero-initialized.
    w_out = tf.Variable(tf.zeros([opts.vocab_size, opts.emb_dim]), name="w_out")

    # Global step: []
    global_step = tf.Variable(0, name="global_step")

    # Linear learning rate decay down to a floor of 0.0001 * initial rate.
    words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
    lr = opts.learning_rate * tf.maximum(
        0.0001,
        1.0 - tf.cast(total_words_processed, tf.float32) / words_to_train)

    # Training nodes.
    inc = global_step.assign_add(1)
    with tf.control_dependencies([inc]):
      # Custom op: applies the true-SGD negative-sampling update in place.
      train = word2vec.neg_train(w_in,
                                 w_out,
                                 examples,
                                 labels,
                                 lr,
                                 vocab_count=opts.vocab_counts.tolist(),
                                 num_negative_samples=opts.num_samples)

    self._w_in = w_in
    self._examples = examples
    self._labels = labels
    self._lr = lr
    self._train = train
    self.global_step = global_step
    self._epoch = current_epoch
    self._words = total_words_processed

  def save_vocab(self):
    """Save the vocabulary to a file so the model can be reloaded."""
    opts = self._options
    with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
      for i in xrange(opts.vocab_size):
        vocab_word = tf.compat.as_text(opts.vocab_words[i]).encode("utf-8")
        f.write("%s %d\n" % (vocab_word,
                             opts.vocab_counts[i]))

  def build_eval_graph(self):
    """Build the evaluation graph."""
    # Eval graph
    opts = self._options

    # Each analogy task is to predict the 4th word (d) given three
    # words: a, b, c.  E.g., a=italy, b=rome, c=france, we should
    # predict d=paris.

    # The eval feeds three vectors of word ids for a, b, c, each of
    # which is of size N, where N is the number of analogies we want to
    # evaluate in one batch.
    analogy_a = tf.placeholder(dtype=tf.int32)  # [N]
    analogy_b = tf.placeholder(dtype=tf.int32)  # [N]
    analogy_c = tf.placeholder(dtype=tf.int32)  # [N]

    # Normalized word embeddings of shape [vocab_size, emb_dim].
    nemb = tf.nn.l2_normalize(self._w_in, 1)

    # Each row of a_emb, b_emb, c_emb is a word's embedding vector.
    # They all have the shape [N, emb_dim]
    a_emb = tf.gather(nemb, analogy_a)  # a's embs
    b_emb = tf.gather(nemb, analogy_b)  # b's embs
    c_emb = tf.gather(nemb, analogy_c)  # c's embs

    # We expect that d's embedding vectors on the unit hyper-sphere is
    # near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
    target = c_emb + (b_emb - a_emb)

    # Compute cosine distance between each pair of target and vocab.
    # dist has shape [N, vocab_size].
    dist = tf.matmul(target, nemb, transpose_b=True)

    # For each question (row in dist), find the top 4 words.
    _, pred_idx = tf.nn.top_k(dist, 4)

    # Nodes for computing neighbors for a given word according to
    # their cosine distance.
    nearby_word = tf.placeholder(dtype=tf.int32)  # word id
    nearby_emb = tf.gather(nemb, nearby_word)
    nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
    nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
                                         min(1000, opts.vocab_size))

    # Nodes in the construct graph which are used by training and
    # evaluation to run/feed/fetch.
    self._analogy_a = analogy_a
    self._analogy_b = analogy_b
    self._analogy_c = analogy_c
    self._analogy_pred_idx = pred_idx
    self._nearby_word = nearby_word
    self._nearby_val = nearby_val
    self._nearby_idx = nearby_idx

    # Properly initialize all variables.
    tf.global_variables_initializer().run()

    self.saver = tf.train.Saver()

  def _train_thread_body(self):
    """Worker-thread body: step the training op until the epoch counter advances."""
    initial_epoch, = self._session.run([self._epoch])
    while True:
      _, epoch = self._session.run([self._train, self._epoch])
      if epoch != initial_epoch:
        break

  def train(self):
    """Train the model for one epoch using concurrent worker threads."""
    opts = self._options

    initial_epoch, initial_words = self._session.run([self._epoch, self._words])

    workers = []
    for _ in xrange(opts.concurrent_steps):
      t = threading.Thread(target=self._train_thread_body)
      t.start()
      workers.append(t)

    last_words, last_time = initial_words, time.time()
    while True:
      time.sleep(5)  # Reports our progress once a while.
      (epoch, step, words, lr) = self._session.run(
          [self._epoch, self.global_step, self._words, self._lr])
      now = time.time()
      last_words, last_time, rate = words, now, (words - last_words) / (
          now - last_time)
      print("Epoch %4d Step %8d: lr = %5.3f words/sec = %8.0f\r" % (epoch, step,
                                                                    lr, rate),
            end="")
      sys.stdout.flush()
      if epoch != initial_epoch:
        break

    for t in workers:
      t.join()

  def _predict(self, analogy):
    """Predict the top 4 answers for analogy questions."""
    idx, = self._session.run([self._analogy_pred_idx], {
        self._analogy_a: analogy[:, 0],
        self._analogy_b: analogy[:, 1],
        self._analogy_c: analogy[:, 2]
    })
    return idx

  def eval(self):
    """Evaluate analogy questions and reports accuracy."""
    # How many questions we get right at precision@1.
    correct = 0

    try:
      total = self._analogy_questions.shape[0]
    except AttributeError as e:
      raise AttributeError("Need to read analogy questions.")

    start = 0
    while start < total:
      limit = start + 2500  # evaluate in batches of up to 2500 questions
      sub = self._analogy_questions[start:limit, :]
      idx = self._predict(sub)
      start = limit
      for question in xrange(sub.shape[0]):
        for j in xrange(4):
          if idx[question, j] == sub[question, 3]:
            # Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
            correct += 1
            break
          elif idx[question, j] in sub[question, :3]:
            # We need to skip words already in the question.
            continue
          else:
            # The correct label is not the precision@1
            break
    print()
    print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
                                              correct * 100.0 / total))

  def analogy(self, w0, w1, w2):
    """Predict word w3 as in w0:w1 vs w2:w3."""
    wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
    idx = self._predict(wid)
    for c in [self._id2word[i] for i in idx[0, :]]:
      if c not in [w0, w1, w2]:
        print(c)
        break
    # NOTE(review): this prints "unknown" even after a successful prediction;
    # presumably it was meant as a for-else fallback -- confirm intent.
    print("unknown")

  def nearby(self, words, num=20):
    """Prints out nearby words given a list of words."""
    ids = np.array([self._word2id.get(x, 0) for x in words])
    vals, idx = self._session.run(
        [self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
    for i in xrange(len(words)):
      print("\n%s\n=====================================" % (words[i]))
      for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
        print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def _start_shell(local_ns=None):
  """Open an interactive IPython session seeded with globals and *local_ns*.

  Useful for debugging/development.  Module globals take precedence over
  entries in local_ns (same merge order as the original implementation).
  """
  import IPython
  namespace = dict(local_ns) if local_ns else {}
  namespace.update(globals())
  IPython.start_ipython(argv=[], user_ns=namespace)
def main(_):
    """Train a word2vec model."""
    # All three paths are mandatory; bail out with a usage hint otherwise.
    if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
        print("--train_data --eval_data and --save_path must be specified.")
        sys.exit(1)
    opts = Options()
    with tf.Graph().as_default(), tf.Session() as session:
        # Build the model graph pinned to the CPU device.
        with tf.device("/cpu:0"):
            model = Word2Vec(opts, session)
            model.read_analogies()  # Read analogy questions
        for _ in xrange(opts.epochs_to_train):
            model.train()  # Process one epoch
            model.eval()  # Eval analogies.
        # Perform a final save.
        model.saver.save(session, os.path.join(opts.save_path, "model.ckpt"),
                         global_step=model.global_step)
        if FLAGS.interactive:
            # E.g.,
            # [0]: model.analogy(b'france', b'paris', b'russia')
            # [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
            _start_shell(locals())


if __name__ == "__main__":
    # tf.app.run parses FLAGS and then calls main().
    tf.app.run()
|
test_athenad.py | #!/usr/bin/env python3
import json
import os
import requests
import tempfile
import time
import threading
import queue
import unittest
from multiprocessing import Process
from pathlib import Path
from unittest import mock
from websocket import ABNF
from websocket._exceptions import WebSocketConnectionClosedException
from selfdrive.athena import athenad
from selfdrive.athena.athenad import dispatcher
from selfdrive.athena.tests.helpers import MockWebsocket, MockParams, MockApi, EchoSocket, with_http_server
from cereal import messaging
class TestAthenadMethods(unittest.TestCase):
    """Exercises the athenad JSON-RPC dispatcher and upload machinery.

    External collaborators (Params, Api, data root) are swapped for mocks
    and a temp directory once for the whole class in setUpClass.
    """

    @classmethod
    def setUpClass(cls):
        # Patch athenad's module-level globals so every handler under test
        # sees the mocked environment instead of real device state.
        cls.SOCKET_PORT = 45454
        athenad.ROOT = tempfile.mkdtemp()
        athenad.Params = MockParams
        athenad.Api = MockApi
        athenad.LOCAL_PORT_WHITELIST = set([cls.SOCKET_PORT])

    def test_echo(self):
        # Simplest round-trip through the dispatcher.
        assert dispatcher["echo"]("bob") == "bob"

    def test_getMessage(self):
        # With no publisher alive, getMessage must time out.
        with self.assertRaises(TimeoutError) as _:
            dispatcher["getMessage"]("controlsState")

        def send_thermal():
            # Child process: publish 'thermal' messages for ~1 second.
            messaging.context = messaging.Context()
            pub_sock = messaging.pub_sock("thermal")
            start = time.time()
            while time.time() - start < 1:
                msg = messaging.new_message('thermal')
                pub_sock.send(msg.to_bytes())
                time.sleep(0.01)

        p = Process(target=send_thermal)
        p.start()
        time.sleep(0.1)  # give the publisher a moment to come up
        try:
            thermal = dispatcher["getMessage"]("thermal")
            assert thermal['thermal']
        finally:
            p.terminate()

    def test_listDataDirectory(self):
        # Smoke test only: just ensure the handler runs without raising.
        print(dispatcher["listDataDirectory"]())

    @with_http_server
    def test_do_upload(self, host):
        fn = os.path.join(athenad.ROOT, 'qlog.bz2')
        Path(fn).touch()
        try:
            # Unreachable URL: the underlying requests error must surface.
            item = athenad.UploadItem(path=fn, url="http://localhost:1238", headers={}, created_at=int(time.time()*1000), id='')
            with self.assertRaises(requests.exceptions.ConnectionError):
                athenad._do_upload(item)

            # Reachable test server: upload succeeds with 201 Created.
            item = athenad.UploadItem(path=fn, url=f"{host}/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='')
            resp = athenad._do_upload(item)
            self.assertEqual(resp.status_code, 201)
        finally:
            os.unlink(fn)

    @with_http_server
    def test_uploadFileToUrl(self, host):
        # A missing file is reported as a 404 result, not an exception.
        not_exists_resp = dispatcher["uploadFileToUrl"]("does_not_exist.bz2", "http://localhost:1238", {})
        self.assertEqual(not_exists_resp, 404)

        fn = os.path.join(athenad.ROOT, 'qlog.bz2')
        Path(fn).touch()
        try:
            resp = dispatcher["uploadFileToUrl"]("qlog.bz2", f"{host}/qlog.bz2", {})
            self.assertEqual(resp['enqueued'], 1)
            self.assertDictContainsSubset({"path": fn, "url": f"{host}/qlog.bz2", "headers": {}}, resp['item'])
            self.assertIsNotNone(resp['item'].get('id'))
            self.assertEqual(athenad.upload_queue.qsize(), 1)
        finally:
            # Reset the shared module-level queue for the next test.
            athenad.upload_queue = queue.Queue()
            os.unlink(fn)

    @with_http_server
    def test_upload_handler(self, host):
        fn = os.path.join(athenad.ROOT, 'qlog.bz2')
        Path(fn).touch()
        item = athenad.UploadItem(path=fn, url=f"{host}/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='')

        end_event = threading.Event()
        thread = threading.Thread(target=athenad.upload_handler, args=(end_event,))
        thread.start()

        athenad.upload_queue.put_nowait(item)
        try:
            # Poll (up to 5s) until the handler thread drains the queue.
            now = time.time()
            while time.time() - now < 5:
                if athenad.upload_queue.qsize() == 0:
                    break
            self.assertEqual(athenad.upload_queue.qsize(), 0)
        finally:
            end_event.set()
            athenad.upload_queue = queue.Queue()
            os.unlink(fn)

    def test_cancelUpload(self):
        item = athenad.UploadItem(path="qlog.bz2", url="http://localhost:44444/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='id')
        athenad.upload_queue.put_nowait(item)
        dispatcher["cancelUpload"](item.id)

        self.assertIn(item.id, athenad.cancelled_uploads)

        end_event = threading.Event()
        thread = threading.Thread(target=athenad.upload_handler, args=(end_event,))
        thread.start()
        try:
            # The handler should drop the cancelled item instead of uploading.
            now = time.time()
            while time.time() - now < 5:
                if athenad.upload_queue.qsize() == 0 and len(athenad.cancelled_uploads) == 0:
                    break
            self.assertEqual(athenad.upload_queue.qsize(), 0)
            self.assertEqual(len(athenad.cancelled_uploads), 0)
        finally:
            end_event.set()
            athenad.upload_queue = queue.Queue()

    def test_listUploadQueue(self):
        item = athenad.UploadItem(path="qlog.bz2", url="http://localhost:44444/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='id')
        athenad.upload_queue.put_nowait(item)
        try:
            items = dispatcher["listUploadQueue"]()
            self.assertEqual(len(items), 1)
            self.assertDictEqual(items[0], item._asdict())
        finally:
            athenad.upload_queue = queue.Queue()

    @mock.patch('selfdrive.athena.athenad.create_connection')
    def test_startLocalProxy(self, mock_create_connection):
        end_event = threading.Event()

        # Fake websocket backed by two queues, plus a local echo server the
        # proxy connects to on the whitelisted port.
        ws_recv = queue.Queue()
        ws_send = queue.Queue()
        mock_ws = MockWebsocket(ws_recv, ws_send)
        mock_create_connection.return_value = mock_ws

        echo_socket = EchoSocket(self.SOCKET_PORT)
        socket_thread = threading.Thread(target=echo_socket.run)
        socket_thread.start()

        athenad.startLocalProxy(end_event, 'ws://localhost:1234', self.SOCKET_PORT)

        ws_recv.put_nowait(b'ping')
        try:
            recv = ws_send.get(timeout=5)
            assert recv == (b'ping', ABNF.OPCODE_BINARY), recv
        finally:
            # signal websocket close to athenad.ws_proxy_recv
            ws_recv.put_nowait(WebSocketConnectionClosedException())
            socket_thread.join()

    def test_getSshAuthorizedKeys(self):
        keys = dispatcher["getSshAuthorizedKeys"]()
        self.assertEqual(keys, MockParams().params["GithubSshKeys"].decode('utf-8'))

    def test_jsonrpc_handler(self):
        end_event = threading.Event()
        thread = threading.Thread(target=athenad.jsonrpc_handler, args=(end_event,))
        thread.daemon = True
        thread.start()
        # Push a raw JSON-RPC request and expect the echoed result back.
        athenad.payload_queue.put_nowait(json.dumps({"method": "echo", "params": ["hello"], "jsonrpc": "2.0", "id": 0}))
        try:
            resp = athenad.response_queue.get(timeout=3)
            self.assertDictEqual(resp.data, {'result': 'hello', 'id': 0, 'jsonrpc': '2.0'})
        finally:
            end_event.set()
            thread.join()
# Allow running this test module directly (python test_athenad.py).
if __name__ == '__main__':
    unittest.main()
|
parallel_autobalancing.py | import signal
import os
import logging
#from multiprocessing_logging import install_mp_handler
from multiprocessing import Process, cpu_count
from subprocess import call
from docopt import docopt
# Workflow:
# Start Mongo
# Launch high level python controller script
# Launch external processes
# Wait for controller to finish optimization
# kill workers
# kill mongo
def main(benchmarking_episodes, mcts_budget, max_evals):
    """Launch the mongo-backed hyperopt pipeline.

    Starts mongod, the high-level controller script, and one
    hyperopt-mongo-worker per CPU core, waits for the controller to finish,
    then tears the workers and mongod down.
    """
    # Zero-argument lambdas so multiprocessing.Process gets a plain callable;
    # `call` blocks inside the wrapper process until the subprocess exits.
    mongo_call = lambda: call(['mongod', '--dbpath', '.', '--port', '1234', '--directoryperdb'], stdout=open(os.devnull, 'w'))
    high_level_call = lambda: call(['python', 'cool_game_regym_hyperopt.py', benchmarking_episodes, mcts_budget, max_evals, '--use_mongo'])
    worker_call = lambda: call(['hyperopt-mongo-worker', '--mongo=localhost:1234/foo_db', '--poll-interval=0.1'])

    mongo_p = Process(target=mongo_call)
    mongo_p.start()
    high_level_p = Process(target=high_level_call)
    high_level_p.start()
    # One hyperopt worker per CPU core.
    workers_p = [Process(target=worker_call)
                 for _ in range(cpu_count())]
    for p in workers_p: p.start()

    # Wait for controller to finish optimization, then kill the workers.
    high_level_p.join()
    for p in workers_p:
        os.kill(p.pid, signal.SIGKILL)
    # loop above doesn't seem to work
    # (presumably killing the wrapper Process leaves the spawned
    # hyperopt-mongo-worker subprocess alive — hence the killall fallback)
    call(['killall', 'hyperopt-mongo-worker'])
    os.kill(mongo_p.pid, signal.SIGTERM)
if __name__ == '__main__':
    # NOTE: this usage string is parsed by docopt at runtime; its layout
    # (the "Usage:" section in particular) defines the CLI contract.
    usage = '''
Usage:
  parallel_autobalancing.py BENCHMARK_EPISODES MCTS_BUDGET MAX_EVALS

Arguments:
  BENCHMARK_EPISODES  Number of episodes that will be run per matchup
                      to compute winrates between bots
  MCTS_BUDGET         Number of MCTS iterations for each agent
  MAX_EVALS           Target number of parameters updates
'''
    arguments = docopt(usage)
    # docopt returns the positional values as strings; they are passed
    # through unchanged to the subprocess command lines in main().
    benchmarking_episodes, mcts_budget, max_evals = arguments['BENCHMARK_EPISODES'], arguments['MCTS_BUDGET'], arguments['MAX_EVALS']
    main(benchmarking_episodes, mcts_budget, max_evals)
|
process.py | # -*- coding: utf-8 -*-
# Import python libs
import logging
import os
import time
import sys
import multiprocessing
import signal
# Import salt libs
import salt.utils
log = logging.getLogger(__name__)
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
try:
import systemd.daemon
HAS_PYTHON_SYSTEMD = True
except ImportError:
HAS_PYTHON_SYSTEMD = False
def set_pidfile(pidfile, user):
    '''
    Save the pidfile.

    Writes the current PID to ``pidfile`` (creating the parent directory if
    needed) and, on non-Windows platforms, chowns the file to ``user``.
    Returns True early on Windows; exits the process when the user is
    unknown or the chown fails.
    '''
    pdir = os.path.dirname(pidfile)
    if not os.path.isdir(pdir) and pdir:
        os.makedirs(pdir)
    try:
        with salt.utils.fopen(pidfile, 'w+') as ofile:
            ofile.write(str(os.getpid()))
    except IOError:
        # Best effort: a pidfile we cannot write is deliberately not fatal.
        pass

    log.debug(('Created pidfile: {0}').format(pidfile))
    if salt.utils.is_windows():
        # No POSIX ownership handling on Windows.
        return True

    import pwd  # after confirming not running Windows
    #import grp
    try:
        pwnam = pwd.getpwnam(user)
        uid = pwnam[2]
        gid = pwnam[3]
        #groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
    except (KeyError, IndexError):
        # Bug fix: pwd.getpwnam raises KeyError for an unknown user; the
        # original `except IndexError` alone could never fire, so a missing
        # user crashed with an unhandled KeyError instead of this message.
        sys.stderr.write(
            'Failed to set the pid to user: {0}. The user is not '
            'available.\n'.format(
                user
            )
        )
        sys.exit(os.EX_NOUSER)

    if os.getuid() == uid:
        # The current user already owns the pidfile. Return!
        return

    try:
        os.chown(pidfile, uid, gid)
    except OSError as err:
        msg = (
            'Failed to set the ownership of PID file {0} to user {1}.'.format(
                pidfile, user
            )
        )
        log.debug('{0} Traceback follows:\n'.format(msg), exc_info=True)
        sys.stderr.write('{0}\n'.format(msg))
        sys.exit(err.errno)
    log.debug('Chowned pidfile: {0} to user: {1}'.format(pidfile, user))
def clean_proc(proc, wait_for_kill=10):
    '''
    Generic method for cleaning up multiprocessing procs

    Repeatedly asks ``proc`` to terminate, polling every 0.1s; once
    ``wait_for_kill`` polls have elapsed and the process is still alive it
    is escalated to SIGKILL.
    '''
    # NoneType and other fun stuff need not apply
    if not proc:
        return
    try:
        waited = 0
        while proc.is_alive():
            proc.terminate()
            waited += 1
            time.sleep(0.1)
            if proc.is_alive() and (waited >= wait_for_kill):
                log.error(
                    'Process did not die with terminate(): {0}'.format(
                        proc.pid
                    )
                )
                os.kill(proc.pid, signal.SIGKILL)
    except (AssertionError, AttributeError):
        # Catch AssertionError when the proc is evaluated inside the child
        # Catch AttributeError when the process dies between proc.is_alive()
        # and proc.terminate() and turns into a NoneType
        pass
def os_is_running(pid):
    '''
    Use OS facilities to determine if a process is running
    '''
    # Prefer psutil when it imported successfully at module load time.
    if HAS_PSUTIL:
        return psutil.pid_exists(pid)
    try:
        os.kill(pid, 0)  # SIG 0 only probes for existence
    except OSError:
        return False
    return True
class ProcessManager(object):
    '''
    A class which will manage processes that should be running
    '''
    def __init__(self, name=None, wait_for_kill=1):
        # pid -> {tgt: foo, Process: object, args: args, kwargs: kwargs}
        self._process_map = {}

        self.name = name
        if self.name is None:
            self.name = self.__class__.__name__

        # Seconds to wait in kill_children() before escalating to SIGKILL.
        self.wait_for_kill = wait_for_kill

        # store some pointers for the SIGTERM handler
        self._pid = os.getpid()
        self._sigterm_handler = signal.getsignal(signal.SIGTERM)

    def add_process(self, tgt, args=None, kwargs=None):
        '''
        Create a processes and args + kwargs
        This will deterimine if it is a Process class, otherwise it assumes
        it is a function
        '''
        if args is None:
            args = []

        if kwargs is None:
            kwargs = {}

        # Idiom fix: isinstance(tgt, type) is the clear way to ask "is this
        # a class?" (the old `type(multiprocessing.Process) == type(tgt)`
        # compared metaclasses).
        if isinstance(tgt, type) and issubclass(tgt, multiprocessing.Process):
            p = tgt(*args, **kwargs)
        else:
            p = multiprocessing.Process(target=tgt, args=args, kwargs=kwargs)

        p.start()
        log.debug("Started '{0}'(*{1}, **{2} with pid {3}".format(tgt,
                                                                  args,
                                                                  kwargs,
                                                                  p.pid))
        self._process_map[p.pid] = {'tgt': tgt,
                                    'args': args,
                                    'kwargs': kwargs,
                                    'Process': p}

    def restart_process(self, pid):
        '''
        Create new process (assuming this one is dead), then remove the old one
        '''
        log.info(('Process {0} ({1}) died with exit status {2},'
                  ' restarting...').format(self._process_map[pid]['tgt'],
                                           pid,
                                           self._process_map[pid]['Process'].exitcode))
        # don't block, the process is already dead
        self._process_map[pid]['Process'].join(1)

        self.add_process(self._process_map[pid]['tgt'],
                         self._process_map[pid]['args'],
                         self._process_map[pid]['kwargs'])

        del self._process_map[pid]

    def run(self):
        '''
        Load and start all available api modules
        '''
        salt.utils.appendproctitle(self.name)

        # make sure to kill the subprocesses if the parent is killed
        signal.signal(signal.SIGTERM, self.kill_children)

        try:
            if HAS_PYTHON_SYSTEMD and systemd.daemon.booted():
                systemd.daemon.notify('READY=1')
        except SystemError:
            # Daemon wasn't started by systemd
            pass

        while True:
            try:
                # in case someone died while we were waiting...
                self.check_children()

                pid, exit_status = os.wait()
                if pid not in self._process_map:
                    log.debug(('Process of pid {0} died, not a known'
                               ' process, will not restart').format(pid))
                    continue
                self.restart_process(pid)
            # OSError is raised if a signal handler is called (SIGTERM) during os.wait
            except OSError:
                break

    def check_children(self):
        '''
        Check the children once
        '''
        # Bug fix: iterate over a snapshot — restart_process() mutates
        # _process_map, and mutating a dict while iterating its live view
        # is unsafe.
        for pid, mapping in list(self._process_map.items()):
            if not mapping['Process'].is_alive():
                self.restart_process(pid)

    def kill_children(self, *args):
        '''
        Kill all of the children
        '''
        # check that this is the correct process, children inherit this
        # handler, if we are in a child lets just run the original handler
        if os.getpid() != self._pid:
            if callable(self._sigterm_handler):
                return self._sigterm_handler(*args)
            elif self._sigterm_handler is not None:
                return signal.default_int_handler(signal.SIGTERM)(*args)
            else:
                return
        for pid, p_map in list(self._process_map.items()):
            p_map['Process'].terminate()

        end_time = time.time() + self.wait_for_kill  # when to die

        while self._process_map and time.time() < end_time:
            for pid, p_map in list(self._process_map.items()):
                p_map['Process'].join(0)

                # This is a race condition if a signal was passed to all children
                try:
                    del self._process_map[pid]
                except KeyError:
                    pass
        # if anyone is done after
        for pid in list(self._process_map):
            try:
                # Bug fix: os.kill takes (pid, sig); the arguments were
                # swapped, which sent signal number `pid` to process 9.
                os.kill(pid, signal.SIGKILL)
            # in case the process has since decided to die, os.kill returns OSError
            except OSError:
                pass
|
sideinputs.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utilities for handling side inputs."""
# pytype: skip-file
import collections
import collections.abc
import logging
import queue
import threading
import traceback
from apache_beam.coders import observable
from apache_beam.io import iobase
from apache_beam.runners.worker import opcounters
from apache_beam.transforms import window
from apache_beam.utils.sentinel import Sentinel
# This module is experimental. No backwards-compatibility guarantees.
# Maximum number of reader threads for reading side input sources, per side
# input.
MAX_SOURCE_READER_THREADS = 15
# Number of slots for elements in side input element queue. Note that this
# value is intentionally smaller than MAX_SOURCE_READER_THREADS so as to reduce
# memory pressure of holding potentially-large elements in memory. Note that
# the number of pending elements in memory is equal to the sum of
# MAX_SOURCE_READER_THREADS and ELEMENT_QUEUE_SIZE.
ELEMENT_QUEUE_SIZE = 10
# Special element value sentinel for signaling reader state.
READER_THREAD_IS_DONE_SENTINEL = Sentinel.sentinel
# Used to efficiently window the values of non-windowed side inputs.
_globally_windowed = window.GlobalWindows.windowed_value(None).with_value
_LOGGER = logging.getLogger(__name__)
class PrefetchingSourceSetIterable(object):
    """Value iterator that reads concurrently from a set of sources.

    A pool of daemon reader threads claims sources from ``sources_queue``
    and pushes the elements they read onto a bounded ``element_queue``;
    ``__iter__`` drains that queue.  Each reader posts
    READER_THREAD_IS_DONE_SENTINEL as its final message, which is how the
    consumer knows when all readers are finished.
    """
    def __init__(
        self,
        sources,
        max_reader_threads=MAX_SOURCE_READER_THREADS,
        read_counter=None,
        element_counter=None):
        self.sources = sources
        # Never start more reader threads than there are sources to read.
        self.num_reader_threads = min(max_reader_threads, len(self.sources))

        # Queue for sources that are to be read.
        self.sources_queue = queue.Queue()
        for source in sources:
            self.sources_queue.put(source)
        # Queue for elements that have been read.
        self.element_queue = queue.Queue(ELEMENT_QUEUE_SIZE)
        # Queue for exceptions encountered in reader threads; to be rethrown.
        self.reader_exceptions = queue.Queue()
        # Whether we have already iterated; this iterable can only be used once.
        self.already_iterated = False
        # Whether an error was encountered in any source reader.
        self.has_errored = False

        self.read_counter = read_counter or opcounters.NoOpTransformIOCounter()
        self.element_counter = element_counter

        self.reader_threads = []
        self._start_reader_threads()

    def add_byte_counter(self, reader):
        """Adds byte counter observer to a side input reader.

        Args:
          reader: A reader that should inherit from ObservableMixin to have
            bytes tracked.
        """
        def update_bytes_read(record_size, is_record_size=False, **kwargs):
            # Let the reader report block size.
            if is_record_size:
                self.read_counter.add_bytes_read(record_size)

        if isinstance(reader, observable.ObservableMixin):
            reader.register_observer(update_bytes_read)

    def _start_reader_threads(self):
        # Daemon threads: they must not keep the worker process alive.
        for _ in range(0, self.num_reader_threads):
            t = threading.Thread(target=self._reader_thread)
            t.daemon = True
            t.start()
            self.reader_threads.append(t)

    def _reader_thread(self):
        # pylint: disable=too-many-nested-blocks
        try:
            while True:
                try:
                    # Claim the next unread source; queue.Empty ends this thread.
                    source = self.sources_queue.get_nowait()
                    if isinstance(source, iobase.BoundedSource):
                        for value in source.read(source.get_range_tracker(None, None)):
                            if self.has_errored:
                                # If any reader has errored, just return.
                                return
                            if isinstance(value, window.WindowedValue):
                                self.element_queue.put(value)
                            else:
                                # Wrap raw values in the global window.
                                self.element_queue.put(_globally_windowed(value))
                    else:
                        # Native dataflow source.
                        with source.reader() as reader:
                            # The tracking of time spend reading and bytes read from side
                            # inputs is kept behind an experiment flag to test performance
                            # impact.
                            self.add_byte_counter(reader)
                            returns_windowed_values = reader.returns_windowed_values
                            for value in reader:
                                if self.has_errored:
                                    # If any reader has errored, just return.
                                    return
                                if returns_windowed_values:
                                    self.element_queue.put(value)
                                else:
                                    self.element_queue.put(_globally_windowed(value))
                except queue.Empty:
                    # No sources left to claim: this reader is done.
                    return
        except Exception as e:  # pylint: disable=broad-except
            _LOGGER.error(
                'Encountered exception in PrefetchingSourceSetIterable '
                'reader thread: %s',
                traceback.format_exc())
            self.reader_exceptions.put(e)
            self.has_errored = True
        finally:
            # Invariant: the sentinel is ALWAYS this thread's final message,
            # whether it finished cleanly or errored (see __iter__).
            self.element_queue.put(READER_THREAD_IS_DONE_SENTINEL)

    def __iter__(self):
        # pylint: disable=too-many-nested-blocks
        if self.already_iterated:
            raise RuntimeError(
                'Can only iterate once over PrefetchingSourceSetIterable instance.')
        self.already_iterated = True

        # The invariants during execution are:
        # 1) A worker thread always posts the sentinel as the last thing it does
        # before exiting.
        # 2) We always wait for all sentinels and then join all threads.
        num_readers_finished = 0
        try:
            while True:
                try:
                    with self.read_counter:
                        element = self.element_queue.get()
                    if element is READER_THREAD_IS_DONE_SENTINEL:
                        num_readers_finished += 1
                        if num_readers_finished == self.num_reader_threads:
                            return
                    else:
                        if self.element_counter:
                            self.element_counter.update_from(element)
                            yield element
                            self.element_counter.update_collect()
                        else:
                            yield element
                finally:
                    # Re-raise a reader's exception as soon as it is noticed.
                    if self.has_errored:
                        raise self.reader_exceptions.get()
        except GeneratorExit:
            # Consumer abandoned iteration early; tell readers to stop.
            self.has_errored = True
            raise
        finally:
            # Drain the queue until every reader has posted its sentinel
            # (unblocking any reader stuck on the bounded queue), then join.
            while num_readers_finished < self.num_reader_threads:
                element = self.element_queue.get()
                if element is READER_THREAD_IS_DONE_SENTINEL:
                    num_readers_finished += 1
            for t in self.reader_threads:
                t.join()
def get_iterator_fn_for_sources(
    sources,
    max_reader_threads=MAX_SOURCE_READER_THREADS,
    read_counter=None,
    element_counter=None):
    """Returns callable that returns iterator over elements for given sources."""
    def _make_iterator():
        # Built lazily so the reader threads only start when the side input
        # is actually iterated.
        prefetching_iterable = PrefetchingSourceSetIterable(
            sources,
            max_reader_threads=max_reader_threads,
            read_counter=read_counter,
            element_counter=element_counter)
        return iter(prefetching_iterable)

    return _make_iterator
class EmulatedIterable(collections.abc.Iterable):
    """Emulates an iterable for a side input.

    Each iteration calls ``iterator_fn`` afresh, so the side input can be
    traversed more than once.

    Bug fix: the base class is now ``collections.abc.Iterable`` — the
    ``collections.Iterable`` alias was deprecated since Python 3.3 and
    removed in Python 3.10.
    """
    def __init__(self, iterator_fn):
        # iterator_fn: zero-argument callable returning a fresh iterator.
        self.iterator_fn = iterator_fn

    def __iter__(self):
        return self.iterator_fn()
|
oec_main.py | from tkinter import *
from tkinter.ttk import *
import dialog as dlg
from interface import *
from syncutil import SrcPath
import threading
import logging
from random import randint
from tkinter import messagebox
import tempfile
from subprocess import call
from typing import List
import webbrowser
import os
# External editor command: honour $EDITOR on POSIX (falling back to vim);
# on other platforms it is empty and files are opened via `cmd.exe /c`
# (see MainView.edit).
EDITOR = os.environ.get('EDITOR', 'vim') if os.name == 'posix' else ''
# TODO Missing major documentation. Please fix
def grid(widget, sticky=W+E+N+S, **kw):
    """Attach *widget* using the grid geometry manager.

    Applies our default ``sticky`` fill on all four sides, forwards any
    extra grid options unchanged, and returns the widget so calls can be
    assigned or chained.
    """
    options = dict(kw, sticky=sticky)
    widget.grid(**options)
    return widget
# Module-global selection state: (request id, request type) of the request
# currently focused in the UI; written by VerticalScrolledTree.on_req_selected
# and read by MainView.submit.
FOCUS = 0, ""
class AppStyle(Style):
    """Application-wide ttk style: one flat colour palette applied to the
    root style, plus dedicated settings for Treeview widgets."""

    def __init__(self, parent="clam"):
        super().__init__()
        # self.theme_use(parent)
        palette = {
            "padding": 3,
            "foreground": "#002e4d",
            "background": "#eeeef0",
            "fieldbackground": "#eeeef0",
            "selectbackground": "#002e4d",
            "highlightcolor": "#6a86b4",
            "lightcolor": "#eeeef0",
            "darkcolor": "#002e4d",
            "relief": "#6a86b4",
        }
        # "." is the root style element: every ttk widget inherits it.
        self.configure(".", **palette, font=('Helvetica', 10))
        # Per-widget colour overrides (TButton, TEntry, ...) were sketched
        # here as commented-out configure() calls; only Treeview currently
        # needs custom settings beyond the shared palette.
        self.configure("Treeview", **palette,
                       rowheight=25,
                       ipadx=1, ipady=1, padx=1, pady=1)
class ConsoleView(Frame):
    """Placeholder frame intended to host console/log output."""

    def __init__(self, master: Widget):
        super().__init__(master)
        self.grid()
class VertHoriScrolledTree(Frame):
    """Treeview with both vertical and horizontal scrollbars.

    Shows a planetary system's planets as top-level rows with their fields
    in four columns (Value, Error, IsLimit, Unit); populated by
    VerticalScrolledTree.create_frame.
    """

    def __init__(self, master: Widget, name):
        Frame.__init__(self, master)
        # Frames
        vert_scroll_tree = Scrollbar(self, orient=VERTICAL)
        hori_scroll_tree = Scrollbar(self, orient=HORIZONTAL)
        tree = Treeview(self, yscrollcommand=vert_scroll_tree.set,
                        xscrollcommand=hori_scroll_tree.set)
        # Two-way wiring: scrollbars move the tree, the tree moves them.
        vert_scroll_tree.configure(command=tree.yview)
        hori_scroll_tree.configure(command=tree.xview)
        vert_scroll_tree.pack(fill=Y, side=RIGHT, expand=FALSE)
        hori_scroll_tree.pack(fill=X, side=BOTTOM, expand=FALSE)
        # Start scrolled to the top-left corner.
        tree.xview_moveto(0)
        tree.yview_moveto(0)
        column_options = {
            "minwidth": 50,
            "width": 60,
        }
        tree["columns"] = ("Value", "Error", "IsLimit", "Unit")
        tree.column("Value", **column_options)
        tree.column("Error", **column_options)
        tree.column("IsLimit", **column_options)
        tree.column("Unit", **column_options)
        tree.heading("#0", text="Planets")
        tree.heading("Value", text="Value")
        tree.heading("Error", text="Error")
        tree.heading("IsLimit", text="IsLimit")
        tree.heading("Unit", text="Unit")
        tree.pack(side=LEFT, fill=BOTH, expand=TRUE)
        # Expose the tree widget and the logical name of the system shown.
        self.interior = tree
        self.name = name
class WaitFrame(Frame):
    """Busy indicator: a frame with a centred animated-gears image."""

    def __init__(self, master):
        Frame.__init__(self, master)
        path_gears = SrcPath.abs("gui", "gears.gif")
        gears_image = PhotoImage(file=path_gears)
        gears_label = Label(self, image=gears_image)
        # Keep a strong reference on the label: tkinter does not keep the
        # PhotoImage alive itself, so without this the image would be
        # garbage-collected and the label would render blank.
        gears_label.image = gears_image  # don't remove this line!
        gears_label.place(relx=0.5, rely=0.5, anchor=CENTER)
class VerticalScrolledTree(Frame):
    """Vertically scrollable tree listing update requests of one type
    ('remote' or 'local'); clicking a row shows that request's planetary
    systems in a VertHoriScrolledTree detail frame."""

    def __init__(self, master: "OECSyncApp", type):
        Frame.__init__(self, master)
        # All the system updates as treeview
        vert_scroll_tree = Scrollbar(self, orient=VERTICAL)
        tree = Treeview(self, yscrollcommand=vert_scroll_tree.set)
        vert_scroll_tree.configure(command=tree.yview)
        vert_scroll_tree.pack(fill=Y, side=RIGHT, expand=FALSE)
        tree.xview_moveto(0)
        tree.yview_moveto(0)
        tree.pack(side=LEFT, fill=BOTH, expand=TRUE)
        tree.heading("#0", text='Systems')
        tree.bind("<Button-1>", self.on_selected)
        self.interior = tree
        self.master = master
        self.frames = {}
        # Anything other than "remote"/"local" falls back to "remote".
        self.type = type if type in ["remote", "local"] else "remote"
        self.fitem = None
        self.last_frame = None
        self.cur_frame = None

    def populate(self, requests: List[str]):
        """Insert one top-level row per request name."""
        # Idiom fix: `requests and len(requests) > 0` was redundant, and the
        # manual index counter is what enumerate() is for.
        if requests:
            for i, request_name in enumerate(requests):
                self.interior.insert("", i, text=request_name)

    def create_frame(self, system: PlanetarySysUpdate):
        """Build, grid and raise a detail frame for ``system``, destroying
        the previously shown frame."""
        frame = VertHoriScrolledTree(self.master, system.name)
        # Clearer than the old conditional-expression-as-statement form.
        if self.last_frame is not None:
            self.last_frame.destroy()
        for j, planet in enumerate(system.planets):
            identity = frame.interior.insert("", 0, planet.name,
                                             text=planet.name, open=True)
            for field, value in planet.fields.items():
                frame.interior.insert(identity, "end",
                                      planet.name + field, text=field,
                                      values=(value.value, value.error,
                                              value.is_limit, value.unit))
        frame.grid(row=2, column=0, columnspan=3, rowspan=4, sticky="nsew")
        frame.tkraise()
        self.last_frame = frame
        return frame

    def on_selected(self, event):
        """Click handler: resolve the clicked row to its request name."""
        # fetch this object from the object list
        item = self.interior.identify("item", event.x, event.y)
        req_name = self.interior.item(item)["text"]
        self.on_req_selected(req_name)

    def on_req_selected(self, req_name):
        """Show the request's systems and bind the message/title entry
        widgets to the selected request object."""
        id_, req, type_ = Interface.find_system(req_name)
        self.create_frame(req.updates)
        mainview = self.master.vw

        def set_message(*ignored):
            # Mirror every edit of the message entry back onto the request.
            req.message = mainview.vw_message.var.get()

        def set_title(*ignored):
            req.title = mainview.vw_message_title.var.get()

        # Load the request's current values first, then attach observers.
        mainview.vw_message.var.set(req.message)
        mainview.vw_message.set_observer(set_message)
        mainview.vw_message_title.var.set(req.title)
        mainview.vw_message_title.set_observer(set_title)
        # Remember which request (and of which type) currently has focus.
        global FOCUS
        FOCUS = id_, type_

    def delete_frame(self, id):
        """Delete the request at index ``id`` from the Interface state."""
        # remove from dictionary of frame and list of systems
        sys_name = Interface.get_system_from_index(id)  # NOTE(review): result unused
        Interface.delete_system_at_index(id)
class RequestHistoryView(Frame):
    """
    Side panel with two request trees: locally created requests on top and
    requests fetched from the remote below, both filled from the Interface.
    """

    def __init__(self, master: Tk):
        Frame.__init__(self, master)
        grid(Label(master, text="Local Requests"), row=2, column=3)
        grid(Label(master, text="Remote Requests"), row=4, column=3)
        self.local_requests = VerticalScrolledTree(master, "local")
        self.remote_requests = VerticalScrolledTree(master, "remote")
        # Fetch the current request name lists and fill both trees.
        local_requests, remote_requests = Interface.populate_request_list()
        self.local_requests.populate(local_requests)
        self.remote_requests.populate(remote_requests)
        self.local_requests.grid(row=3, column=3, sticky=N + E + S + W)
        self.remote_requests.grid(row=5, column=3, sticky=N + E + S + W)

    def delete(self, id):
        # Only local requests are deletable through this view.
        return self.local_requests.delete_frame(id)
class EntryView(Frame):
    """
    Entry View

    A labelled single-line text entry bound to a StringVar, supporting a
    single replaceable write-observer callback.
    """

    def __init__(self, master: Tk, label: str, var: StringVar):
        Frame.__init__(self, master)
        self.var = var
        # Handle of the currently installed variable trace (None = none yet).
        self.trace = None
        self.columnconfigure(0, minsize=80, weight=0)
        self.columnconfigure(1, minsize=80, weight=1)
        self.label = grid(Label(self, text=label),
                          column=0, row=0)
        self.entry = Entry(self, textvariable=var)
        self.entry.anchor('ne')
        # Pre-fill the entry with the variable's current content.
        self.entry.insert(INSERT, var.get())
        self.entry.insert(END, "")
        grid(self.entry, column=1, row=0)

    def set_observer(self, callback):
        """Install ``callback`` as the sole write-observer of the variable,
        detaching any previously installed one first."""
        # NOTE(review): trace_variable/trace_vdelete are the legacy Tcl
        # trace API (superseded by trace_add/trace_remove) — consider
        # migrating when the minimum Python/Tk version allows.
        if self.trace:
            self.var.trace_vdelete("w", self.trace)
        self.trace = self.var.trace_variable("w", callback)
class MainView(Frame):
"""
Main view contains all the widgets.
"""
def __init__(self, master: Tk):
Frame.__init__(self, master)
# TODO Toggle View for remote Requests. What does view mean?
# column0, row7 with column span
reject_button = Button(master, text="REJECT",
command=
lambda: self.submit_with_busy_bar(reject=True))
self.reject_button_grid = grid(reject_button, row=7, column=0)
# Menu bar
self.menu = Menu(master)
self.menu.add_command(label='Exit', command=self.on_quit)
master.config(menu=self.menu)
# Banner
self.banner = grid(
Label(master,
text="OEC Synchronizer",
font="Helvetica 15 bold"),
padx=0, pady=0,
row=0, column=0,
columnspan=3)
# Message title view, row6, column1 with column span
# Starts at row 5, column starts at 1 and ends at 2
self.vw_message_title = grid(EntryView(master,
"Title",
StringVar()),
padx=0, pady=0,
row=6, column=1, columnspan=3)
# Message view, row6, column1 with column span
# Starts at row 7, column starts at 1 and ends at 2
self.vw_message = grid(EntryView(master,
"Message",
StringVar()),
padx=0, pady=0,
row=7, column=1, columnspan=3)
# History view, row2, column3.
# Starts at row 2 - Label is at row 2, content starts at row 3
requests_view = RequestHistoryView(master)
self.requests = requests_view
self.vw_history = grid(requests_view,
padx=0, pady=0,
row=2, column=3, rowspan=3)
# column0, row5 with column span
send_button = Button(master, text="SEND",
command=self.submit_with_busy_bar)
self.send_button_grid = grid(send_button, row=6, column=0)
sync_button = Button(master, text="SYNC",
command=self.sync_with_busy_bar)
self.sync_button_grid = grid(sync_button, row=0, column=3)
wait_vw = WaitFrame(master)
self.vw_planet = grid(wait_vw, padx=0, pady=0, row=2, column=0,
columnspan=3, rowspan=4)
# indefinite progress bar
self.console = grid(ConsoleView(master), padx=0, pady=0, row=8,
column=0, columnspan=4)
self.after(1000, self.sync_with_busy_bar)
def on_quit(self):
quit()
def submit(self, progress: Progressbar, reject: bool=False, ) -> None:
"""
The subroutine to submit/reject an update request
"""
# get the index of the clicked object
message, url = None, None
selected_req, type = FOCUS
if reject:
if type == "remote":
messagebox.showerror("Not Allowed", "Cannot reject remote "
"request")
progress.master.destroy()
progress.destroy()
else:
message, url = Interface.reject(selected_req)
else:
if type == "remote":
messagebox.showerror("Not Allowed", "Cannot send remote "
"request")
progress.master.destroy()
progress.destroy()
else:
message, url = Interface.send(selected_req, self.edit)
if ((message is not None and message != "") and
(url is not None and url != "")):
if messagebox.askokcancel(message, "View PR on github?"):
webbrowser.open(url)
# delete the object from the local request list
self.requests.delete(selected_req)
# refresh the remote and local requests
get_updates = self.load_requests(progress)
get_updates.start()
else:
progress.master.destroy()
progress.destroy()
def load_requests(self, progress: Progressbar):
def show__requests():
requests_view = RequestHistoryView(self.master)
vw_history = grid(requests_view, padx=0, pady=0, row=2, column=3,
rowspan=3)
firstsysname = Interface.get_system_from_index()
requests_view.local_requests.on_req_selected("local-"+firstsysname)
progress.master.destroy()
progress.destroy()
return requests_view, vw_history
get_updates = threading.Thread(group=None,
target=show__requests,
name="get_updates")
return get_updates
def sync_with_busy_bar(self):
def task():
ft = Frame(self.master)
grid(ft, row=8, column=0, columnspan=4)
bar = Progressbar(ft, orient='horizontal',
mode='indeterminate')
bar.start(50)
grid(bar, row=8, column=0, columnspan=4)
t1 = threading.Thread(target=lambda: Interface.
sync(self.load_requests(bar).start))
t1.start()
task()
def submit_with_busy_bar(self, reject: bool = False):
    """Show an indeterminate busy bar and run submit()/reject in a thread.

    Mirrors sync_with_busy_bar(): the bar is created in its own frame on
    the console row and handed to submit(), which dismisses it when done.

    :param reject: forwarded to submit(); True rejects instead of sending
    """
    def task():
        # The busy bar lives in its own frame spanning the console row.
        ft = Frame(self.master)
        grid(ft, row=8, column=0, columnspan=4)
        bar = Progressbar(ft, orient='horizontal',
                          mode='indeterminate')
        bar.start(50)
        grid(bar, row=8, column=0, columnspan=4)
        # Run the submission off the Tk event loop thread.
        t1 = threading.Thread(target=lambda: self.
                              submit(bar, reject))
        t1.start()
    task()
def edit(self, content: str) -> str:
    """Let the user edit *content* in an external editor via a temp file.

    Writes *content* to a persistent ``.xml`` temp file, optionally opens
    the platform editor on it, and returns the (possibly modified) file
    text.  Returns None when the user cancels the dialog.

    NOTE(review): the temp file is created with ``delete=False`` and is
    never removed afterwards — confirm whether leaking it is intended.
    """
    tf_name = ""
    with tempfile.NamedTemporaryFile(suffix=".xml",
                                     delete=False) as tf:
        tf_name = tf.name
        tf.write(content.encode('utf-8'))
    proceed = messagebox.askyesnocancel("Edit", "Edit the file? "
                                        "This will launch the default"
                                        " editor '%s'" % EDITOR)
    if proceed is None:
        # Cancel: abort the whole submission.
        return
    elif (proceed == YES):
        # NOTE(review): askyesnocancel returns True/False/None, while YES
        # is presumably tkinter's "yes" string constant — if so this
        # comparison is never true and the editor never launches; confirm.
        while True:
            if os.name == 'posix':
                call([EDITOR, tf_name])
            else:
                # Windows: let the shell open the file with its default app.
                call(['cmd.exe', '/c', tf_name])
            if messagebox.askyesno("Confirm",
                                   "Finished editing and submit?"):
                break
    # Read back whatever is in the temp file (edited or not).
    with open(tf_name, 'r', encoding='utf-8') as f:
        new_content = f.read()
    return new_content
# TODO Implement Help
# TODO Implement Search Update Request
# TODO Implement writing to a logfile instead of the console, especially
# when the application was launched by double-clicking
class OECSyncApp(Tk):
    """
    Main application window: sets the title, size and position, and
    decides which grid columns and rows stretch with the window.
    """
    def __init__(self):
        Tk.__init__(self)
        style = AppStyle()  # installs the ttk styles used by the views
        # TODO insert some space filler at row 6 and 7 column 3
        self.title('OEC Synchronizer')
        self.grid()
        self.minsize(width=400, height=300)  # min size
        ws = self.winfo_screenwidth()  # width of the screen
        hs = self.winfo_screenheight()  # height of the screen
        win_width = 8.0/14.0 * ws  # default width
        win_height = 10.0/14.0 * hs  # default height
        # Center the window on the screen.
        x_position = (ws / 2) - (win_width / 2)
        y_position = (hs / 2) - (win_height / 2)
        # set the dimensions of the screen and position
        self.geometry('%dx%d+%d+%d' % (win_width, win_height, x_position,
                                       y_position))
        # default size
        self.configure(background="gray64")
        # Content columns stretch equally; column 3 keeps a minimum width
        # for the request-history pane.
        self.columnconfigure(0, weight=1)
        self.columnconfigure(1, weight=1)
        self.columnconfigure(2, weight=1)
        self.columnconfigure(3, minsize=200, weight=1)
        self.rowconfigure(3, weight=1)
        self.rowconfigure(5, weight=1)
        self.vw = MainView(self)
        self.lift()  # place above all window
class WelcomeWindow(Tk):
    """Borderless splash screen: shows a random logo centered on screen
    and destroys itself after two seconds."""
    def __init__(self):
        Tk.__init__(self)
        self.overrideredirect(1)  # eliminate title bar
        ws = self.winfo_screenwidth()  # width of the screen
        hs = self.winfo_screenheight()  # height of the screen
        # Pick one of the two logo images at random.
        welcome_screen = ["Logo1_800.gif", "Logo2_800.gif"]
        path_welcome_screen = SrcPath.abs("gui", welcome_screen[randint(0, 1)])
        background_image = PhotoImage(file=path_welcome_screen)
        win_width = 13.9/14.0 * background_image.width()  # eliminates root bg
        win_height = 13.9/14.0 * background_image.height()
        # calculate x and y coordinates for the Tk root window
        x_position = (ws * 1.0 / 2.0) - (win_width * 1.0 / 2.0)
        y_position = (hs * 1.0 / 2.0) - (win_height * 1.0 / 2.0)
        background_label = Label(self, image=background_image)
        # Keep a Python-side reference: the PhotoImage would otherwise be
        # garbage-collected and the label would go blank — don't remove!
        background_label.image = background_image
        # eliminates root bezels
        background_label.place(relx=0.5, rely=0.5, anchor=CENTER)
        self.geometry('%dx%d+%d+%d' % (win_width, win_height, x_position,
                                       y_position))
        self.wait_visibility()
        self.lift()  # place above all window
        self.after(2000, self.destroy)  # auto-close the splash after 2 s
def launch(config: str):
    """
    Launches the GUI.

    Initializes the backend Interface on a worker thread while the splash
    screen is shown, then opens the main window once both are done.

    :param config: config file
    """
    try:
        logging.debug("Init interface")
        # Initialize the backend while the splash screen is visible.
        init = threading.Thread(target=Interface.init, args=(config,))
        init.start()
        logging.debug("Init welcome window")
        welcome = WelcomeWindow()
        welcome.mainloop()
        # The backend must be ready before the main window opens.
        init.join()
        app = OECSyncApp()
        app.mainloop()
    except Exception as e:
        # Top-level boundary: surface any startup failure in a dialog.
        dlg.error(e)
if __name__ == '__main__':
    # NOTE(review): launch() requires a `config` argument; calling it bare
    # raises TypeError — confirm the intended default config path (e.g.
    # read it from sys.argv).
    launch()
|
ducoapi.py | ##########################################
# Duino-Coin API Module
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2021
##########################################
import ast
from requests import get
import requests
import socket
import json
import hashlib
import urllib.request
from threading import Timer
import threading
import time, os, sys
import queue
# Cross-thread queue: miner.worker threads push share results onto it.
miner_q = queue.Queue()
#====================================# Vars #====================================#
# Master-server endpoints (plain-IP JSON dumps).
TRANSACTIONS_URL = "http://51.15.127.80/transactions.json"
API_URL = "http://51.15.127.80/api.json"
# Text file with the pool's ip (line 1) and port (line 2).
SERVER_URL = "https://raw.githubusercontent.com/revoxhere/duino-coin/gh-pages/serverip.txt"
# Last known DUCO price; refreshed by start_duco_price_timer().
duco_price = 0.003
# Fail socket operations after 10 s instead of hanging forever.
socket.setdefaulttimeout(10)
#====================================# common functions #====================================#
def decode_response(rec):
    """Decode a raw server reply (bytes) and split it into comma fields."""
    text = rec.decode()
    return text.split(",")
#====================================# Duco Balances #====================================#
def User_Balance(username):
    """Fetch the DUCO balance of *username* from the public balances dump.

    Returns 0.0 when the HTTP request fails or the user is unknown
    (previously an unknown user crashed with ``TypeError: 'NoneType'
    object is not subscriptable``).

    :param username: account name as it appears in balances.json
    :return: balance as a float (0.0 on failure)
    """
    value = 0.0
    jsonapi = get("https://server.duinocoin.com/balances.json", data=None)
    if jsonapi.status_code == 200:
        contentjson = json.loads(jsonapi.content.decode())
        # Entries look like "123.45 DUCO"; strip the 5-char unit suffix.
        raw = contentjson.get(username)
        if raw is not None:  # fix: guard against unknown usernames
            value = float(raw[:-5])
    print(value)  # kept: original printed the value before returning
    return value
#====================================# Duco Transactions #====================================#
class transaction_data:
    """Lightweight view over one transaction record.

    Exposes the record's fields as attributes alongside the token that
    identifies the transaction on the server.
    """

    def __init__(self, data, token):
        self.data = data
        self.token = token
        # (token, record) pairs kept under both historical names.
        self.obj = (token, data)
        self.all = (token, data)
        # Unpack the individual record fields for convenient access.
        self.time = data["Time"]
        self.amount = data["Amount"]
        self.recipient = data["Recipient"]
        self.sender = data["Sender"]
class user_data:
    """Searchable wrapper around a ``{token: record}`` transaction mapping.

    Each search method returns a transaction_data for the first matching
    record, or None when nothing matches.
    """

    def __init__(self, data):
        self.data = data

    def diction(self):
        """Returns all transactions"""
        return self.data

    def tokens(self):
        """Returns all tokens"""
        return self.data.keys()

    def _find(self, field, value):
        # Shared scan: first record whose `field` equals str(value).
        for tok, record in self.data.items():
            if record[field] == str(value):
                return transaction_data(record, tok)

    def token(self, token):
        """Searches by token"""
        wanted = str(token)
        for tok, record in self.data.items():
            if tok == wanted:
                return transaction_data(record, tok)

    def time(self, time):
        """Searches by time"""
        return self._find('Time', time)

    def sender(self, sender):
        """Searches by sender"""
        return self._find('Sender', sender)

    def recipient(self, recipient):
        """Searches by recipient"""
        return self._find('Recipient', recipient)

    def amount(self, amount):
        """Searches by amount"""
        return self._find('Amount', amount)
class transactions:
    """Snapshot of the master server's transaction list with query helpers.

    ``transactions.json`` is fetched once at construction time; all later
    methods operate on that in-memory snapshot.  Per-user queries remember
    the last username passed, so it only has to be supplied once.
    """

    def __init__(self):
        # Gets response from transactions.json on the masterserver.
        response = requests.get(TRANSACTIONS_URL, data=None)
        # Checks whether data has been sent (http-code = ok).
        if response.status_code == 200:
            data1 = (response.content.decode())
        else:
            raise ConnectionError("Could not connect to server")
        try:
            # The dump is a Python-literal dict, not strict JSON.
            self.data = ast.literal_eval(data1)
        except (ValueError, SyntaxError, TypeError) as exc:
            # fix: was a bare `except:` that swallowed every exception,
            # including KeyboardInterrupt.
            raise Exception(
                "Data cant be converted into a dict. please retry") from exc
        # No username has been specified yet.
        self.username = None

    def _resolve_username(self, username):
        # Shared bookkeeping for all per-user queries: remember a newly
        # supplied name, otherwise fall back to the stored one.
        if username is not None:
            self.username = username
        elif self.username is None:
            raise ValueError("Please provide a username")
        return self.username

    def _filter(self, field, value):
        # {token: record} subset whose `field` equals `value`.
        return {tok: rec for tok, rec in self.data.items()
                if rec[field] == value}

    def total_transactions(self):
        """Return how many transactions the snapshot holds."""
        return len(self.data)

    def print(self):
        """Print every transaction record to stdout."""
        for trans in self.data.keys():
            print(self.data[trans])

    def all(self):
        """Return the whole snapshot wrapped in a searchable user_data."""
        return user_data(data=self.data)

    def all_time_transacted(self):
        """Return the sum of every transaction amount ever recorded."""
        transactions = self.all()
        transactions_token = list(transactions.tokens())
        total = 0
        for trans_token in transactions_token:
            total += float(transactions.token(trans_token).amount)
        return total

    def user_transactions(self, username=None):
        """Return a user_data of the transactions *sent* by the user."""
        sender = self._resolve_username(username)
        return user_data(data=self._filter('Sender', sender))

    def user_transaction_qty(self, username=None):
        """Gets total amount of user sends"""
        sender = self._resolve_username(username)
        return len(self._filter('Sender', sender))

    def user_reciepts(self, username=None):
        """Return a user_data of the transactions *received* by the user."""
        recipient = self._resolve_username(username)
        return user_data(data=self._filter('Recipient', recipient))

    def user_reciept_qty(self, username=None):
        """Gets total amount of user reciepts"""
        recipient = self._resolve_username(username)
        return len(self._filter('Recipient', recipient))

    def total_duco_sent(self, username=None):
        """Gets total amount of duco sent by user"""
        total = 0
        transactions = self.user_transactions(username)
        for token in transactions.tokens():
            total += float(transactions.token(token=token).amount)
        return total

    def total_duco_received(self, username=None):
        """Gets total amount of duco recieved by user"""
        total = 0
        transactions = self.user_reciepts(username)
        for token in transactions.tokens():
            total += float(transactions.token(token=token).amount)
        return total
#====================================# Duco Api #====================================#
def get_duco_price():
    """
    A function for getting the current price of DUCO

    Falls back to 0.003 when the API endpoint is unreachable.
    """
    fallback = 0.003
    api_response = get(API_URL)
    if api_response.status_code != 200:
        return fallback
    return round(api_response.json()["Duco price"], 6)
def start_duco_price_timer(tkinter_label=None, interval=15):
    """
    Refresh the module-level duco_price on a recurring timer.

    Arguments:
        tkinter_label: Tkinter label that will be updated with the price (optional)
        interval: Interval between price updates (default: 15)
    """
    global duco_price
    response = get(API_URL)
    duco_price = (round(response.json()["Duco price"], 6)
                  if response.status_code == 200 else 0.003)
    if tkinter_label:
        tkinter_label.set(f"1 Duco = ${duco_price}")
    # Re-arm the timer so the price keeps refreshing.
    Timer(interval, start_duco_price_timer,
          args=(tkinter_label, interval)).start()
class api_actions:
    """
    A class that provides an interface for interacting with the DUCO server

    Wraps the plain-text, comma-separated TCP protocol; most methods
    require a prior login() call.
    """
    def __init__(self):
        """
        A class constructor that initiates the connection with the server.

        Reads the pool's ip (line 1) and port (line 2) from SERVER_URL,
        opens a TCP connection and discards the server banner.
        """
        serverinfo = get(SERVER_URL).text.splitlines()
        self.pool_address = serverinfo[0]
        self.pool_port = int(serverinfo[1])
        self.sock = socket.socket()
        self.sock.connect((self.pool_address, self.pool_port))
        self.sock.recv(3)  # 3-byte server version banner, unused
        # Credentials are filled in by login().
        self.username = None
        self.password = None

    def register(self, username, password, email):
        """
        A function for registering an account

        Raises Exception with the server-provided reason when the reply
        contains "NO".
        """
        self.sock.send(f"REGI,{username},{password},{email}".encode())
        register_result = decode_response(self.sock.recv(128))
        if 'NO' in register_result:
            raise Exception(register_result[1])
        return register_result

    def Pools(self):
        """
        A function for getting a list of pools
        """
        self.sock.send("POOLList".encode())
        register_result = decode_response(self.sock.recv(1024))
        return register_result

    def login(self, username, password):
        """
        A function for logging into an account

        Stores the credentials for later calls; raises Exception with the
        server-provided reason when the reply contains "NO".
        """
        self.username = username
        self.password = password
        self.sock.send(f"LOGI,{username},{password}".encode())
        login_result = decode_response(self.sock.recv(64))
        if 'NO' in login_result:
            raise Exception(login_result[1])
        return login_result

    def logout(self):
        """
        A function for disconnecting from the server
        """
        self.sock.close()

    def balance(self):
        """
        A function for getting account balance

        Requires a prior login(); returns the server's raw text reply.
        """
        if not self.password or not self.username:
            raise Exception("User not logged in")
        self.sock.send("BALA".encode())
        user_balance = self.sock.recv(1024).decode()
        return user_balance

    def transfer(self, recipient_username, amount, memo="DucoAPI"):
        """
        A function for transfering balance between two accounts
        """
        if not self.password or not self.username:
            raise Exception("User not logged in")
        self.sock.send(f"SEND,-,{recipient_username},{amount},{memo}".encode())
        transfer_response = self.sock.recv(128).decode()
        return transfer_response

    def transferRest(self, recipient_username, amount, memo="DucoAPI"):
        """
        A function for transfering balance between two accounts using the Rest API
        """
        if not self.password or not self.username:
            raise Exception("User not logged in")
        # NOTE(review): credentials travel in the query string (over HTTPS).
        url = f"https://server.duinocoin.com/transaction/?username={self.username}&password={self.password}&recipient={recipient_username}&amount={amount}&memo={memo}"
        result = get(url)
        result = result.json()
        return result.get('result')

    def getTransactions(self, amount):
        """
        A function for get last (amount) of transactions
        """
        if not self.password or not self.username:
            raise Exception("User not logged in")
        self.sock.send(f"GTXL,{self.username},{amount}".encode())
        transactions = self.sock.recv(1024).decode()
        # NOTE(review): dumps-then-loads round-trips the raw string, so this
        # returns a plain str rather than a parsed object — confirm intent.
        return json.loads(json.dumps(transactions))

    def reset_pass(self, old_password, new_password):
        """
        A function for resetting the password of an account
        """
        if not self.password or not self.username:
            raise Exception("User not logged in")
        self.sock.send(f"CHGP,{old_password},{new_password}".encode())
        reset_password_response = self.sock.recv(128).decode()
        return reset_password_response

    def close(self):
        """
        A function for disconnecting from the server
        """
        self.sock.close()
#====================================# Duco Miner #====================================#
class miner:
    """Minimal threaded DUCO miner.

    Each worker thread opens its own connection to the pool, requests
    jobs, brute-forces the SHA1 nonce and pushes accepted/rejected share
    summaries onto the module-level ``miner_q`` queue.
    """
    def __init__(self):
        self.username = None      # account credited with found shares
        self.UseLowerDiff = True  # request MEDIUM-difficulty jobs
        self.stopVar = False      # set by stop() to end all workers
        self.workers = 1          # number of worker threads
        self.last_job = {}        # NOTE(review): never used in this class

    def start(self, username=None, workers=None):
        """Spawn the worker threads; raises ValueError without a username."""
        if username != None:
            self.username = username
        elif self.username == None:
            raise ValueError("Please provide a username")
        if workers != None:
            self.workers = int(workers)
        for worker in range(self.workers):
            x = threading.Thread(target=self.worker)
            x.start()

    def stop(self):
        """Ask every worker loop to exit before its next job request."""
        self.stopVar = True

    def worker(self):  # Mining section
        """Single mining loop: fetch job, brute-force nonce, report result."""
        soc = socket.socket()
        with urllib.request.urlopen(SERVER_URL) as content:
            # Read content and split into lines
            content = content.read().decode().splitlines()
        # Line 1 = IP
        pool_address = content[0]
        # Line 2 = port
        pool_port = content[1]
        # This section connects and logs user to the server
        soc.connect((str(pool_address), int(pool_port)))
        server_version = soc.recv(3).decode()  # Get server version
        print("Server is on version", server_version)
        while True:
            if self.stopVar == True:
                print("Stopping Worker")
                break
            if self.UseLowerDiff:
                # Send job request for lower diff
                soc.send(bytes(
                    "JOB,"
                    + str(self.username)
                    + ",MEDIUM",
                    encoding="utf8"))
            else:
                # Send job request
                soc.send(bytes(
                    "JOB,"
                    + str(self.username),
                    encoding="utf8"))
            # Receive work
            job = soc.recv(1024).decode().rstrip("\n")
            # Split received data to job and difficulty
            job = job.split(",")
            difficulty = job[2]
            hashingStartTime = time.time()
            # Brute-force the nonce; the search space scales with difficulty.
            for result in range(100 * int(difficulty) + 1):
                # Calculate hash with difficulty
                ducos1 = hashlib.sha1(
                    str(
                        job[0]
                        + str(result)
                    ).encode("utf-8")).hexdigest()
                # If hash is even with expected hash result
                if job[1] == ducos1:
                    hashingStopTime = time.time()
                    timeDifference = hashingStopTime - hashingStartTime
                    hashrate = result / timeDifference
                    # Send numeric result to the server
                    soc.send(bytes(
                        str(result)
                        + ","
                        + str(hashrate)
                        + ",Minimal_PC_Miner",
                        encoding="utf8"))
                    # Get feedback about the result
                    feedback = soc.recv(1024).decode().rstrip("\n")
                    # If result was good
                    if feedback == "GOOD":
                        # Publish the accepted share for consumers of miner_q.
                        miner_q.put({'Status': "Accepted share",
                                     'Result': result,
                                     "Hashrate": int(hashrate/1000),
                                     "Difficulty": difficulty})
                        break
                    # If result was incorrect
                    elif feedback == "BAD":
                        miner_q.put({'Status': "Rejected share",
                                     'Result': result,
                                     "Hashrate": int(hashrate/1000),
                                     "Difficulty": difficulty})
                        break
if __name__ == '__main__':
    # Smoke test: fetch the transaction dump and print the all-time volume.
    print(transactions().all_time_transacted())
    # Example miner usage, kept for reference:
    # miner_class = miner()
    # miner_class.start(username="connorhess")
    # for i in range(10):
    #     print(miner_q.get())
    #     time.sleep(1)
    # miner_class.stop()
|
test_gc.py | import unittest
from test.test_support import verbose, run_unittest
import sys
import time
import gc
import weakref
try:
import threading
except ImportError:
threading = None
### Support code
###############################################################################
# Bug 1055820 has several tests of longstanding bugs involving weakrefs and
# cyclic gc.
# An instance of C1055820 has a self-loop, so becomes cyclic trash when
# unreachable.
class C1055820(object):
    """Stores an int and points at itself, so every instance is a
    one-object reference cycle — cyclic trash once unreachable."""

    def __init__(self, i):
        self.i = i
        # The self-loop makes the instance collectable only by cyclic gc.
        self.loop = self
class GC_Detector(object):
    """After construction, gc_happened stays False until a cyclic gc pass
    actually runs (it then collects our trash and fires the callback)."""

    def __init__(self):
        self.gc_happened = False

        def it_happened(ignored):
            self.gc_happened = True

        # Cyclic trash (C1055820 self-loops) whose weakref callback fires
        # the moment the collector reclaims it.
        self.wr = weakref.ref(C1055820(666), it_happened)
### Tests
###############################################################################
class GCTests(unittest.TestCase):
    """Exercise the cyclic garbage collector on every container kind.

    Most tests build a reference cycle, drop the last external reference,
    and assert on the object count gc.collect() reports as freed.  Several
    tests count allocations precisely and are therefore CPython-specific.
    """

    def test_list(self):
        l = []
        l.append(l)
        gc.collect()
        del l
        self.assertEqual(gc.collect(), 1)

    def test_dict(self):
        d = {}
        d[1] = d
        gc.collect()
        del d
        self.assertEqual(gc.collect(), 1)

    def test_tuple(self):
        # since tuples are immutable we close the loop with a list
        l = []
        t = (l,)
        l.append(t)
        gc.collect()
        del t
        del l
        self.assertEqual(gc.collect(), 2)

    def test_class(self):
        class A:
            pass
        A.a = A
        gc.collect()
        del A
        self.assertNotEqual(gc.collect(), 0)

    def test_newstyleclass(self):
        class A(object):
            pass
        gc.collect()
        del A
        self.assertNotEqual(gc.collect(), 0)

    def test_instance(self):
        class A:
            pass
        a = A()
        a.a = a
        gc.collect()
        del a
        self.assertNotEqual(gc.collect(), 0)

    def test_newinstance(self):
        class A(object):
            pass
        a = A()
        a.a = a
        gc.collect()
        del a
        self.assertNotEqual(gc.collect(), 0)
        class B(list):
            pass
        class C(B, A):
            pass
        a = C()
        a.a = a
        gc.collect()
        del a
        self.assertNotEqual(gc.collect(), 0)
        del B, C
        self.assertNotEqual(gc.collect(), 0)
        A.a = A()
        del A
        self.assertNotEqual(gc.collect(), 0)
        self.assertEqual(gc.collect(), 0)

    def test_method(self):
        # Tricky: self.__init__ is a bound method, it references the instance.
        class A:
            def __init__(self):
                self.init = self.__init__
        a = A()
        gc.collect()
        del a
        self.assertNotEqual(gc.collect(), 0)

    def test_finalizer(self):
        # A() is uncollectable if it is part of a cycle, make sure it shows up
        # in gc.garbage.
        class A:
            def __del__(self): pass
        class B:
            pass
        a = A()
        a.a = a
        id_a = id(a)
        b = B()
        b.b = b
        gc.collect()
        del a
        del b
        self.assertNotEqual(gc.collect(), 0)
        for obj in gc.garbage:
            if id(obj) == id_a:
                del obj.a
                break
        else:
            self.fail("didn't find obj in garbage (finalizer)")
        gc.garbage.remove(obj)

    def test_finalizer_newclass(self):
        # A() is uncollectable if it is part of a cycle, make sure it shows up
        # in gc.garbage.
        class A(object):
            def __del__(self): pass
        class B(object):
            pass
        a = A()
        a.a = a
        id_a = id(a)
        b = B()
        b.b = b
        gc.collect()
        del a
        del b
        self.assertNotEqual(gc.collect(), 0)
        for obj in gc.garbage:
            if id(obj) == id_a:
                del obj.a
                break
        else:
            self.fail("didn't find obj in garbage (finalizer)")
        gc.garbage.remove(obj)

    def test_function(self):
        # Tricky: f -> d -> f, code should call d.clear() after the exec to
        # break the cycle.
        d = {}
        exec("def f(): pass\n") in d
        gc.collect()
        del d
        self.assertEqual(gc.collect(), 2)

    def test_frame(self):
        def f():
            frame = sys._getframe()
        gc.collect()
        f()
        self.assertEqual(gc.collect(), 1)

    def test_saveall(self):
        # Verify that cyclic garbage like lists show up in gc.garbage if the
        # SAVEALL option is enabled.

        # First make sure we don't save away other stuff that just happens to
        # be waiting for collection.
        gc.collect()
        # if this fails, someone else created immortal trash
        self.assertEqual(gc.garbage, [])

        L = []
        L.append(L)
        id_L = id(L)

        debug = gc.get_debug()
        gc.set_debug(debug | gc.DEBUG_SAVEALL)
        del L
        gc.collect()
        gc.set_debug(debug)

        self.assertEqual(len(gc.garbage), 1)
        obj = gc.garbage.pop()
        self.assertEqual(id(obj), id_L)

    def test_del(self):
        # __del__ methods can trigger collection, make this to happen
        thresholds = gc.get_threshold()
        gc.enable()
        gc.set_threshold(1)

        class A:
            def __del__(self):
                dir(self)
        a = A()
        del a

        gc.disable()
        gc.set_threshold(*thresholds)

    def test_del_newclass(self):
        # __del__ methods can trigger collection, make this to happen
        thresholds = gc.get_threshold()
        gc.enable()
        gc.set_threshold(1)

        class A(object):
            def __del__(self):
                dir(self)
        a = A()
        del a

        gc.disable()
        gc.set_threshold(*thresholds)

    # The following two tests are fragile:
    # They precisely count the number of allocations,
    # which is highly implementation-dependent.
    # For example:
    # - disposed tuples are not freed, but reused
    # - the call to assertEqual somehow avoids building its args tuple
    def test_get_count(self):
        # Avoid future allocation of method object
        assertEqual = self._baseAssertEqual
        gc.collect()
        assertEqual(gc.get_count(), (0, 0, 0))
        a = dict()
        # since gc.collect(), we created two objects:
        # the dict, and the tuple returned by get_count()
        assertEqual(gc.get_count(), (2, 0, 0))

    def test_collect_generations(self):
        # Avoid future allocation of method object
        assertEqual = self.assertEqual
        gc.collect()
        a = dict()
        gc.collect(0)
        assertEqual(gc.get_count(), (0, 1, 0))
        gc.collect(1)
        assertEqual(gc.get_count(), (0, 0, 1))
        gc.collect(2)
        assertEqual(gc.get_count(), (0, 0, 0))

    def test_trashcan(self):
        class Ouch:
            n = 0
            def __del__(self):
                Ouch.n = Ouch.n + 1
                if Ouch.n % 17 == 0:
                    gc.collect()

        # "trashcan" is a hack to prevent stack overflow when deallocating
        # very deeply nested tuples etc. It works in part by abusing the
        # type pointer and refcount fields, and that can yield horrible
        # problems when gc tries to traverse the structures.
        # If this test fails (as it does in 2.0, 2.1 and 2.2), it will
        # most likely die via segfault.

        # Note: In 2.3 the possibility for compiling without cyclic gc was
        # removed, and that in turn allows the trashcan mechanism to work
        # via much simpler means (e.g., it never abuses the type pointer or
        # refcount fields anymore). Since it's much less likely to cause a
        # problem now, the various constants in this expensive (we force a lot
        # of full collections) test are cut back from the 2.2 version.
        gc.enable()
        N = 150
        for count in range(2):
            t = []
            for i in range(N):
                t = [t, Ouch()]
            u = []
            for i in range(N):
                u = [u, Ouch()]
            v = {}
            for i in range(N):
                v = {1: v, 2: Ouch()}
        gc.disable()

    @unittest.skipUnless(threading, "test meaningless on builds without threads")
    def test_trashcan_threads(self):
        # Issue #13992: trashcan mechanism should be thread-safe
        NESTING = 60
        N_THREADS = 2

        def sleeper_gen():
            """A generator that releases the GIL when closed or dealloc'ed."""
            try:
                yield
            finally:
                time.sleep(0.000001)

        class C(list):
            # Appending to a list is atomic, which avoids the use of a lock.
            inits = []
            dels = []
            def __init__(self, alist):
                self[:] = alist
                C.inits.append(None)
            def __del__(self):
                # This __del__ is called by subtype_dealloc().
                C.dels.append(None)
                # `g` will release the GIL when garbage-collected. This
                # helps assert subtype_dealloc's behaviour when threads
                # switch in the middle of it.
                g = sleeper_gen()
                next(g)
                # Now that __del__ is finished, subtype_dealloc will proceed
                # to call list_dealloc, which also uses the trashcan mechanism.

        def make_nested():
            """Create a sufficiently nested container object so that the
            trashcan mechanism is invoked when deallocating it."""
            x = C([])
            for i in range(NESTING):
                x = [C([x])]
            del x

        def run_thread():
            """Exercise make_nested() in a loop."""
            while not exit:
                make_nested()

        old_checkinterval = sys.getcheckinterval()
        sys.setcheckinterval(3)
        try:
            exit = False
            threads = []
            for i in range(N_THREADS):
                t = threading.Thread(target=run_thread)
                threads.append(t)
            for t in threads:
                t.start()
            time.sleep(1.0)
            exit = True
            for t in threads:
                t.join()
        finally:
            sys.setcheckinterval(old_checkinterval)
        gc.collect()
        self.assertEqual(len(C.inits), len(C.dels))

    def test_boom(self):
        class Boom:
            def __getattr__(self, someattribute):
                del self.attr
                raise AttributeError

        a = Boom()
        b = Boom()
        a.attr = b
        b.attr = a

        gc.collect()
        garbagelen = len(gc.garbage)
        del a, b
        # a<->b are in a trash cycle now. Collection will invoke
        # Boom.__getattr__ (to see whether a and b have __del__ methods), and
        # __getattr__ deletes the internal "attr" attributes as a side effect.
        # That causes the trash cycle to get reclaimed via refcounts falling to
        # 0, thus mutating the trash graph as a side effect of merely asking
        # whether __del__ exists. This used to (before 2.3b1) crash Python.
        # Now __getattr__ isn't called.
        self.assertEqual(gc.collect(), 4)
        self.assertEqual(len(gc.garbage), garbagelen)

    def test_boom2(self):
        class Boom2:
            def __init__(self):
                self.x = 0

            def __getattr__(self, someattribute):
                self.x += 1
                if self.x > 1:
                    del self.attr
                raise AttributeError

        a = Boom2()
        b = Boom2()
        a.attr = b
        b.attr = a

        gc.collect()
        garbagelen = len(gc.garbage)
        del a, b
        # Much like test_boom(), except that __getattr__ doesn't break the
        # cycle until the second time gc checks for __del__. As of 2.3b1,
        # there isn't a second time, so this simply cleans up the trash cycle.
        # We expect a, b, a.__dict__ and b.__dict__ (4 objects) to get
        # reclaimed this way.
        self.assertEqual(gc.collect(), 4)
        self.assertEqual(len(gc.garbage), garbagelen)

    def test_boom_new(self):
        # boom__new and boom2_new are exactly like boom and boom2, except use
        # new-style classes.
        class Boom_New(object):
            def __getattr__(self, someattribute):
                del self.attr
                raise AttributeError

        a = Boom_New()
        b = Boom_New()
        a.attr = b
        b.attr = a

        gc.collect()
        garbagelen = len(gc.garbage)
        del a, b
        self.assertEqual(gc.collect(), 4)
        self.assertEqual(len(gc.garbage), garbagelen)

    def test_boom2_new(self):
        class Boom2_New(object):
            def __init__(self):
                self.x = 0

            def __getattr__(self, someattribute):
                self.x += 1
                if self.x > 1:
                    del self.attr
                raise AttributeError

        a = Boom2_New()
        b = Boom2_New()
        a.attr = b
        b.attr = a

        gc.collect()
        garbagelen = len(gc.garbage)
        del a, b
        self.assertEqual(gc.collect(), 4)
        self.assertEqual(len(gc.garbage), garbagelen)

    def test_get_referents(self):
        alist = [1, 3, 5]
        got = gc.get_referents(alist)
        got.sort()
        self.assertEqual(got, alist)

        atuple = tuple(alist)
        got = gc.get_referents(atuple)
        got.sort()
        self.assertEqual(got, alist)

        adict = {1: 3, 5: 7}
        expected = [1, 3, 5, 7]
        got = gc.get_referents(adict)
        got.sort()
        self.assertEqual(got, expected)

        got = gc.get_referents([1, 2], {3: 4}, (0, 0, 0))
        got.sort()
        self.assertEqual(got, [0, 0] + range(5))

        self.assertEqual(gc.get_referents(1, 'a', 4j), [])

    def test_is_tracked(self):
        # Atomic built-in types are not tracked, user-defined objects and
        # mutable containers are.
        # NOTE: types with special optimizations (e.g. tuple) have tests
        # in their own test files instead.
        self.assertFalse(gc.is_tracked(None))
        self.assertFalse(gc.is_tracked(1))
        self.assertFalse(gc.is_tracked(1.0))
        self.assertFalse(gc.is_tracked(1.0 + 5.0j))
        self.assertFalse(gc.is_tracked(True))
        self.assertFalse(gc.is_tracked(False))
        self.assertFalse(gc.is_tracked("a"))
        self.assertFalse(gc.is_tracked(u"a"))
        self.assertFalse(gc.is_tracked(bytearray("a")))
        self.assertFalse(gc.is_tracked(type))
        self.assertFalse(gc.is_tracked(int))
        self.assertFalse(gc.is_tracked(object))
        self.assertFalse(gc.is_tracked(object()))

        class OldStyle:
            pass
        class NewStyle(object):
            pass
        self.assertTrue(gc.is_tracked(gc))
        self.assertTrue(gc.is_tracked(OldStyle))
        self.assertTrue(gc.is_tracked(OldStyle()))
        self.assertTrue(gc.is_tracked(NewStyle))
        self.assertTrue(gc.is_tracked(NewStyle()))
        self.assertTrue(gc.is_tracked([]))
        self.assertTrue(gc.is_tracked(set()))

    def test_bug1055820b(self):
        # Corresponds to temp2b.py in the bug report.

        ouch = []
        def callback(ignored):
            ouch[:] = [wr() for wr in WRs]

        Cs = [C1055820(i) for i in range(2)]
        WRs = [weakref.ref(c, callback) for c in Cs]
        c = None

        gc.collect()
        self.assertEqual(len(ouch), 0)
        # Make the two instances trash, and collect again. The bug was that
        # the callback materialized a strong reference to an instance, but gc
        # cleared the instance's dict anyway.
        Cs = None
        gc.collect()
        self.assertEqual(len(ouch), 2)  # else the callbacks didn't run
        for x in ouch:
            # If the callback resurrected one of these guys, the instance
            # would be damaged, with an empty __dict__.
            self.assertEqual(x, None)
class GCTogglingTests(unittest.TestCase):
    """Tests that need automatic collection *enabled*: they reproduce bugs
    that only occur when gc is triggered "naturally" by allocation, so the
    generation boundaries between objects are preserved."""
    def setUp(self):
        # These tests rely on collections being triggered by allocation
        # rather than by explicit gc.collect() calls.
        gc.enable()
    def tearDown(self):
        gc.disable()
    def test_bug1055820c(self):
        """A weakref callback on an older-generation object must not see
        younger-generation cyclic trash that gc already cleared."""
        # Corresponds to temp2c.py in the bug report. This is pretty
        # elaborate.
        c0 = C1055820(0)
        # Move c0 into generation 2.
        gc.collect()
        c1 = C1055820(1)
        c1.keep_c0_alive = c0
        del c0.loop # now only c1 keeps c0 alive
        c2 = C1055820(2)
        c2wr = weakref.ref(c2) # no callback!
        ouch = []
        def callback(ignored):
            # Fires when c0 dies; materializes whatever c2wr refers to.
            ouch[:] = [c2wr()]
        # The callback gets associated with a wr on an object in generation 2.
        c0wr = weakref.ref(c0, callback)
        c0 = c1 = c2 = None
        # What we've set up: c0, c1, and c2 are all trash now. c0 is in
        # generation 2. The only thing keeping it alive is that c1 points to
        # it. c1 and c2 are in generation 0, and are in self-loops. There's a
        # global weakref to c2 (c2wr), but that weakref has no callback.
        # There's also a global weakref to c0 (c0wr), and that does have a
        # callback, and that callback references c2 via c2wr().
        #
        # c0 has a wr with callback, which references c2wr
        # ^
        # |
        # | Generation 2 above dots
        #. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
        # | Generation 0 below dots
        # |
        # |
        # ^->c1 ^->c2 has a wr but no callback
        # | | | |
        # <--v <--v
        #
        # So this is the nightmare: when generation 0 gets collected, we see
        # that c2 has a callback-free weakref, and c1 doesn't even have a
        # weakref. Collecting generation 0 doesn't see c0 at all, and c0 is
        # the only object that has a weakref with a callback. gc clears c1
        # and c2. Clearing c1 has the side effect of dropping the refcount on
        # c0 to 0, so c0 goes away (despite that it's in an older generation)
        # and c0's wr callback triggers. That in turn materializes a reference
        # to c2 via c2wr(), but c2 gets cleared anyway by gc.
        # We want to let gc happen "naturally", to preserve the distinction
        # between generations.
        junk = []
        i = 0
        detector = GC_Detector()
        while not detector.gc_happened:
            i += 1
            if i > 10000:
                self.fail("gc didn't happen after 10000 iterations")
            self.assertEqual(len(ouch), 0)
            junk.append([]) # this will eventually trigger gc
        self.assertEqual(len(ouch), 1) # else the callback wasn't invoked
        for x in ouch:
            # If the callback resurrected c2, the instance would be damaged,
            # with an empty __dict__.
            self.assertEqual(x, None)
    def test_bug1055820d(self):
        """Same scenario as test_bug1055820c, but the resurrection attempt
        comes from a __del__ method instead of a weakref callback."""
        # Corresponds to temp2d.py in the bug report. This is very much like
        # test_bug1055820c, but uses a __del__ method instead of a weakref
        # callback to sneak in a resurrection of cyclic trash.
        ouch = []
        class D(C1055820):
            def __del__(self):
                ouch[:] = [c2wr()]
        d0 = D(0)
        # Move all the above into generation 2.
        gc.collect()
        c1 = C1055820(1)
        c1.keep_d0_alive = d0
        del d0.loop # now only c1 keeps d0 alive
        c2 = C1055820(2)
        c2wr = weakref.ref(c2) # no callback!
        d0 = c1 = c2 = None
        # What we've set up: d0, c1, and c2 are all trash now. d0 is in
        # generation 2. The only thing keeping it alive is that c1 points to
        # it. c1 and c2 are in generation 0, and are in self-loops. There's
        # a global weakref to c2 (c2wr), but that weakref has no callback.
        # There are no other weakrefs.
        #
        # d0 has a __del__ method that references c2wr
        # ^
        # |
        # | Generation 2 above dots
        #. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
        # | Generation 0 below dots
        # |
        # |
        # ^->c1 ^->c2 has a wr but no callback
        # | | | |
        # <--v <--v
        #
        # So this is the nightmare: when generation 0 gets collected, we see
        # that c2 has a callback-free weakref, and c1 doesn't even have a
        # weakref. Collecting generation 0 doesn't see d0 at all. gc clears
        # c1 and c2. Clearing c1 has the side effect of dropping the refcount
        # on d0 to 0, so d0 goes away (despite that it's in an older
        # generation) and d0's __del__ triggers. That in turn materializes
        # a reference to c2 via c2wr(), but c2 gets cleared anyway by gc.
        # We want to let gc happen "naturally", to preserve the distinction
        # between generations.
        detector = GC_Detector()
        junk = []
        i = 0
        while not detector.gc_happened:
            i += 1
            if i > 10000:
                self.fail("gc didn't happen after 10000 iterations")
            self.assertEqual(len(ouch), 0)
            junk.append([]) # this will eventually trigger gc
        self.assertEqual(len(ouch), 1) # else __del__ wasn't invoked
        for x in ouch:
            # If __del__ resurrected c2, the instance would be damaged, with an
            # empty __dict__.
            self.assertEqual(x, None)
def test_main():
enabled = gc.isenabled()
gc.disable()
assert not gc.isenabled()
debug = gc.get_debug()
gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak
try:
gc.collect() # Delete 2nd generation garbage
run_unittest(GCTests, GCTogglingTests)
finally:
gc.set_debug(debug)
# test gc.enable() even if GC is disabled by default
if verbose:
print "restoring automatic collection"
# make sure to always test gc.enable()
gc.enable()
assert gc.isenabled()
if not enabled:
gc.disable()
if __name__ == "__main__":
test_main()
|
runner_enumerative_z3_only.py | # Run enumerative model counting with Z3.
# Get model count at every minute until timeout.
import argparse as ap
import sys
import os
import re
import math
import multiprocessing
import time
from allSat import get_models
from utils.Shell import Shell
from utilities import get_abc_result_line
from z3 import *
# Command-line interface. NOTE: `args` is read at module scope by
# call_solvers() and the dispatch loop below.
parser = ap.ArgumentParser(description = 'Run Quacky on AWS IAM policies')
parser.add_argument('-d', '--dir', help = 'Policy Directory', required = True)
parser.add_argument('-v', '--verbose', help = 'Verbose', required = False, action = 'store_true')
parser.add_argument('-c', '--constraints', help = 'use resource type constraints', required = False, action = 'store_true')
parser.add_argument('-e', '--enc', help = 'use action encoding', required = False, action = 'store_true')
parser.add_argument('-b', '--bound', help = 'Bound', required = True)
parser.add_argument('-t', '--timeout', help ='Timeout (sec)', required = True)
args = parser.parse_args()
# Directory of single-policy experiments: ../samples/<dir>/exp_single/
single_policy_dir = os.fsencode('../samples/' + args.dir + '/exp_single/')
def call_solvers(path, policy):
    """Translate one IAM policy to SMT-LIB, bound the `resource` string
    length, enumerate models with Z3, and print a markdown result row.

    Parameters
    ----------
    path : str
        Directory containing the policy file.
    policy : str
        Policy file name (JSON).

    Side effects: rewrites output_1.smt2 in place and prints to stdout.
    """
    shell = Shell()

    # Translate policies into SMT constraint formula
    cmd = 'python3 translator.py -p1 {}/{} -s'.format(path, policy)
    if args.constraints:
        cmd += ' -c'
    if args.enc:
        cmd += ' -e'
    out, err = shell.runcmd(cmd)
    if args.verbose:
        print(out, err)

    # `with` guarantees the file handles are closed even if reading/writing
    # raises (the original left them open on exception).
    with open('output_1.smt2', 'r') as ifile:
        formula = ifile.read()

    # Inject a length bound on `resource` just before (check-sat) so the
    # model count is finite.
    LOOKUP = '(check-sat)'
    formula = formula.replace(
        LOOKUP,
        '(assert (<= (str.len resource) {}))\n'.format(args.bound) + LOOKUP)

    with open('output_1.smt2', 'w') as ofile:
        ofile.write(formula)

    z3_time = time.time()
    f = parse_smt2_file('output_1.smt2')
    result, models = get_models(f, int(args.timeout))
    z3_time = (time.time() - z3_time) * 1000  # milliseconds
    z3_count = models

    # output: path, Z3 model count, Z3 time (ms)
    print('|[{}/{}]({}/{})|{}|{}|'.format(path, policy, path, policy, z3_count, z3_time))
    print(result)
# Dispatch each policy file to call_solvers in its own process so a hung
# solver can be killed. `subdir`/`entry` replace the original loop names
# `dir`/`file`, which shadowed Python builtins.
for subdir in os.listdir(single_policy_dir):
    path = os.fsdecode(single_policy_dir) + os.fsdecode(subdir)
    for entry in os.listdir(path):
        policy = os.fsdecode(entry)
        if policy.endswith('.json'):
            t = multiprocessing.Process(target = call_solvers, args=(path, policy))
            t.start()
            # Allow 1.5x the solver timeout before giving up on the process.
            t.join(timeout = float(args.timeout) * 1.5)
            if t.is_alive():
                t.terminate()
                # Report zero models and the full timeout for killed runs.
                print('|[{}/{}]({}/{})|{}|{}|'.format(path, policy, path, policy, '0', int(args.timeout) * 1000))
|
tensorboard.py | "Provides convenient callbacks for Learners that write model images, metrics/losses, stats and histograms to Tensorboard"
from ..basic_train import Learner
from ..basic_data import DatasetType, DataBunch
from ..vision import Image
from ..vision.gan import GANLearner
from ..callbacks import LearnerCallback
from ..core import *
from ..torch_core import *
from threading import Thread, Event
from time import sleep
from queue import Queue
import statistics
import torchvision.utils as vutils
from abc import ABC
#This is an optional dependency in fastai. Must install separately.
try:
    from tensorboardX import SummaryWriter
except ImportError:
    # Catch only ImportError (a bare `except:` would also swallow e.g.
    # KeyboardInterrupt), and bind a placeholder: the class bodies below use
    # `SummaryWriter` in annotations that are evaluated at import time, so
    # leaving the name unbound would raise NameError and defeat this message.
    print("To use this tracker, please run 'pip install tensorboardx'. Also you must have Tensorboard running to see results")
    SummaryWriter = None
__all__=['LearnerTensorboardWriter', 'GANTensorboardWriter', 'ImageGenTensorboardWriter']
#---Example usage (applies to any of the callbacks)---
# proj_id = 'Colorize'
# tboard_path = Path('data/tensorboard/' + proj_id)
# learn.callback_fns.append(partial(GANTensorboardWriter, base_dir=tboard_path, name='GanLearner'))
class LearnerTensorboardWriter(LearnerCallback):
    "Broadly useful callback for Learners that writes to Tensorboard. Writes model histograms, losses/metrics, and gradient stats."
    def __init__(self, learn:Learner, base_dir:Path, name:str, loss_iters:int=25, hist_iters:int=500, stats_iters:int=100):
        # loss_iters / hist_iters / stats_iters are write frequencies,
        # measured in training iterations.
        super().__init__(learn=learn)
        self.base_dir,self.name,self.loss_iters,self.hist_iters,self.stats_iters = base_dir,name,loss_iters,hist_iters,stats_iters
        log_dir = base_dir/name
        self.tbwriter = SummaryWriter(log_dir=str(log_dir))
        self.hist_writer = HistogramTBWriter()
        self.stats_writer = ModelStatsTBWriter()
        self.graph_writer = GraphTBWriter()
        # Cache of learn.data; see _update_batches_if_needed.
        self.data = None
        self.metrics_root = '/metrics/'
        self._update_batches_if_needed()
    def _get_new_batch(self, ds_type:DatasetType)->Collection[Tensor]:
        "Retrieves new batch of DatasetType, and detaches it."
        return self.learn.data.one_batch(ds_type=ds_type, detach=True, denorm=False, cpu=False)
    def _update_batches_if_needed(self)->None:
        "one_batch function is extremely slow with large datasets. This is caching the result as an optimization."
        if self.learn.data.valid_dl is None: return # Running learning rate finder, so return
        # Identity check: only refetch when learn.data was swapped out.
        update_batches = self.data is not self.learn.data
        if not update_batches: return
        self.data = self.learn.data
        self.trn_batch = self._get_new_batch(ds_type=DatasetType.Train)
        self.val_batch = self._get_new_batch(ds_type=DatasetType.Valid)
    def _write_model_stats(self, iteration:int)->None:
        "Writes gradient statistics to Tensorboard."
        self.stats_writer.write(model=self.learn.model, iteration=iteration, tbwriter=self.tbwriter)
    def _write_training_loss(self, iteration:int, last_loss:Tensor)->None:
        "Writes training loss to Tensorboard."
        scalar_value = to_np(last_loss)
        tag = self.metrics_root + 'train_loss'
        self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=iteration)
    def _write_weight_histograms(self, iteration:int)->None:
        "Writes model weight histograms to Tensorboard."
        self.hist_writer.write(model=self.learn.model, iteration=iteration, tbwriter=self.tbwriter)
    def _write_scalar(self, name:str, scalar_value, iteration:int)->None:
        "Writes single scalar value to Tensorboard."
        tag = self.metrics_root + name
        self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=iteration)
    #TODO: Relying on a specific hardcoded start_idx here isn't great. Is there a better solution?
    def _write_metrics(self, iteration:int, last_metrics:MetricsList, start_idx:int=2)->None:
        "Writes training metrics to Tensorboard."
        recorder = self.learn.recorder
        # start_idx=2 skips recorder names that are not metrics
        # (presumably epoch and train loss -- confirm against Recorder.names).
        for i, name in enumerate(recorder.names[start_idx:]):
            # Bail out once we run past the metrics actually provided.
            if last_metrics is None or len(last_metrics) < i+1: return
            scalar_value = last_metrics[i]
            self._write_scalar(name=name, scalar_value=scalar_value, iteration=iteration)
    def on_train_begin(self, **kwargs: Any) -> None:
        # Write the model graph once, traced with a single sample batch.
        self.graph_writer.write(model=self.learn.model, tbwriter=self.tbwriter,
                                input_to_model=next(iter(self.learn.data.dl(DatasetType.Single)))[0])
    def on_batch_end(self, last_loss:Tensor, iteration:int, **kwargs)->None:
        "Callback function that writes batch end appropriate data to Tensorboard."
        if iteration == 0: return
        self._update_batches_if_needed()
        if iteration % self.loss_iters == 0: self._write_training_loss(iteration=iteration, last_loss=last_loss)
        if iteration % self.hist_iters == 0: self._write_weight_histograms(iteration=iteration)
    # Doing stuff here that requires gradient info, because they get zeroed out afterwards in training loop
    def on_backward_end(self, iteration:int, **kwargs)->None:
        "Callback function that writes backward end appropriate data to Tensorboard."
        if iteration == 0: return
        self._update_batches_if_needed()
        if iteration % self.stats_iters == 0: self._write_model_stats(iteration=iteration)
    def on_epoch_end(self, last_metrics:MetricsList, iteration:int, **kwargs)->None:
        "Callback function that writes epoch end appropriate data to Tensorboard."
        self._write_metrics(iteration=iteration, last_metrics=last_metrics)
# TODO: We're overriding almost everything here. Seems like a good idea to question that ("is a" vs "has a")
class GANTensorboardWriter(LearnerTensorboardWriter):
    "Callback for GANLearners that writes to Tensorboard. Extends LearnerTensorboardWriter and adds output image writes."
    def __init__(self, learn:GANLearner, base_dir:Path, name:str, loss_iters:int=25, hist_iters:int=500,
                 stats_iters:int=100, visual_iters:int=100):
        # visual_iters: how often (in iterations) generated images are written.
        super().__init__(learn=learn, base_dir=base_dir, name=name, loss_iters=loss_iters, hist_iters=hist_iters, stats_iters=stats_iters)
        self.visual_iters = visual_iters
        self.img_gen_vis = ImageTBWriter()
        # Flags preventing stats writes while a model's gradients are zeroed
        # (see on_backward_end / _write_model_stats).
        self.gen_stats_updated = True
        self.crit_stats_updated = True
    def _write_weight_histograms(self, iteration:int)->None:
        "Writes model weight histograms to Tensorboard."
        # Histogram both halves of the GAN under distinct names.
        generator, critic = self.learn.gan_trainer.generator, self.learn.gan_trainer.critic
        self.hist_writer.write(model=generator, iteration=iteration, tbwriter=self.tbwriter, name='generator')
        self.hist_writer.write(model=critic, iteration=iteration, tbwriter=self.tbwriter, name='critic')
    def _write_gen_model_stats(self, iteration:int)->None:
        "Writes gradient statistics for generator to Tensorboard."
        generator = self.learn.gan_trainer.generator
        self.stats_writer.write(model=generator, iteration=iteration, tbwriter=self.tbwriter, name='gen_model_stats')
        self.gen_stats_updated = True
    def _write_critic_model_stats(self, iteration:int)->None:
        "Writes gradient statistics for critic to Tensorboard."
        critic = self.learn.gan_trainer.critic
        self.stats_writer.write(model=critic, iteration=iteration, tbwriter=self.tbwriter, name='crit_model_stats')
        self.crit_stats_updated = True
    def _write_model_stats(self, iteration:int)->None:
        "Writes gradient statistics to Tensorboard."
        # We don't want to write stats when model is not iterated on and hence has zeroed out gradients
        gen_mode = self.learn.gan_trainer.gen_mode
        if gen_mode and not self.gen_stats_updated: self._write_gen_model_stats(iteration=iteration)
        if not gen_mode and not self.crit_stats_updated: self._write_critic_model_stats(iteration=iteration)
    def _write_training_loss(self, iteration:int, last_loss:Tensor)->None:
        "Writes training loss to Tensorboard."
        # GAN loss comes from the trainer's recorder, not the callback arg.
        recorder = self.learn.gan_trainer.recorder
        if len(recorder.losses) == 0: return
        scalar_value = to_np((recorder.losses[-1:])[0])
        tag = self.metrics_root + 'train_loss'
        self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=iteration)
    def _write_images(self, iteration:int)->None:
        "Writes model generated, original and real images to Tensorboard."
        trainer = self.learn.gan_trainer
        #TODO: Switching gen_mode temporarily seems a bit hacky here. Certainly not a good side-effect. Is there a better way?
        gen_mode = trainer.gen_mode
        try:
            trainer.switch(gen_mode=True)
            self.img_gen_vis.write(learn=self.learn, trn_batch=self.trn_batch, val_batch=self.val_batch,
                                   iteration=iteration, tbwriter=self.tbwriter)
        # finally guarantees gen_mode is restored even if the write raises.
        finally: trainer.switch(gen_mode=gen_mode)
    def on_batch_end(self, iteration:int, **kwargs)->None:
        "Callback function that writes batch end appropriate data to Tensorboard."
        super().on_batch_end(iteration=iteration, **kwargs)
        if iteration == 0: return
        if iteration % self.visual_iters == 0: self._write_images(iteration=iteration)
    def on_backward_end(self, iteration:int, **kwargs)->None:
        "Callback function that writes backward end appropriate data to Tensorboard."
        if iteration == 0: return
        self._update_batches_if_needed()
        #TODO: This could perhaps be implemented as queues of requests instead but that seemed like overkill.
        # But I'm not the biggest fan of maintaining these boolean flags either... Review pls.
        if iteration % self.stats_iters == 0: self.gen_stats_updated, self.crit_stats_updated = False, False
        if not (self.gen_stats_updated and self.crit_stats_updated): self._write_model_stats(iteration=iteration)
class ImageGenTensorboardWriter(LearnerTensorboardWriter):
    "Callback for non-GAN image generating Learners that writes to Tensorboard. Extends LearnerTensorboardWriter and adds output image writes."
    def __init__(self, learn:Learner, base_dir:Path, name:str, loss_iters:int=25, hist_iters:int=500, stats_iters:int=100,
                 visual_iters:int=100):
        super().__init__(learn=learn, base_dir=base_dir, name=name, loss_iters=loss_iters, hist_iters=hist_iters,
                         stats_iters=stats_iters)
        # visual_iters controls how often generated images are written.
        self.visual_iters, self.img_gen_vis = visual_iters, ImageTBWriter()

    def _write_images(self, iteration:int)->None:
        "Write the cached training and validation batches' images to Tensorboard."
        self.img_gen_vis.write(learn=self.learn, trn_batch=self.trn_batch, val_batch=self.val_batch,
                               iteration=iteration, tbwriter=self.tbwriter)

    def on_batch_end(self, iteration:int, **kwargs)->None:
        "Write batch-end data, adding periodic image writes on top of the base callback."
        super().on_batch_end(iteration=iteration, **kwargs)
        if iteration != 0 and iteration % self.visual_iters == 0:
            self._write_images(iteration=iteration)
class TBWriteRequest(ABC):
    "A request object for Tensorboard writes. Useful for queuing up and executing asynchronous writes."
    def __init__(self, tbwriter: SummaryWriter, iteration:int):
        super().__init__()
        # Capture the destination writer and step number for the later write.
        self.tbwriter, self.iteration = tbwriter, iteration

    @abstractmethod
    def write(self)->None:
        "Perform the actual Tensorboard write; implemented by subclasses."
        pass
# SummaryWriter writes tend to block quite a bit. This gets around that and greatly boosts performance.
# Not all tensorboard writes are using this- just the ones that take a long time. Note that the
# SummaryWriter does actually use a threadsafe consumer/producer design ultimately to write to Tensorboard,
# so writes done outside of this async loop should be fine.
class AsyncTBWriter():
    "Executes TBWriteRequest objects on a background daemon thread so slow SummaryWriter calls don't block training."
    def __init__(self):
        super().__init__()
        self.stop_request = Event()
        self.queue = Queue()
        self.thread = Thread(target=self._queue_processor, daemon=True)
        self.thread.start()

    def request_write(self, request: 'TBWriteRequest')->None:
        "Queues up an asynchronous write request to Tensorboard; dropped if shutdown has begun."
        # Event.is_set() replaces the deprecated isSet() alias.
        if self.stop_request.is_set(): return
        self.queue.put(request)

    def _queue_processor(self)->None:
        "Processes queued up write requests asynchronously to Tensorboard."
        while not self.stop_request.is_set():
            while not self.queue.empty():
                if self.stop_request.is_set(): return
                request = self.queue.get()
                request.write()
            sleep(0.2)  # poll interval while the queue is empty

    #Provided this to stop thread explicitly or by context management (with statement) but thread should end on its own
    # upon program exit, due to being a daemon. So using this is probably unecessary.
    def close(self)->None:
        "Stops asynchronous request queue processing thread."
        self.stop_request.set()
        self.thread.join()

    # Nothing to do, thread already started. Could start thread here to enforce use of context manager
    # (but that sounds like a pain and a bit unweildy and unecessary for actual usage)
    def __enter__(self):
        # Return self so `with AsyncTBWriter() as w:` binds the writer
        # (the original returned None, making the bound name useless).
        return self

    def __exit__(self, exc_type, exc_value, traceback): self.close()

asyncTBWriter = AsyncTBWriter()
class ModelImageSet():
    "Convenience object that holds the original, real(target) and generated versions of a single image fed to a model."
    @staticmethod
    def get_list_from_model(learn:Learner, ds_type:DatasetType, batch:Tuple)->[]:
        "Factory method to convert a batch of model images to a list of ModelImageSet."
        # NOTE(review): the `->[]` return annotation is a literal empty list;
        # `->list` was presumably intended.
        image_sets = []
        x,y = batch[0],batch[1]
        # Run the batch through the model, reconstructing Image objects for preds.
        preds = learn.pred_batch(ds_type=ds_type, batch=(x,y), reconstruct=True)
        for orig_px, real_px, gen in zip(x,y,preds):
            orig, real = Image(px=orig_px), Image(px=real_px)
            image_set = ModelImageSet(orig=orig, real=real, gen=gen)
            image_sets.append(image_set)
        return image_sets
    def __init__(self, orig:Image, real:Image, gen:Image): self.orig, self.real, self.gen = orig, real, gen
class HistogramTBRequest(TBWriteRequest):
    "Request object for model histogram writes to Tensorboard."
    def __init__(self, model:nn.Module, iteration:int, tbwriter:SummaryWriter, name:str):
        super().__init__(tbwriter=tbwriter, iteration=iteration)
        # Snapshot the parameters on the calling thread; the actual write
        # happens later on the async writer thread.
        self.params = [(param_name, values.clone().detach().cpu())
                       for (param_name, values) in model.named_parameters()]
        self.name = name

    def _write_histogram(self, param_name:str, values)->None:
        "Write one parameter's histogram to Tensorboard."
        self.tbwriter.add_histogram(tag=self.name + '/weights/' + param_name,
                                    values=values, global_step=self.iteration)

    def write(self)->None:
        "Write every captured parameter histogram to Tensorboard."
        for pname, vals in self.params:
            self._write_histogram(param_name=pname, values=vals)
#If this isn't done async then this is sloooooow
class HistogramTBWriter():
    "Writes model histograms to Tensorboard."
    def __init__(self): super().__init__()

    def write(self, model:nn.Module, iteration:int, tbwriter:SummaryWriter, name:str='model')->None:
        "Queue an asynchronous histogram write for `model`'s parameters."
        asyncTBWriter.request_write(
            HistogramTBRequest(model=model, iteration=iteration, tbwriter=tbwriter, name=name))
class ModelStatsTBRequest(TBWriteRequest):
    "Request object for model gradient statistics writes to Tensorboard."
    def __init__(self, model:nn.Module, iteration:int, tbwriter:SummaryWriter, name:str):
        super().__init__(tbwriter=tbwriter, iteration=iteration)
        # Snapshot gradients on the calling thread (the write happens later,
        # on the async writer thread, after grads may have been zeroed).
        self.gradients = [x.grad.clone().detach().cpu() for x in model.parameters() if x.grad is not None]
        self.name = name
    def _add_gradient_scalar(self, name:str, scalar_value)->None:
        "Writes a single scalar value for a gradient statistic to Tensorboard."
        tag = self.name + '/gradients/' + name
        self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=self.iteration)
    def _write_avg_norm(self, norms:[])->None:
        "Writes the average norm of the gradients to Tensorboard."
        # len(self.gradients) == len(norms): norms is built from self.gradients in write().
        avg_norm = sum(norms)/len(self.gradients)
        self._add_gradient_scalar('avg_norm', scalar_value=avg_norm)
    def _write_median_norm(self, norms:[])->None:
        "Writes the median norm of the gradients to Tensorboard."
        median_norm = statistics.median(norms)
        self._add_gradient_scalar('median_norm', scalar_value=median_norm)
    def _write_max_norm(self, norms:[])->None:
        "Writes the maximum norm of the gradients to Tensorboard."
        max_norm = max(norms)
        self._add_gradient_scalar('max_norm', scalar_value=max_norm)
    def _write_min_norm(self, norms:[])->None:
        "Writes the minimum norm of the gradients to Tensorboard."
        min_norm = min(norms)
        self._add_gradient_scalar('min_norm', scalar_value=min_norm)
    def _write_num_zeros(self)->None:
        "Writes the number of zeroes in the gradients to Tensorboard."
        gradient_nps = [to_np(x.data) for x in self.gradients]
        num_zeros = sum((np.asarray(x) == 0.0).sum() for x in gradient_nps)
        self._add_gradient_scalar('num_zeros', scalar_value=num_zeros)
    def _write_avg_gradient(self)->None:
        "Writes the average of the gradients to Tensorboard."
        avg_gradient = sum(x.data.mean() for x in self.gradients)/len(self.gradients)
        self._add_gradient_scalar('avg_gradient', scalar_value=avg_gradient)
    def _write_median_gradient(self)->None:
        "Writes the median of the gradients to Tensorboard."
        # Median of per-parameter medians, not a global median over all elements.
        median_gradient = statistics.median(x.data.median() for x in self.gradients)
        self._add_gradient_scalar('median_gradient', scalar_value=median_gradient)
    def _write_max_gradient(self)->None:
        "Writes the maximum of the gradients to Tensorboard."
        max_gradient = max(x.data.max() for x in self.gradients)
        self._add_gradient_scalar('max_gradient', scalar_value=max_gradient)
    def _write_min_gradient(self)->None:
        "Writes the minimum of the gradients to Tensorboard."
        min_gradient = min(x.data.min() for x in self.gradients)
        self._add_gradient_scalar('min_gradient', scalar_value=min_gradient)
    def write(self)->None:
        "Writes model gradient statistics to Tensorboard."
        # Nothing to report if no parameter had a gradient at capture time.
        if len(self.gradients) == 0: return
        norms = [x.data.norm() for x in self.gradients]
        self._write_avg_norm(norms=norms)
        self._write_median_norm(norms=norms)
        self._write_max_norm(norms=norms)
        self._write_min_norm(norms=norms)
        self._write_num_zeros()
        self._write_avg_gradient()
        self._write_median_gradient()
        self._write_max_gradient()
        self._write_min_gradient()
class ModelStatsTBWriter():
    "Writes model gradient statistics to Tensorboard."
    def write(self, model:nn.Module, iteration:int, tbwriter:SummaryWriter, name:str='model_stats')->None:
        "Queue an asynchronous gradient-statistics write for `model`."
        req = ModelStatsTBRequest(model=model, iteration=iteration, tbwriter=tbwriter, name=name)
        asyncTBWriter.request_write(req)
class ImageTBRequest(TBWriteRequest):
    "Request object for model image output writes to Tensorboard."
    def __init__(self, learn:Learner, batch:Tuple, iteration:int, tbwriter:SummaryWriter, ds_type:DatasetType):
        super().__init__(tbwriter=tbwriter, iteration=iteration)
        # Run the model and capture the image sets now, on the calling thread.
        self.image_sets = ModelImageSet.get_list_from_model(learn=learn, batch=batch, ds_type=ds_type)
        self.ds_type = ds_type
    def _write_images(self, name:str, images:[Tensor])->None:
        "Writes list of images as tensors to Tensorboard."
        # Tag includes the dataset split so train/valid images stay separate.
        tag = self.ds_type.name + ' ' + name
        self.tbwriter.add_image(tag=tag, img_tensor=vutils.make_grid(images, normalize=True), global_step=self.iteration)
    def _get_image_tensors(self)->([Tensor], [Tensor], [Tensor]):
        "Gets list of image tensors from lists of Image objects, as a tuple of original, generated and real(target) images."
        orig_images, gen_images, real_images = [], [], []
        for image_set in self.image_sets:
            orig_images.append(image_set.orig.px)
            gen_images.append(image_set.gen.px)
            real_images.append(image_set.real.px)
        return orig_images, gen_images, real_images
    def write(self)->None:
        "Writes original, generated and real(target) images to Tensorboard."
        orig_images, gen_images, real_images = self._get_image_tensors()
        self._write_images(name='orig images', images=orig_images)
        self._write_images(name='gen images', images=gen_images)
        self._write_images(name='real images', images=real_images)
#If this isn't done async then this is noticeably slower
class ImageTBWriter():
    "Writes model image output to Tensorboard."
    def __init__(self): super().__init__()

    def write(self, learn:Learner, trn_batch:Tuple, val_batch:Tuple, iteration:int, tbwriter:SummaryWriter)->None:
        "Queue asynchronous image writes for the validation and training batches (in that order)."
        for split, batch in ((DatasetType.Valid, val_batch), (DatasetType.Train, trn_batch)):
            self._write_for_dstype(learn=learn, batch=batch, iteration=iteration, tbwriter=tbwriter, ds_type=split)

    def _write_for_dstype(self, learn:Learner, batch:Tuple, iteration:int, tbwriter:SummaryWriter, ds_type:DatasetType)->None:
        "Queue one ImageTBRequest for the given dataset split."
        asyncTBWriter.request_write(
            ImageTBRequest(learn=learn, batch=batch, iteration=iteration, tbwriter=tbwriter, ds_type=ds_type))
class GraphTBRequest(TBWriteRequest):
    # Fixed docstring: the original said "model histogram writes", copy-pasted
    # from HistogramTBRequest; this request writes the model graph.
    "Request object for model graph writes to Tensorboard."
    def __init__(self, model:nn.Module, tbwriter:SummaryWriter, input_to_model:torch.Tensor):
        # The graph is iteration-independent, so the step is pinned to 0.
        super().__init__(tbwriter=tbwriter, iteration=0)
        self.model,self.input_to_model = model,input_to_model
    def write(self)->None:
        "Writes single model graph to Tensorboard."
        self.tbwriter.add_graph(model=self.model, input_to_model=self.input_to_model)
class GraphTBWriter():
    "Writes model network graph to Tensorboard."
    def write(self, model:nn.Module, tbwriter:SummaryWriter, input_to_model:torch.Tensor)->None:
        "Queue an asynchronous graph write for `model`, traced with `input_to_model`."
        asyncTBWriter.request_write(
            GraphTBRequest(model=model, tbwriter=tbwriter, input_to_model=input_to_model))
|
ctpDataPublisher.py | '''A data feed engine for CTP bridge.
'''
'''
Copyright (c) 2017, WinQuant Information and Technology Co. Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
# built-in modules
import datetime as dt
import logging
import time
import yaml
# third-party modules
import threading
import pandas as pd
import zmq.green as zmq
# customized modules
import datafeed.engine as dEngine
import ctpmd
import ctpUtil
# customize logging configure
logging.basicConfig( format='[%(levelname)s] %(message)s',
level=logging.DEBUG )
class CTPDataPublisher( dEngine.DataPublisher ):
'''Data publisher engine for CTP.
'''
def __init__( self, configPath ):
    '''Initialize the data publisher.

    Parameters
    ----------
    configPath : str
        path to the configure for the data feed including fields
        MD_FRONT_IP : str
            gateway IP address;
        BROKER_ID : str
            broker ID;
        INVESTOR_ID : str
            investor ID;
        PASSWORD : str
            password to authenticate.
        The configure file is encoded as a YAML.

    Exceptions
    ----------
    raise
    * FileNotFoundError when the given YAML file is not found;
    * KeyError if the required fields are not specified in the configure file.
    '''
    with open( configPath, 'r' ) as f:
        # safe_load constructs only plain Python objects; the previous
        # yaml.load() call without an explicit Loader can instantiate
        # arbitrary objects from the file and is deprecated in PyYAML.
        self.config = yaml.safe_load( f.read() )

    # initialize the subscriber dict
    self.subscribers = {}
    # mapping from CTP securities ID's to external names
    self.secIds = {}
    # mapping from instrument to concerned subscribers
    self.topicsToSubscribers = {}
    # next identifier for the subscriber
    self.nextId = 1

    ctpmd.login( self.config[ 'MD_FRONT_IP' ], self.config[ 'BROKER_ID' ],
            self.config[ 'INVESTOR_ID' ], self.config[ 'PASSWORD' ] )
def connect( self, startDate=dt.date( 2012, 1, 1 ), endDate=dt.date.today() ):
    '''Put the data feed engine online and subscribes to the interested topics.

    Parameters
    ----------
    startDate : datetime.date
        start date of the backtesting;
    endDate : datetime.date
        end date of the backtesting.

    NOTE(review): startDate/endDate are not referenced in this method body,
    and the endDate default is evaluated once at import time rather than per
    call -- confirm whether callers depend on either.
    '''
    # summarise all the messages to subscribe
    fields = set()
    for _, s in self.subscribers.items():
        t = s.getSubscribedTopics()
        f = s.getSubscribedDataFields()
        # In case the passed in symbols are securities ID's.
        ctpTopics = dict( ( ctpUtil.getCtpInstId( tt ), tt ) for tt in t )
        self.secIds.update( ctpTopics )
        # A None field list means "all fields"; once any subscriber asks for
        # everything, fields collapses to None for the rest of the loop.
        if not ( f is None or fields is None ):
            fields.update( f )
        else:
            fields = None
    topics = self.secIds.keys()
    logging.info( 'Subscribe data for {ts:s} and {fs:s}.'.format(
            ts=', '.join( list( topics ) ),
            fs=', '.join( list( fields ) ) if fields is not None else 'ALL' ) )

    ctpmd.connect( self.config[ 'MQ_PUB_ADDR' ] )
    logging.info( 'Waiting connection to establish...' )
    time.sleep( 1 )
    logging.info( 'Setting-up receiver...' )

    # set up the receiver
    # NOTE(review): the receiver thread is created but never started here --
    # presumably the caller is expected to call .start() on the returned
    # thread; confirm against callers.
    receiver = threading.Thread( target=self.receiveDatafeed, args=( topics, ) )

    # subscribe to the market data
    ctpmd.subscribeMarketData( list( topics ) )

    # wait the event thread to exit
    # receiver.join()

    return receiver
def addSubscriber(self, subscriber):
    '''Register *subscriber* with this publisher.

    Parameters
    ----------
    subscriber : datafeed.subscriber.Subscriber
        The subscriber to register; ``None`` is ignored.

    Returns
    -------
    int or None
        Identifier assigned to the subscriber, or ``None`` when
        *subscriber* is ``None``.
    '''
    if subscriber is None:
        return None
    subscriberId = self.nextId
    self.nextId += 1
    self.subscribers[subscriberId] = subscriber
    # Record the subscriber under every topic it cares about.
    for topic in subscriber.getSubscribedTopics():
        if topic not in self.topicsToSubscribers:
            self.topicsToSubscribers[topic] = set()
        logging.debug('Add subscriber {sid:d} to topic {t:s}.'.format(
            sid=subscriberId, t=topic))
        self.topicsToSubscribers[topic].add(subscriber)
    return subscriberId
def removeSubscriber(self, subscriberId):
    '''Drop the subscriber registered under *subscriberId*.

    Parameters
    ----------
    subscriberId : int
        Identifier previously returned by :meth:`addSubscriber`.

    Returns
    -------
    datafeed.subscriber.Subscriber
        The dropped subscriber.

    Exceptions
    ----------
    raise Exception when no subscriber exists under *subscriberId*.
    '''
    if subscriberId not in self.subscribers:
        raise Exception('Requested subscriber does not exist.')
    subscriber = self.subscribers.pop(subscriberId, None)
    if subscriber is not None:
        # Unregister the subscriber from every topic it was listening to.
        for topic in subscriber.getSubscribedTopics():
            logging.debug('Remove subscriber {sid:d} from topic list {t:s}.'.format(
                sid=subscriberId, t=topic))
            self.topicsToSubscribers[topic] -= {subscriber}
    return subscriber
def notify(self, subscriberId, data):
    '''Feed *data* to the single subscriber registered under *subscriberId*.

    Parameters
    ----------
    subscriberId : int
        Identifier of the subscriber;
    data : pandas.DataFrame
        data passed to the subscriber's ``onData`` callback.

    Returns
    -------
    None.

    Exceptions
    ----------
    raise Exception when the subscriber ID is unknown.
    '''
    if subscriberId not in self.subscribers:
        raise Exception('Subscriber with ID {sid:d} does not exist.'.format(
            sid=subscriberId))
    self.subscribers[subscriberId].onData(data)
def notifyAll(self, data):
    '''Dispatch *data* to every subscriber interested in any of its topics.

    Parameters
    ----------
    data : pandas.DataFrame
        frame indexed by securities ID; each subscriber registered for at
        least one ID in the index receives the whole frame once.

    Exceptions
    ----------
    raise Exception when error occurs.
    '''
    # Union of all subscribers registered for any secId present in the data.
    interested = set()
    for secId in set(data.index):
        interested |= self.topicsToSubscribers.get(secId, set())
    for subscriber in interested:
        subscriber.onData(data)
def receiveDatafeed(self, topics):
    '''Blocking loop: receive ticks from the ZMQ publisher and fan them out.

    Parameters
    ----------
    topics : list of str
        topics (CTP instrument ID's) that the receiver concerns.

    Notes
    -----
    Runs forever; intended to be the target of a background thread.
    '''
    ctx = zmq.Context()
    sock = ctx.socket(zmq.SUB)
    # add concerned topics
    for topic in topics:
        sock.setsockopt_string(zmq.SUBSCRIBE, topic)
    # let's connect to the publisher
    sock.connect(self.config['MQ_SUB_ADDR'])
    cols = ['secId', 'tradeDate', 'price']
    while True:
        # decode raw data
        raw = sock.recv().decode('utf-8')
        try:
            # Bug fix: a malformed message used to raise ValueError out of
            # the receiver thread here — the old `len(data) == len(cols)`
            # check was unreachable because the 3-way unpack itself failed
            # first. Catch the unpack/parse failure and log it instead.
            instId, tradeDatetime, price = raw.split(',')
            data = [self.secIds.get(instId, instId),
                    dt.datetime.strptime(tradeDatetime, '%Y%m%d %H:%M:%S'),
                    float(price)]
        except ValueError:
            logging.warning('Received data {rd:s} which is not enough to use.'.format(
                rd=raw))
            continue
        df = pd.DataFrame(dict(zip(cols, data)), index=[0])
        df.set_index('secId', inplace=True)
        self.notifyAll(df)
|
msg.py | from utlis.rank import setrank ,isrank ,remrank ,setsudos ,remsudos ,setsudo,IDrank,GPranks
from utlis.send import send_msg, BYusers, sendM,Glang,GetLink
from handlers.delete import delete
from utlis.tg import Bot, Ckuser
from handlers.ranks import ranks
from handlers.locks import locks
from handlers.gpcmd import gpcmd
from handlers.sudo import sudo
from handlers.all import allGP
from utlis.tg import Bot,Del24
from config import *
from pyrogram import ReplyKeyboardMarkup, InlineKeyboardMarkup, InlineKeyboardButton
import threading, requests, time, random, re , json,datetime,importlib
def updateHandlers(client, message, redis):
    """Entry point for every incoming update.

    Activates/deactivates groups, rewrites custom command aliases, and
    dispatches the message to the rank-appropriate handler threads
    (sudo/ranks/locks/gpcmd/delete/allGP), plus the private-chat /start
    deep-link commands.

    Bug fixes relative to the original:
      * string/int comparisons used ``is``/``is not`` (identity) instead of
        ``==``; values coming off the wire are not interned, so these checks
        could silently fail or pass by accident.
      * custom-order triggers are now escaped with ``re.escape`` so
        multi-character triggers cannot inject regex syntax.
      * the always-true privilege checks in the /start deep-link handlers
        (``rank is not False or rank is not 0 or ...``) were rewritten to
        the intended membership tests.
      * local ``type`` renamed to ``chat_type`` (shadowed the builtin).
    """
    if redis.get("{}Nbot:bigM".format(BOT_ID)):
        # Maintenance ("big mute") switch: ignore everything.
        return False
    chat_type = message.chat.type
    try:
        userID = message.from_user.id
        chatID = message.chat.id
    except Exception as e:
        # Service messages without a from_user are ignored.
        return 0
    c = importlib.import_module("lang.arcmd")
    r = importlib.import_module("lang.arreply")
    if chat_type in ("supergroup", "group") and message.outgoing != True:
        userID = message.from_user.id
        chatID = message.chat.id
        rank = isrank(redis, userID, chatID)
        group = redis.sismember("{}Nbot:groups".format(BOT_ID), chatID)
        text = message.text
        title = message.chat.title
        if text and group is False:
            # Group not activated yet: only sudo users (or the creator when
            # auto-add is enabled) may activate it.
            if rank in ("sudo", "sudos", "asudo") or (redis.get("{}Nbot:autoaddbot".format(BOT_ID)) and GPranks(userID, chatID) == "creator"):
                if text == c.add:
                    if redis.get("{}Nbot:autoaddbotN".format(BOT_ID)):
                        auN = int(redis.get("{}Nbot:autoaddbotN".format(BOT_ID)))
                    else:
                        auN = 1
                    # Reject activation when the group is below the minimum
                    # member count (sudo users are exempt).
                    if auN >= Bot("getChatMembersCount", {"chat_id": chatID})["result"] and rank not in ("sudo", "sudos"):
                        Bot("sendMessage",{"chat_id":chatID,"text":r.Toolow.format((int(redis.get("{}Nbot:autoaddbotN".format(BOT_ID))) or 0)),"reply_to_message_id":message.message_id,"parse_mode":"html"})
                        return False
                    # The bot needs full admin rights to manage the group.
                    GetME = Bot("getChatMember",{"chat_id":chatID,"user_id":BOT_ID})["result"]
                    if (not GetME["can_change_info"] or not GetME["can_delete_messages"] or not GetME["can_invite_users"] or not GetME["can_restrict_members"] or not GetME["can_pin_messages"] or not GetME["can_promote_members"]):
                        Bot("sendMessage",{"chat_id":chatID,"text":r.GiveMEall,"reply_to_message_id":message.message_id,"parse_mode":"html"})
                        return False
                if text == c.add and not redis.sismember("{}Nbot:disabledgroups".format(BOT_ID), chatID) and Ckuser(message):
                    # Fresh activation: enable the default locks and import
                    # the current admin list into the bot's rank store.
                    locksarray = {'Llink','Llongtext','Lmarkdown','Linline','Lfiles','Lcontact','Lbots','Lfwd','Lnote'}
                    for lock in locksarray:
                        redis.sadd("{}Nbot:{}".format(BOT_ID, lock), chatID)
                    ads = Bot("getChatAdministrators", {"chat_id": chatID})
                    for ad in ads['result']:
                        userId = ad["user"]["id"]
                        userFn = ad["user"]["first_name"]
                        if ad['status'] == "administrator" and int(userId) != int(BOT_ID):
                            setrank(redis, "admin", userId, chatID, "array")
                        if ad['status'] == "creator":
                            setrank(redis, "malk", userId, chatID, "one")
                    add = redis.sadd("{}Nbot:groups".format(BOT_ID), chatID)
                    Bot("exportChatInviteLink", {"chat_id": chatID})
                    kb = InlineKeyboardMarkup([[InlineKeyboardButton(r.MoreInfo, url="t.me/calmaacc")]])
                    Bot("sendMessage",{"chat_id":chatID,"text":r.doneadd.format(title),"reply_to_message_id":message.message_id,"parse_mode":"markdown","reply_markup":kb})
                    # Notify the sudo log group about the new activation.
                    sendTO = (redis.get("{}Nbot:sudogp".format(BOT_ID)) or SUDO)
                    get = (redis.hget("{}Nbot:links".format(BOT_ID),chatID) or GetLink(chatID) or "https://t.me/calmaacc")
                    kb = InlineKeyboardMarkup([[InlineKeyboardButton("الرابط 🖇", url=get)]])
                    BY = "<a href=\"tg://user?id={}\">{}</a>".format(userID, message.from_user.first_name)
                    Bot("sendMessage",{"chat_id":sendTO,"text":f"تم تفعيل مجموعه جديدة ℹ️\nاسم المجموعه : {title}\nايدي المجموعه : {chatID}\nالمنشئ : {BY}\n⎯ ⎯ ⎯ ⎯","parse_mode":"html","reply_markup":kb})
                elif text == c.add and redis.sismember("{}Nbot:disabledgroups".format(BOT_ID), chatID) and Ckuser(message):
                    # Re-activation of a previously disabled group.
                    redis.sadd("{}Nbot:groups".format(BOT_ID), chatID)
                    redis.srem("{}Nbot:disabledgroups".format(BOT_ID), chatID)
                    redis.hdel("{}Nbot:disabledgroupsTIME".format(BOT_ID), chatID)
                    Bot("sendMessage",{"chat_id":chatID,"text":r.doneadd2.format(title),"reply_to_message_id":message.message_id,"parse_mode":"markdown"})
                if text == c.disabl and Ckuser(message):
                    # Already not activated; just report the disabled state.
                    Bot("sendMessage",{"chat_id":chatID,"text":r.disabled.format(title),"reply_to_message_id":message.message_id,"parse_mode":"markdown"})
        if text and group is True:
            if rank in ("sudo", "sudos", "asudo") or (redis.get("{}Nbot:autoaddbot".format(BOT_ID)) and GPranks(userID, chatID) == "creator"):
                if text == c.add and Ckuser(message):
                    Bot("sendMessage",{"chat_id":chatID,"text":r.doneadded.format(title),"reply_to_message_id":message.message_id,"parse_mode":"markdown"})
                if text == c.disabl and Ckuser(message):
                    # Disable the group; remember when, so it can expire.
                    redis.srem("{}Nbot:groups".format(BOT_ID), chatID)
                    redis.sadd("{}Nbot:disabledgroups".format(BOT_ID), chatID)
                    NextDay_Date = datetime.datetime.today() + datetime.timedelta(days=1)
                    redis.hset("{}Nbot:disabledgroupsTIME".format(BOT_ID), chatID, str(NextDay_Date))
                    kb = InlineKeyboardMarkup([[InlineKeyboardButton(r.MoreInfo, url="t.me/calmaacc")]])
                    Bot("sendMessage",{"chat_id":chatID,"text":r.disabl.format(title),"reply_to_message_id":message.message_id,"parse_mode":"markdown","reply_markup":kb})
        if group is True:
            t = threading.Thread(target=allGP, args=(client, message, redis))
            t.daemon = True
            t.start()
        if text and group is True:
            # Rewrite custom command aliases ("trigger=replacement" entries).
            if redis.sismember("{}Nbot:publicOrders".format(BOT_ID), chatID):
                for entry in redis.smembers("{}Nbot:{}:TXPoeders".format(BOT_ID, chatID)):
                    try:
                        parts = entry.split("=")
                        # Bug fix: escape the whole trigger (the old code
                        # only prefixed a backslash to its first character).
                        pattern = re.escape(parts[0])
                        if re.search("^{}$".format(pattern), text) or re.search("^{} (.*)$".format(pattern), text):
                            text = text.replace(parts[0], parts[1])
                    except Exception as e:
                        print(e)
                message.text = text
            for entry in redis.smembers("{}Nbot:{}:TXoeders".format(BOT_ID, chatID)):
                try:
                    parts = entry.split("=")
                    pattern = re.escape(parts[0])
                    if re.search("^{}$".format(pattern), text) or re.search("^{} (.*)$".format(pattern), text):
                        text = text.replace(parts[0], parts[1])
                except Exception as e:
                    print(e)
            message.text = text
        if rank in ("sudo", "sudos", "asudo") and group is True:
            t = threading.Thread(target=sudo, args=(client, message, redis))
            t.daemon = True
            t.start()
        if text and rank in ("sudo", "asudo", "sudos", "malk", "acreator", "creator", "owner") and group is True:
            t = threading.Thread(target=ranks, args=(client, message, redis))
            t.daemon = True
            t.start()
        if text and rank in ("sudo", "asudo", "sudos", "malk", "acreator", "creator", "owner", "admin") and group is True and re.search(c.startlock, text):
            if Ckuser(message):
                t = threading.Thread(target=locks, args=(client, message, redis))
                t.daemon = True
                t.start()
        if (rank is False or rank == 0) and group is True:
            # Unranked member: run the lock-enforcement/deletion handler.
            t = threading.Thread(target=delete, args=(client, message, redis))
            t.daemon = True
            t.start()
        if rank in ("sudo", "asudo", "sudos", "malk", "acreator", "creator", "owner", "admin") and group is True:
            t = threading.Thread(target=gpcmd, args=(client, message, redis))
            t.daemon = True
            t.start()
    if chat_type == "private" and message.outgoing != True:
        text = message.text
        rank = isrank(redis, userID, chatID)
        if rank in ("sudo", "asudo", "sudos"):
            t = threading.Thread(target=sudo, args=(client, message, redis))
            t.daemon = True
            t.start()
        if text and re.search("^/start$", text):
            userID = message.from_user.id
            userFN = message.from_user.first_name
            redis.sadd("{}Nbot:privates".format(BOT_ID), userID)
            if rank == "sudo":
                kb = ReplyKeyboardMarkup([[r.RKgp, r.RKgpl],[r.RKaf, r.RKrf],[r.RKf],["جلب نسخه احتياطيه"],[r.RKub]],resize_keyboard=True)
                Bot("sendMessage",{"chat_id":chatID,"text":r.sudostart,"reply_to_message_id":message.message_id,"parse_mode":"html","reply_markup":kb})
                return 0
            getbot = client.get_me()
            kb = InlineKeyboardMarkup([[InlineKeyboardButton("LomaV2", url="t.me/calmaacc")]])
            Bot("sendMessage",{"chat_id":chatID,"text":r.botstart.format(getbot.first_name,getbot.username),"reply_to_message_id":message.message_id,"parse_mode":"html","reply_markup":kb})
        if text and re.search("^/start (.*)$", text):
            # Deep-link payload: "order=chatId=userId=TY".
            tx = text.replace("/start ", "")
            split = tx.split("=")
            order = split[0]
            if order == "showreplylistBOT":
                chatId = split[1]
                userId = split[2]
                TY = split[3]
                rank = isrank(redis, userId, chatId)
                if rank in ("sudo", "asudo", "sudos"):
                    li = redis.hkeys("{}Nbot:{}".format(BOT_ID, TY))
                    if li:
                        i = 1
                        words = ""
                        for word in li:
                            words = words + "\n" + str(i) + " - {" + word + "}"
                            i += 1
                            # Telegram limits message length; flush in chunks.
                            if len(words) > 3000:
                                Bot("sendMessage",{"chat_id":userId,"text":words,"reply_to_message_id":message.message_id,"parse_mode":"html"})
                                words = ''
                        Bot("sendMessage",{"chat_id":userId,"text":words,"reply_to_message_id":message.message_id,"parse_mode":"html"})
                        reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton(r.Delall2R,callback_data=json.dumps(["del{}".format(TY+'BOT'),"",userID])),]])
                        Bot("sendMessage",{"chat_id":chatID,"text":r.DelallR,"reply_to_message_id":message.message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
            if order == "showreplylist":
                chatId = split[1]
                userId = split[2]
                TY = split[3]
                group = redis.sismember("{}Nbot:groups".format(BOT_ID), chatId)
                rank = isrank(redis, userId, chatId)
                # Bug fix: the original or-chain of inequalities was always
                # true; the intended check is a membership test.
                if rank not in (False, 0, "vip", "admin") and group is True:
                    li = redis.hkeys("{}Nbot:{}:{}".format(BOT_ID, chatId, TY))
                    if li:
                        i = 1
                        words = ""
                        for word in li:
                            words = words + "\n" + str(i) + " - {" + word + "}"
                            i += 1
                            if len(words) > 3000:
                                Bot("sendMessage",{"chat_id":userId,"text":words,"reply_to_message_id":message.message_id,"parse_mode":"html"})
                                words = ''
                        Bot("sendMessage",{"chat_id":userId,"text":words,"reply_to_message_id":message.message_id,"parse_mode":"html"})
                        reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton(r.Delall2R,callback_data=json.dumps(["del{}".format(TY),chatId,userID])),]])
                        Bot("sendMessage",{"chat_id":chatID,"text":r.DelallR,"reply_to_message_id":message.message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
            if order == "showBlocklist":
                chatId = split[1]
                userId = split[2]
                TY = split[3]
                group = redis.sismember("{}Nbot:groups".format(BOT_ID), chatId)
                rank = isrank(redis, userId, chatId)
                # Bug fix: membership test instead of the always-true or-chain.
                if rank not in (False, 0, "vip") and group is True:
                    # NOTE(review): this hset looks out of place for a
                    # read-only listing command — confirm it is intentional.
                    redis.hset("{}Nbot:{}:TXreplys".format(BOT_ID, chatID), tx, text)
                    li = redis.smembers("{}Nbot:{}:{}".format(BOT_ID, chatId, TY))
                    if li:
                        i = 1
                        words = ""
                        for ID in li:
                            reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton(r.Blocklistone,callback_data=json.dumps(["delfromb",TY,userID,chatId])),]])
                            if TY == "blockanimations":
                                Bot("sendAnimation",{"chat_id":userId,"animation":ID,"reply_markup":reply_markup})
                            if TY == "blockSTICKERs":
                                Bot("sendSticker",{"chat_id":userId,"sticker":ID,"reply_markup":reply_markup})
                            if TY == "blockphotos":
                                Bot("sendPhoto",{"chat_id":userId,"photo":ID,"reply_markup":reply_markup})
                            if TY == "blockTEXTs":
                                words = words + "\n" + str(i) + " - {" + ID + "}"
                                i += 1
                                print(len(words))
                                if len(words) > 3000:
                                    Bot("sendMessage",{"chat_id":userId,"text":words,"reply_to_message_id":message.message_id,"parse_mode":"html"})
                                    words = ''
                        if TY == "blockTEXTs":
                            Bot("sendMessage",{"chat_id":userId,"text":words,"reply_to_message_id":message.message_id,"parse_mode":"html"})
                        reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton(r.Delall2,callback_data=json.dumps(["delBL",TY,userID,chatId])),]])
                        Bot("sendMessage",{"chat_id":userId,"text":r.Delall,"reply_to_message_id":message.message_id,"parse_mode":"html","reply_markup":reply_markup})
                    else:
                        Bot("sendMessage",{"chat_id":userId,"text":r.listempty2,"reply_to_message_id":message.message_id,"parse_mode":"html"})
|
views.py | # project/users/views.py
# IMPORTS
from flask import render_template, Blueprint, request, redirect, url_for, flash, Markup, abort
from sqlalchemy.exc import IntegrityError
from flask_login import login_user, current_user, login_required, logout_user
from itsdangerous import URLSafeTimedSerializer
from threading import Thread
from flask_mail import Message
from datetime import datetime, timedelta
from .forms import RegisterForm, LoginForm, EmailForm, PasswordForm
from project import app, db, mail
from project.models import User
# CONFIG
# Blueprint grouping all user-account routes; templates are resolved from the
# blueprint-local 'templates' folder.
users_blueprint = Blueprint('users', __name__, template_folder='templates')
# HELPERS
def send_async_email(msg):
    # Thread target: Flask-Mail requires an application context to send.
    with app.app_context():
        mail.send(msg)
def send_email(subject, recipients, html_body):
    """Compose an HTML email and dispatch it on a background thread."""
    message = Message(subject, recipients=recipients)
    message.html = html_body
    # Sending is slow (SMTP round-trip); do it off the request thread.
    Thread(target=send_async_email, args=[message]).start()
def send_confirmation_email(user_email):
    """Email a signed confirmation link to *user_email*."""
    serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
    token = serializer.dumps(user_email, salt='email-confirmation-salt')
    confirm_url = url_for('users.confirm_email', token=token, _external=True)
    body = render_template('email_confirmation.html', confirm_url=confirm_url)
    send_email('Confirm Your Email Address', [user_email], body)
def send_password_reset_email(user_email):
    """Email a signed password-reset link to *user_email*."""
    serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
    token = serializer.dumps(user_email, salt='password-reset-salt')
    password_reset_url = url_for('users.reset_with_token', token=token, _external=True)
    body = render_template('email_password_reset.html',
                           password_reset_url=password_reset_url)
    send_email('Password Reset Requested', [user_email], body)
# ROUTES
@users_blueprint.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account and send the email-confirmation message."""
    form = RegisterForm(request.form)
    if request.method == 'POST' and form.validate_on_submit():
        try:
            new_user = User(form.email.data, form.password.data)
            new_user.authenticated = True
            db.session.add(new_user)
            db.session.commit()
            send_confirmation_email(new_user.email)
            flash(Markup(
                "<strong>Success!</strong> Thanks for registering. Please check your email to confirm your email address."),
                'success')
            return redirect(url_for('home'))
        except IntegrityError:
            # Most likely a duplicate email (unique constraint).
            db.session.rollback()
            flash(Markup(
                "<strong>Error!</strong> Unable to process registration."),
                'danger')
    return render_template('register.html', form=form)
@users_blueprint.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user; unconfirmed accounts are sent to re-confirmation."""
    form = LoginForm(request.form)
    if request.method == 'POST' and form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user is not None and user.is_correct_password(form.password.data):
            if user.is_email_confirmed is not True:
                # Log in but bounce to the re-send-confirmation page.
                user.authenticated = True
                db.session.add(user)
                db.session.commit()
                login_user(user)
                return redirect(url_for('users.resend_email_confirmation'), )
            # Confirmed account: record login timestamps and proceed home.
            user.authenticated = True
            user.last_logged_in = user.current_logged_in
            user.current_logged_in = datetime.now()
            db.session.add(user)
            db.session.commit()
            login_user(user)
            flash(Markup(
                "<strong>Welcome back!</strong> You are now successfully logged in."),
                'success')
            return redirect(url_for('home'))
        flash(Markup(
            "<strong>Error!</strong> Incorrect login credentials."), 'danger')
    return render_template('login.html', form=form)
@users_blueprint.route('/user_profile', methods=['GET', 'POST'])
@login_required
def user_profile():
    # Render the profile page for the currently logged-in user.
    return render_template('user_profile.html')
@users_blueprint.route('/confirm/<token>')
def confirm_email(token):
    """Confirm a user's email from the signed token emailed at registration.

    The token is valid for one hour (max_age=3600).
    """
    try:
        confirm_serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
        email = confirm_serializer.loads(token, salt='email-confirmation-salt', max_age=3600)
    except Exception:  # itsdangerous BadSignature/SignatureExpired (narrowed from bare except)
        flash(Markup("The confirmation link is invalid or has expired."), 'danger')
        return redirect(url_for('users.login'))
    user = User.query.filter_by(email=email).first()
    if user is None:
        # Bug fix: a valid token for a since-deleted account used to raise
        # AttributeError (500) on `user.email_confirmed` below.
        flash(Markup("The confirmation link is invalid or has expired."), 'danger')
        return redirect(url_for('users.login'))
    if user.email_confirmed:
        flash(Markup("Account already confirmed. Please login."), 'info')
    else:
        user.email_confirmed = True
        user.email_confirmed_on = datetime.now()
        db.session.add(user)
        db.session.commit()
        flash(Markup("Thank you for confirming your email address!"), 'success')
    return redirect(url_for('home'))
@users_blueprint.route('/reset', methods=["GET", "POST"])
def reset():
    """Start a password reset: email a reset link to a confirmed address."""
    form = EmailForm()
    if form.validate_on_submit():
        try:
            user = User.query.filter_by(email=form.email.data).first_or_404()
        except Exception:  # first_or_404 aborts with NotFound (narrowed from bare except)
            flash(Markup("Invalid email address!"), 'danger')
            return render_template('password_reset_email.html', form=form)
        if user.email_confirmed:
            send_password_reset_email(user.email)
            flash(Markup("Please check your email for a password reset link."), 'success')
        else:
            # Unconfirmed addresses must be confirmed first to prove ownership.
            flash(Markup(
                "Your email address must be confirmed before attempting a password reset."),
                'danger')
        return redirect(url_for('users.login'))
    return render_template('password_reset_email.html', form=form)
@users_blueprint.route('/reset/<token>', methods=["GET", "POST"])
def reset_with_token(token):
    """Complete a password reset from the signed token (valid for one hour)."""
    try:
        password_reset_serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
        email = password_reset_serializer.loads(token, salt='password-reset-salt', max_age=3600)
    except Exception:  # itsdangerous BadSignature/SignatureExpired (narrowed from bare except)
        flash(Markup("The password reset link is invalid or has expired."), 'danger')
        return redirect(url_for('users.login'))
    form = PasswordForm()
    if form.validate_on_submit():
        try:
            user = User.query.filter_by(email=email).first_or_404()
        except Exception:  # first_or_404 aborts with NotFound (narrowed from bare except)
            flash(Markup("Invalid email address!"), 'danger')
            return redirect(url_for('users.login'))
        user.password = form.password.data
        db.session.add(user)
        db.session.commit()
        flash(Markup("Your password has been updated!"), 'success')
        return redirect(url_for('users.login'))
    return render_template('reset_password_with_token.html', form=form, token=token)
@users_blueprint.route('/admin_view_users')
@login_required
def admin_view_users():
    """List all users; admin role required."""
    if current_user.role != 'admin':
        abort(403)
    users = User.query.order_by(User.id).all()
    return render_template('admin_view_users.html', users=users)
@users_blueprint.route('/admin_dashboard')
@login_required
def admin_dashboard():
    """Admin KPI dashboard: monthly-active and confirmed user counts."""
    if current_user.role != 'admin':
        abort(403)
    users = User.query.order_by(User.id).all()
    # Monthly active users: anyone who logged in within the last 30 days.
    kpi_mau = User.query.filter(User.last_logged_in > (datetime.today() - timedelta(days=30))).count()
    kpi_total_confirmed = User.query.filter_by(email_confirmed=True).count()
    # Bug fix: guard against ZeroDivisionError when no account is confirmed.
    if kpi_total_confirmed:
        kpi_mau_percentage = (100 / kpi_total_confirmed) * kpi_mau
    else:
        kpi_mau_percentage = 0
    return render_template('admin_dashboard.html', users=users, kpi_mau=kpi_mau, kpi_total_confirmed=kpi_total_confirmed, kpi_mau_percentage=kpi_mau_percentage)
@users_blueprint.route('/logout')
@login_required
def logout():
    """Log the current user out and return to the login page."""
    user = current_user
    user.authenticated = False
    db.session.add(user)
    db.session.commit()
    logout_user()
    flash(Markup("<strong>Goodbye!</strong> You are now logged out."), 'info')
    return redirect(url_for('users.login'))
@users_blueprint.route('/password_change', methods=["GET", "POST"])
@login_required
def user_password_change():
    """Let the logged-in user set a new password."""
    form = PasswordForm()
    if request.method == 'POST' and form.validate_on_submit():
        user = current_user
        user.password = form.password.data
        db.session.add(user)
        db.session.commit()
        flash(Markup("Password has been updated!"), 'success')
        return redirect(url_for('users.user_profile'))
    return render_template('password_change.html', form=form)
@users_blueprint.route('/resend_confirmation')
@login_required
def resend_email_confirmation():
    """Re-send the confirmation email, then log the user out.

    The user is logged out in both the success and failure paths so they
    must log back in after confirming their address.
    """
    try:
        send_confirmation_email(current_user.email)
        flash(Markup(
            "Email sent to confirm your email address. Please check your inbox!"),
            'success')
    except IntegrityError:
        flash(Markup(
            "Error! Unable to send email to confirm your email address."),
            'danger')
    # Common cleanup (previously duplicated in both branches): mark the
    # account logged out and end the session.
    user = current_user
    user.authenticated = False
    db.session.add(user)
    db.session.commit()
    logout_user()
    return redirect(url_for('users.login'))
@users_blueprint.route('/email_change', methods=["GET", "POST"])
@login_required
def user_email_change():
    """Change the logged-in user's email and require re-confirmation."""
    form = EmailForm()
    if request.method == 'POST' and form.validate_on_submit():
        try:
            existing = User.query.filter_by(email=form.email.data).first()
            if existing is None:
                user = current_user
                user.email = form.email.data
                # The new address must be confirmed again before it counts.
                user.email_confirmed = False
                user.email_confirmed_on = None
                user.email_confirmation_sent_on = datetime.now()
                db.session.add(user)
                db.session.commit()
                send_confirmation_email(user.email)
                flash(Markup(
                    "Email changed! Please confirm your new email address (link sent to new email)"),
                    'success')
                return redirect(url_for('users.user_profile'))
            flash(Markup("Sorry, that email already exists!"), 'danger')
        except IntegrityError:
            flash(Markup("Sorry, that email already exists!"), 'danger')
    return render_template('email_change.html', form=form)
|
test_admission_controller.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Tests admission control
import itertools
import logging
import os
import pytest
import re
import shutil
import sys
import threading
from copy import copy
from time import sleep, time
from beeswaxd.BeeswaxService import QueryState
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
from tests.common.environ import build_flavor_timeout, ImpalaTestClusterProperties
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.resource_pool_config import ResourcePoolConfig
from tests.common.skip import (
SkipIfS3,
SkipIfABFS,
SkipIfADLS,
SkipIfEC,
SkipIfNotHdfsMinicluster,
SkipIfOS)
from tests.common.test_dimensions import (
create_single_exec_option_dimension,
create_uncompressed_text_dimension)
from tests.common.test_vector import ImpalaTestDimension
from tests.hs2.hs2_test_suite import HS2TestSuite, needs_session
from tests.util.web_pages_util import (
get_num_completed_backends,
get_mem_admitted_backends_debug_page)
from tests.verifiers.mem_usage_verifier import MemUsageVerifier
from tests.verifiers.metric_verifier import MetricVerifier
from ImpalaService import ImpalaHiveServer2Service
from TCLIService import TCLIService
# Shared logger for this test module.
LOG = logging.getLogger('admission_test')
# The query used for testing. It is important that this query returns many rows
# while keeping fragments active on all backends. This allows a thread to keep
# the query active and consuming resources by fetching one row at a time. The
# where clause is for debugging purposes; each thread will insert its id so
# that running queries can be correlated with the thread that submitted them.
QUERY = " union all ".join(["select * from functional.alltypesagg where id != {0}"] * 30)
# Same query but with additional unpartitioned non-coordinator fragments.
# The unpartitioned fragments are both interior fragments that consume input
# from a scan fragment and non-interior fragments with a constant UNION.
QUERY_WITH_UNPARTITIONED_FRAGMENTS = """
select *, (select count(distinct int_col) from alltypestiny) subquery1,
(select count(distinct int_col) from alltypes) subquery2,
(select 1234) subquery3
from (""" + QUERY + """) v"""
# The statestore heartbeat and topic update frequency (ms). Set low for testing.
STATESTORE_RPC_FREQUENCY_MS = 100
# Time to sleep (in milliseconds) between issuing queries. When the delay is at least
# the statestore heartbeat frequency, all state should be visible by every impalad by
# the time the next query is submitted. Otherwise the different impalads will see stale
# state for some admission decisions.
SUBMISSION_DELAY_MS = \
  [0, STATESTORE_RPC_FREQUENCY_MS / 2, STATESTORE_RPC_FREQUENCY_MS * 3 / 2]
# Whether we will submit queries to all available impalads (in a round-robin fashion)
ROUND_ROBIN_SUBMISSION = [True, False]
# The query pool to use. The impalads should be configured to recognize this
# pool with the parameters below.
POOL_NAME = "default-pool"
# Stress test timeout (seconds). The timeout needs to be significantly higher for
# slow builds like code coverage and ASAN (IMPALA-3790, IMPALA-6241).
STRESS_TIMEOUT = build_flavor_timeout(90, slow_build_timeout=600)
# The number of queries that can execute concurrently in the pool POOL_NAME.
MAX_NUM_CONCURRENT_QUERIES = 5
# The number of queries that can be queued in the pool POOL_NAME
MAX_NUM_QUEUED_QUERIES = 10
# Mem limit (bytes) used in the mem limit test
MEM_TEST_LIMIT = 12 * 1024 * 1024 * 1024
# Statestore flags shared by the cluster-startup decorators below.
_STATESTORED_ARGS = ("-statestore_heartbeat_frequency_ms={freq_ms} "
                     "-statestore_priority_update_frequency_ms={freq_ms}").format(
  freq_ms=STATESTORE_RPC_FREQUENCY_MS)
# Name of the subscriber metric tracking the admission control update interval.
REQUEST_QUEUE_UPDATE_INTERVAL =\
  'statestore-subscriber.topic-impala-request-queue.update-interval'
# Key in the query profile for the query options.
PROFILE_QUERY_OPTIONS_KEY = "Query Options (set by configuration): "
# The different ways that a query thread can end its query.
QUERY_END_BEHAVIORS = ['EOS', 'CLIENT_CANCEL', 'QUERY_TIMEOUT', 'CLIENT_CLOSE']
# The timeout used for the QUERY_TIMEOUT end behaviour
QUERY_END_TIMEOUT_S = 3
# Value used for --admission_control_stale_topic_threshold_ms in tests.
STALE_TOPIC_THRESHOLD_MS = 500
# Regex that matches the first part of the profile info string added when a query is
# queued.
INITIAL_QUEUE_REASON_REGEX = \
  "Initial admission queue reason: waited [0-9]* ms, reason: .*"
# The path to resources directory which contains the admission control config files.
RESOURCES_DIR = os.path.join(os.environ['IMPALA_HOME'], "fe", "src", "test", "resources")
def impalad_admission_ctrl_flags(max_requests, max_queued, pool_max_mem,
                                 proc_mem_limit=None, queue_wait_timeout_ms=None,
                                 admission_control_slots=None, executor_groups=None):
  """Build the impalad startup flag string configuring the default pool's
  admission control limits; each optional keyword adds its flag only when
  it is not None."""
  optional_flags = [
      ("-mem_limit={0}", proc_mem_limit),
      ("-queue_wait_timeout_ms={0}", queue_wait_timeout_ms),
      ("-admission_control_slots={0}", admission_control_slots),
      ("-executor_groups={0}", executor_groups),
  ]
  extra_flags = "".join(" " + template.format(value)
                        for template, value in optional_flags
                        if value is not None)
  return ("-vmodule admission-controller=3 -default_pool_max_requests {0} "
          "-default_pool_max_queued {1} -default_pool_mem_limit {2} {3}".format(
            max_requests, max_queued, pool_max_mem, extra_flags))
def impalad_admission_ctrl_config_args(fs_allocation_file, llama_site_file,
                                       additional_args="", make_copy=False):
  """Build impalad startup args pointing at the given admission-control config
  files under RESOURCES_DIR; with make_copy=True the test operates on copies
  so it can mutate them without touching the checked-in originals."""
  fs_allocation_path = os.path.join(RESOURCES_DIR, fs_allocation_file)
  llama_site_path = os.path.join(RESOURCES_DIR, llama_site_file)
  if make_copy:
    fs_copy = os.path.join(RESOURCES_DIR, "copy-" + fs_allocation_file)
    llama_copy = os.path.join(RESOURCES_DIR, "copy-" + llama_site_file)
    shutil.copy2(fs_allocation_path, fs_copy)
    shutil.copy2(llama_site_path, llama_copy)
    fs_allocation_path = fs_copy
    llama_site_path = llama_copy
  return ("-vmodule admission-controller=3 -fair_scheduler_allocation_path %s "
          "-llama_site_path %s %s" % (fs_allocation_path, llama_site_path,
                                      additional_args))
def log_metrics(log_prefix, metrics):
  """Log the admission-controller counters in *metrics* at INFO level,
  prefixed with *log_prefix*."""
  LOG.info("%sadmitted=%s, queued=%s, dequeued=%s, rejected=%s, "
           "released=%s, timed-out=%s", log_prefix,
           metrics['admitted'], metrics['queued'], metrics['dequeued'],
           metrics['rejected'], metrics['released'], metrics['timed-out'])
def compute_metric_deltas(m2, m1):
  """Returns a dictionary of the differences of metrics in m2 and m1 (m2 - m1),
  keyed by the metric names present in m2; names missing from m1 count as 0."""
  return {name: m2.get(name, 0) - m1.get(name, 0) for name in m2}
def metric_key(pool_name, metric_name):
  """Helper method to construct the admission controller metric keys.

  Note the key format places the metric name BEFORE the pool name."""
  return "admission-controller.{0}.{1}".format(metric_name, pool_name)
class TestAdmissionControllerBase(CustomClusterTestSuite):
  """Base suite for admission-control tests: pins a single exec-option
  dimension and uncompressed text so admission behaviour is the variable."""

  @classmethod
  def get_workload(cls):
    """Return the workload these tests run against."""
    # Bug fix: the first parameter of a @classmethod receives the class and
    # is conventionally named 'cls'; it was previously misnamed 'self'.
    return 'functional-query'

  @classmethod
  def add_test_dimensions(cls):
    super(TestAdmissionControllerBase, cls).add_test_dimensions()
    cls.ImpalaTestMatrix.add_dimension(create_single_exec_option_dimension())
    # There's no reason to test this on other file formats/compression codecs right now
    cls.ImpalaTestMatrix.add_dimension(
        create_uncompressed_text_dimension(cls.get_workload()))
class TestAdmissionController(TestAdmissionControllerBase, HS2TestSuite):
def __check_pool_rejected(self, client, pool, expected_error_re):
  """Submits 'select 1' to 'pool' via 'client' and asserts that the query fails
  with an error matching 'expected_error_re'."""
  try:
    client.set_configuration({'request_pool': pool})
    client.execute("select 1")
  except ImpalaBeeswaxException as e:
    # Expected path: the pool rejected the query with the right message.
    assert re.search(expected_error_re, str(e))
    return
  assert False, "Query should return error"
def __check_query_options(self, profile, expected_query_options):
  """Validate that the expected per-pool query options were set on the specified
  profile. expected_query_options is a list of "KEY=VALUE" strings, e.g.
  ["MEM_LIMIT=1", ...]"""
  # Find the first profile line carrying the query options and split its
  # "<key>: a,b,c" payload into individual KEY=VALUE tokens.
  actual_options = []
  for profile_line in profile.split("\n"):
    if PROFILE_QUERY_OPTIONS_KEY not in profile_line:
      continue
    payload = re.split(": ", profile_line)[1]
    actual_options = re.split(",", payload)
    break
  # Comparison is case-insensitive; every expected option must be present.
  expected_set = {opt.lower() for opt in expected_query_options}
  actual_set = {opt.lower() for opt in actual_options}
  assert expected_set.issubset(actual_set)
def __check_hs2_query_opts(self, pool_name, mem_limit=None, expected_options=None):
  """ Submits a query via HS2 (optionally with a mem_limit in the confOverlay)
  into pool_name and checks that the expected_query_options are set in the
  profile."""
  # Submit "select 1" with the pool (and optional mem_limit) in the HS2
  # configuration overlay.
  execute_statement_req = TCLIService.TExecuteStatementReq()
  execute_statement_req.sessionHandle = self.session_handle
  execute_statement_req.confOverlay = {'request_pool': pool_name}
  if mem_limit is not None: execute_statement_req.confOverlay['mem_limit'] = mem_limit
  execute_statement_req.statement = "select 1"
  execute_statement_resp = self.hs2_client.ExecuteStatement(execute_statement_req)
  HS2TestSuite.check_response(execute_statement_resp)
  # Fetch a single row so the query completes, then close the operation.
  fetch_results_req = TCLIService.TFetchResultsReq()
  fetch_results_req.operationHandle = execute_statement_resp.operationHandle
  fetch_results_req.maxRows = 1
  fetch_results_resp = self.hs2_client.FetchResults(fetch_results_req)
  HS2TestSuite.check_response(fetch_results_resp)
  close_operation_req = TCLIService.TCloseOperationReq()
  close_operation_req.operationHandle = execute_statement_resp.operationHandle
  HS2TestSuite.check_response(self.hs2_client.CloseOperation(close_operation_req))
  # Retrieve the runtime profile (requested after the operation is closed) and
  # verify the expected query options appear in it.
  get_profile_req = ImpalaHiveServer2Service.TGetRuntimeProfileReq()
  get_profile_req.operationHandle = execute_statement_resp.operationHandle
  get_profile_req.sessionHandle = self.session_handle
  get_profile_resp = self.hs2_client.GetRuntimeProfile(get_profile_req)
  HS2TestSuite.check_response(get_profile_resp)
  self.__check_query_options(get_profile_resp.profile, expected_options)
def _execute_and_collect_profiles(self, queries, timeout_s, config_options=None,
                                  allow_query_failure=False):
  """Submit the query statements in 'queries' in parallel to the first impalad in
  the cluster. After submission, the results are fetched from the queries in
  sequence and their profiles are collected. Wait for up to timeout_s for
  each query to finish. If 'allow_query_failure' is True, succeeds if the query
  completes successfully or ends up in the EXCEPTION state. Otherwise expects the
  queries to complete successfully.
  Returns the profile strings."""
  # Fixed: the default was a mutable dict literal ({}), which Python evaluates
  # once at definition time and shares across calls. Use None as the sentinel;
  # callers passing a dict see identical behavior.
  if config_options is None:
    config_options = {}
  client = self.cluster.impalads[0].service.create_beeswax_client()
  expected_states = [client.QUERY_STATES['FINISHED']]
  if allow_query_failure:
    expected_states.append(client.QUERY_STATES['EXCEPTION'])
  try:
    handles = []
    profiles = []
    client.set_configuration(config_options)
    # Submit everything first so the queries run (or queue) concurrently.
    for query in queries:
      handles.append(client.execute_async(query))
    for query, handle in zip(queries, handles):
      state = self.wait_for_any_state(handle, expected_states, timeout_s)
      # Only fetch results for queries that actually finished; a profile is
      # collected for every query either way (callers rely on one profile per
      # submitted query).
      if state == client.QUERY_STATES['FINISHED']:
        self.client.fetch(query, handle)
      profiles.append(self.client.get_runtime_profile(handle))
    return profiles
  finally:
    client.close()
def get_ac_process(self):
  """Returns the Process that is running the admission control service."""
  # By convention the first impalad in the cluster hosts admission control.
  first_impalad = self.cluster.impalads[0]
  return first_impalad
def get_ac_log_name(self):
  """Returns the prefix of the log files for the admission control process."""
  log_file_prefix = "impalad"
  return log_file_prefix
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
    impalad_args=impalad_admission_ctrl_config_args(
        fs_allocation_file="fair-scheduler-test2.xml",
        llama_site_file="llama-site-test2.xml"),
    default_query_options=[('mem_limit', 200000000)],
    statestored_args=_STATESTORED_ARGS)
@needs_session(conf_overlay={'batch_size': '100'})
def test_set_request_pool(self):
  """Tests setting the REQUEST_POOL with the pool placement policy configured
  to require a specific pool, and validate that the per-pool configurations were
  applied."""
  impalad = self.cluster.impalads[0]
  client = impalad.service.create_beeswax_client()
  # Expected default mem limit for queueA, used in several tests below
  queueA_mem_limit = "MEM_LIMIT=%s" % (128 * 1024 * 1024)
  try:
    # Empty and unknown pool names should be rejected by the placement policy.
    for pool in ['', 'not_a_pool_name']:
      expected_error =\
          "No mapping found for request from user '\S+' with requested pool '%s'"\
          % (pool)
      self.__check_pool_rejected(client, pool, expected_error)
    # Check rejected if user does not have access.
    expected_error = "Request from user '\S+' with requested pool 'root.queueC' "\
        "denied access to assigned pool 'root.queueC'"
    self.__check_pool_rejected(client, 'root.queueC', expected_error)
    # Also try setting a valid pool
    client.set_configuration({'request_pool': 'root.queueB'})
    result = client.execute("select 1")
    # Query should execute in queueB which doesn't have a default mem limit set in the
    # llama-site.xml, so it should inherit the value from the default process query
    # options.
    self.__check_query_options(result.runtime_profile,
        ['MEM_LIMIT=200000000', 'REQUEST_POOL=root.queueB'])
    # Try setting the pool for a queue with a very low queue timeout.
    # queueA allows only 1 running query and has a queue timeout of 50ms, so the
    # second concurrent query should time out quickly.
    client.set_configuration({'request_pool': 'root.queueA'})
    handle = client.execute_async("select sleep(1000)")
    # Wait for query to clear admission control and get accounted for
    client.wait_for_admission_control(handle)
    self.__check_pool_rejected(client, 'root.queueA', "exceeded timeout")
    assert client.get_state(handle) == client.QUERY_STATES['FINISHED']
    # queueA has default query options mem_limit=128m,query_timeout_s=5
    self.__check_query_options(client.get_runtime_profile(handle),
        [queueA_mem_limit, 'QUERY_TIMEOUT_S=5', 'REQUEST_POOL=root.queueA'])
    client.close_query(handle)
    # Should be able to set query options via the set command (overriding defaults if
    # applicable). mem_limit overrides the pool default. abort_on_error has no
    # proc/pool default.
    client.execute("set mem_limit=31337")
    client.execute("set abort_on_error=1")
    result = client.execute("select 1")
    self.__check_query_options(result.runtime_profile,
        ['MEM_LIMIT=31337', 'ABORT_ON_ERROR=1', 'QUERY_TIMEOUT_S=5',
        'REQUEST_POOL=root.queueA'])
    # Should be able to set query options (overriding defaults if applicable) with the
    # config overlay sent with the query RPC. mem_limit is a pool-level override and
    # max_io_buffers has no proc/pool default.
    client.set_configuration({'request_pool': 'root.queueA', 'mem_limit': '12345'})
    result = client.execute("select 1")
    self.__check_query_options(result.runtime_profile,
        ['MEM_LIMIT=12345', 'QUERY_TIMEOUT_S=5', 'REQUEST_POOL=root.queueA',
        'ABORT_ON_ERROR=1'])
    # Once options are reset to their defaults, the queue
    # configuration should kick back in. We'll see the
    # queue-configured mem_limit, and we won't see
    # abort on error, because it's back to being the default.
    client.execute('set mem_limit=""')
    client.execute('set abort_on_error=""')
    client.set_configuration({'request_pool': 'root.queueA'})
    result = client.execute("select 1")
    self.__check_query_options(result.runtime_profile,
        [queueA_mem_limit, 'REQUEST_POOL=root.queueA', 'QUERY_TIMEOUT_S=5'])
  finally:
    client.close()
  # HS2 tests:
  # batch_size is set in the HS2 OpenSession() call via the requires_session() test
  # decorator, so that is included in all test cases below.
  batch_size = "BATCH_SIZE=100"
  # Check HS2 query in queueA gets the correct query options for the pool.
  self.__check_hs2_query_opts("root.queueA", None,
      [queueA_mem_limit, 'QUERY_TIMEOUT_S=5', 'REQUEST_POOL=root.queueA', batch_size])
  # Check overriding the mem limit sent in the confOverlay with the query.
  self.__check_hs2_query_opts("root.queueA", '12345',
      ['MEM_LIMIT=12345', 'QUERY_TIMEOUT_S=5', 'REQUEST_POOL=root.queueA', batch_size])
  # Check HS2 query in queueB gets the process-wide default query options
  self.__check_hs2_query_opts("root.queueB", None,
      ['MEM_LIMIT=200000000', 'REQUEST_POOL=root.queueB', batch_size])
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
    impalad_args=impalad_admission_ctrl_config_args(
        fs_allocation_file="fair-scheduler-test2.xml",
        llama_site_file="llama-site-test2.xml",
        additional_args="-require_username -anonymous_user_name="),
    statestored_args=_STATESTORED_ARGS)
def test_require_user(self):
  """With -require_username set and the anonymous user name cleared, a session
  opened with an empty username can connect, but any statement it submits must
  fail with a "User must be specified" error."""
  open_session_req = TCLIService.TOpenSessionReq()
  open_session_req.username = ""
  open_session_resp = self.hs2_client.OpenSession(open_session_req)
  TestAdmissionController.check_response(open_session_resp)
  try:
    execute_statement_req = TCLIService.TExecuteStatementReq()
    execute_statement_req.sessionHandle = open_session_resp.sessionHandle
    execute_statement_req.statement = "select count(1) from functional.alltypes"
    execute_statement_resp = self.hs2_client.ExecuteStatement(execute_statement_req)
    # The statement is expected to transition to the ERROR state rather than run.
    self.wait_for_operation_state(execute_statement_resp.operationHandle,
        TCLIService.TOperationState.ERROR_STATE)
    get_operation_status_resp = self.get_operation_status(
        execute_statement_resp.operationHandle)
    assert "User must be specified" in get_operation_status_resp.errorMessage
  finally:
    # Always close the HS2 session, even if the assertions above fail.
    close_req = TCLIService.TCloseSessionReq()
    close_req.sessionHandle = open_session_resp.sessionHandle
    TestAdmissionController.check_response(self.hs2_client.CloseSession(close_req))
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
    impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=1,
        pool_max_mem=10 * 1024 * 1024, proc_mem_limit=1024 * 1024 * 1024),
    statestored_args=_STATESTORED_ARGS)
def test_trivial_coord_query_limits(self):
  """Tests that trivial coordinator only queries have negligible resource requirements.
  """
  if self.exploration_strategy() != 'exhaustive':
    pytest.skip('runs only in exhaustive')
  # Queries with only constant exprs or limit 0 should be admitted.
  self.execute_query_expect_success(self.client, "select 1")
  self.execute_query_expect_success(self.client,
      "select * from functional.alltypes limit 0")
  # Queries that actually scan data should exceed the tiny 10MB pool configured
  # above and be rejected with a memory error.
  non_trivial_queries = [
      "select * from functional.alltypesagg limit 1",
      "select * from functional.alltypestiny"]
  for query in non_trivial_queries:
    ex = self.execute_query_expect_failure(self.client, query)
    assert re.search("Rejected query from pool default-pool: request memory needed "
        ".* is greater than pool max mem resources 10.00 MB", str(ex))
@SkipIfS3.hdfs_block_size
@SkipIfABFS.hdfs_block_size
@SkipIfADLS.hdfs_block_size
@SkipIfEC.fix_later
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
    impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=1,
        pool_max_mem=40 * 1024 * 1024, proc_mem_limit=1024 * 1024 * 1024),
    statestored_args=_STATESTORED_ARGS)
def test_memory_rejection(self, vector):
  """Test that rejection of queries based on reservation and estimates works as
  expected. The test depends on scanner memory estimates, which different on remote
  filesystems with different (synthetic) block sizes."""
  # Test that the query will be rejected by admission control if:
  # a) the largest per-backend min buffer reservation is larger than the query mem limit
  # b) the largest per-backend min buffer reservation is larger than the
  #    buffer_pool_limit query option
  # c) the cluster-wide min-buffer reservation size is larger than the pool memory
  #    resources.
  self.run_test_case('QueryTest/admission-reject-min-reservation', vector)
  # Test that queries are rejected based on memory estimates. Set num_nodes=1 to
  # avoid unpredictability from scheduling on different backends.
  exec_options = vector.get_value('exec_option')
  exec_options['num_nodes'] = 1
  self.run_test_case('QueryTest/admission-reject-mem-estimate', vector)
# Process mem_limit used in test_mem_limit_upper_bound
# (1 GB; also referenced by several other tests in this class via
# self.PROC_MEM_TEST_LIMIT and in @with_args decorators).
PROC_MEM_TEST_LIMIT = 1024 * 1024 * 1024
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
    impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=1,
        pool_max_mem=10 * PROC_MEM_TEST_LIMIT, proc_mem_limit=PROC_MEM_TEST_LIMIT))
def test_mem_limit_upper_bound(self, vector):
  """ Test to ensure that a query is admitted if the requested memory is equal to the
  process mem limit"""
  query = "select * from functional.alltypesagg limit 1"
  exec_options = vector.get_value('exec_option')
  # Setting requested memory equal to process memory limit
  exec_options['mem_limit'] = self.PROC_MEM_TEST_LIMIT
  self.execute_query_expect_success(self.client, query, exec_options)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
    impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=1,
        pool_max_mem=10 * PROC_MEM_TEST_LIMIT, proc_mem_limit=PROC_MEM_TEST_LIMIT),
    num_exclusive_coordinators=1)
def test_mem_limit_dedicated_coordinator(self, vector):
  """Regression test for IMPALA-8469: coordinator fragment should be admitted on
  dedicated coordinator"""
  query = "select * from functional.alltypesagg limit 1"
  exec_options = vector.get_value('exec_option')
  # Test both single-node and distributed plans
  for num_nodes in [0, 1]:
    # Memory just fits in memory limits
    exec_options['mem_limit'] = self.PROC_MEM_TEST_LIMIT
    exec_options['num_nodes'] = num_nodes
    self.execute_query_expect_success(self.client, query, exec_options)
    # A bit too much memory to run on coordinator.
    # (long() is Python 2 syntax; mem_limit must be integral.)
    exec_options['mem_limit'] = long(self.PROC_MEM_TEST_LIMIT * 1.1)
    ex = self.execute_query_expect_failure(self.client, query, exec_options)
    assert ("Rejected query from pool default-pool: request memory needed "
        "1.10 GB is greater than memory available for admission 1.00 GB" in
        str(ex)), str(ex)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
    impalad_args=impalad_admission_ctrl_config_args(
        fs_allocation_file="mem-limit-test-fair-scheduler.xml",
        llama_site_file="mem-limit-test-llama-site.xml"), num_exclusive_coordinators=1,
    cluster_size=2)
def test_dedicated_coordinator_mem_accounting(self, vector):
  """Verify that when using dedicated coordinators, the memory admitted for and the
  mem limit applied to the query fragments running on the coordinator is different than
  the ones on executors."""
  # Shared verification logic lives in __verify_mem_accounting.
  self.__verify_mem_accounting(vector, using_dedicated_coord_estimates=True)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
    impalad_args=impalad_admission_ctrl_config_args(
        fs_allocation_file="mem-limit-test-fair-scheduler.xml",
        llama_site_file="mem-limit-test-llama-site.xml")
    + " -use_dedicated_coordinator_estimates false",
    num_exclusive_coordinators=1,
    cluster_size=2)
def test_dedicated_coordinator_legacy_mem_accounting(self, vector):
  """Verify that when using dedicated coordinators with specialized dedicated coord
  estimates turned off using a hidden startup param, the memory admitted for and the
  mem limit applied to the query fragments running on the coordinator is the same
  (as expected from legacy behavior)."""
  # Same verification helper as the non-legacy test, with the flag inverted.
  self.__verify_mem_accounting(vector, using_dedicated_coord_estimates=False)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
    impalad_args=impalad_admission_ctrl_config_args(
        fs_allocation_file="mem-limit-test-fair-scheduler.xml",
        llama_site_file="mem-limit-test-llama-site.xml"), num_exclusive_coordinators=1,
    cluster_size=2)
def test_sanity_checks_dedicated_coordinator(self, vector, unique_database):
  """Sanity tests for verifying targeted dedicated coordinator memory estimations and
  behavior."""
  self.client.set_configuration_option('request_pool', "root.regularPool")
  ImpalaTestSuite.change_database(self.client, vector.get_value('table_format'))
  exec_options = vector.get_value('exec_option')
  # Make sure query option MAX_MEM_ESTIMATE_FOR_ADMISSION is enforced on the dedicated
  # coord estimates. Without this query option the estimate would be > 100MB.
  expected_mem = 60 * (1 << 20)  # 60MB
  exec_options['MAX_MEM_ESTIMATE_FOR_ADMISSION'] = expected_mem
  self.client.set_configuration(exec_options)
  handle = self.client.execute_async(QUERY.format(1))
  self.client.wait_for_finished_timeout(handle, 1000)
  # Both coordinator and executor mem-to-admit should be capped at the estimate.
  mem_to_admit = self.__get_mem_limits_admission_debug_page()
  assert abs(mem_to_admit['coordinator'] - expected_mem) < 0.0001,\
      "mem_to_admit:" + str(mem_to_admit)
  assert abs(mem_to_admit['executor'] - expected_mem) < 0.0001, \
      "mem_to_admit:" + str(mem_to_admit)
  self.client.close_query(handle)
  # If the query is only scheduled on the coordinator then the mem to admit on executor
  # should be zero.
  exec_options['NUM_NODES'] = 1
  self.client.set_configuration(exec_options)
  handle = self.client.execute_async(QUERY.format(1))
  self.client.wait_for_finished_timeout(handle, 1000)
  mem_to_admit = self.__get_mem_limits_admission_debug_page()
  assert abs(mem_to_admit['coordinator'] - expected_mem) < 0.0001, \
      "mem_to_admit:" + str(mem_to_admit)
  assert abs(mem_to_admit['executor'] - 0) < 0.0001, \
      "mem_to_admit:" + str(mem_to_admit)
  self.client.close_query(handle)
  # Make sure query execution works perfectly for a query that does not have any
  # fragments scheduled on the coordinator, but has runtime-filters that need to be
  # aggregated at the coordinator.
  exec_options = vector.get_value('exec_option')
  exec_options['RUNTIME_FILTER_WAIT_TIME_MS'] = 30000
  query = """CREATE TABLE {0}.temp_tbl AS SELECT STRAIGHT_JOIN o_orderkey
      FROM tpch_parquet.lineitem INNER JOIN [SHUFFLE] tpch_parquet.orders
      ON o_orderkey = l_orderkey GROUP BY 1""".format(unique_database)
  result = self.execute_query_expect_success(self.client, query, exec_options)
  assert "Runtime filters: All filters arrived" in result.runtime_profile
def __verify_mem_accounting(self, vector, using_dedicated_coord_estimates):
  """Helper method used by test_dedicated_coordinator_*_mem_accounting that verifies
  the actual vs expected values for mem admitted and mem limit for both coord and
  executor. Also verifies that those memory values are different if
  'using_dedicated_coord_estimates' is true."""
  self.client.set_configuration_option('request_pool', "root.regularPool")
  ImpalaTestSuite.change_database(self.client, vector.get_value('table_format'))
  # Use a test query that has unpartitioned non-coordinator fragments to make
  # sure those are handled correctly (IMPALA-10036).
  for query in [QUERY, QUERY_WITH_UNPARTITIONED_FRAGMENTS]:
    handle = self.client.execute_async(query.format(1))
    self.client.wait_for_finished_timeout(handle, 1000)
    # Expected limits come from the admission debug page; actual limits from the
    # per-daemon mem-trackers (memz page); mem admitted from the backends page.
    expected_mem_limits = self.__get_mem_limits_admission_debug_page()
    actual_mem_limits = self.__get_mem_limits_memz_debug_page(handle.get_handle().id)
    mem_admitted =\
        get_mem_admitted_backends_debug_page(self.cluster, self.get_ac_process())
    debug_string = " expected_mem_limits:" + str(
        expected_mem_limits) + " actual_mem_limits:" + str(
        actual_mem_limits) + " mem_admitted:" + str(mem_admitted)
    MB = 1 << 20
    # Easiest way to check float in-equality.
    assert abs(expected_mem_limits['coordinator'] - expected_mem_limits[
        'executor']) > 0.0001 or not using_dedicated_coord_estimates, debug_string
    # There may be some rounding errors so keep a margin of 5MB when verifying
    assert abs(actual_mem_limits['coordinator'] - expected_mem_limits[
        'coordinator']) < 5 * MB, debug_string
    assert abs(actual_mem_limits['executor'] - expected_mem_limits[
        'executor']) < 5 * MB, debug_string
    assert abs(mem_admitted['coordinator'] - expected_mem_limits[
        'coordinator']) < 5 * MB, debug_string
    assert abs(
        mem_admitted['executor'][0] - expected_mem_limits['executor']) < 5 * MB, \
        debug_string
    # Ensure all fragments finish executing before running next query.
    self.client.fetch(query, handle)
    self.client.close_query(handle)
def __get_mem_limits_admission_debug_page(self):
  """Helper method assumes a 2 node cluster using a dedicated coordinator. Returns the
  mem_limit calculated by the admission controller from the impala admission debug page
  of the coordinator impala daemon. Returns a dictionary with the keys 'coordinator'
  and 'executor' and their respective mem values in bytes."""
  # Based on how the cluster is setup, the first impalad in the cluster is the
  # coordinator.
  response_json = self.get_ac_process().service.get_debug_webpage_json("admission")
  assert 'resource_pools' in response_json
  pools = response_json['resource_pools']
  assert len(pools) == 1
  running_queries = pools[0]['running_queries']
  assert running_queries
  assert len(running_queries) == 1
  query_info = running_queries[0]
  return {'coordinator': float(query_info["coord_mem_to_admit"]),
          'executor': float(query_info["mem_limit"])}
def __get_mem_limits_memz_debug_page(self, query_id):
  """Helper method assumes a 2 node cluster using a dedicated coordinator. Returns the
  mem limits enforced on the query (identified by the 'query_id') extracted from
  mem-tracker's output on the memz debug page of the dedicated coordinator and the
  executor impala daemons. Returns a dictionary with the keys 'coordinator' and
  'executor' and their respective mem values in bytes."""
  metric_name = "Query({0})".format(query_id)
  # Based on how the cluster is setup, the first impalad in the cluster is the
  # coordinator and the second is the executor.
  limits = []
  for impalad in self.cluster.impalads:
    tracker_values = MemUsageVerifier(impalad.service).get_mem_usage_values(metric_name)
    limits.append(float(tracker_values['limit']))
  return {'coordinator': limits[0], 'executor': limits[1]}
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(num_exclusive_coordinators=1)
def test_dedicated_coordinator_planner_estimates(self, vector, unique_database):
  """Planner tests to add coverage for coordinator estimates when using dedicated
  coordinators. Also includes coverage for verifying cluster memory admitted."""
  # NOTE(review): 'copy' here is presumably copy.copy (shallow copy) imported at
  # module scope, so the shared vector itself is not mutated below — confirm.
  vector_copy = copy(vector)
  exec_options = vector_copy.get_value('exec_option')
  # Remove num_nodes from the options to allow test case runner to set it in one of
  # the test cases.
  del exec_options['num_nodes']
  exec_options['num_scanner_threads'] = 1  # To make estimates consistently reproducible
  self.run_test_case('QueryTest/dedicated-coord-mem-estimates', vector_copy,
      unique_database)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(num_exclusive_coordinators=1, cluster_size=2)
def test_mem_limit_executors(self, vector, unique_database):
  """Verify that the query option mem_limit_executors is only enforced on the
  executors."""
  expected_exec_mem_limit = "999999999"
  ImpalaTestSuite.change_database(self.client, vector.get_value('table_format'))
  self.client.set_configuration({"MEM_LIMIT_EXECUTORS": expected_exec_mem_limit})
  handle = self.client.execute_async(QUERY.format(1))
  self.client.wait_for_finished_timeout(handle, 1000)
  # The executor limit must equal the option value exactly, and must exceed the
  # coordinator's limit (which the option does not apply to).
  expected_mem_limits = self.__get_mem_limits_admission_debug_page()
  assert expected_mem_limits['executor'] > expected_mem_limits[
      'coordinator'], expected_mem_limits
  assert expected_mem_limits['executor'] == float(
      expected_exec_mem_limit), expected_mem_limits
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
    impalad_args=impalad_admission_ctrl_flags(max_requests=2, max_queued=1,
        pool_max_mem=10 * PROC_MEM_TEST_LIMIT,
        queue_wait_timeout_ms=2 * STATESTORE_RPC_FREQUENCY_MS),
    start_args="--per_impalad_args=-mem_limit=3G;-mem_limit=3G;-mem_limit=2G",
    statestored_args=_STATESTORED_ARGS)
def test_heterogeneous_proc_mem_limit(self, vector):
  """ Test to ensure that the admission controller takes into account the actual proc
  mem limits of each impalad. Starts a cluster where the last impalad has a smaller
  proc mem limit than other impalads and runs queries where admission/rejection decision
  depends on the coordinator knowing the other impalad's mem limits.
  The queue_wait_timeout_ms has been set to be more than the prioritized statestore
  update time, so that the queries don't time out before receiving updates to pool
  stats"""
  # Choose a query that runs on all 3 backends.
  query = "select * from functional.alltypesagg, (select 1) B limit 1"
  # Successfully run a query with mem limit equal to the lowest process memory among
  # impalads
  exec_options = copy(vector.get_value('exec_option'))
  exec_options['mem_limit'] = "2G"
  self.execute_query_expect_success(self.client, query, exec_options)
  # Test that a query scheduled to run on a single node and submitted to the impalad
  # with higher proc mem limit succeeds.
  exec_options = copy(vector.get_value('exec_option'))
  exec_options['mem_limit'] = "3G"
  exec_options['num_nodes'] = "1"
  self.execute_query_expect_success(self.client, query, exec_options)
  # Exercise rejection checks in admission controller.
  try:
    exec_options = copy(vector.get_value('exec_option'))
    exec_options['mem_limit'] = "3G"
    self.execute_query(query, exec_options)
  except ImpalaBeeswaxException as e:
    assert re.search("Rejected query from pool \S+: request memory needed 3.00 GB"
        " is greater than memory available for admission 2.00 GB of \S+", str(e)), \
        str(e)
  # Exercise queuing checks in admission controller.
  # Fixed: initialize before the try block so the finally clause below cannot hit
  # a NameError if an exception is raised before the client is created (e.g. in
  # wait_for_metric_value).
  impalad_with_2g_mem = None
  try:
    # Wait for previous queries to finish to avoid flakiness.
    for impalad in self.cluster.impalads:
      impalad.service.wait_for_metric_value("impala-server.num-fragments-in-flight", 0)
    impalad_with_2g_mem = self.cluster.impalads[2].service.create_beeswax_client()
    impalad_with_2g_mem.set_configuration_option('mem_limit', '1G')
    impalad_with_2g_mem.execute_async("select sleep(1000)")
    # Wait for statestore update to update the mem admitted in each node.
    sleep(STATESTORE_RPC_FREQUENCY_MS / 1000)
    exec_options = copy(vector.get_value('exec_option'))
    exec_options['mem_limit'] = "2G"
    # Since Queuing is synchronous and we can't close the previous query till this
    # returns, we wait for this to timeout instead.
    self.execute_query(query, exec_options)
  except ImpalaBeeswaxException as e:
    assert re.search("Queued reason: Not enough memory available on host \S+.Needed "
        "2.00 GB but only 1.00 GB out of 2.00 GB was available.", str(e)), str(e)
  finally:
    if impalad_with_2g_mem is not None:
      impalad_with_2g_mem.close()
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
    impalad_args="--logbuflevel=-1 " + impalad_admission_ctrl_flags(max_requests=1,
        max_queued=1, pool_max_mem=PROC_MEM_TEST_LIMIT),
    statestored_args=_STATESTORED_ARGS)
def test_cancellation(self):
  """ Test to confirm that all Async cancellation windows are hit and are able to
  succesfully cancel the query"""
  impalad = self.cluster.impalads[0]
  client = impalad.service.create_beeswax_client()
  try:
    # Window 1: cancelled during the debug-action sleep before admission; with
    # mem_limit over the process limit the query would otherwise be rejected.
    client.set_configuration_option("debug_action", "AC_BEFORE_ADMISSION:SLEEP@2000")
    client.set_configuration_option("mem_limit", self.PROC_MEM_TEST_LIMIT + 1)
    handle = client.execute_async("select 1")
    sleep(1)
    client.close_query(handle)
    self.assert_log_contains(self.get_ac_log_name(), 'INFO',
        "Ready to be Rejected but already cancelled, query id=")
    # Window 2: cancelled before admission; the query would otherwise be admitted
    # immediately.
    client.clear_configuration()
    client.set_configuration_option("debug_action", "AC_BEFORE_ADMISSION:SLEEP@2000")
    handle = client.execute_async("select 2")
    sleep(1)
    client.close_query(handle)
    self.assert_log_contains(self.get_ac_log_name(), 'INFO',
        "Ready to be Admitted immediately but already cancelled, query id=")
    # Window 3: cancelled while the coordinator is starting.
    client.set_configuration_option("debug_action",
        "CRS_BEFORE_COORD_STARTS:SLEEP@2000")
    handle = client.execute_async("select 3")
    sleep(1)
    client.close_query(handle)
    self.assert_impalad_log_contains('INFO',
        "Cancelled right after starting the coordinator query id=")
    # Window 4: cancelled right after the coordinator started (asserts a second
    # occurrence of the same log line).
    client.set_configuration_option("debug_action", "CRS_AFTER_COORD_STARTS:SLEEP@2000")
    handle = client.execute_async("select 4")
    sleep(1)
    client.close_query(handle)
    self.assert_impalad_log_contains('INFO',
        "Cancelled right after starting the coordinator query id=", 2)
    # Window 5: cancel a query that is queued behind a running query while the
    # dequeue path is delayed by the debug action.
    client.clear_configuration()
    handle = client.execute_async("select sleep(10000)")
    client.set_configuration_option("debug_action",
        "AC_AFTER_ADMISSION_OUTCOME:SLEEP@2000")
    queued_query_handle = client.execute_async("select 5")
    sleep(1)
    assert client.get_state(queued_query_handle) == QueryState.COMPILED
    assert "Admission result: Queued" in client.get_runtime_profile(queued_query_handle)
    # Only cancel the queued query, because close will wait till it unregisters, this
    # gives us a chance to close the running query and allow the dequeue thread to
    # dequeue the queue query
    client.cancel(queued_query_handle)
    client.close_query(handle)
    client.close_query(queued_query_handle)
    queued_profile = client.get_runtime_profile(queued_query_handle)
    assert "Admission result: Cancelled (queued)" in queued_profile, queued_profile
    self.assert_log_contains(
        self.get_ac_log_name(), 'INFO', "Dequeued cancelled query=")
    # Window 6: close a queued query directly while it is still queued.
    client.clear_configuration()
    handle = client.execute_async("select sleep(10000)")
    queued_query_handle = client.execute_async("select 6")
    sleep(1)
    assert client.get_state(queued_query_handle) == QueryState.COMPILED
    assert "Admission result: Queued" in client.get_runtime_profile(queued_query_handle)
    client.close_query(queued_query_handle)
    client.close_query(handle)
    queued_profile = client.get_runtime_profile(queued_query_handle)
    assert "Admission result: Cancelled (queued)" in queued_profile
    # Drain all fragments, then check the aggregate admission metrics: nothing
    # running, 4 queries admitted and 2 queued over the whole test.
    for i in self.cluster.impalads:
      i.service.wait_for_metric_value(
          "impala-server.num-fragments-in-flight", 0, timeout=20)
    assert self.get_ac_process().service.get_metric_value(
        "admission-controller.agg-num-running.default-pool") == 0
    assert self.get_ac_process().service.get_metric_value(
        "admission-controller.total-admitted.default-pool") == 4
    assert self.get_ac_process().service.get_metric_value(
        "admission-controller.total-queued.default-pool") == 2
  finally:
    client.close()
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
    impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=10,
        pool_max_mem=1024 * 1024 * 1024),
    statestored_args=_STATESTORED_ARGS)
def test_queue_reasons_num_queries(self):
  """Test that queue details appear in the profile when queued based on num_queries."""
  # Run a bunch of queries - one should get admitted immediately, the rest should
  # be dequeued one-by-one.
  STMT = "select sleep(1000)"
  TIMEOUT_S = 60
  EXPECTED_REASON = \
      "Latest admission queue reason: number of running queries 1 is at or over limit 1"
  NUM_QUERIES = 5
  # (xrange is Python 2 syntax.)
  profiles = self._execute_and_collect_profiles([STMT for i in xrange(NUM_QUERIES)],
      TIMEOUT_S)
  num_reasons = len([profile for profile in profiles if EXPECTED_REASON in profile])
  assert num_reasons == NUM_QUERIES - 1, \
      "All queries except first should have been queued: " + '\n===\n'.join(profiles)
  init_queue_reasons = self.__extract_init_queue_reasons(profiles)
  assert len(init_queue_reasons) == NUM_QUERIES - 1, \
      "All queries except first should have been queued: " + '\n===\n'.join(profiles)
  # Exactly one query is queued directly because of the running-query limit; the
  # remaining ones queue because the queue is already non-empty.
  over_limit_details = [detail
      for detail in init_queue_reasons if 'number of running queries' in detail]
  assert len(over_limit_details) == 1, \
      "One query initially queued because of num_queries: " + '\n===\n'.join(profiles)
  queue_not_empty_details = [detail
      for detail in init_queue_reasons if 'queue is not empty' in detail]
  assert len(queue_not_empty_details) == NUM_QUERIES - 2, \
      "Others queued because of non-empty queue: " + '\n===\n'.join(profiles)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
    impalad_args=impalad_admission_ctrl_flags(max_requests=10, max_queued=10,
        pool_max_mem=10 * 1024 * 1024),
    statestored_args=_STATESTORED_ARGS)
def test_queue_reasons_memory(self):
  """Test that queue details appear in the profile when queued based on memory."""
  # Run a bunch of queries with mem_limit set so that only one can be admitted at a
  # time- one should get admitted immediately, the rest should be dequeued one-by-one.
  STMT = "select sleep(100)"
  TIMEOUT_S = 60
  EXPECTED_REASON = "Latest admission queue reason: Not enough aggregate memory " +\
      "available in pool default-pool with max mem resources 10.00 MB. Needed 9.00 MB" \
      " but only 1.00 MB was available."
  NUM_QUERIES = 5
  # 9MB mem_limit against a 10MB pool => only one query fits at a time.
  # (xrange is Python 2 syntax.)
  profiles = self._execute_and_collect_profiles([STMT for i in xrange(NUM_QUERIES)],
      TIMEOUT_S, {'mem_limit': '9mb'})
  num_reasons = len([profile for profile in profiles if EXPECTED_REASON in profile])
  assert num_reasons == NUM_QUERIES - 1, \
      "All queries except first should have been queued: " + '\n===\n'.join(profiles)
  init_queue_reasons = self.__extract_init_queue_reasons(profiles)
  assert len(init_queue_reasons) == NUM_QUERIES - 1, \
      "All queries except first should have been queued: " + '\n===\n'.join(profiles)
  # Exactly one query is queued directly because of memory; the rest queue
  # because the queue is already non-empty.
  over_limit_details = [detail for detail in init_queue_reasons
      if 'Not enough aggregate memory available' in detail]
  assert len(over_limit_details) == 1, \
      "One query initially queued because of memory: " + '\n===\n'.join(profiles)
  queue_not_empty_details = [detail
      for detail in init_queue_reasons if 'queue is not empty' in detail]
  assert len(queue_not_empty_details) == NUM_QUERIES - 2, \
      "Others queued because of non-empty queue: " + '\n===\n'.join(profiles)
def __extract_init_queue_reasons(self, profiles):
"""Return a list of the 'Admission Queue details' strings found in 'profiles'"""
matches = [re.search(INITIAL_QUEUE_REASON_REGEX, profile) for profile in profiles]
return [match.group(0) for match in matches if match is not None]
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=10, max_queued=10,
pool_max_mem=10 * 1024 * 1024, proc_mem_limit=2 * 1024 * 1024,
queue_wait_timeout_ms=1000),
statestored_args=_STATESTORED_ARGS)
  def test_timeout_reason_host_memory(self):
    """Test that queue details appear in the profile when queued and then timed out
    due to a small 2MB host memory limit configuration."""
    # Run a bunch of queries with mem_limit set so that only one can be admitted
    # immediately. The rest should be queued and dequeued (timeout) due to host memory
    # pressure.
    STMT = "select sleep(100)"
    TIMEOUT_S = 20
    NUM_QUERIES = 5
    # The final 'True' allows query failures: the queued queries are expected to fail
    # once the cluster's 1000ms queue timeout (see flags above) expires.
    profiles = self._execute_and_collect_profiles([STMT for i in xrange(NUM_QUERIES)],
        TIMEOUT_S, {'mem_limit': '2mb'}, True)
    # Multi-part pattern matched with re.DOTALL below, so each '.*' may span newlines:
    # the timeout message must be followed by the host memory diagnostics, including
    # the topN and all-query stats sections.
    EXPECTED_REASON = """.*Admission for query exceeded timeout 1000ms in pool """\
        """default-pool.*"""\
        """Not enough memory available on host.*"""\
        """Stats for host.*"""\
        """topN_query_stats.*"""\
        """all_query_stats:.*"""
    num_reasons = len([profile for profile in profiles
                       if re.search(EXPECTED_REASON, profile, re.DOTALL)])
    # At least one timed-out query must carry the full diagnostic detail.
    assert num_reasons >= 1, \
        "At least one query should have been timed out with topN query details: " +\
        '\n===\n'.join(profiles)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=10, max_queued=10,
pool_max_mem=2 * 1024 * 1024, proc_mem_limit=20 * 1024 * 1024,
queue_wait_timeout_ms=1000),
statestored_args=_STATESTORED_ARGS)
  def test_timeout_reason_pool_memory(self):
    """Test that queue details appear in the profile when queued and then timed out
    due to a small 2MB pool memory limit configuration."""
    # Run a bunch of queries with mem_limit set so that only one can be admitted
    # immediately. The rest should be queued and dequeued (timeout) due to pool memory
    # pressure.
    STMT = "select sleep(100)"
    TIMEOUT_S = 20
    NUM_QUERIES = 5
    # The final 'True' allows query failures: the queued queries are expected to fail
    # once the cluster's 1000ms queue timeout (see flags above) expires.
    profiles = self._execute_and_collect_profiles([STMT for i in xrange(NUM_QUERIES)],
        TIMEOUT_S, {'mem_limit': '2mb'}, True)
    # Multi-part pattern matched with re.DOTALL below, so each '.*' may span newlines:
    # the timeout message must be followed by the pool-level memory diagnostics,
    # including the topN query stats section.
    EXPECTED_REASON = """.*Admission for query exceeded timeout 1000ms in pool """\
        """default-pool.*"""\
        """Not enough aggregate memory available in pool default-pool.*"""\
        """Aggregated stats for pool.*"""\
        """topN_query_stats.*"""
    num_reasons = len([profile for profile in profiles
                       if re.search(EXPECTED_REASON, profile, re.DOTALL)])
    # At least one timed-out query must carry the full diagnostic detail.
    assert num_reasons >= 1, \
        "At least one query should have been timed out with topN query details: " +\
        '\n===\n'.join(profiles)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=100, max_queued=10,
pool_max_mem=-1, admission_control_slots=4,
executor_groups="default-pool-group1"),
statestored_args=_STATESTORED_ARGS)
  def test_queue_reasons_slots(self):
    """Test that queue details appear in the profile when queued based on number of
    slots."""
    # Run a bunch of queries - one should get admitted immediately, the rest should
    # be dequeued one-by-one.
    STMT = "select min(ss_wholesale_cost) from tpcds_parquet.store_sales"
    TIMEOUT_S = 60
    EXPECTED_REASON = "Latest admission queue reason: Not enough admission control " +\
        "slots available on host"
    NUM_QUERIES = 5
    # Snapshot this metric now; the dequeue failures caused by the coordinator's
    # limited slots should bump it by the end of the test.
    coordinator_limited_metric = \
        "admission-controller.total-dequeue-failed-coordinator-limited"
    original_metric_value = self.get_ac_process().service.get_metric_value(
        coordinator_limited_metric)
    # Queries run with mt_dop=4 against admission_control_slots=4 (see flags above),
    # so concurrent queries contend for slots.
    profiles = self._execute_and_collect_profiles([STMT for i in xrange(NUM_QUERIES)],
        TIMEOUT_S, config_options={"mt_dop": 4})
    num_reasons = len([profile for profile in profiles if EXPECTED_REASON in profile])
    assert num_reasons == NUM_QUERIES - 1, \
        "All queries except first should have been queued: " + '\n===\n'.join(profiles)
    init_queue_reasons = self.__extract_init_queue_reasons(profiles)
    assert len(init_queue_reasons) == NUM_QUERIES - 1, \
        "All queries except first should have been queued: " + '\n===\n'.join(profiles)
    # Exactly one query was queued because of slot exhaustion ...
    over_limit_details = [detail
        for detail in init_queue_reasons
        if "Not enough admission control slots available on host" in detail]
    assert len(over_limit_details) == 1, \
        "One query initially queued because of slots: " + '\n===\n'.join(profiles)
    # ... and the remaining ones because the queue was already non-empty.
    queue_not_empty_details = [detail
        for detail in init_queue_reasons if 'queue is not empty' in detail]
    assert len(queue_not_empty_details) == NUM_QUERIES - 2, \
        "Others queued because of non-empty queue: " + '\n===\n'.join(profiles)
    # Confirm that the cluster quiesces and all metrics return to zero.
    for impalad in self.cluster.impalads:
      verifier = MetricVerifier(impalad.service)
      verifier.wait_for_backend_admission_control_state()
    # The number of admission control slots on the coordinator is limited
    # so the failures to dequeue should trigger a bump in the coordinator_limited_metric.
    later_metric_value = self.get_ac_process().service.get_metric_value(
        coordinator_limited_metric)
    assert later_metric_value > original_metric_value, \
        "Metric %s did not change" % coordinator_limited_metric
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=10,
pool_max_mem=1024 * 1024 * 1024),
statestored_args=_STATESTORED_ARGS)
  def test_query_locations_correctness(self, vector):
    """Regression test for IMPALA-7516: Test to make sure query locations and in-flight
    queries are correct for different admission results that can affect it."""
    if self.exploration_strategy() != 'exhaustive':
      pytest.skip('runs only in exhaustive')
    # Choose a query that runs on all 3 backends.
    query = "select * from functional.alltypesagg A, (select sleep(10000)) B limit 1"
    # Case 1: When a query runs successfully.
    handle = self.client.execute_async(query)
    self.__assert_num_queries_accounted(1)
    self.close_query(handle)
    self.__assert_num_queries_accounted(0)
    # Case 2: When a query is queued then cancelled
    handle_running = self.client.execute_async(query)
    self.client.wait_for_admission_control(handle_running)
    handle_queued = self.client.execute_async(query)
    self.client.wait_for_admission_control(handle_queued)
    self.get_ac_process().service.wait_for_metric_value(
        "admission-controller.total-queued.default-pool", 1)
    # Queued queries don't show up on backends: 1 running + 1 queued expected.
    self.__assert_num_queries_accounted(1, 1)
    # First close the queued query
    self.close_query(handle_queued)
    self.close_query(handle_running)
    self.__assert_num_queries_accounted(0)
    # Case 3: When a query gets rejected
    # Copy the exec options so the 1-byte mem_limit below does not leak into other
    # tests sharing this vector.
    exec_options = copy(vector.get_value('exec_option'))
    exec_options['mem_limit'] = "1b"
    self.execute_query_expect_failure(self.client, query, exec_options)
    self.__assert_num_queries_accounted(0)
def __assert_num_queries_accounted(self, num_running, num_queued=0):
"""Checks if the num of queries accounted by query_locations and in-flight are as
expected"""
# Wait for queries to start/un-register.
num_inflight = num_running + num_queued
assert self.impalad_test_service.wait_for_num_in_flight_queries(num_inflight)
query_locations = self.impalad_test_service.get_query_locations()
for host, num_q in query_locations.items():
assert num_q == num_running, "There should be {0} running queries on either " \
"impalads: {0}".format(query_locations)
@SkipIfNotHdfsMinicluster.tuned_for_minicluster
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_config_args(
fs_allocation_file="mem-limit-test-fair-scheduler.xml",
llama_site_file="mem-limit-test-llama-site.xml", make_copy=True),
statestored_args=_STATESTORED_ARGS)
def test_pool_mem_limit_configs(self, vector):
"""Runs functional tests for the max/min_query_mem_limit pool config attributes"""
exec_options = vector.get_value('exec_option')
# Set this to the default.
exec_options['exec_single_node_rows_threshold'] = 100
# Set num_nodes to 1 since its easier to see one-to-one mapping of per_host and
# per_cluster values used in the test.
exec_options['num_nodes'] = 1
self.run_test_case('QueryTest/admission-max-min-mem-limits', vector)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_config_args(
fs_allocation_file="mem-limit-test-fair-scheduler.xml",
llama_site_file="mem-limit-test-llama-site.xml",
additional_args="-default_pool_max_requests 1", make_copy=True),
statestored_args=_STATESTORED_ARGS)
def test_pool_config_change_while_queued(self, vector):
"""Tests that the invalid checks work even if the query is queued. Makes sure that a
queued query is dequeued and rejected if the config is invalid."""
pool_name = "invalidTestPool"
config_str = "max-query-mem-limit"
self.client.set_configuration_option('request_pool', pool_name)
# Setup to queue a query.
sleep_query_handle = self.client.execute_async("select sleep(10000)")
self.client.wait_for_admission_control(sleep_query_handle)
self.__wait_for_change_to_profile(sleep_query_handle,
"Admission result: Admitted immediately")
queued_query_handle = self.client.execute_async("select 2")
self.__wait_for_change_to_profile(queued_query_handle, "Admission result: Queued")
# Change config to be invalid.
llama_site_path = os.path.join(RESOURCES_DIR, "copy-mem-limit-test-llama-site.xml")
config = ResourcePoolConfig(
self.cluster.impalads[0].service, self.get_ac_process().service, llama_site_path)
config.set_config_value(pool_name, config_str, 1)
# Close running query so the queued one gets a chance.
self.client.close_query(sleep_query_handle)
# Observe that the queued query fails.
self.wait_for_state(queued_query_handle, QueryState.EXCEPTION, 20),
self.close_query(queued_query_handle)
# Change the config back to a valid value
config.set_config_value(pool_name, config_str, 0)
# Now do the same thing for change to pool.max-query-mem-limit such that it can no
# longer accommodate the largest min_reservation.
# Setup to queue a query.
sleep_query_handle = self.client.execute_async("select sleep(10000)")
self.client.wait_for_admission_control(sleep_query_handle)
queued_query_handle = self.client.execute_async(
"select * from functional_parquet.alltypes limit 1")
self.__wait_for_change_to_profile(queued_query_handle, "Admission result: Queued")
# Change config to something less than the what is required to accommodate the
# largest min_reservation (which in this case is 32.09 MB.
config.set_config_value(pool_name, config_str, 25 * 1024 * 1024)
# Close running query so the queued one gets a chance.
self.client.close_query(sleep_query_handle)
# Observe that the queued query fails.
self.wait_for_state(queued_query_handle, QueryState.EXCEPTION, 20),
self.close_query(queued_query_handle)
def __wait_for_change_to_profile(self, query_handle, search_string, timeout=20):
for _ in range(timeout * 10):
profile = self.client.get_runtime_profile(query_handle)
if search_string in profile:
return
sleep(0.1)
assert False, "Timed out waiting for change to profile\nSearch " \
"String: {0}\nProfile:\n{1}".format(search_string, str(profile))
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=10,
pool_max_mem=1024 * 1024 * 1024))
@needs_session()
def test_queuing_status_through_query_log_and_exec_summary(self):
"""Test to verify that the HS2 client's GetLog() call and the ExecSummary expose
the query's queuing status, that is, whether the query was queued and what was the
latest queuing reason."""
# Start a long running query.
long_query_resp = self.execute_statement("select sleep(10000)")
# Ensure that the query has started executing.
self.wait_for_admission_control(long_query_resp.operationHandle)
# Submit another query.
queued_query_resp = self.execute_statement("select 1")
# Wait until the query is queued.
self.wait_for_operation_state(queued_query_resp.operationHandle,
TCLIService.TOperationState.PENDING_STATE)
# Check whether the query log message correctly exposes the queuing status.
log = self.wait_for_log_message(
queued_query_resp.operationHandle, "Admission result :")
assert "Admission result : Queued" in log, log
assert "Latest admission queue reason : number of running queries 1 is at or over "
"limit 1" in log, log
# Now check the same for ExecSummary.
summary_req = ImpalaHiveServer2Service.TGetExecSummaryReq()
summary_req.operationHandle = queued_query_resp.operationHandle
summary_req.sessionHandle = self.session_handle
exec_summary_resp = self.hs2_client.GetExecSummary(summary_req)
assert exec_summary_resp.summary.is_queued
assert "number of running queries 1 is at or over limit 1" in \
exec_summary_resp.summary.queued_reason,\
exec_summary_resp.summary.queued_reason
# Close the running query.
self.close(long_query_resp.operationHandle)
# Close the queued query.
self.close(queued_query_resp.operationHandle)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args=impalad_admission_ctrl_flags(max_requests=1, max_queued=3,
pool_max_mem=1024 * 1024 * 1024) +
" --admission_control_stale_topic_threshold_ms={0}".format(
STALE_TOPIC_THRESHOLD_MS),
statestored_args=_STATESTORED_ARGS)
  def test_statestore_outage(self):
    """Test behaviour with a failed statestore. Queries should continue to be admitted
    but we should generate diagnostics about the stale topic."""
    self.cluster.statestored.kill()
    impalad = self.get_ac_process()
    # Sleep until the update should be definitely stale.
    sleep(STALE_TOPIC_THRESHOLD_MS / 1000. * 1.5)
    # The /admission debug page should report how long ago the last statestore update
    # arrived, along with a staleness warning.
    ac_json = impalad.service.get_debug_webpage_json('/admission')
    ms_since_update = ac_json["statestore_admission_control_time_since_last_update_ms"]
    assert ms_since_update > STALE_TOPIC_THRESHOLD_MS
    assert ("Warning: admission control information from statestore is stale:" in
        ac_json["statestore_update_staleness_detail"])
    # Submit a batch of queries. One should get to run, one will be rejected because
    # of the full queue, and the others will run after being queued.
    STMT = "select sleep(100)"
    TIMEOUT_S = 60
    NUM_QUERIES = 5
    profiles = self._execute_and_collect_profiles([STMT for i in xrange(NUM_QUERIES)],
        TIMEOUT_S, allow_query_failure=True)
    ADMITTED_STALENESS_WARNING = \
        "Warning: admission control information from statestore is stale"
    ADMITTED_STALENESS_PROFILE_ENTRY = \
        "Admission control state staleness: " + ADMITTED_STALENESS_WARNING
    # Classify each profile as admitted-immediately, rejected, or queued; every
    # outcome must surface the staleness warning in the appropriate place.
    num_queued = 0
    num_admitted_immediately = 0
    num_rejected = 0
    for profile in profiles:
      if "Admission result: Admitted immediately" in profile:
        assert ADMITTED_STALENESS_PROFILE_ENTRY in profile, profile
        num_admitted_immediately += 1
      elif "Admission result: Rejected" in profile:
        num_rejected += 1
        # Check that the rejection error returned to the client contains a warning.
        query_statuses = [line for line in profile.split("\n")
            if "Query Status:" in line]
        assert len(query_statuses) == 1, profile
        assert ADMITTED_STALENESS_WARNING in query_statuses[0]
      else:
        assert "Admission result: Admitted (queued)" in profile, profile
        assert ADMITTED_STALENESS_PROFILE_ENTRY in profile, profile
        # Check that the queued reason contains a warning.
        queued_reasons = [line for line in profile.split("\n")
            if "Initial admission queue reason:" in line]
        assert len(queued_reasons) == 1, profile
        assert ADMITTED_STALENESS_WARNING in queued_reasons[0]
        num_queued += 1
    # With max_queued=3 (see flags above): 1 admitted, 3 queued, the rest rejected.
    assert num_admitted_immediately == 1
    assert num_queued == 3
    assert num_rejected == NUM_QUERIES - num_admitted_immediately - num_queued
@pytest.mark.execute_serially
def test_impala_server_startup_delay(self):
"""This test verifies that queries get queued when the coordinator has already started
accepting client connections during startup, but the local backend descriptor is not
yet available."""
server_start_delay_s = 20
# We need to start the cluster here instead of during setup_method() so we can launch
# it from a separate thread.
def start_cluster():
LOG.info("Starting cluster")
impalad_args = "--debug_actions=IMPALA_SERVER_END_OF_START:SLEEP@%s" % (
1000 * server_start_delay_s)
self._start_impala_cluster(['--impalad_args=%s' % impalad_args])
# Initiate the cluster start
start_cluster_thread = threading.Thread(target=start_cluster)
start_cluster_thread.start()
# Wait some time to arrive at IMPALA_SERVER_END_OF_START
sleep(server_start_delay_s)
# With a new client, execute a query and observe that it gets queued and ultimately
# succeeds.
client = self.create_impala_client()
result = self.execute_query_expect_success(client, "select 1")
start_cluster_thread.join()
profile = result.runtime_profile
reasons = self.__extract_init_queue_reasons([profile])
assert len(reasons) == 1
assert "Coordinator not registered with the statestore." in reasons[0]
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(num_exclusive_coordinators=1)
def test_release_backends(self, vector):
"""Test that executor backends are shutdown when they complete, that completed
executor backends release their admitted memory, and that
NumCompletedBackends is updated each time an executor backend completes."""
if self.exploration_strategy() != 'exhaustive':
pytest.skip('runs only in exhaustive')
# Craft a query where part of the executor backends completes, while the rest remain
# running indefinitely. The query forces the 'lineitem' table to be treated as the
# small table even though it is bigger than the 'customer' table. This forces the
# small table scan ('lineitem' scan) to run on two nodes and the big table scan
# ('customers' scan) to run on a single node. By using debug actions to force the
# big table scan to hang indefinitely, the small table scan should finish quickly.
# This causes one executor backend to complete quickly, and causes the other one to
# hang.
vector.get_value('exec_option')['debug_action'] = '0:GETNEXT:WAIT'
query = "select STRAIGHT_JOIN * from tpch.customer JOIN /* +BROADCAST */ " \
"tpch.lineitem where customer.c_custkey = lineitem.l_orderkey limit 100"
# Amount of time to wait for the query to reach the running state before throwing a
# Timeout exception.
timeout = 10
handle = self.execute_query_async(query, vector.get_value('exec_option'))
try:
# Wait for the query to reach the running state (it should never reach the finished
# state because of the 'WAIT' debug action), wait for the 'lineitem' scan to
# complete, and then validate that one of the executor backends shutdowns and
# releases its admitted memory.
self.wait_for_state(handle, self.client.QUERY_STATES['RUNNING'], timeout)
# Once the 'lineitem' scan completes, NumCompletedBackends should be 1.
self.assert_eventually(60, 1, lambda: "NumCompletedBackends: 1 (1)"
in self.client.get_runtime_profile(handle))
get_num_completed_backends(self.cluster.impalads[0].service,
handle.get_handle().id) == 1
mem_admitted =\
get_mem_admitted_backends_debug_page(self.cluster, self.get_ac_process())
num_executor_zero_admitted = 0
for executor_mem_admitted in mem_admitted['executor']:
if executor_mem_admitted == 0:
num_executor_zero_admitted += 1
assert num_executor_zero_admitted == 1
finally:
# Once the query is closed, validate that all backends have shutdown.
self.client.close_query(handle)
mem_admitted = get_mem_admitted_backends_debug_page(self.cluster)
assert mem_admitted['coordinator'] == 0
for executor_mem_admitted in mem_admitted['executor']:
assert executor_mem_admitted == 0
class TestAdmissionControllerWithACService(TestAdmissionController):
  """Runs all of the tests from TestAdmissionController but with the second impalad in the
  minicluster configured to perform all admission control."""

  def get_ac_process(self):
    # Node 1 is the dedicated admission controller (see PER_IMPALAD_ACS_ARGS below).
    return self.cluster.impalads[1]

  def get_ac_log_name(self):
    return "impalad_node1"

  def setup_method(self, method):
    # Per-test setup: rewrites the test method's cluster-start arguments so that only
    # the second impalad acts as the admission controller and every impalad points at
    # its admission control service address.
    if self.exploration_strategy() != 'exhaustive':
      pytest.skip('runs only in exhaustive')
    PER_IMPALAD_ACS_ARGS = [
      '--is_admission_controller=false',
      '--is_admission_controller=true',
      '--is_admission_controller=false',
    ]
    # The with_args() decorator stores its settings on the test function's __dict__
    # (Python 2 'func_dict'); extend them here before the base setup reads them.
    if 'start_args' not in method.func_dict:
      method.func_dict['start_args'] = list()
    method.func_dict['start_args'].append(
      "--per_impalad_args=" + ";".join(PER_IMPALAD_ACS_ARGS))
    if 'impalad_args' not in method.func_dict:
      method.func_dict["impalad_args"] = ""
    method.func_dict["impalad_args"] +=\
        " --admission_control_service_addr=127.0.0.1:27001 "
    # NOTE(review): super() is given TestAdmissionController (the parent class), so
    # method lookup starts ABOVE the parent and any setup_method defined on
    # TestAdmissionController itself would be skipped - confirm this is intentional.
    super(TestAdmissionController, self).setup_method(method)
class TestAdmissionControllerStress(TestAdmissionControllerBase):
  """Submits a number of queries (parameterized) with some delay between submissions
  (parameterized) and the ability to submit to one impalad or many in a round-robin
  fashion. Each query is submitted on a separate thread. After admission, the query
  thread will block with the query open and wait for the main thread to notify it to
  end its query. The query thread can end its query by fetching to the end, cancelling
  itself, closing itself, or waiting for the query timeout to take effect. Depending
  on the test parameters a varying number of queries will be admitted, queued, and
  rejected. After the queries are admitted, the main thread will request each admitted
  query thread to end its query and allow queued queries to be admitted.

  The test tracks the state of the admission controller using the metrics from each
  impalad to do the following:
  (1) After submitting all queries, the change in metrics for the number of admitted,
      queued, and rejected requests should sum to the number of queries and that the
      values are reasonable given the test parameters.
  (2) While there are running queries:
      * Request the currently running queries to end and wait for the queries to end.
        Verify the metric for the number of completed queries. The threads that
        submitted those queries will keep their connections open until the entire test
        completes. This verifies that admission control is tied to the end of the query
        and does not depend on closing the connection.
      * Check that queued requests are then dequeued and verify using the metric for the
        number of dequeued requests. The threads that were waiting to submit the query
        should then insert themselves into a list of currently running queries and then
        wait for a notification from the main thread.
  (3) After all queries have completed, check that the final number of admitted,
      queued, and rejected requests are reasonable given the test parameters. When
      submitting to a single impalad, we know exactly what the values should be,
      otherwise we just check that they are within reasonable bounds.
  """

  @classmethod
  def add_test_dimensions(cls):
    """Adds the round-robin/delay/num_queries dimensions this stress test runs over."""
    super(TestAdmissionControllerStress, cls).add_test_dimensions()
    cls.ImpalaTestMatrix.add_dimension(
        ImpalaTestDimension('round_robin_submission', *ROUND_ROBIN_SUBMISSION))
    cls.ImpalaTestMatrix.add_dimension(
        ImpalaTestDimension('submission_delay_ms', *SUBMISSION_DELAY_MS))
    # Additional constraints for code coverage jobs and core.
    num_queries = 50
    if ImpalaTestClusterProperties.get_instance().has_code_coverage():
      # Code coverage builds can't handle the increased concurrency.
      num_queries = 15
    elif cls.exploration_strategy() == 'core':
      num_queries = 30
    cls.ImpalaTestMatrix.add_constraint(
        lambda v: v.get_value('submission_delay_ms') == 0)
    cls.ImpalaTestMatrix.add_constraint(
        lambda v: v.get_value('round_robin_submission'))
    # The number of queries to submit. The test does not support fewer queries than
    # MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES to keep some validation logic
    # simple.
    cls.ImpalaTestMatrix.add_dimension(
        ImpalaTestDimension('num_queries', num_queries))

  def setup(self):
    """Initializes the thread bookkeeping lists used by the stress test."""
    # All threads are stored in this list and it's used just to make sure we clean up
    # properly in teardown.
    self.all_threads = list()
    # Each submission thread will append() itself to this list if the query begins
    # execution. The main thread will access this list to determine which threads are
    # executing queries that can be cancelled (it will pop() elements from the front of
    # the list). The individual operations on the list are atomic and thread-safe thanks
    # to the GIL.
    self.executing_threads = list()

  def teardown(self):
    """Requests every query thread to shut down (cancelling its query if one is still
    open) and waits for the threads to exit."""
    # Set shutdown for all threads (cancel if needed)
    for thread in self.all_threads:
      try:
        # The lock guards thread.query_handle and thread.shutdown against concurrent
        # access from the query thread itself.
        thread.lock.acquire()
        thread.shutdown = True
        if thread.query_handle is not None:
          LOG.debug("Attempt to clean up thread executing query %s (state %s)",
              thread.query_num, thread.query_state)
          client = thread.impalad.service.create_beeswax_client()
          try:
            client.cancel(thread.query_handle)
          finally:
            client.close()
      finally:
        thread.lock.release()
    # Wait for all threads to exit (bounded join so teardown cannot hang forever).
    for thread in self.all_threads:
      thread.join(5)
      LOG.debug("Join thread for query num %s %s", thread.query_num,
          "TIMED OUT" if thread.isAlive() else "")

  def get_ac_processes(self):
    """Returns a list of all Processes which may be used to perform admission control. If
    round-robin submission is not being used, only the first Process in this list will
    perform admission control."""
    return self.cluster.impalads

  def get_admission_metrics(self):
    """
    Returns a map of the admission metrics, aggregated across all of the impalads.

    The metrics names are shortened for brevity: 'admitted', 'queued', 'dequeued',
    'rejected', 'released', and 'timed-out'.
    """
    metrics = {'admitted': 0, 'queued': 0, 'dequeued': 0, 'rejected': 0,
        'released': 0, 'timed-out': 0}
    for impalad in self.ac_processes:
      # Fetch all metric keys in one round trip; missing metrics default to 0.
      keys = [metric_key(self.pool_name, 'total-%s' % short_name)
              for short_name in metrics.keys()]
      values = impalad.service.get_metric_values(keys, [0] * len(keys))
      for short_name, value in zip(metrics.keys(), values):
        metrics[short_name] += value
    return metrics

  def get_consistent_admission_metrics(self, num_submitted):
    """Same as get_admission_metrics() except retries until it gets consistent metrics for
    num_submitted queries. See IMPALA-6227 for an example of problems with inconsistent
    metrics where a dequeued query is reflected in dequeued but not admitted."""
    ATTEMPTS = 5
    for i in xrange(ATTEMPTS):
      metrics = self.get_admission_metrics()
      # Consistency invariant: (submitted - queued - rejected) + dequeued == admitted.
      admitted_immediately = num_submitted - metrics['queued'] - metrics['rejected']
      if admitted_immediately + metrics['dequeued'] == metrics['admitted']:
        return metrics
      LOG.info("Got inconsistent metrics {0}".format(metrics))
    assert False, "Could not get consistent metrics for {0} queries after {1} attempts: "\
        "{2}".format(num_submitted, ATTEMPTS, metrics)

  def wait_for_metric_changes(self, metric_names, initial, expected_delta):
    """
    Waits for the sum of metrics in metric_names to change by at least expected_delta.

    This is similar to ImpalaService.wait_for_metric_value(), but it uses one or more
    metrics aggregated across all impalads, e.g. we want to wait for the total number of
    admitted, queued, and rejected metrics to change some amount in total, but we don't
    know exactly how the metrics will change individually.
    'metric_names' is a list of the keys returned by get_admission_metrics() which are
    expected to change.
    'initial' is the initial set of metrics returned by get_admission_metrics() to
    compare against.
    'expected_delta' is the total change expected across all impalads for the specified
    metrics.

    Returns (deltas, current) on success; fails the test after STRESS_TIMEOUT seconds.
    """
    log_metrics("wait_for_metric_changes, initial=", initial)
    current = initial
    start_time = time()
    while True:
      current = self.get_admission_metrics()
      log_metrics("wait_for_metric_changes, current=", current)
      deltas = compute_metric_deltas(current, initial)
      delta_sum = sum([deltas[x] for x in metric_names])
      LOG.info("DeltaSum=%s Deltas=%s (Expected=%s for metrics=%s)",
          delta_sum, deltas, expected_delta, metric_names)
      if delta_sum >= expected_delta:
        LOG.info("Found all %s metrics after %s seconds", delta_sum,
            round(time() - start_time, 1))
        return (deltas, current)
      assert (time() - start_time < STRESS_TIMEOUT),\
          "Timed out waiting {0} seconds for metrics {1} delta {2} "\
          "current {3} initial {4}" .format(
              STRESS_TIMEOUT, ','.join(metric_names), expected_delta, str(current),
              str(initial))
      sleep(1)

  def wait_for_statestore_updates(self, heartbeats):
    """Waits for a number of admission control statestore updates from all impalads."""
    start_time = time()
    init = dict()
    curr = dict()
    # NOTE(review): iterates self.impalads (not self.ac_processes) - presumably set by
    # the base class; confirm it is always populated before this is called.
    for impalad in self.impalads:
      init[impalad] = impalad.service.get_metric_value(
          REQUEST_QUEUE_UPDATE_INTERVAL)['count']
      curr[impalad] = init[impalad]
    while True:
      LOG.debug("wait_for_statestore_updates: curr=%s, init=%s, d=%s", curr.values(),
          init.values(), [curr[i] - init[i] for i in self.impalads])
      # Done once every impalad has seen at least 'heartbeats' more updates.
      if all([curr[i] - init[i] >= heartbeats for i in self.impalads]): break
      for impalad in self.impalads:
        curr[impalad] = impalad.service.get_metric_value(
            REQUEST_QUEUE_UPDATE_INTERVAL)['count']
      assert (time() - start_time < STRESS_TIMEOUT),\
          "Timed out waiting %s seconds for heartbeats" % (STRESS_TIMEOUT,)
      sleep(STATESTORE_RPC_FREQUENCY_MS / float(1000))
    LOG.info("Waited %s for %s heartbeats", round(time() - start_time, 1), heartbeats)

  def wait_for_admitted_threads(self, num_threads):
    """
    Wait for query submission threads to update after being admitted, as determined
    by observing metric changes. This is necessary because the metrics may change
    before the execute_async() calls on the query threads return and add themselves
    to self.executing_threads.
    """
    start_time = time()
    LOG.info("Waiting for %s threads to begin execution", num_threads)
    # All individual list operations are thread-safe, so we don't need to use a
    # lock to synchronize before checking the list length (on which another thread
    # may call append() concurrently).
    while len(self.executing_threads) < num_threads:
      assert (time() - start_time < STRESS_TIMEOUT), ("Timed out waiting %s seconds for "
          "%s admitted client rpcs to return. Only %s executing " % (
          STRESS_TIMEOUT, num_threads, len(self.executing_threads)))
      sleep(0.1)
    LOG.info("Found all %s admitted threads after %s seconds", num_threads,
        round(time() - start_time, 1))

  def end_admitted_queries(self, num_queries):
    """
    Requests each admitted query to end its query.

    Pops 'num_queries' threads off the front of self.executing_threads, flips their
    state to REQUEST_QUERY_END, and then waits (up to STRESS_TIMEOUT) until every
    thread has acted on the request.
    """
    assert len(self.executing_threads) >= num_queries
    LOG.info("Requesting {0} clients to end queries".format(num_queries))
    # Request admitted clients to end their queries
    current_executing_queries = []
    for i in xrange(num_queries):
      # pop() is thread-safe, it's OK if another thread is appending concurrently.
      thread = self.executing_threads.pop(0)
      LOG.info("Cancelling query %s", thread.query_num)
      assert thread.query_state == 'ADMITTED'
      current_executing_queries.append(thread)
      thread.query_state = 'REQUEST_QUERY_END'
    # Wait for the queries to end
    start_time = time()
    while True:
      all_done = True
      for thread in self.all_threads:
        if thread.query_state == 'REQUEST_QUERY_END':
          all_done = False
      if all_done:
        break
      assert (time() - start_time < STRESS_TIMEOUT),\
          "Timed out waiting %s seconds for query end" % (STRESS_TIMEOUT,)
      sleep(1)
class SubmitQueryThread(threading.Thread):
    """Client thread that submits one query, waits for the admission decision
    (admitted / rejected / timed out), and then keeps the query running until
    the coordinating test thread drives it to an end state via query_state."""

    def __init__(self, impalad, additional_query_options, vector, query_num,
                 query_end_behavior, executing_threads):
        """
        executing_threads must be provided so that this thread can add itself when the
        query is admitted and begins execution.
        """
        super(self.__class__, self).__init__()
        self.executing_threads = executing_threads
        self.vector = vector
        self.additional_query_options = additional_query_options
        self.query_num = query_num
        self.query_end_behavior = query_end_behavior
        self.impalad = impalad
        self.error = None  # captured exception; re-raised later by the main thread
        # query_state is defined and used only by the test code, not a property exposed by
        # the server
        self.query_state = 'NOT_SUBMITTED'
        # lock protects query_handle and shutdown, used by the main thread in teardown()
        self.lock = threading.RLock()
        self.query_handle = None
        self.shutdown = False  # Set by the main thread when tearing down

    def run(self):
        # Thread body: submit, observe the admission decision, then keep the
        # query alive until asked to end it (or until teardown sets shutdown).
        client = None
        try:
            try:
                # Take the lock while query_handle is being created to avoid an unlikely race
                # condition with teardown() (i.e. if an error occurs on the main thread), and
                # check if the test is already shut down.
                self.lock.acquire()
                if self.shutdown:
                    return
                exec_options = self.vector.get_value('exec_option')
                exec_options.update(self.additional_query_options)
                query = QUERY.format(self.query_num)
                self.query_state = 'SUBMITTING'
                client = self.impalad.service.create_beeswax_client()
                ImpalaTestSuite.change_database(client, self.vector.get_value('table_format'))
                client.set_configuration(exec_options)
                if self.query_end_behavior == 'QUERY_TIMEOUT':
                    client.execute("SET QUERY_TIMEOUT_S={0}".format(QUERY_END_TIMEOUT_S))
                LOG.info("Submitting query %s", self.query_num)
                self.query_handle = client.execute_async(query)
                client.wait_for_admission_control(self.query_handle)
                admission_result = client.get_admission_result(self.query_handle)
                assert len(admission_result) > 0
                if "Rejected" in admission_result:
                    LOG.info("Rejected query %s", self.query_num)
                    self.query_state = 'REJECTED'
                    self.query_handle = None
                    return
                elif "Timed out" in admission_result:
                    LOG.info("Query %s timed out", self.query_num)
                    self.query_state = 'TIMED OUT'
                    self.query_handle = None
                    return
                LOG.info("Admission result for query %s : %s", self.query_num, admission_result)
            except ImpalaBeeswaxException as e:
                LOG.exception(e)
                raise e
            finally:
                self.lock.release()
            LOG.info("Admitted query %s", self.query_num)
            self.query_state = 'ADMITTED'
            # The thread becomes visible to the main thread when it is added to the
            # shared list of executing_threads. append() is atomic and thread-safe.
            self.executing_threads.append(self)
            # Synchronize with the main thread. At this point, the thread is executing a
            # query. It needs to wait until the main thread requests it to end its query.
            while not self.shutdown:
                # The QUERY_TIMEOUT needs to stay active until the main thread requests it
                # to end. Otherwise, the query may get cancelled early. Fetch rows 2 times
                # per QUERY_TIMEOUT interval to keep the query active.
                if self.query_end_behavior == 'QUERY_TIMEOUT' and \
                   self.query_state != 'COMPLETED':
                    fetch_result = client.fetch(query, self.query_handle, 1)
                    assert len(fetch_result.data) == 1, str(fetch_result)
                if self.query_state == 'REQUEST_QUERY_END':
                    self._end_query(client, query)
                    # The query has released admission control resources
                    self.query_state = 'COMPLETED'
                    self.query_handle = None
                sleep(QUERY_END_TIMEOUT_S / 6)
        except Exception as e:
            LOG.exception(e)
            # Unknown errors will be raised later
            self.error = e
            self.query_state = 'ERROR'
        finally:
            LOG.info("Thread terminating in state=%s", self.query_state)
            if client is not None:
                client.close()

    def _end_query(self, client, query):
        """Bring the query to the appropriate end state defined by self.query_end_behaviour.
        Returns once the query has reached that state."""
        LOG.info("Ending query %s by %s",
                 str(self.query_handle.get_handle()), self.query_end_behavior)
        if self.query_end_behavior == 'QUERY_TIMEOUT':
            # Sleep and wait for the query to be cancelled. The cancellation will
            # set the state to EXCEPTION.
            start_time = time()
            while (client.get_state(self.query_handle) !=
                   client.QUERY_STATES['EXCEPTION']):
                assert (time() - start_time < STRESS_TIMEOUT),\
                    "Timed out waiting %s seconds for query cancel" % (STRESS_TIMEOUT,)
                sleep(1)
        elif self.query_end_behavior == 'EOS':
            # Fetch all rows so we hit eos.
            client.fetch(query, self.query_handle)
        elif self.query_end_behavior == 'CLIENT_CANCEL':
            client.cancel(self.query_handle)
        else:
            assert self.query_end_behavior == 'CLIENT_CLOSE'
            client.close_query(self.query_handle)
def _check_queries_page_resource_pools(self):
    """Checks that all queries in the '/queries' webpage json have the correct resource
    pool (this is called after all queries have been admitted, queued, or rejected, so
    they should already have the pool set), or no pool for queries that don't go through
    admission control."""
    for impalad in self.impalads:
        page = impalad.service.get_debug_webpage_json('/queries')
        # Inspect both running and finished queries.
        for query in page['in_flight_queries'] + page['completed_queries']:
            if query['stmt_type'] in ('QUERY', 'DML'):
                # Admission has happened, so the query must be past these events.
                assert query['last_event'] != 'Registered' and \
                    query['last_event'] != 'Planning finished'
                assert query['resource_pool'] == self.pool_name
            else:
                assert query['resource_pool'] == ''
def _get_queries_page_num_queued(self):
    """Returns the number of queries currently in the 'queued' state from the '/queries'
    webpage json"""
    return sum(
        1
        for impalad in self.impalads
        for query in impalad.service.get_debug_webpage_json(
            '/queries')['in_flight_queries']
        if query['last_event'] == 'Queued')
def wait_on_queries_page_num_queued(self, min_queued, max_queued):
    """Poll the '/queries' debug webpage until the number of queued queries
    falls within [min_queued, max_queued]; asserts after STRESS_TIMEOUT
    seconds."""
    start_time = time()
    LOG.info("Waiting for %s <= queued queries <= %s" % (min_queued, max_queued))
    actual_queued = self._get_queries_page_num_queued()
    while actual_queued < min_queued or actual_queued > max_queued:
        # BUG FIX: the assertion message used to be a (format, args) tuple in
        # logging style, so the %s placeholders were never substituted. Use
        # actual %-formatting so a timeout reports the real numbers.
        assert (time() - start_time < STRESS_TIMEOUT), (
            "Timed out waiting %s seconds for "
            "%s <= queued queries <= %s, %s currently queued." % (
                STRESS_TIMEOUT, min_queued, max_queued, actual_queued))
        sleep(0.1)
        actual_queued = self._get_queries_page_num_queued()
    LOG.info("Found %s queued queries after %s seconds", actual_queued,
             round(time() - start_time, 1))
def run_admission_test(self, vector, additional_query_options):
    """Drive the full stress scenario: submit num_queries queries, verify the
    initial admit/queue/reject decisions against the configured limits, then
    end queries in waves until all have completed, re-checking metrics and the
    '/queries' webpage at each step. Errors captured by worker threads are
    re-raised at the end."""
    LOG.info("Starting test case with parameters: %s", vector)
    self.impalads = self.cluster.impalads
    self.ac_processes = self.get_ac_processes()
    round_robin_submission = vector.get_value('round_robin_submission')
    submission_delay_ms = vector.get_value('submission_delay_ms')
    # Without round-robin, all queries go through a single impalad.
    if not round_robin_submission:
        self.impalads = [self.impalads[0]]
        self.ac_processes = [self.ac_processes[0]]
    num_queries = vector.get_value('num_queries')
    assert num_queries >= MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES
    initial_metrics = self.get_admission_metrics()
    log_metrics("Initial metrics: ", initial_metrics)
    # Fan out the submissions, cycling through impalads and end behaviors.
    for query_num in xrange(num_queries):
        impalad = self.impalads[query_num % len(self.impalads)]
        query_end_behavior = QUERY_END_BEHAVIORS[query_num % len(QUERY_END_BEHAVIORS)]
        thread = self.SubmitQueryThread(impalad, additional_query_options, vector,
                                        query_num, query_end_behavior,
                                        self.executing_threads)
        thread.start()
        self.all_threads.append(thread)
        sleep(submission_delay_ms / 1000.0)
    # Wait for the admission control to make the initial admission decision for all of
    # the queries. They should either be admitted immediately, queued, or rejected.
    # The test query is chosen that it with remain active on all backends until the test
    # ends the query. This prevents queued queries from being dequeued in the background
    # without this thread explicitly ending them, so that the test can admit queries in
    # discrete waves.
    LOG.info("Wait for initial admission decisions")
    (metric_deltas, curr_metrics) = self.wait_for_metric_changes(
        ['admitted', 'queued', 'rejected'], initial_metrics, num_queries)
    # Also wait for the test threads that submitted the queries to start executing.
    self.wait_for_admitted_threads(metric_deltas['admitted'])
    # Check that the admission decisions are reasonable given the test parameters
    # The number of admitted and queued requests should be at least the configured limits
    # but less than or equal to those limits times the number of impalads.
    assert metric_deltas['dequeued'] == 0,\
        "Queued queries should not run until others are made to finish"
    assert metric_deltas['admitted'] >= MAX_NUM_CONCURRENT_QUERIES,\
        "Admitted fewer than expected queries"
    assert metric_deltas['admitted'] <= MAX_NUM_CONCURRENT_QUERIES * len(self.impalads),\
        "Admitted more than expected queries: at least one daemon over-admitted"
    assert metric_deltas['queued'] >=\
        min(num_queries - metric_deltas['admitted'], MAX_NUM_QUEUED_QUERIES),\
        "Should have queued more queries before rejecting them"
    assert metric_deltas['queued'] <= MAX_NUM_QUEUED_QUERIES * len(self.impalads),\
        "Queued too many queries: at least one daemon queued too many"
    assert metric_deltas['rejected'] + metric_deltas['admitted'] +\
        metric_deltas['queued'] == num_queries,\
        "Initial admission decisions don't add up to {0}: {1}".format(
            num_queries, str(metric_deltas))
    initial_metric_deltas = metric_deltas
    # Like above, check that the count from the queries webpage json is reasonable.
    min_queued = min(num_queries - metric_deltas['admitted'], MAX_NUM_QUEUED_QUERIES)
    max_queued = MAX_NUM_QUEUED_QUERIES * len(self.impalads)
    self.wait_on_queries_page_num_queued(min_queued, max_queued)
    self._check_queries_page_resource_pools()
    # Admit queries in waves until all queries are done. A new wave of admission
    # is started by killing some of the running queries.
    while len(self.executing_threads) > 0:
        curr_metrics = self.get_consistent_admission_metrics(num_queries)
        log_metrics("Main loop, curr_metrics: ", curr_metrics)
        num_to_end = len(self.executing_threads)
        LOG.info("Main loop, will request %s queries to end", num_to_end)
        self.end_admitted_queries(num_to_end)
        self.wait_for_metric_changes(['released'], curr_metrics, num_to_end)
        num_queued_remaining =\
            curr_metrics['queued'] - curr_metrics['dequeued'] - curr_metrics['timed-out']
        expected_admitted = min(num_queued_remaining, MAX_NUM_CONCURRENT_QUERIES)
        (metric_deltas, _) = self.wait_for_metric_changes(
            ['admitted', 'timed-out'], curr_metrics, expected_admitted)
        # The queue timeout is set high for these tests, so we don't expect any queries to
        # time out.
        assert metric_deltas['admitted'] >= expected_admitted
        assert metric_deltas['timed-out'] == 0
        self.wait_for_admitted_threads(metric_deltas['admitted'])
        # Wait a few topic updates to ensure the admission controllers have reached a steady
        # state or we may find an impalad dequeue more requests after we capture metrics.
        self.wait_for_statestore_updates(10)
    final_metrics = self.get_consistent_admission_metrics(num_queries)
    log_metrics("Final metrics: ", final_metrics)
    metric_deltas = compute_metric_deltas(final_metrics, initial_metrics)
    assert metric_deltas['timed-out'] == 0
    if round_robin_submission:
        min_expected_admitted = MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES
        assert metric_deltas['admitted'] >= min_expected_admitted
        assert metric_deltas['admitted'] <= min_expected_admitted * len(self.impalads)
        assert metric_deltas['admitted'] ==\
            initial_metric_deltas['admitted'] + initial_metric_deltas['queued']
        assert metric_deltas['queued'] == initial_metric_deltas['queued']
        assert metric_deltas['rejected'] == initial_metric_deltas['rejected']
    else:
        # We shouldn't go over the max number of queries or queue size so we can compute
        # the expected number of queries that should have been admitted (which includes the
        # number queued as they eventually get admitted as well), queued, and rejected
        expected_admitted = MAX_NUM_CONCURRENT_QUERIES + MAX_NUM_QUEUED_QUERIES
        assert metric_deltas['admitted'] == expected_admitted
        assert metric_deltas['queued'] == MAX_NUM_QUEUED_QUERIES
        assert metric_deltas['rejected'] == num_queries - expected_admitted
    # All queries should be completed by now.
    self.wait_on_queries_page_num_queued(0, 0)
    self._check_queries_page_resource_pools()
    # Propagate any error captured by a worker thread.
    for thread in self.all_threads:
        if thread.error is not None:
            raise thread.error
@pytest.mark.execute_serially
@SkipIfOS.redhat6
@CustomClusterTestSuite.with_args(
    impalad_args=impalad_admission_ctrl_flags(max_requests=MAX_NUM_CONCURRENT_QUERIES,
        max_queued=MAX_NUM_QUEUED_QUERIES, pool_max_mem=-1, queue_wait_timeout_ms=600000),
    statestored_args=_STATESTORED_ARGS)
def test_admission_controller_with_flags(self, vector):
    """Stress admission control configured via impalad startup flags only
    (default pool, no pool memory limit)."""
    if self.exploration_strategy() != 'exhaustive':
        pytest.skip('runs only in exhaustive')
    self.pool_name = 'default-pool'
    # The pool has no mem resources set, so submitting queries with huge mem_limits
    # should be fine. This exercises the code that does the per-pool memory
    # accounting (see MemTracker::GetPoolMemReserved()) without actually being throttled.
    self.run_admission_test(vector, {'request_pool': self.pool_name,
                                     'mem_limit': sys.maxint})
@pytest.mark.execute_serially
@SkipIfOS.redhat6
@CustomClusterTestSuite.with_args(
    impalad_args=impalad_admission_ctrl_config_args(
        fs_allocation_file="fair-scheduler-test2.xml",
        llama_site_file="llama-site-test2.xml"),
    statestored_args=_STATESTORED_ARGS)
def test_admission_controller_with_configs(self, vector):
    """Stress admission control configured via fair-scheduler/llama-site XML
    config files, targeting the 'root.queueB' pool."""
    self.pool_name = 'root.queueB'
    self.run_admission_test(vector, {'request_pool': self.pool_name})
def get_proc_limit(self):
    """Gets the process mem limit as reported by the impalad's mem-tracker metric.
    Raises an assertion if not all impalads have the same value."""
    limit_metrics = [
        impalad.service.get_metric_value("mem-tracker.process.limit")
        for impalad in self.cluster.impalads]
    # BUG FIX: the old check only compared limit_metrics[0] with
    # limit_metrics[-1], so a differing limit on a middle impalad went
    # undetected. Verify every impalad reports the same value.
    assert len(set(limit_metrics)) == 1,\
        "Not all impalads have the same process limit: %s" % (limit_metrics,)
    assert limit_metrics[0] is not None
    return limit_metrics[0]
@pytest.mark.execute_serially
@SkipIfOS.redhat6
@CustomClusterTestSuite.with_args(
    impalad_args=impalad_admission_ctrl_flags(
        max_requests=MAX_NUM_CONCURRENT_QUERIES * 30, max_queued=MAX_NUM_QUEUED_QUERIES,
        pool_max_mem=MEM_TEST_LIMIT, proc_mem_limit=MEM_TEST_LIMIT,
        queue_wait_timeout_ms=600000),
    statestored_args=_STATESTORED_ARGS)
def test_mem_limit(self, vector):
    """Stress admission control where queries are queued/rejected due to the
    pool memory limit rather than the count of running requests (the request
    limit is set 30x higher than the standard tests)."""
    # Impala may set the proc mem limit lower than we think depending on the overcommit
    # settings of the OS. It should be fine to continue anyway.
    proc_limit = self.get_proc_limit()
    if proc_limit != MEM_TEST_LIMIT:
        LOG.info("Warning: Process mem limit %s is not expected val %s", proc_limit,
                 MEM_TEST_LIMIT)
    self.pool_name = 'default-pool'
    # Each query mem limit (set the query option to override the per-host memory
    # estimate) should use a bit less than (total pool mem limit) / #queries so that
    # once #queries are running, the total pool mem usage is about at the limit and
    # additional incoming requests will be rejected. The actual pool limit on the number
    # of running requests is very high so that requests are only queued/rejected due to
    # the mem limit.
    num_impalads = len(self.cluster.impalads)
    query_mem_limit = (proc_limit / MAX_NUM_CONCURRENT_QUERIES / num_impalads) - 1
    self.run_admission_test(vector,
        {'request_pool': self.pool_name, 'mem_limit': query_mem_limit})
class TestAdmissionControllerStressWithACService(TestAdmissionControllerStress):
    """Runs all of the tests from TestAdmissionControllerStress but with the second impalad
    in the minicluster configured to perform all admission control."""

    def get_ac_processes(self):
        # Only the second impalad acts as the admission controller here.
        return [self.cluster.impalads[1]]

    def get_ac_log_name(self):
        return "impalad_node1"

    def setup_method(self, method):
        # Rewrite the per-test startup flags (stored on the test method's
        # func_dict by the custom-cluster machinery) so that node 1 is the only
        # admission controller and every impalad points at its service address.
        if self.exploration_strategy() != 'exhaustive':
            pytest.skip('runs only in exhaustive')
        PER_IMPALAD_ACS_ARGS = [
            '--is_admission_controller=false',
            '--is_admission_controller=true',
            '--is_admission_controller=false',
        ]
        if 'start_args' not in method.func_dict:
            method.func_dict['start_args'] = list()
        method.func_dict['start_args'].append(
            "--per_impalad_args=" + ";".join(PER_IMPALAD_ACS_ARGS))
        if 'impalad_args' not in method.func_dict:
            method.func_dict["impalad_args"] = ""
        method.func_dict["impalad_args"] +=\
            " --admission_control_service_addr=127.0.0.1:27001 "
        # NOTE(review): super(TestAdmissionControllerStress, self) skips any
        # setup_method defined on TestAdmissionControllerStress itself and
        # calls the grandparent's — confirm this is intentional.
        super(TestAdmissionControllerStress, self).setup_method(method)
|
test.py | import cv2
import time
import socket
import threading
import numpy as np
import os
import face_recognition
import playsound
import logging
# todo: Add GUI for Camera
def estop():
    # Emergency stop: command the drone to halt, announce the stop over audio,
    # then keep re-sending "emergency" forever. This function never returns.
    send("emergency")
    speaktext(0)
    while True:
        send("emergency")
def speaktext(tc=0):
    """Play one of the pre-recorded voice clips from the 'text to speak' folder.

    tc selects the clip (0=emergency stop, 1..6 = various status announcements).
    Unknown codes play nothing. The working directory is always restored,
    even if playback raises -- the original code left the process chdir'd
    into the clip folder on error.
    """
    # Clip table replaces the old if/elif chain; unknown codes map to None.
    clips = {
        0: "estop.mp3",
        1: "fric.mp3",
        2: "fullyoperational.mp3",
        3: "initinit.mp3",
        4: "sst.mp3",
        5: "veac.mp3",
        6: "wdpo.mp3",
    }
    os.chdir("text to speak")
    try:
        clip = clips.get(tc)
        if clip is not None:
            playsound.playsound(clip)
    finally:
        # BUG FIX: restore the cwd even when playsound fails.
        os.chdir("..")
def send(message):
    """Transmit one UTF-8 command string to the Tello over the UDP socket,
    swallowing (but printing) any network error."""
    try:
        payload = message.encode(encoding="utf-8")
        sock.sendto(payload, tello_address)
        print("Sending message: " + message)
    except Exception as send_error:
        print("Error sending: " + str(send_error))
def receive():
    # Background listener thread: drains UDP responses from the drone forever.
    # When an "ok" ack arrives while a command round-trip is in progress
    # (global flag 'french'), the flag is cleared so the sender can proceed.
    global french
    while True:
        try:
            response, _ = sock.recvfrom(128)
            message = response.decode(encoding='utf-8')
            print("Received message: " + message)
            if french and "ok" in message:
                print("resetting command in progress")
                french = False
        except Exception as exceptiontenthisisaexception:
            # Any socket error is treated as fatal for this thread: close the
            # socket and stop listening.
            sock.close()
            print("Error receiving: " + str(exceptiontenthisisaexception))
            break
def send_with_buffer():
    # Command pump thread: forwards buffered commands to the drone.
    # "Top Priority" commands (attack maneuvers queued by the vision loop)
    # pre-empt the repeating "Secondary Priority" survey pattern.
    # NOTE(review): commands appear never to be popped from either list after
    # being sent -- index [0] is re-read each pass; confirm whether repeats
    # are intended. Also, indexing [0] on an empty list raises IndexError,
    # which the surrounding except blocks silently rely on for control flow.
    global bs_list
    global commands_survey
    global french
    global taken_off
    try:
        while True:
            # Busy-wait while a command round-trip is already in progress.
            if french:
                while french:
                    time.sleep(0)
            try:
                if bs_list["Top Priority"][0] is not None:
                    message = bs_list["Top Priority"][0]
                    # Track airborne state from the command being sent.
                    if message == "takeoff":
                        taken_off = True
                    elif message == "land":
                        taken_off = False
                    print("Top Priority Command is being sent")
                    # Retry send until the drone acks with "ok".
                    while True:
                        try:
                            sock.sendto(message.encode(encoding="utf-8"), tello_address)
                            print("Sending message: " + message)
                        except Exception as eikk:
                            print("Error sending: " + str(eikk))
                        try:
                            response, _ = sock.recvfrom(128)
                            message = response.decode(encoding='utf-8')
                            if french and "ok" in message:
                                break
                        except Exception as eikk:
                            print("Error Receiving: " + str(eikk))
                            sock.close()
                            break
                    french = False
            except Exception as expoint:
                # Empty "Top Priority" list lands here via IndexError.
                logging.info("Exception: " + str(expoint))
            try:
                if bs_list["Secondary Priority"][0] is None:
                    # Refill the survey pattern when the queue head is empty.
                    bs_list["Secondary Priority"].extend(commands_survey)
                else:
                    message = bs_list["Secondary Priority"][0]
                    if message == "takeoff":
                        taken_off = True
                    elif message == "land":
                        taken_off = False
                    # Retry send until the drone acks with "ok".
                    while True:
                        try:
                            sock.sendto(message.encode(encoding="utf-8"), tello_address)
                            print("Sending message: " + message)
                        except Exception as eikk:
                            print("Error sending: " + str(eikk))
                        try:
                            response, _ = sock.recvfrom(128)
                            message = response.decode(encoding='utf-8')
                            if french and "ok" in message:
                                break
                        except Exception as eikk:
                            print(f"Error: {str(eikk)}")
                            sock.close()
                            break
                    french = False
            except Exception as eikk:
                # Empty "Secondary Priority" list: log and refill the pattern.
                print(f"Error Something Bad Happened: {eikk}")
                bs_list["Secondary Priority"].extend(commands_survey)
            print(bs_list)
    except Exception as a_very_bad_exception:
        print("Big Bad Error: " + str(a_very_bad_exception))
if __name__ == '__main__':
    # Entry point: initialize logging, audio cues, the drone UDP link, face
    # recognition data and the video stream; then run the recognition loop.
    logging.basicConfig(filename='app.log', filemode='a', format='%(asctime)s - %(levelname)s - %(message)s',
                        datefmt='%d-%b-%y %H:%M:%S')
    try:
        logging.info("Logging Enabled")
        logging.info("System Init: Starting")
        speaktext(3)
        # Shared command buffer: "Top Priority" pre-empts the survey pattern
        # held in "Secondary Priority" (consumed by send_with_buffer).
        bs_list = {"Top Priority": [], "Secondary Priority": []}
        killing = True
        french = True  # True while a command round-trip is in progress
        commands_survey = ["takeoff", "up 30", "down 30", "cw 90", "cw 90", "cw 90", "cw 90", "land"]
        taken_off = False
        tello_address = ('192.168.10.1', 8889)
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.bind(('', 9000))
        current_command = 0
        receiveThread = threading.Thread(target=receive)
        receiveThread.daemon = True
        receiveThread.start()
        logging.info("Drone Partially Operational")
        speaktext(6)
        # Load the single reference face used for recognition.
        im1 = face_recognition.load_image_file("images/alexander.jpg")
        ima1 = face_recognition.face_encodings(im1)[0]
        known_face_encodings = [ima1]
        known_face_names = ["Alexander Doyle"]
        face_locations = []
        face_encodings = []
        face_names = []
        process_this_frame = True
        logging.info("Facial Recognition Init Complete")
        speaktext(1)
        # Put the Tello into SDK mode and enable its video stream.
        send("command")
        time.sleep(1)
        send("streamon")
        time.sleep(1)
        camera = cv2.VideoCapture('udp://127.0.0.1:11111')
        time.sleep(3)
        logging.info("Drone Video Stream Enabled")
        speaktext(5)
        logging.info("Drone Fully Operational")
    except Exception as Init_exception:
        logging.error("Error While Init: " + str(Init_exception))
    else:
        try:
            # BUG FIX: Thread(...).start() returns None, so the old code kept
            # no reference to the worker thread. Create it, then start it.
            maincommandpipethread = threading.Thread(target=send_with_buffer, daemon=True)
            maincommandpipethread.start()
            french = False
            speaktext(2)
            while True:
                ret, frame = camera.read()
                # Downscale 4x for speed; reverse BGR -> RGB for the model.
                small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
                rgb_small_frame = small_frame[:, :, ::-1]
                # Only run recognition on every other frame.
                if process_this_frame:
                    face_locations = face_recognition.face_locations(rgb_small_frame)
                    face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
                    face_names = []
                    for face_encoding in face_encodings:
                        matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
                        name = "Enemy"
                        face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
                        best_match_index = np.argmin(face_distances)
                        if matches[best_match_index]:
                            name = known_face_names[best_match_index]
                            print("Person Found: " + name)
                        if name == "Alexander Doyle":
                            print("Fugitive Found")
                            if killing:
                                if taken_off:
                                    # Airborne: lunge forward/back and land.
                                    bs_list["Top Priority"].append("forward 100")
                                    bs_list["Top Priority"].append("back 100")
                                    bs_list["Top Priority"].append("land")
                                # BUG FIX: this branch was 'elif taken_off:',
                                # which could never be true when the branch
                                # above was false, so a grounded drone never
                                # took off to attack.
                                elif not taken_off:
                                    bs_list["Top Priority"].append("takeoff")
                                    bs_list["Top Priority"].append("forward 100")
                                    bs_list["Top Priority"].append("back 100")
                                    bs_list["Top Priority"].append("land")
                                else:
                                    logging.warning("Something Weird Happened in your attacking code")
                            else:
                                print("Yo, We're Buffering Dude")
                        face_names.append(name)
                process_this_frame = not process_this_frame
                # Draw labelled boxes back on the full-size frame (scale x4).
                for (top, right, bottom, left), name in zip(face_locations, face_names):
                    top *= 4
                    right *= 4
                    bottom *= 4
                    left *= 4
                    cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                    cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                    font = cv2.FONT_HERSHEY_DUPLEX
                    cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
                cv2.imshow('Tello', frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    logging.warning("Drone Video Stream Not Visible")
                    break
            sock.close()
            logging.warning("Drone Communication Capability Disabled")
            camera.release()
            cv2.destroyAllWindows()
        except Exception as e:
            logging.error(f"System Suddenly Stopped: {e}")
            exit(0)
        finally:
            print("System Exit By End of File")
            logging.info("System Exit By End of File :)")
|
test_threaded_import.py | # This is a variant of the very old (early 90's) file
# Demo/threads/bug.py. It simply provokes a number of threads into
# trying to import the same module "at the same time".
# There are no pleasant failure modes -- most likely is that Python
# complains several times about module random having no attribute
# randrange, and then Python hangs.
import os
import imp
import sys
import time
import shutil
import unittest
from test.support import (
verbose, import_module, run_unittest, TESTFN, reap_threads, forget, unlink)
threading = import_module('threading')
def task(N, done, done_tasks, errors):
    """Worker body: import two modules (order alternates from run to run),
    record completion in done_tasks, and set 'done' once N workers finished.

    Any exception (e.g. AttributeError from a half-initialized 'random') is
    appended to 'errors' for the main thread to inspect.
    """
    try:
        # modulefinder itself is unused -- importing it just adds extra import
        # pressure from several threads. Alternate the import order based on
        # how many tasks have already completed.
        if len(done_tasks) % 2:
            import modulefinder
            import random
        else:
            import random
            import modulefinder
        # This will fail if random is not completely initialized
        x = random.randrange(1, 3)
    except Exception as exc:
        errors.append(exc.with_traceback(None))
    finally:
        done_tasks.append(threading.get_ident())
        if len(done_tasks) == N:
            done.set()
# Create a circular import structure: A -> C -> B -> D -> A
# NOTE: `time` is already loaded and therefore doesn't threaten to deadlock.
circular_imports_modules = {
'A': """if 1:
import time
time.sleep(%(delay)s)
x = 'a'
import C
""",
'B': """if 1:
import time
time.sleep(%(delay)s)
x = 'b'
import D
""",
'C': """import B""",
'D': """import A""",
}
class Finder:
    """A dummy finder to detect concurrent access to its find_module()
    method."""

    def __init__(self):
        self.numcalls = 0  # total invocations of find_module()
        self.x = 0         # shadow counter, updated non-atomically on purpose
        self.lock = threading.Lock()

    def find_module(self, name, path=None):
        # Simulate some thread-unsafe behaviour: read x, sleep, write x + 1.
        # If the import machinery serializes calls to find_module() properly,
        # x ends up equal to numcalls; otherwise the updates interleave and
        # x falls behind.
        with self.lock:
            self.numcalls += 1
            snapshot = self.x
            time.sleep(0.1)
            self.x = snapshot + 1
class FlushingFinder:
    """A dummy finder which flushes sys.path_importer_cache when it gets
    called."""

    def find_module(self, name, path=None):
        # Emptying the cache forces sys.path_hooks to run on the next import,
        # which is exactly what the path-hook concurrency test needs.
        sys.path_importer_cache.clear()
class ThreadedImportTests(unittest.TestCase):
    """Exercise the import machinery under heavy thread concurrency: parallel
    module init, concurrent meta_path/path_hooks access, import-time thread
    spawning, and circular imports across threads."""

    def setUp(self):
        # Pop 'random' so each test re-imports it from scratch.
        self.old_random = sys.modules.pop('random', None)

    def tearDown(self):
        # If the `random` module was already initialized, we restore the
        # old module at the end so that pickling tests don't fail.
        # See http://bugs.python.org/issue3657#msg110461
        if self.old_random is not None:
            sys.modules['random'] = self.old_random

    def check_parallel_module_init(self):
        """Spawn batches of threads that all import the same modules at once
        and assert that no thread observed a half-initialized module."""
        if imp.lock_held():
            # This triggers on, e.g., from test import autotest.
            raise unittest.SkipTest("can't run when import lock is held")
        done = threading.Event()
        for N in (20, 50) * 3:
            if verbose:
                print("Trying", N, "threads ...", end=' ')
            # Make sure that random and modulefinder get reimported freshly
            for modname in ['random', 'modulefinder']:
                try:
                    del sys.modules[modname]
                except KeyError:
                    pass
            errors = []
            done_tasks = []
            done.clear()
            for i in range(N):
                t = threading.Thread(target=task,
                                     args=(N, done, done_tasks, errors,))
                t.start()
            done.wait(60)
            self.assertFalse(errors)
            if verbose:
                print("OK.")

    def test_parallel_module_init(self):
        self.check_parallel_module_init()

    def test_parallel_meta_path(self):
        # A meta_path finder must see serialized calls: x == numcalls.
        finder = Finder()
        sys.meta_path.insert(0, finder)
        try:
            self.check_parallel_module_init()
            self.assertGreater(finder.numcalls, 0)
            self.assertEqual(finder.x, finder.numcalls)
        finally:
            sys.meta_path.remove(finder)

    def test_parallel_path_hooks(self):
        # Here the Finder instance is only used to check concurrent calls
        # to path_hook().
        finder = Finder()
        # In order for our path hook to be called at each import, we need
        # to flush the path_importer_cache, which we do by registering a
        # dedicated meta_path entry.
        flushing_finder = FlushingFinder()

        def path_hook(path):
            finder.find_module('')
            raise ImportError
        sys.path_hooks.insert(0, path_hook)
        sys.meta_path.append(flushing_finder)
        try:
            # Flush the cache a first time
            flushing_finder.find_module('')
            numtests = self.check_parallel_module_init()
            self.assertGreater(finder.numcalls, 0)
            self.assertEqual(finder.x, finder.numcalls)
        finally:
            sys.meta_path.remove(flushing_finder)
            sys.path_hooks.remove(path_hook)

    def test_import_hangers(self):
        # In case this test is run again, make sure the helper module
        # gets loaded from scratch again.
        try:
            del sys.modules['test.threaded_import_hangers']
        except KeyError:
            pass
        import test.threaded_import_hangers
        self.assertFalse(test.threaded_import_hangers.errors)

    def test_circular_imports(self):
        # The goal of this test is to exercise implementations of the import
        # lock which use a per-module lock, rather than a global lock.
        # In these implementations, there is a possible deadlock with
        # circular imports, for example:
        # - thread 1 imports A (grabbing the lock for A) which imports B
        # - thread 2 imports B (grabbing the lock for B) which imports A
        # Such implementations should be able to detect such situations and
        # resolve them one way or the other, without freezing.
        # NOTE: our test constructs a slightly less trivial import cycle,
        # in order to better stress the deadlock avoidance mechanism.
        delay = 0.5
        os.mkdir(TESTFN)
        self.addCleanup(shutil.rmtree, TESTFN)
        sys.path.insert(0, TESTFN)
        self.addCleanup(sys.path.remove, TESTFN)
        for name, contents in circular_imports_modules.items():
            contents = contents % {'delay': delay}
            with open(os.path.join(TESTFN, name + ".py"), "wb") as f:
                f.write(contents.encode('utf-8'))
            self.addCleanup(forget, name)
        results = []

        def import_ab():
            import A
            results.append(getattr(A, 'x', None))

        def import_ba():
            import B
            results.append(getattr(B, 'x', None))
        t1 = threading.Thread(target=import_ab)
        t2 = threading.Thread(target=import_ba)
        t1.start()
        t2.start()
        t1.join()
        t2.join()
        self.assertEqual(set(results), {'a', 'b'})

    def test_side_effect_import(self):
        # A module that starts a thread which itself imports must not deadlock.
        code = """if 1:
            import threading
            def target():
                import random
            t = threading.Thread(target=target)
            t.start()
            t.join()"""
        sys.path.insert(0, os.curdir)
        self.addCleanup(sys.path.remove, os.curdir)
        filename = TESTFN + ".py"
        with open(filename, "wb") as f:
            f.write(code.encode('utf-8'))
        self.addCleanup(unlink, filename)
        self.addCleanup(forget, TESTFN)
        __import__(TESTFN)
@reap_threads
def test_main():
    """Shrink the interpreter's thread switch interval (where supported) to
    maximize interleaving, run the suite, then restore the old interval."""
    saved_interval = None
    # setswitchinterval() may be absent on alternative implementations.
    try:
        saved_interval = sys.getswitchinterval()
        sys.setswitchinterval(1e-8)
    except AttributeError:
        pass
    try:
        run_unittest(ThreadedImportTests)
    finally:
        if saved_interval is not None:
            sys.setswitchinterval(saved_interval)

if __name__ == "__main__":
    test_main()
|
http1_tests.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import socket
import uuid
from threading import Thread
from time import sleep
from http.server import HTTPServer, BaseHTTPRequestHandler
from http.client import HTTPConnection
from http.client import HTTPException
from system_test import TestCase, TIMEOUT, Logger, Qdrouterd, unittest
from system_test import curl_available, run_curl
TEST_SERVER_ERROR = "TestServer failed to start due to port %s already in use issue"
CURL_VERSION = (7, 47, 0) # minimum required
def _curl_ok():
    """
    Returns True if curl is installed and is the proper version for
    running http1.1
    """
    # curl_available() yields a falsy value when curl is missing, otherwise a
    # comparable version; both operands short-circuit through 'and' so the
    # falsy value itself is returned when curl is absent.
    version = curl_available()
    return version and version >= CURL_VERSION
class RequestHandler(BaseHTTPRequestHandler):
    """
    Dispatches requests received by the HTTPServer based on the method
    """
    protocol_version = 'HTTP/1.1'

    def _execute_request(self, tests):
        # 'tests' is a list of (request, response(s), value) tuples taken from
        # the server's per-method test table; reply with the canned response(s)
        # whose request target matches this path, else 404.
        for req, resp, val in tests:
            if req.target == self.path:
                xhdrs = None
                # Echo back the "test-echo" header when the client sent one.
                if "test-echo" in self.headers:
                    xhdrs = {"test-echo":
                             self.headers["test-echo"]}
                # Drain the request body so the connection can be reused.
                self._consume_body()
                if not isinstance(resp, list):
                    resp = [resp]
                for r in resp:
                    r.send_response(self, extra_headers=xhdrs)
                self.server.request_count += 1
                return
        self.send_error(404, "Not Found")

    def do_GET(self):
        self._execute_request(self.server.system_tests["GET"])

    def do_HEAD(self):
        self._execute_request(self.server.system_tests["HEAD"])

    def do_POST(self):
        # POST /SHUTDOWN is a control message: reply, close the connection
        # and flag the server loop to exit.
        if self.path == "/SHUTDOWN":
            self.send_response(200, "OK")
            self.send_header("Content-Length", "13")
            self.end_headers()
            self.wfile.write(b'Server Closed')
            self.wfile.flush()
            self.close_connection = True
            self.server.server_killed = True
            return
        self._execute_request(self.server.system_tests["POST"])

    def do_PUT(self):
        self._execute_request(self.server.system_tests["PUT"])

    # these overrides just quiet the test output
    # comment them out to help debug:
    def log_request(self, code=None, size=None):
        pass

    def log_message(self, *args, format=None):
        pass

    def _consume_body(self):
        """
        Read the entire body off the rfile. This must be done to allow
        multiple requests on the same socket
        """
        if self.command == 'HEAD':
            return b''
        for key, value in self.headers.items():
            if key.lower() == 'content-length':
                return self.rfile.read(int(value))
            if key.lower() == 'transfer-encoding' \
               and 'chunked' in value.lower():
                # Parse chunked encoding by hand: each chunk is
                # "<hex-size>[;ext]\r\n<data>\r\n"; a zero-size chunk ends
                # the body.
                body = b''
                while True:
                    header = self.rfile.readline().strip().split(b';')[0]
                    hlen = int(header, base=16)
                    if hlen > 0:
                        data = self.rfile.read(hlen + 2)  # 2 = \r\n
                        body += data[:-2]
                    else:
                        self.rfile.readline()  # discard last \r\n
                        break
                return body
        # No framing headers: nothing to read.
        return b''
class RequestHandler10(RequestHandler):
    """
    RequestHandler that forces the server to use HTTP version 1.0 semantics
    (no chunked transfer encoding; an unsized response body is terminated
    by closing the connection).
    """
    protocol_version = 'HTTP/1.0'
class MyHTTPServer(HTTPServer):
    """
    HTTPServer that carries the per-method test tables for its request
    handler, counts handled requests, and can exit its accept loop
    gracefully (TestServer._run polls the server_killed flag).
    """

    def __init__(self, addr, handler_cls, testcases):
        # stash the test tables and zero the counter before the base
        # class binds the listening socket
        self.system_tests = testcases
        self.request_count = 0
        super().__init__(addr, handler_cls)

    def server_close(self):
        # best effort: force an immediate close of the listening socket
        # so a blocked accept() wakes up
        try:
            self.socket.shutdown(socket.SHUT_RDWR)
        except Exception:
            pass
        super().server_close()
class ThreadedTestClient:
    """
    An HTTP client running in a separate thread.

    On construction a daemon thread connects to 127.0.0.1:<port> and issues
    every request in *tests* (a map of method name -> list of
    (RequestMsg, ResponseMsg, ResponseValidator) tuples) *repeat* times.
    Results are reported through two public attributes:

    * count -- number of successfully completed requests
    * error -- None on success, otherwise a description of the failure
    """

    def __init__(self, tests, port, repeat=1):
        self._id = uuid.uuid4().hex
        self._conn_addr = ("127.0.0.1:%s" % port)
        self._tests = tests
        self._repeat = repeat
        self._logger = Logger(title="TestClient: %s" % self._id,
                              print_to_console=False)
        # daemon thread so a hung client cannot prevent interpreter exit
        self._thread = Thread(target=self._run)
        self._thread.daemon = True
        self.error = None
        self.count = 0
        self._thread.start()

    def _run(self):
        """Worker thread: send every request and validate each response."""
        self._logger.log("TestClient connecting on %s" % self._conn_addr)
        client = HTTPConnection(self._conn_addr, timeout=TIMEOUT)
        self._logger.log("TestClient connected")
        for loop in range(self._repeat):
            self._logger.log("TestClient start request %d" % loop)
            for op, tests in self._tests.items():
                for req, _, val in tests:
                    self._logger.log("TestClient sending %s %s request" % (op, req.target))
                    # tag each request so the server can echo it back
                    req.send_request(client,
                                     {"test-echo": "%s-%s-%s-%s" % (self._id,
                                                                    loop,
                                                                    op,
                                                                    req.target)})
                    self._logger.log("TestClient getting %s response" % op)
                    try:
                        rsp = client.getresponse()
                    except HTTPException as exc:
                        self._logger.log("TestClient response failed: %s" % exc)
                        self.error = str(exc)
                        return
                    self._logger.log("TestClient response %s received" % op)
                    if val:
                        try:
                            body = val.check_response(rsp)
                        except Exception as exc:
                            self._logger.log("TestClient response invalid: %s"
                                             % str(exc))
                            self.error = "client failed: %s" % str(exc)
                            return
                        if req.method == "BODY" and body != b'':
                            self._logger.log("TestClient response invalid: %s"
                                             % "body present!")
                            self.error = "error: body present!"
                            return
                    self.count += 1
                    self._logger.log("TestClient request %s %s completed!" %
                                     (op, req.target))
        client.close()
        self._logger.log("TestClient to %s closed" % self._conn_addr)

    def wait(self, timeout=TIMEOUT):
        """
        Wait up to *timeout* seconds for the client thread to finish.

        Bug fix: the original implementation ignored its *timeout*
        parameter and always joined with the module-level TIMEOUT.
        """
        self._thread.join(timeout=timeout)
        self._logger.log("TestClient %s shut down" % self._conn_addr)
        sleep(0.5)  # fudge factor allow socket close to complete

    def dump_log(self):
        """Dump this client's log to the console (used on test failure)."""
        self._logger.dump()
class TestServer:
    """
    A HTTPServer running in a separate thread
    """
    # keep pytest from collecting this helper as a test class
    __test__ = False

    @classmethod
    def new_server(cls, server_port, client_port, tests, handler_cls=None):
        """
        Create a TestServer bound to server_port, retrying on OSError.

        Creation can fail while a previously bound socket on the same port
        is still being released; retry once per second for at most 4
        attempts.  Returns the server, or None if all attempts failed.
        """
        num_attempts = 0
        max_attempts = 4
        while num_attempts < max_attempts:
            try:
                # Create an instance of TestServer. This might fail because the port has
                # not been relinquished yet. Try for a max of 4 seconds before giving up.
                server11 = TestServer(server_port=server_port,
                                      client_port=client_port,
                                      tests=tests,
                                      handler_cls=handler_cls)
                # Return the successfully created server.
                return server11
            except OSError:
                # TestServer creation failed. Try again in one second, for a max of 4 seconds.
                num_attempts += 1
                sleep(1)
        return None

    def __init__(self, server_port, client_port, tests, handler_cls=None):
        self._logger = Logger(title="TestServer", print_to_console=False)
        self._client_port = client_port
        self._server_addr = ("", server_port)
        self._server = MyHTTPServer(self._server_addr,
                                    handler_cls or RequestHandler,
                                    tests)
        # NOTE(review): this assignment happens after MyHTTPServer has
        # already bound the listening socket, so it has no effect here;
        # http.server.HTTPServer enables address reuse by default.
        self._server.allow_reuse_address = True
        self._thread = Thread(target=self._run)
        self._thread.daemon = True
        self._thread.start()

    def _run(self):
        """Worker thread: serve requests until server_killed is set."""
        self._logger.log("TestServer listening on %s:%s" % self._server_addr)
        try:
            self._server.server_killed = False
            # the RequestHandler sets server_killed on POST /SHUTDOWN
            while not self._server.server_killed:
                self._server.handle_request()
        except Exception as exc:
            self._logger.log("TestServer %s crash: %s" %
                             (self._server_addr, exc))
            raise
        self._logger.log("TestServer %s:%s closed" % self._server_addr)

    def wait(self, timeout=TIMEOUT):
        """
        Shut the server down and wait up to *timeout* seconds for its
        thread to exit.  The number of requests the server handled is
        published in self.request_count.

        Bug fix: the original implementation ignored its *timeout*
        parameter and always joined with the module-level TIMEOUT.
        """
        self._logger.log("TestServer %s:%s shutting down" % self._server_addr)
        self.request_count = 0
        if self._thread.is_alive():
            # wake the serving loop with an out-of-band shutdown request
            client = HTTPConnection("127.0.0.1:%s" % self._client_port,
                                    timeout=TIMEOUT)
            client.putrequest("POST", "/SHUTDOWN")
            client.putheader("Content-Length", "0")
            client.endheaders()
            # 13 == len('Server Closed')
            client.getresponse().read(13)
            client.close()
            self._thread.join(timeout=timeout)
        if self._server:
            self._server.server_close()
            self.request_count = self._server.request_count
            del self._server
        sleep(0.5)  # fudge factor allow socket close to complete
def http1_ping(sport, cport):
    """
    Test the HTTP path by doing a simple GET request.

    Starts a TestServer on *sport*, runs one client request through
    *cport*, and returns (completed_request_count, error_or_None).
    """
    request = RequestMsg("GET", "/GET/ping",
                         headers={"Content-Length": 0})
    response = ResponseMsg(200, reason="OK",
                           headers={"Content-Length": 4,
                                    "Content-Type": "text/plain;charset=utf-8"},
                           body=b'pong')
    validator = ResponseValidator(expect_body=b'pong')
    TEST = {"GET": [(request, response, validator)]}

    server = TestServer.new_server(sport, cport, TEST)
    client = ThreadedTestClient(tests=TEST, port=cport)
    client.wait()
    server.wait()
    return client.count, client.error
class ResponseMsg:
    """
    A canned HTTP response message.  The server's request handler invokes
    send_response() to write the status line, headers and body back to
    the client; when *error* is set an error response is sent instead.
    """

    def __init__(self, status, version=None, reason=None,
                 headers=None, body=None, error=False):
        self.status = status
        self.version = version or "HTTP/1.1"
        self.reason = reason
        self.headers = headers or {}
        self.body = body
        self.error = error

    def send_response(self, handler, extra_headers=None):
        """Write this response via *handler* (a BaseHTTPRequestHandler)."""
        if self.error:
            handler.send_error(self.status,
                               message=self.reason)
            return
        handler.send_response(self.status, self.reason)
        # canned headers first, then any per-request extras
        for table in (self.headers, extra_headers or {}):
            for name, value in table.items():
                handler.send_header(name, value)
        handler.end_headers()
        if self.body:
            handler.wfile.write(self.body)
            handler.wfile.flush()
class RequestMsg:
    """
    A canned HTTP request message.  send_request() writes the request
    line, headers and optional body to an HTTPConnection.
    """

    def __init__(self, method, target, headers=None, body=None):
        self.method = method
        self.target = target
        self.headers = headers or {}
        self.body = body

    def send_request(self, conn, extra_headers=None):
        """Issue this request on *conn*, appending *extra_headers* if given."""
        conn.putrequest(self.method, self.target)
        # canned headers first, then any per-request extras
        for table in (self.headers, extra_headers or {}):
            for name, value in table.items():
                conn.putheader(name, value)
        conn.endheaders()
        if self.body:
            conn.send(self.body)
class ResponseValidator:
    """
    Validate a response as received by the HTTP client.  Checks the
    status code, any expected headers, and optionally the exact body.
    """

    def __init__(self, status=200, expect_headers=None, expect_body=None):
        self.status = status
        self.expect_headers = {} if expect_headers is None else expect_headers
        self.expect_body = expect_body

    def check_response(self, rsp):
        """Raise on any mismatch; return the (fully read) response body."""
        if self.status and rsp.status != self.status:
            raise Exception("Bad response code, expected %s got %s"
                            % (self.status, rsp.status))
        for name, expected in self.expect_headers.items():
            actual = rsp.getheader(name)
            if actual != expected:
                raise Exception("Missing/bad header (%s), expected %s got %s"
                                % (name, expected, actual))
        # always drain the body so the connection can be reused
        body = rsp.read()
        if self.expect_body and self.expect_body != body:
            raise Exception("Bad response body expected %s got %s"
                            % (self.expect_body, body))
        return body
class CommonHttp1Edge2EdgeTest:
    """
    Edge-to-edge HTTP/1.x tests shared via mixin.  The concrete TestCase
    subclass must provide the router fixture self.EA2 and the
    http_server*/http_listener* port attributes used below.
    """

    def test_01_concurrent_requests(self):
        """
        Test multiple concurrent clients sending streaming messages
        """
        REQ_CT = 3  # 3 requests per TEST_*
        TESTS_11 = {
            "PUT": [
                (RequestMsg("PUT", "/PUT/test_01_concurrent_requests_11",
                            headers={
                                "Transfer-encoding": "chunked",
                                "Content-Type": "text/plain;charset=utf-8"
                            },
                            # ~384K to trigger Q2
                            body=b'20000\r\n' + b'1' * 0x20000 + b'\r\n'
                            + b'20000\r\n' + b'2' * 0x20000 + b'\r\n'
                            + b'20000\r\n' + b'3' * 0x20000 + b'\r\n'
                            + b'13\r\nEND OF TRANSMISSION\r\n'
                            + b'0\r\n\r\n'),
                 ResponseMsg(201, reason="Created",
                             headers={"Test-Header": "/PUT/test_01_concurrent_requests_11",
                                      "Content-Length": "0"}),
                 ResponseValidator(status=201)
                 )],
            "GET": [
                (RequestMsg("GET", "/GET/test_01_concurrent_requests_11_small",
                            headers={"Content-Length": "000"}),
                 ResponseMsg(200, reason="OK",
                             headers={
                                 "Content-Length": "19",
                                 "Content-Type": "text/plain;charset=utf-8",
                                 "Test-Header": "/GET/test_01_concurrent_requests_11_small"
                             },
                             body=b'END OF TRANSMISSION'),
                 ResponseValidator(status=200)),
                (RequestMsg("GET", "/GET/test_01_concurrent_requests_11",
                            headers={"Content-Length": "000"}),
                 ResponseMsg(200, reason="OK",
                             headers={
                                 "transfer-Encoding": "chunked",
                                 "Content-Type": "text/plain;charset=utf-8",
                                 "Test-Header": "/GET/test_01_concurrent_requests_11"
                             },
                             # ~384K to trigger Q2
                             body=b'20000\r\n' + b'1' * 0x20000 + b'\r\n'
                             + b'20000\r\n' + b'2' * 0x20000 + b'\r\n'
                             + b'20000\r\n' + b'3' * 0x20000 + b'\r\n'
                             + b'13\r\nEND OF TRANSMISSION\r\n'
                             + b'0\r\n\r\n'),
                 ResponseValidator(status=200)
                 )],
        }

        TESTS_10 = {
            "POST": [
                (RequestMsg("POST", "/POST/test_01_concurrent_requests_10",
                            headers={"Content-Type": "text/plain;charset=utf-8",
                                     "Content-Length": "393216"},
                            body=b'P' * 393197
                            + b'END OF TRANSMISSION'),
                 ResponseMsg(201, reason="Created",
                             headers={"Test-Header": "/POST/test_01_concurrent_requests_10",
                                      "Content-Length": "0"}),
                 ResponseValidator(status=201)
                 )],
            "GET": [
                (RequestMsg("GET", "/GET/test_01_concurrent_requests_10_small",
                            headers={"Content-Length": "000"}),
                 ResponseMsg(200, reason="OK",
                             # no content-length, server must close conn when done
                             headers={"Test-Header": "/GET/test_01_concurrent_requests_10_small",
                                      "Content-Type": "text/plain;charset=utf-8"},
                             body=b'END OF TRANSMISSION'),
                 ResponseValidator(status=200)),
                (RequestMsg("GET", "/GET/test_01_concurrent_requests_10",
                            headers={"Content-Length": "000"}),
                 ResponseMsg(200, reason="OK",
                             headers={"Test-Header": "/GET/test_01_concurrent_requests_10",
                                      "Content-Length": "393215",
                                      "Content-Type": "text/plain;charset=utf-8"},
                             body=b'G' * 393196
                             + b'END OF TRANSMISSION'),
                 ResponseValidator(status=200)
                 )],
        }

        server11 = TestServer.new_server(self.http_server11_port, self.http_listener11_port, TESTS_11)
        self.assertIsNotNone(server11, TEST_SERVER_ERROR % self.http_server11_port)
        server10 = TestServer.new_server(self.http_server10_port, self.http_listener10_port, TESTS_10,
                                         handler_cls=RequestHandler10)
        self.assertIsNotNone(server10, TEST_SERVER_ERROR % self.http_server10_port)
        self.EA2.wait_connectors()

        repeat_ct = 10
        client_ct = 4  # per version
        clients = []
        for _ in range(client_ct):
            clients.append(ThreadedTestClient(TESTS_11,
                                              self.http_listener11_port,
                                              repeat=repeat_ct))
            clients.append(ThreadedTestClient(TESTS_10,
                                              self.http_listener10_port,
                                              repeat=repeat_ct))

        for client in clients:
            client.wait()
            try:
                self.assertIsNone(client.error)
                self.assertEqual(repeat_ct * REQ_CT, client.count)
            except Exception:
                # dump the failing client's log before re-raising
                client.dump_log()
                raise

        server11.wait()
        self.assertEqual(client_ct * repeat_ct * REQ_CT,
                         server11.request_count)
        server10.wait()
        self.assertEqual(client_ct * repeat_ct * REQ_CT,
                         server10.request_count)

    def test_02_credit_replenish(self):
        """
        Verify credit is replenished by sending > the default credit window
        requests across the routers. The default credit window is 250
        """
        TESTS = {
            "GET": [
                (RequestMsg("GET", "/GET/test_02_credit_replenish",
                            headers={"Content-Length": "000"}),
                 ResponseMsg(200, reason="OK",
                             headers={"Content-Length": "24",
                                      "Content-Type": "text/plain;charset=utf-8"},
                             body=b'test_02_credit_replenish'),
                 ResponseValidator(status=200),
                 ),
            ]
        }
        server = TestServer.new_server(self.http_server11_port, self.http_listener11_port, TESTS)
        self.assertIsNotNone(server, TEST_SERVER_ERROR % self.http_server11_port)
        self.EA2.wait_connectors()

        # 300 > the 250 credit window, so credit must be replenished
        client = ThreadedTestClient(TESTS,
                                    self.http_listener11_port,
                                    repeat=300)
        client.wait()
        self.assertIsNone(client.error)
        self.assertEqual(300, client.count)
        server.wait()

    def test_03_server_reconnect(self):
        """
        Verify server reconnect logic.
        """
        TESTS = {
            "GET": [
                (RequestMsg("GET", "/GET/test_03_server_reconnect",
                            headers={"Content-Length": "000"}),
                 ResponseMsg(200, reason="OK",
                             headers={"Content-Length": "24",
                                      "Content-Type": "text/plain;charset=utf-8"},
                             body=b'test_03_server_reconnect'),
                 ResponseValidator(status=200),
                 ),
            ]
        }

        # bring up the server and send some requests. This will cause the
        # router to grant credit for clients
        server = TestServer.new_server(self.http_server11_port, self.http_listener11_port, TESTS)
        self.assertIsNotNone(server, TEST_SERVER_ERROR % self.http_server11_port)
        self.EA2.wait_connectors()

        client = ThreadedTestClient(TESTS,
                                    self.http_listener11_port,
                                    repeat=2)
        client.wait()
        self.assertIsNone(client.error)
        self.assertEqual(2, client.count)

        # simulate server loss. Fire up a client which should be granted
        # credit since the adaptor does not immediately teardown the server
        # links. This will cause the adaptor to run qdr_connection_process
        # without a raw connection available to wake the I/O thread..
        server.wait()
        client = ThreadedTestClient(TESTS,
                                    self.http_listener11_port,
                                    repeat=2)
        # the adaptor will detach the links to the server if the connection
        # cannot be reestablished after 2.5 seconds. Restart the server before
        # that occurrs to prevent client messages from being released with 503
        # status.
        server = TestServer.new_server(self.http_server11_port, self.http_listener11_port, TESTS)
        self.assertIsNotNone(server, TEST_SERVER_ERROR % self.http_server11_port)
        client.wait()
        self.assertIsNone(client.error)
        self.assertEqual(2, client.count)
        server.wait()

    def test_04_server_pining_for_the_fjords(self):
        """
        Test permanent loss of server
        """
        TESTS = {
            "GET": [
                (RequestMsg("GET", "/GET/test_04_fjord_pining",
                            headers={"Content-Length": "000"}),
                 ResponseMsg(200, reason="OK",
                             headers={"Content-Length": "20",
                                      "Content-Type": "text/plain;charset=utf-8"},
                             body=b'test_04_fjord_pining'),
                 ResponseValidator(status=200),
                 ),
            ]
        }

        # bring up the server and send some requests. This will cause the
        # router to grant credit for clients
        server = TestServer.new_server(self.http_server11_port, self.http_listener11_port, TESTS)
        self.assertIsNotNone(server, TEST_SERVER_ERROR % self.http_server11_port)
        self.EA2.wait_connectors()

        client = ThreadedTestClient(TESTS, self.http_listener11_port)
        client.wait()
        self.assertIsNone(client.error)
        self.assertEqual(1, client.count)

        TESTS_FAIL = {
            "GET": [
                (RequestMsg("GET", "/GET/test_04_fjord_pining",
                            headers={"Content-Length": "000"}),
                 ResponseMsg(200, reason="OK",
                             headers={"Content-Length": "20",
                                      "Content-Type": "text/plain;charset=utf-8"},
                             body=b'test_04_fjord_pining'),
                 ResponseValidator(status=503),
                 ),
            ]
        }

        # Kill the server then issue client requests. These requests will be
        # held on the server's outgoing links until they expire (2.5 seconds).
        # At that point the client will receive a 503 response.
        server.wait()
        client = ThreadedTestClient(TESTS_FAIL, self.http_listener11_port)
        client.wait()
        self.assertIsNone(client.error)
        self.assertEqual(1, client.count)

        # ensure links recover once the server re-appears
        server = TestServer.new_server(self.http_server11_port, self.http_listener11_port, TESTS)
        self.assertIsNotNone(server, TEST_SERVER_ERROR % self.http_server11_port)
        self.EA2.wait_connectors()

        client = ThreadedTestClient(TESTS, self.http_listener11_port)
        client.wait()
        self.assertIsNone(client.error)
        self.assertEqual(1, client.count)
        server.wait()

    def test_05_large_streaming_msg(self):
        """
        Verify large streaming message transfer
        """
        TESTS_11 = {
            "PUT": [
                (RequestMsg("PUT", "/PUT/streaming_test_11",
                            headers={
                                "Transfer-encoding": "chunked",
                                "Content-Type": "text/plain;charset=utf-8"
                            },
                            # 4 chunks each ~= 600K
                            body=b'927C1\r\n' + b'0' * 0x927C0 + b'X\r\n'
                            + b'927C0\r\n' + b'1' * 0x927C0 + b'\r\n'
                            + b'927C1\r\n' + b'2' * 0x927C0 + b'X\r\n'
                            + b'927C0\r\n' + b'3' * 0x927C0 + b'\r\n'
                            + b'0\r\n\r\n'),
                 ResponseMsg(201, reason="Created",
                             headers={"Response-Header": "data",
                                      "Content-Length": "0"}),
                 ResponseValidator(status=201))
            ],
            "GET": [
                (RequestMsg("GET", "/GET/streaming_test_11",
                            headers={"Content-Length": "000"}),
                 ResponseMsg(200, reason="OK",
                             headers={
                                 "transfer-Encoding": "chunked",
                                 "Content-Type": "text/plain;charset=utf-8"
                             },
                             # two 1.2MB chunk
                             body=b'124f80\r\n' + b'4' * 0x124F80 + b'\r\n'
                             + b'124f80\r\n' + b'5' * 0x124F80 + b'\r\n'
                             + b'0\r\n\r\n'),
                 ResponseValidator(status=200))
            ],
        }

        TESTS_10 = {
            "POST": [
                (RequestMsg("POST", "/POST/streaming_test_10",
                            headers={"Header-1": "H" * 2048,
                                     "Content-Length": "2097155",
                                     "Content-Type": "text/plain;charset=utf-8"},
                            body=b'P' * 2097155),
                 ResponseMsg(201, reason="Created",
                             headers={"Response-Header": "data",
                                      "Content-Length": "0"}),
                 ResponseValidator(status=201))
            ],
            "GET": [
                (RequestMsg("GET", "/GET/streaming_test_10",
                            headers={"Content-Length": "000"}),
                 ResponseMsg(200, reason="OK",
                             headers={"Content-Length": "1999999",
                                      "Content-Type": "text/plain;charset=utf-8"},
                             body=b'G' * 1999999),
                 ResponseValidator(status=200))
            ],
        }

        server11 = TestServer.new_server(self.http_server11_port, self.http_listener11_port, TESTS_11)
        self.assertIsNotNone(server11, TEST_SERVER_ERROR % self.http_server11_port)
        server10 = TestServer.new_server(self.http_server10_port, self.http_listener10_port, TESTS_10,
                                         handler_cls=RequestHandler10)
        self.assertIsNotNone(server10, TEST_SERVER_ERROR % self.http_server10_port)
        self.EA2.wait_connectors()

        client11 = ThreadedTestClient(TESTS_11,
                                      self.http_listener11_port,
                                      repeat=2)
        client11.wait()
        self.assertIsNone(client11.error)
        self.assertEqual(4, client11.count)

        client10 = ThreadedTestClient(TESTS_10,
                                      self.http_listener10_port,
                                      repeat=2)
        client10.wait()
        self.assertIsNone(client10.error)
        self.assertEqual(4, client10.count)

        server11.wait()
        server10.wait()
class CommonHttp1OneRouterTest:
    """
    Single-router HTTP/1.x request/response tests shared via mixin.  The
    concrete TestCase subclass must supply the http_listener11_port and
    http_listener10_port attributes used below.
    """
    # HTTP/1.1 compliant test cases
    TESTS_11 = {
        #
        # GET
        #
        "GET": [
            (RequestMsg("GET", "/GET/error",
                        headers={"Content-Length": 0}),
             ResponseMsg(400, reason="Bad breath", error=True),
             ResponseValidator(status=400)),

            (RequestMsg("GET", "/GET/no_content",
                        headers={"Content-Length": 0}),
             ResponseMsg(204, reason="No Content"),
             ResponseValidator(status=204)),

            (RequestMsg("GET", "/GET/content_len",
                        headers={"Content-Length": "00"}),
             ResponseMsg(200, reason="OK",
                         headers={"Content-Length": 1,
                                  "Content-Type": "text/plain;charset=utf-8"},
                         body=b'?'),
             ResponseValidator(expect_headers={'Content-Length': '1'},
                               expect_body=b'?')),

            (RequestMsg("GET", "/GET/content_len_511",
                        headers={"Content-Length": 0}),
             ResponseMsg(200, reason="OK",
                         headers={"Content-Length": 511,
                                  "Content-Type": "text/plain;charset=utf-8"},
                         body=b'X' * 511),
             ResponseValidator(expect_headers={'Content-Length': '511'},
                               expect_body=b'X' * 511)),

            (RequestMsg("GET", "/GET/content_len_4096",
                        headers={"Content-Length": 0}),
             ResponseMsg(200, reason="OK",
                         headers={"Content-Length": 4096,
                                  "Content-Type": "text/plain;charset=utf-8"},
                         body=b'X' * 4096),
             ResponseValidator(expect_headers={'Content-Length': '4096'},
                               expect_body=b'X' * 4096)),

            (RequestMsg("GET", "/GET/chunked",
                        headers={"Content-Length": 0}),
             ResponseMsg(200, reason="OK",
                         headers={"transfer-encoding": "chunked",
                                  "Content-Type": "text/plain;charset=utf-8"},
                         # note: the chunk length does not count the trailing CRLF
                         body=b'16\r\n'
                         + b'Mary had a little pug \r\n'
                         + b'1b\r\n'
                         + b'Its name was "Skupper-Jack"\r\n'
                         + b'0\r\n'
                         + b'Optional: Trailer\r\n'
                         + b'Optional: Trailer\r\n'
                         + b'\r\n'),
             ResponseValidator(expect_headers={'transfer-encoding': 'chunked'},
                               expect_body=b'Mary had a little pug Its name was "Skupper-Jack"')),

            (RequestMsg("GET", "/GET/chunked_large",
                        headers={"Content-Length": 0}),
             ResponseMsg(200, reason="OK",
                         headers={"transfer-encoding": "chunked",
                                  "Content-Type": "text/plain;charset=utf-8"},
                         # note: the chunk length does not count the trailing CRLF
                         body=b'1\r\n'
                         + b'?\r\n'
                         + b'800\r\n'
                         + b'X' * 0x800 + b'\r\n'
                         + b'13\r\n'
                         + b'Y' * 0x13 + b'\r\n'
                         + b'0\r\n'
                         + b'Optional: Trailer\r\n'
                         + b'Optional: Trailer\r\n'
                         + b'\r\n'),
             ResponseValidator(expect_headers={'transfer-encoding': 'chunked'},
                               expect_body=b'?' + b'X' * 0x800 + b'Y' * 0x13)),

            # informational 100 response followed by the final response
            (RequestMsg("GET", "/GET/info_content_len",
                        headers={"Content-Length": 0}),
             [ResponseMsg(100, reason="Continue",
                          headers={"Blab": 1, "Blob": "?"}),
              ResponseMsg(200, reason="OK",
                          headers={"Content-Length": 1,
                                   "Content-Type": "text/plain;charset=utf-8"},
                          body=b'?')],
             ResponseValidator(expect_headers={'Content-Type': "text/plain;charset=utf-8"},
                               expect_body=b'?')),

            # (RequestMsg("GET", "/GET/no_length",
            #             headers={"Content-Length": "0"}),
            #  ResponseMsg(200, reason="OK",
            #              headers={"Content-Type": "text/plain;charset=utf-8",
            #                       "connection": "close"
            #                       },
            #              body=b'Hi! ' * 1024 + b'X'),
            #  ResponseValidator(expect_body=b'Hi! ' * 1024 + b'X')),
        ],
        #
        # HEAD
        #
        "HEAD": [
            (RequestMsg("HEAD", "/HEAD/test_01",
                        headers={"Content-Length": "0"}),
             ResponseMsg(200, headers={"App-Header-1": "Value 01",
                                       "Content-Length": "10",
                                       "App-Header-2": "Value 02"},
                         body=None),
             ResponseValidator(expect_headers={"App-Header-1": "Value 01",
                                               "Content-Length": "10",
                                               "App-Header-2": "Value 02"})
             ),
            (RequestMsg("HEAD", "/HEAD/test_02",
                        headers={"Content-Length": "0"}),
             ResponseMsg(200, headers={"App-Header-1": "Value 01",
                                       "Transfer-Encoding": "chunked",
                                       "App-Header-2": "Value 02"}),
             ResponseValidator(expect_headers={"App-Header-1": "Value 01",
                                               "Transfer-Encoding": "chunked",
                                               "App-Header-2": "Value 02"})),

            (RequestMsg("HEAD", "/HEAD/test_03",
                        headers={"Content-Length": "0"}),
             ResponseMsg(200, headers={"App-Header-3": "Value 03"}),
             ResponseValidator(expect_headers={"App-Header-3": "Value 03"})),
        ],
        #
        # POST
        #
        "POST": [
            (RequestMsg("POST", "/POST/test_01",
                        headers={"App-Header-1": "Value 01",
                                 "Content-Length": "19",
                                 "Content-Type": "application/x-www-form-urlencoded"},
                        body=b'one=1&two=2&three=3'),
             ResponseMsg(200, reason="OK",
                         headers={"Response-Header": "whatever",
                                  "Transfer-Encoding": "chunked"},
                         body=b'8\r\n'
                         + b'12345678\r\n'
                         + b'f\r\n'
                         + b'abcdefghijklmno\r\n'
                         + b'000\r\n'
                         + b'\r\n'),
             ResponseValidator(expect_body=b'12345678abcdefghijklmno')
             ),
            (RequestMsg("POST", "/POST/test_02",
                        headers={"App-Header-1": "Value 01",
                                 "Transfer-Encoding": "chunked"},
                        body=b'01\r\n'
                        + b'!\r\n'
                        + b'0\r\n\r\n'),
             ResponseMsg(200, reason="OK",
                         headers={"Response-Header": "whatever",
                                  "Content-Length": "9"},
                         body=b'Hi There!'),
             ResponseValidator(expect_body=b'Hi There!')
             ),
        ],
        #
        # PUT
        #
        "PUT": [
            (RequestMsg("PUT", "/PUT/test_01",
                        headers={"Put-Header-1": "Value 01",
                                 "Transfer-Encoding": "chunked",
                                 "Content-Type": "text/plain;charset=utf-8"},
                        body=b'80\r\n'
                        + b'$' * 0x80 + b'\r\n'
                        + b'0\r\n\r\n'),
             ResponseMsg(201, reason="Created",
                         headers={"Response-Header": "whatever",
                                  "Content-length": "3"},
                         body=b'ABC'),
             ResponseValidator(status=201, expect_body=b'ABC')
             ),
            (RequestMsg("PUT", "/PUT/test_02",
                        headers={"Put-Header-1": "Value 01",
                                 "Content-length": "0",
                                 "Content-Type": "text/plain;charset=utf-8"}),
             ResponseMsg(201, reason="Created",
                         headers={"Response-Header": "whatever",
                                  "Transfer-Encoding": "chunked"},
                         body=b'1\r\n$\r\n0\r\n\r\n'),
             ResponseValidator(status=201, expect_body=b'$')
             ),
        ]
    }

    # HTTP/1.0 compliant test cases (no chunked, response length unspecified)
    TESTS_10 = {
        #
        # GET
        #
        "GET": [
            (RequestMsg("GET", "/GET/error",
                        headers={"Content-Length": 0}),
             ResponseMsg(400, reason="Bad breath", error=True),
             ResponseValidator(status=400)),

            (RequestMsg("GET", "/GET/no_content",
                        headers={"Content-Length": 0}),
             ResponseMsg(204, reason="No Content"),
             ResponseValidator(status=204)),

            (RequestMsg("GET", "/GET/content_len_511",
                        headers={"Content-Length": 0}),
             ResponseMsg(200, reason="OK",
                         headers={"Content-Length": 511,
                                  "Content-Type": "text/plain;charset=utf-8"},
                         body=b'X' * 511),
             ResponseValidator(expect_headers={'Content-Length': '511'},
                               expect_body=b'X' * 511)),

            (RequestMsg("GET", "/GET/content_len_4096",
                        headers={"Content-Length": 0}),
             ResponseMsg(200, reason="OK",
                         headers={"Content-Type": "text/plain;charset=utf-8"},
                         body=b'X' * 4096),
             ResponseValidator(expect_headers={"Content-Type": "text/plain;charset=utf-8"},
                               expect_body=b'X' * 4096)),

            (RequestMsg("GET", "/GET/info_content_len",
                        headers={"Content-Length": 0}),
             ResponseMsg(200, reason="OK",
                         headers={"Content-Type": "text/plain;charset=utf-8"},
                         body=b'?'),
             ResponseValidator(expect_headers={'Content-Type': "text/plain;charset=utf-8"},
                               expect_body=b'?')),

            # test support for "folded headers"
            (RequestMsg("GET", "/GET/folded_header_01",
                        headers={"Content-Length": 0}),
             ResponseMsg(200, reason="OK",
                         headers={"Content-Type": "text/plain;charset=utf-8",
                                  "Content-Length": 1,
                                  "folded-header": "One\r\n \r\n\tTwo"},
                         body=b'X'),
             ResponseValidator(expect_headers={"Content-Type":
                                               "text/plain;charset=utf-8",
                                               "folded-header":
                                               "One \tTwo"},
                               expect_body=b'X')),

            (RequestMsg("GET", "/GET/folded_header_02",
                        headers={"Content-Length": 0}),
             ResponseMsg(200, reason="OK",
                         headers={"Content-Type": "text/plain;charset=utf-8",
                                  "Content-Length": 1,
                                  "folded-header": "\r\n \r\n\tTwo",
                                  "another-header": "three"},
                         body=b'X'),
             ResponseValidator(expect_headers={"Content-Type":
                                               "text/plain;charset=utf-8",
                                               # trim leading and
                                               # trailing ws:
                                               "folded-header":
                                               "Two",
                                               "another-header":
                                               "three"},
                               expect_body=b'X')),
        ],
        #
        # HEAD
        #
        "HEAD": [
            (RequestMsg("HEAD", "/HEAD/test_01",
                        headers={"Content-Length": "0"}),
             ResponseMsg(200, headers={"App-Header-1": "Value 01",
                                       "Content-Length": "10",
                                       "App-Header-2": "Value 02"},
                         body=None),
             ResponseValidator(expect_headers={"App-Header-1": "Value 01",
                                               "Content-Length": "10",
                                               "App-Header-2": "Value 02"})
             ),
            (RequestMsg("HEAD", "/HEAD/test_03",
                        headers={"Content-Length": "0"}),
             ResponseMsg(200, headers={"App-Header-3": "Value 03"}),
             ResponseValidator(expect_headers={"App-Header-3": "Value 03"})),
        ],
        #
        # POST
        #
        "POST": [
            (RequestMsg("POST", "/POST/test_01",
                        headers={"App-Header-1": "Value 01",
                                 "Content-Length": "19",
                                 "Content-Type": "application/x-www-form-urlencoded"},
                        body=b'one=1&two=2&three=3'),
             ResponseMsg(200, reason="OK",
                         headers={"Response-Header": "whatever"},
                         body=b'12345678abcdefghijklmno'),
             ResponseValidator(expect_body=b'12345678abcdefghijklmno')
             ),
            (RequestMsg("POST", "/POST/test_02",
                        headers={"App-Header-1": "Value 01",
                                 "Content-Length": "5"},
                        body=b'01234'),
             ResponseMsg(200, reason="OK",
                         headers={"Response-Header": "whatever",
                                  "Content-Length": "9"},
                         body=b'Hi There!'),
             ResponseValidator(expect_body=b'Hi There!')
             ),
        ],
        #
        # PUT
        #
        "PUT": [
            (RequestMsg("PUT", "/PUT/test_01",
                        headers={"Put-Header-1": "Value 01",
                                 "Content-Length": "513",
                                 "Content-Type": "text/plain;charset=utf-8"},
                        body=b'$' * 513),
             ResponseMsg(201, reason="Created",
                         headers={"Response-Header": "whatever",
                                  "Content-length": "3"},
                         body=b'ABC'),
             ResponseValidator(status=201, expect_body=b'ABC')
             ),
            (RequestMsg("PUT", "/PUT/test_02",
                        headers={"Put-Header-1": "Value 01",
                                 "Content-length": "0",
                                 "Content-Type": "text/plain;charset=utf-8"}),
             ResponseMsg(201, reason="Created",
                         headers={"Response-Header": "whatever"},
                         body=b'No Content Length'),
             ResponseValidator(status=201, expect_body=b'No Content Length')
             ),
        ]
    }

    def _do_request(self, client, tests):
        # Send each canned request on *client* and validate its response;
        # any validation error is turned into a test failure.
        for req, _, val in tests:
            req.send_request(client)
            rsp = client.getresponse()
            try:
                body = val.check_response(rsp)
            except Exception as exc:
                self.fail("request failed: %s" % str(exc))
            if req.method == "BODY":
                self.assertEqual(b'', body)

    def test_001_get(self):
        client = HTTPConnection("127.0.0.1:%s" % self.http_listener11_port,
                                timeout=TIMEOUT)
        self._do_request(client, self.TESTS_11["GET"])
        client.close()

    def test_002_head(self):
        client = HTTPConnection("127.0.0.1:%s" % self.http_listener11_port,
                                timeout=TIMEOUT)
        self._do_request(client, self.TESTS_11["HEAD"])
        client.close()

    def test_003_post(self):
        client = HTTPConnection("127.0.0.1:%s" % self.http_listener11_port,
                                timeout=TIMEOUT)
        self._do_request(client, self.TESTS_11["POST"])
        client.close()

    def test_004_put(self):
        client = HTTPConnection("127.0.0.1:%s" % self.http_listener11_port,
                                timeout=TIMEOUT)
        self._do_request(client, self.TESTS_11["PUT"])
        client.close()

    def test_006_head_10(self):
        client = HTTPConnection("127.0.0.1:%s" % self.http_listener10_port,
                                timeout=TIMEOUT)
        self._do_request(client, self.TESTS_10["HEAD"])
        client.close()

    def test_007_post_10(self):
        client = HTTPConnection("127.0.0.1:%s" % self.http_listener10_port,
                                timeout=TIMEOUT)
        self._do_request(client, self.TESTS_10["POST"])
        client.close()

    def test_008_put_10(self):
        client = HTTPConnection("127.0.0.1:%s" % self.http_listener10_port,
                                timeout=TIMEOUT)
        self._do_request(client, self.TESTS_10["PUT"])
        client.close()
class Http1OneRouterTestBase(TestCase):
    """
    Base class for the single-router HTTP/1.x tests: allocates the HTTP
    server/listener ports and provides a router factory.
    """

    @classmethod
    def router(cls, name, mode, extra):
        """
        Create, start and remember a Qdrouterd using the standard base
        configuration plus any *extra* config entries.  Returns the router.
        """
        config = [
            ('router', {'mode': mode, 'id': name}),
            ('listener', {'role': 'normal',
                          'port': cls.tester.get_port()}),
            ('address', {'prefix': 'closest', 'distribution': 'closest'}),
            ('address',
             {'prefix': 'multicast', 'distribution': 'multicast'}),
        ]
        if extra:
            config.extend(extra)
        config = Qdrouterd.Config(config)
        cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
        return cls.routers[-1]

    @classmethod
    def setUpClass(cls):
        """Start a router"""
        super(Http1OneRouterTestBase, cls).setUpClass()
        # Consistency fix: router() appends to cls.routers, but this base
        # class never created the list (Http1Edge2EdgeTestBase does).
        # Initialize it here so subclasses need not do so themselves.
        cls.routers = []
        cls.http_server11_port = cls.tester.get_port()
        cls.http_server10_port = cls.tester.get_port()
        cls.http_listener11_port = cls.tester.get_port()
        cls.http_listener10_port = cls.tester.get_port()
class Http1Edge2EdgeTestBase(TestCase):
    """
    Base class for the edge-to-edge HTTP/1.x tests: allocates the interior
    and edge router ports plus the HTTP server/listener ports, and provides
    a router factory.
    """

    @classmethod
    def router(cls, name, mode, extra):
        """
        Create, start and remember a Qdrouterd using the standard base
        configuration plus any *extra* config entries.  Returns the router.
        """
        base_config = [
            ('router', {'mode': mode, 'id': name}),
            ('listener', {'role': 'normal',
                          'port': cls.tester.get_port()}),
            ('address', {'prefix': 'closest', 'distribution': 'closest'}),
            ('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
        ]
        if extra:
            base_config.extend(extra)
        router = cls.tester.qdrouterd(name, Qdrouterd.Config(base_config),
                                      wait=True)
        cls.routers.append(router)
        return router

    @classmethod
    def setUpClass(cls):
        """Start a router"""
        super(Http1Edge2EdgeTestBase, cls).setUpClass()
        cls.routers = []
        cls.INTA_edge1_port = cls.tester.get_port()
        cls.INTA_edge2_port = cls.tester.get_port()
        cls.http_server11_port = cls.tester.get_port()
        cls.http_listener11_port = cls.tester.get_port()
        cls.http_server10_port = cls.tester.get_port()
        cls.http_listener10_port = cls.tester.get_port()
class Http1ClientCloseTestsMixIn:
"""
Generic test functions for simulating HTTP/1.x client connection drops.
"""
def client_request_close_test(self, server_port, client_port, server_mgmt):
"""
Simulate an HTTP client drop while sending a very large PUT request
"""
PING = {
"GET": [
(RequestMsg("GET", "/GET/test_04_client_request_close/ping",
headers={"Content-Length": "0"}),
ResponseMsg(200, reason="OK",
headers={
"Content-Length": "19",
"Content-Type": "text/plain;charset=utf-8",
},
body=b'END OF TRANSMISSION'),
ResponseValidator(status=200)
)]
}
TESTS = {
"PUT": [
(RequestMsg("PUT", "/PUT/test_04_client_request_close",
headers={
"Content-Length": "500000",
"Content-Type": "text/plain;charset=utf-8"
},
body=b'4' * (500000 - 19) + b'END OF TRANSMISSION'),
ResponseMsg(201, reason="Created",
headers={"Test-Header": "/PUT/test_04_client_request_close",
"Content-Length": "0"}),
ResponseValidator(status=201)
)]
}
TESTS.update(PING)
server = TestServer(server_port=server_port,
client_port=client_port,
tests=TESTS)
#
# ensure the server has fully connected
#
client = ThreadedTestClient(PING, client_port)
client.wait()
#
# Simulate an HTTP client that dies during the sending of the PUT
# request
#
fake_request = b'PUT /PUT/test_04_client_request_close HTTP/1.1\r\n' \
+ b'Content-Length: 500000\r\n' \
+ b'Content-Type: text/plain;charset=utf-8\r\n' \
+ b'\r\n' \
+ b'?' * 50000
fake_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
fake_client.settimeout(5)
fake_client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
fake_client.connect(("127.0.0.1", client_port))
fake_client.sendall(fake_request, socket.MSG_WAITALL)
fake_client.close()
# since socket I/O is asynchronous wait until the request arrives
# at the server
expected = len(fake_request)
bytes_in = 0
while expected > bytes_in:
ri = server_mgmt.query(type="org.apache.qpid.dispatch.httpRequestInfo").get_entities()
bytes_in = ri[-1]['bytesIn'] if ri else 0 # most recent request at tail
sleep(0.1)
# now ensure the connection between the router and the HTTP server
# still functions:
client = ThreadedTestClient(PING, client_port)
client.wait()
server.wait()
def client_response_close_test(self, server_port, client_port):
    """
    Simulate an HTTP client drop while the server is sending a very large
    response message.

    A PUT-based PING exchange proves the path before and after the fault;
    the fault is a raw-socket GET whose client reads a single byte of the
    (huge, multi-part) response and then closes the connection.
    """
    # Liveness probe: a small, complete PUT request/response pair.
    PING = {
        "PUT": [
            (RequestMsg("PUT", "/PUT/test_05_client_response_close/ping",
                        headers={"Content-Length": "1",
                                 "content-type":
                                 "text/plain;charset=utf-8"},
                        body=b'X'),
             ResponseMsg(201, reason="Created",
                         headers={"Content-Length": "0"}),
             ResponseValidator(status=201)
             )]
    }
    # Ten ~8KB headers, used to bloat the informational responses below.
    big_headers = dict([('Huge%s' % i, chr(ord(b'0') + i) * 8000)
                        for i in range(10)])
    # The GET elicits four large 100-Continue responses followed by a
    # 1MB 200 response -- far more than the fake client will ever read.
    TESTS = {
        "GET": [
            (RequestMsg("GET", "/GET/test_05_client_response_close",
                        headers={
                            "Content-Length": "0",
                            "Content-Type": "text/plain;charset=utf-8"
                        }),
             [ResponseMsg(100, reason="Continue", headers=big_headers),
              ResponseMsg(100, reason="Continue", headers=big_headers),
              ResponseMsg(100, reason="Continue", headers=big_headers),
              ResponseMsg(100, reason="Continue", headers=big_headers),
              ResponseMsg(200,
                          reason="OK",
                          headers={"Content-Length": 1000000,
                                   "Content-Type": "text/plain;charset=utf-8"},
                          body=b'?' * 1000000)],
             ResponseValidator(status=200)
             )]
    }
    TESTS.update(PING)
    server = TestServer(server_port=server_port,
                        client_port=client_port,
                        tests=TESTS)
    #
    # ensure the server has fully connected
    #
    client = ThreadedTestClient(PING, client_port)
    client.wait()
    #
    # Simulate an HTTP client that dies during the receipt of the
    # response: read one byte, then drop the connection.
    #
    fake_request = b'GET /GET/test_05_client_response_close HTTP/1.1\r\n' \
        + b'Content-Length: 0\r\n' \
        + b'Content-Type: text/plain;charset=utf-8\r\n' \
        + b'\r\n'
    fake_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    fake_client.settimeout(TIMEOUT)
    fake_client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    fake_client.connect(("127.0.0.1", client_port))
    fake_client.sendall(fake_request, socket.MSG_WAITALL)
    fake_client.recv(1)
    fake_client.close()
    #
    # Verify the server is still reachable
    #
    client = ThreadedTestClient(PING, client_port)
    client.wait()
    server.wait()
class Http1CurlTestsMixIn:
    """
    Test cases using curl as the command line client.

    Each test starts a scripted TestServer, drives it with one or more
    curl invocations, and asserts on curl's exit status and captured
    output. All tests are skipped when the locally installed curl is older
    than CURL_VERSION.
    """

    @unittest.skipIf(not _curl_ok(),
                     "required curl version >= %s" % str(CURL_VERSION))
    def curl_get_test(self, host, port, server_port):
        """
        Use curl to get a resource
        """
        # Scripted responses for the GET and HEAD endpoints curl will hit.
        CURL_TESTS = {
            "GET": [
                (RequestMsg("GET", "/GET/curl_get"),
                 ResponseMsg(200, reason="OK",
                             headers={
                                 "Content-Length": "19",
                                 "Content-Type": "text/plain;charset=utf-8",
                                 "Test-Header": "/GET/curl_get"
                             },
                             body=b'END OF TRANSMISSION'),
                 ResponseValidator())
            ],
            "HEAD": [
                (RequestMsg("HEAD", "/HEAD/curl_head",
                            headers={"Content-Length": "0"}),
                 ResponseMsg(200, headers={"App-Header-1": "Value 01",
                                           "Content-Length": "10",
                                           "App-Header-2": "Value 02"},
                             body=None),
                 ResponseValidator())
            ]
        }
        server = TestServer.new_server(server_port, port, CURL_TESTS)
        self.assertIsNotNone(server, TEST_SERVER_ERROR % server_port)
        get_url = "http://%s:%s/GET/curl_get" % (host, port)
        head_url = "http://%s:%s/HEAD/curl_head" % (host, port)
        # HTTP/1.1 GET
        status, out, err = run_curl(["--http1.1", "-G", get_url])
        self.assertEqual(0, status, "curl error '%s' '%s'" % (out, err))
        self.assertIn("END OF TRANSMISSION", out, "Unexpected out=%s (err=%s)"
                      % (out, err))
        # HTTP/1.1 HEAD (-I)
        status, out, err = run_curl(["--http1.1", "-I", head_url])
        self.assertEqual(0, status, "curl error '%s' '%s'" % (out, err))
        self.assertIn("App-Header-2", out, "Unexpected out=%s (err=%s)"
                      % (out, err))
        # HTTP/1.0 GET, then HTTP/1.1 again to confirm version switching works
        status, out, err = run_curl(["--http1.0", "-G", get_url])
        self.assertEqual(0, status, "curl error '%s' '%s'" % (out, err))
        self.assertIn("END OF TRANSMISSION", out, "Unexpected out=%s (err=%s)"
                      % (out, err))
        status, out, err = run_curl(["--http1.1", "-G", get_url])
        self.assertEqual(0, status, "curl error '%s' '%s'" % (out, err))
        self.assertIn("END OF TRANSMISSION", out, "Unexpected out=%s (err=%s)"
                      % (out, err))
        server.wait()

    @unittest.skipIf(not _curl_ok(),
                     "required curl version >= %s" % str(CURL_VERSION))
    def curl_put_test(self, host, port, server_port):
        """
        Use curl to PUT a resource
        """
        CURL_TESTS = {
            "PUT": [
                (RequestMsg("PUT", "/PUT/curl_put"),
                 ResponseMsg(201, reason="Created",
                             headers={
                                 "Test-Header": "/PUT/curl_put",
                                 "content-length": "0"
                             }),
                 ResponseValidator())
            ],
            "HEAD": [
                (RequestMsg("HEAD", "/HEAD/curl_head",
                            headers={"Content-Length": "0"}),
                 ResponseMsg(200, headers={"App-Header-1": "Value 01",
                                           "Content-Length": "10",
                                           "App-Header-2": "Value 02"},
                             body=None),
                 ResponseValidator())
            ]
        }
        server = TestServer.new_server(server_port, port, CURL_TESTS)
        self.assertIsNotNone(server, TEST_SERVER_ERROR % server_port)
        put_url = "http://%s:%s/PUT/curl_put" % (host, port)
        head_url = "http://%s:%s/HEAD/curl_head" % (host, port)
        # -T . uploads from stdin, supplied via the `input` keyword.
        status, out, err = run_curl(["--http1.1", "-T", ".", put_url],
                                    input="Mary had a little pug."
                                    "\nIts fleece was brown as dirt."
                                    "\nIts color made Mary shrug."
                                    "\nShe should dress it in a shirt.")
        self.assertEqual(0, status, "curl error '%s' '%s'" % (out, err))
        status, out, err = run_curl(["--http1.1", "-I", head_url])
        self.assertEqual(0, status, "curl error '%s' '%s'" % (out, err))
        self.assertIn("App-Header-2", out, "Unexpected out=%s (err=%s)"
                      % (out, err))
        # second PUT on the same connection path
        status, out, err = run_curl(["--http1.1", "-T", ".", put_url],
                                    input="Ph'nglui mglw'nafh Cthulhu"
                                    "\nR'lyeh wgah'nagl fhtagn")
        self.assertEqual(0, status, "curl error '%s' '%s'" % (out, err))
        server.wait()

    @unittest.skipIf(not _curl_ok(),
                     "required curl version >= %s" % str(CURL_VERSION))
    def curl_post_test(self, host, port, server_port):
        """
        Use curl to post to a resource
        """
        CURL_TESTS = {
            "POST": [
                (RequestMsg("POST", "/POST/curl_post"),
                 ResponseMsg(201, reason="Created",
                             headers={
                                 "Test-Header": "/POST/curl_put",
                                 "content-length": "19",
                                 "Content-Type": "text/plain;charset=utf-8",
                             },
                             body=b'END OF TRANSMISSION'),
                 ResponseValidator())
            ],
            "GET": [
                (RequestMsg("GET", "/GET/curl_get",
                            headers={"Content-Length": "0"}),
                 ResponseMsg(200, reason="OK",
                             headers={"App-Header-1": "Value 01",
                                      "Content-Length": "10",
                                      "App-Header-2": "Value 02"},
                             body=b'0123456789'),
                 ResponseValidator())
            ]
        }
        server = TestServer.new_server(server_port, port, CURL_TESTS)
        self.assertIsNotNone(server, TEST_SERVER_ERROR % server_port)
        post_url = "http://%s:%s/POST/curl_post" % (host, port)
        get_url = "http://%s:%s/GET/curl_get" % (host, port)
        # -F sends multipart/form-data
        status, out, err = run_curl(["--http1.1", "-F", "name=Skupper",
                                     "-F", "breed=Pug", post_url])
        self.assertEqual(0, status, "curl error '%s' '%s'" % (out, err))
        self.assertIn("END OF TRANSMISSION", out, "Unexpected out=%s (err=%s)"
                      % (out, err))
        status, out, err = run_curl(["--http1.1", "-G", get_url])
        self.assertEqual(0, status, "curl error '%s' '%s'" % (out, err))
        self.assertIn("0123456789", out, "Unexpected out=%s (err=%s)"
                      % (out, err))
        status, out, err = run_curl(["--http1.1", "-F", "name=Coco",
                                     "-F", "breed=French Bulldog",
                                     post_url])
        self.assertEqual(0, status, "curl error '%s' '%s'" % (out, err))
        self.assertIn("END OF TRANSMISSION", out, "Unexpected out=%s (err=%s)"
                      % (out, err))
        server.wait()
|
tasks.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
from collections import OrderedDict, namedtuple, deque
import errno
import functools
import importlib
import json
import logging
import os
import shutil
import stat
import tempfile
import time
import traceback
from distutils.dir_util import copy_tree
from distutils.version import LooseVersion as Version
import yaml
import fcntl
from pathlib import Path
from uuid import uuid4
import urllib.parse as urlparse
import socket
import threading
import concurrent.futures
from base64 import b64encode
import subprocess
import sys
# Django
from django.conf import settings
from django.db import transaction, DatabaseError, IntegrityError
from django.db.models.fields.related import ForeignKey
from django.utils.timezone import now
from django.utils.encoding import smart_str
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _, gettext_noop
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django_guid.middleware import GuidMiddleware
# Django-CRUM
from crum import impersonate
# GitPython
import git
from gitdb.exc import BadName as BadGitName
# Runner
import ansible_runner
# Receptor
from receptorctl.socket_interface import ReceptorControl
# AWX
from awx import __version__ as awx_application_version
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV, MINIMAL_EVENTS
from awx.main.access import access_registry
from awx.main.redact import UriCleaner
from awx.main.models import (
Schedule,
TowerScheduleState,
Instance,
InstanceGroup,
UnifiedJob,
Notification,
Inventory,
InventorySource,
SmartInventoryMembership,
Job,
AdHocCommand,
ProjectUpdate,
InventoryUpdate,
SystemJob,
JobEvent,
ProjectUpdateEvent,
InventoryUpdateEvent,
AdHocCommandEvent,
SystemJobEvent,
build_safe_env,
)
from awx.main.constants import ACTIVE_STATES
from awx.main.exceptions import AwxTaskError, PostRunError
from awx.main.queue import CallbackQueueDispatcher
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.utils import (
update_scm_url,
ignore_inventory_computed_fields,
ignore_inventory_group_removal,
extract_ansible_vars,
schedule_task_manager,
get_awx_version,
deepmerge,
parse_yaml_or_json,
cleanup_new_process,
)
from awx.main.utils.execution_environments import get_default_pod_spec, CONTAINER_ROOT, to_container_path
from awx.main.utils.ansible import read_ansible_config
from awx.main.utils.external_logging import reconfigure_rsyslog
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
from awx.main.utils.reload import stop_local_services
from awx.main.utils.pglock import advisory_lock
from awx.main.utils.handlers import SpecialInventoryHandler
from awx.main.consumers import emit_channel_notification
from awx.main import analytics
from awx.conf import settings_registry
from awx.conf.license import get_license
from awx.main.analytics.subsystem_metrics import Metrics
from rest_framework.exceptions import PermissionDenied
# Public task API of this module; the Run* entries are the task classes
# that execute the corresponding unified job types.
__all__ = [
    'RunJob',
    'RunSystemJob',
    'RunProjectUpdate',
    'RunInventoryUpdate',
    'RunAdHocCommand',
    'handle_work_error',
    'handle_work_success',
    'apply_cluster_membership_policies',
    'update_inventory_computed_fields',
    'update_host_smart_inventory_memberships',
    'send_notifications',
    'purge_old_stdout_files',
]

# Placeholder substituted for secret values in displayed/serialized args.
HIDDEN_PASSWORD = '**********'

# User-facing message shown when a private key is in OpenSSH format but the
# locally installed OpenSSH cannot parse it.
# (fixed grammar: "an different format" -> "a different format")
OPENSSH_KEY_ERROR = u'''\
It looks like you're trying to use a private key in OpenSSH format, which \
isn't supported by the installed version of OpenSSH on this instance. \
Try upgrading OpenSSH or providing your private key in a different format. \
'''

logger = logging.getLogger('awx.main.tasks')
class InvalidVirtualenvError(Exception):
    """Raised when a job's configured virtualenv is missing or unusable."""

    def __init__(self, message):
        # Call Exception.__init__ so str(e) and e.args carry the message;
        # keep the .message attribute for existing callers.
        super().__init__(message)
        self.message = message
def dispatch_startup():
    """
    One-time initialization run when the task dispatcher process starts:
    rebuild schedule computed fields, (re)register this instance, apply
    cluster policies, reset metrics, and sync rsyslog configuration.
    """
    startup_logger = logging.getLogger('awx.main.tasks')
    startup_logger.debug("Syncing Schedules")
    # Recompute each schedule individually; one bad schedule must not
    # prevent the rest from syncing.
    for sch in Schedule.objects.all():
        try:
            sch.update_computed_fields()
        except Exception:
            logger.exception("Failed to rebuild schedule {}.".format(sch))
    #
    # When the dispatcher starts, if the instance cannot be found in the database,
    # automatically register it. This is mostly useful for openshift-based
    # deployments where:
    #
    # 2 Instances come online
    # Instance B encounters a network blip, Instance A notices, and
    # deprovisions it
    # Instance B's connectivity is restored, the dispatcher starts, and it
    # re-registers itself
    #
    # In traditional container-less deployments, instances don't get
    # deprovisioned when they miss their heartbeat, so this code is mostly a
    # no-op.
    #
    apply_cluster_membership_policies()
    cluster_node_heartbeat()
    Metrics().clear_values()
    # Update Tower's rsyslog.conf file based on loggins settings in the db
    reconfigure_rsyslog()
def inform_cluster_of_shutdown():
    """
    Graceful-shutdown hook: zero out this instance's capacity so the task
    manager stops assigning it work, then reap any jobs it was running.
    All failures are logged but never propagated -- shutdown must proceed.
    """
    try:
        this_inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
        this_inst.capacity = 0  # No thank you to new jobs while shut down
        this_inst.save(update_fields=['capacity', 'modified'])
        # Reaping is best-effort; log and continue on any error.
        try:
            reaper.reap(this_inst)
        except Exception:
            logger.exception('failed to reap jobs for {}'.format(this_inst.hostname))
        logger.warning('Normal shutdown signal for instance {}, ' 'removed self from capacity pool.'.format(this_inst.hostname))
    except Exception:
        logger.exception('Encountered problem with normal shutdown signal.')
@task(queue=get_local_queuename)
def apply_cluster_membership_policies():
    """
    Recompute instance-group membership from each group's policies, in
    priority order: explicit policy_instance_list first, then
    policy_instance_minimum, then policy_instance_percentage. Runs under a
    cluster-wide advisory lock; the database is only written when the
    computed membership differs from the stored one.
    """
    started_waiting = time.time()
    with advisory_lock('cluster_policy_lock', wait=True):
        lock_time = time.time() - started_waiting
        # Escalate to INFO if the lock was contended for more than a second.
        if lock_time > 1.0:
            to_log = logger.info
        else:
            to_log = logger.debug
        to_log('Waited {} seconds to obtain lock name: cluster_policy_lock'.format(lock_time))
        started_compute = time.time()
        all_instances = list(Instance.objects.order_by('id'))
        all_groups = list(InstanceGroup.objects.prefetch_related('instances'))
        total_instances = len(all_instances)
        actual_groups = []
        actual_instances = []
        # Working copies: Group.instances is the membership being computed,
        # prior_instances is the membership currently stored in the DB.
        Group = namedtuple('Group', ['obj', 'instances', 'prior_instances'])
        Node = namedtuple('Instance', ['obj', 'groups'])
        # Process policy instance list first, these will represent manually managed memberships
        instance_hostnames_map = {inst.hostname: inst for inst in all_instances}
        for ig in all_groups:
            group_actual = Group(obj=ig, instances=[], prior_instances=[instance.pk for instance in ig.instances.all()])  # obtained in prefetch
            for hostname in ig.policy_instance_list:
                if hostname not in instance_hostnames_map:
                    logger.info("Unknown instance {} in {} policy list".format(hostname, ig.name))
                    continue
                inst = instance_hostnames_map[hostname]
                group_actual.instances.append(inst.id)
                # NOTE: arguable behavior: policy-list-group is not added to
                # instance's group count for consideration in minimum-policy rules
            if group_actual.instances:
                logger.debug("Policy List, adding Instances {} to Group {}".format(group_actual.instances, ig.name))
            actual_groups.append(group_actual)
        # Process Instance minimum policies next, since it represents a concrete lower bound to the
        # number of instances to make available to instance groups
        actual_instances = [Node(obj=i, groups=[]) for i in all_instances if i.managed_by_policy]
        logger.debug("Total instances: {}, available for policy: {}".format(total_instances, len(actual_instances)))
        # Groups with the fewest members are filled first; within a group,
        # instances that belong to the fewest groups are picked first.
        for g in sorted(actual_groups, key=lambda x: len(x.instances)):
            policy_min_added = []
            for i in sorted(actual_instances, key=lambda x: len(x.groups)):
                if len(g.instances) >= g.obj.policy_instance_minimum:
                    break
                if i.obj.id in g.instances:
                    # If the instance is already _in_ the group, it was
                    # applied earlier via the policy list
                    continue
                g.instances.append(i.obj.id)
                i.groups.append(g.obj.id)
                policy_min_added.append(i.obj.id)
            if policy_min_added:
                logger.debug("Policy minimum, adding Instances {} to Group {}".format(policy_min_added, g.obj.name))
        # Finally, process instance policy percentages
        for g in sorted(actual_groups, key=lambda x: len(x.instances)):
            policy_per_added = []
            for i in sorted(actual_instances, key=lambda x: len(x.groups)):
                if i.obj.id in g.instances:
                    # If the instance is already _in_ the group, it was
                    # applied earlier via a minimum policy or policy list
                    continue
                if 100 * float(len(g.instances)) / len(actual_instances) >= g.obj.policy_instance_percentage:
                    break
                g.instances.append(i.obj.id)
                i.groups.append(g.obj.id)
                policy_per_added.append(i.obj.id)
            if policy_per_added:
                logger.debug("Policy percentage, adding Instances {} to Group {}".format(policy_per_added, g.obj.name))
        # Determine if any changes need to be made
        needs_change = False
        for g in actual_groups:
            if set(g.instances) != set(g.prior_instances):
                needs_change = True
                break
        if not needs_change:
            logger.debug('Cluster policy no-op finished in {} seconds'.format(time.time() - started_compute))
            return
        # On a differential basis, apply instances to groups
        with transaction.atomic():
            for g in actual_groups:
                if g.obj.is_container_group:
                    # Container groups are scheduled on k8s, not on
                    # cluster instances -- never rewrite their membership.
                    logger.debug('Skipping containerized group {} for policy calculation'.format(g.obj.name))
                    continue
                instances_to_add = set(g.instances) - set(g.prior_instances)
                instances_to_remove = set(g.prior_instances) - set(g.instances)
                if instances_to_add:
                    logger.debug('Adding instances {} to group {}'.format(list(instances_to_add), g.obj.name))
                    g.obj.instances.add(*instances_to_add)
                if instances_to_remove:
                    logger.debug('Removing instances {} from group {}'.format(list(instances_to_remove), g.obj.name))
                    g.obj.instances.remove(*instances_to_remove)
        logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
@task(queue='tower_broadcast_all')
def handle_setting_changes(setting_keys):
    """
    Broadcast handler run on every node when settings change: invalidate
    the cache entries for the changed keys plus their direct dependents,
    and re-sync rsyslog when any logging setting is involved.
    """
    # Expand with direct dependents. Iterate over a snapshot so the keys
    # appended here are not themselves expanded (one-level expansion of the
    # original keys only), while still mutating the caller's list.
    for changed_key in list(setting_keys):
        setting_keys.extend(settings_registry.get_dependent_settings(changed_key))
    cache_keys = set(setting_keys)
    logger.debug('cache delete_many(%r)', cache_keys)
    cache.delete_many(cache_keys)
    # External log aggregation is configured via rsyslog, so those settings
    # require a config rewrite in addition to the cache flush.
    if any(key.startswith('LOG_AGGREGATOR') for key in setting_keys):
        reconfigure_rsyslog()
@task(queue='tower_broadcast_all')
def delete_project_files(project_path):
    """
    Broadcast task: remove a project's checkout directory and its
    companion .lock file on every node. Failures are logged, not raised.
    """
    # TODO: possibly implement some retry logic
    lock_file = project_path + '.lock'
    if os.path.exists(project_path):
        try:
            shutil.rmtree(project_path)
            logger.debug('Success removing project files {}'.format(project_path))
        except Exception:
            logger.exception('Could not remove project directory {}'.format(project_path))
    if os.path.exists(lock_file):
        try:
            os.remove(lock_file)
            logger.debug('Success removing {}'.format(lock_file))
        except Exception:
            logger.exception('Could not remove lock file {}'.format(lock_file))
@task(queue='tower_broadcast_all')
def profile_sql(threshold=1, minutes=1):
    """
    Toggle cluster-wide SQL profiling. A positive threshold enables
    logging of queries taking >= threshold seconds for `minutes` minutes;
    a threshold <= 0 disables profiling immediately.
    """
    if threshold > 0:
        cache.set('awx-profile-sql-threshold', threshold, timeout=minutes * 60)
        logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))
    else:
        cache.delete('awx-profile-sql-threshold')
        logger.error('SQL PROFILING DISABLED')
@task(queue=get_local_queuename)
def send_notifications(notification_list, job_id=None):
    """
    Deliver a batch of Notification records, optionally associating them
    with a unified job. Each notification's status/error is persisted
    individually; one failed delivery does not stop the rest.

    notification_list: list of Notification primary keys.
    job_id: optional UnifiedJob pk to attach the notifications to.

    Raises TypeError when notification_list is not a list.
    """
    if not isinstance(notification_list, list):
        raise TypeError("notification_list should be of type list")
    if job_id is not None:
        job_actual = UnifiedJob.objects.get(id=job_id)
    notifications = Notification.objects.filter(id__in=notification_list)
    if job_id is not None:
        job_actual.notifications.add(*notifications)
    for notification in notifications:
        update_fields = ['status', 'notifications_sent']
        try:
            sent = notification.notification_template.send(notification.subject, notification.body)
            notification.status = "successful"
            notification.notifications_sent = sent
            if job_id is not None:
                job_actual.log_lifecycle("notifications_sent")
        except Exception as e:
            logger.exception("Send Notification Failed {}".format(e))
            notification.status = "failed"
            notification.error = smart_str(e)
            update_fields.append('error')
        finally:
            # Persist the outcome even when delivery raised; saving itself
            # may fail, which is logged and swallowed.
            try:
                notification.save(update_fields=update_fields)
            except Exception:
                logger.exception('Error saving notification {} result.'.format(notification.id))
@task(queue=get_local_queuename)
def gather_analytics():
    """
    Periodic task: run an automation-analytics gather when the configured
    interval has elapsed since the last recorded gather (or when no gather
    has ever been recorded).
    """
    from awx.conf.models import Setting
    from rest_framework.fields import DateTimeField

    last_gather = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first()
    if last_gather and last_gather.value:
        last_time = DateTimeField().to_internal_value(last_gather.value)
    else:
        last_time = None
    gather_time = now()
    # Skip only when a previous gather exists and the interval has not
    # yet elapsed.
    if last_time and (gather_time - last_time).total_seconds() <= settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL:
        return
    analytics.gather()
@task(queue=get_local_queuename)
def purge_old_stdout_files():
    """
    Periodic task: delete job stdout capture files in JOBOUTPUT_ROOT whose
    creation time is older than LOCAL_STDOUT_EXPIRE_TIME seconds.
    """
    cutoff = time.time() - settings.LOCAL_STDOUT_EXPIRE_TIME
    for name in os.listdir(settings.JOBOUTPUT_ROOT):
        path = os.path.join(settings.JOBOUTPUT_ROOT, name)
        if os.path.getctime(path) < cutoff:
            os.unlink(path)
            logger.debug("Removing {}".format(path))
@task(queue=get_local_queuename)
def cleanup_execution_environment_images():
    """
    Periodic task: remove dangling (untagged) podman images left behind by
    execution environments. No-op on Kubernetes, where the cluster manages
    image lifecycle. All podman failures are logged at debug and ignored.
    """
    if settings.IS_K8S:
        return
    # NOTE: split(" ") keeps the literal quotes in '--filter="dangling=true"'
    # as part of the argv element (no shell is involved to strip them).
    process = subprocess.run('podman images --filter="dangling=true" --format json'.split(" "), capture_output=True)
    if process.returncode != 0:
        logger.debug("Cleanup execution environment images: could not get list of images")
        return
    if len(process.stdout) > 0:
        images_system = json.loads(process.stdout)
        for e in images_system:
            image_name = e["Id"]
            logger.debug(f"Cleanup execution environment images: deleting {image_name}")
            process = subprocess.run(['podman', 'rmi', image_name, '-f'], stdout=subprocess.DEVNULL)
            if process.returncode != 0:
                logger.debug(f"Failed to delete image {image_name}")
@task(queue=get_local_queuename)
def cluster_node_heartbeat():
    """
    Periodic cluster health check run on every node:
    - (re)register this instance and refresh its capacity;
    - shut down local services if any peer runs a newer AWX version;
    - mark instances that missed their heartbeat as lost (capacity 0) or
      auto-deprovision them, and reap their orphaned jobs.

    Raises RuntimeError when this host is missing from the Instance table,
    or (deliberately) after stopping services on version mismatch.
    """
    logger.debug("Cluster node heartbeat task.")
    nowtime = now()
    instance_list = list(Instance.objects.all())
    this_inst = None
    lost_instances = []
    (changed, instance) = Instance.objects.get_or_register()
    if changed:
        logger.info("Registered tower node '{}'".format(instance.hostname))
    # Partition the instance list: pull out this host and any lost peers.
    for inst in list(instance_list):
        if inst.hostname == settings.CLUSTER_HOST_ID:
            this_inst = inst
            instance_list.remove(inst)
        elif inst.is_lost(ref_time=nowtime):
            lost_instances.append(inst)
            instance_list.remove(inst)
    if this_inst:
        # If we ourselves were considered lost, this heartbeat is a
        # rejoin -- refresh capacity and skip peer processing this cycle.
        startup_event = this_inst.is_lost(ref_time=nowtime)
        this_inst.refresh_capacity()
        if startup_event:
            logger.warning('Rejoining the cluster as instance {}.'.format(this_inst.hostname))
            return
    else:
        raise RuntimeError("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
    # IFF any node has a greater version than we do, then we'll shutdown services
    for other_inst in instance_list:
        if other_inst.version == "":
            continue
        # Compare only the part before any '-' suffix (e.g. '-devel').
        if Version(other_inst.version.split('-', 1)[0]) > Version(awx_application_version.split('-', 1)[0]) and not settings.DEBUG:
            logger.error(
                "Host {} reports version {}, but this node {} is at {}, shutting down".format(
                    other_inst.hostname, other_inst.version, this_inst.hostname, this_inst.version
                )
            )
            # Shutdown signal will set the capacity to zero to ensure no Jobs get added to this instance.
            # The heartbeat task will reset the capacity to the system capacity after upgrade.
            stop_local_services(communicate=False)
            raise RuntimeError("Shutting down.")
    for other_inst in lost_instances:
        try:
            reaper.reap(other_inst)
        except Exception:
            logger.exception('failed to reap jobs for {}'.format(other_inst.hostname))
        try:
            # Capacity could already be 0 because:
            # * It's a new node and it never had a heartbeat
            # * It was set to 0 by another tower node running this method
            # * It was set to 0 by this node, but auto deprovisioning is off
            #
            # If auto deprovisining is on, don't bother setting the capacity to 0
            # since we will delete the node anyway.
            if other_inst.capacity != 0 and not settings.AWX_AUTO_DEPROVISION_INSTANCES:
                other_inst.capacity = 0
                other_inst.save(update_fields=['capacity'])
                logger.error("Host {} last checked in at {}, marked as lost.".format(other_inst.hostname, other_inst.modified))
            elif settings.AWX_AUTO_DEPROVISION_INSTANCES:
                deprovision_hostname = other_inst.hostname
                other_inst.delete()
                logger.info("Host {} Automatically Deprovisioned.".format(deprovision_hostname))
        except DatabaseError as e:
            # A save that affected no rows means another node already
            # marked/removed this instance concurrently.
            if 'did not affect any rows' in str(e):
                logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname))
            else:
                logger.exception('Error marking {} as lost'.format(other_inst.hostname))
@task(queue=get_local_queuename)
def awx_k8s_reaper():
    """
    Periodic task: delete Kubernetes pods belonging to container-group jobs
    that are no longer in an active state. Gated on RECEPTOR_RELEASE_WORK.
    """
    if not settings.RECEPTOR_RELEASE_WORK:
        return
    from awx.main.scheduler.kubernetes import PodManager  # prevent circular import

    for group in InstanceGroup.objects.filter(is_container_group=True).iterator():
        logger.debug("Checking for orphaned k8s pods for {}.".format(group))
        pods = PodManager.list_active_jobs(group)
        # Any pod whose job has left ACTIVE_STATES is an orphan.
        for job in UnifiedJob.objects.filter(pk__in=pods.keys()).exclude(status__in=ACTIVE_STATES):
            logger.debug('{} is no longer active, reaping orphaned k8s pod'.format(job.log_format))
            try:
                pm = PodManager(job)
                pm.kube_api.delete_namespaced_pod(name=pods[job.id], namespace=pm.namespace, _request_timeout=settings.AWX_CONTAINER_GROUP_K8S_API_TIMEOUT)
            except Exception:
                logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))
@task(queue=get_local_queuename)
def awx_periodic_scheduler():
    """
    Periodic task: launch jobs for every enabled Schedule whose fire time
    falls between the last scheduler run and now. Only one node runs this
    at a time (non-blocking advisory lock). Launches that occur while the
    license is invalid are created, immediately failed, and annotated.
    """
    with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
        if acquired is False:
            logger.debug("Not running periodic scheduler, another task holds lock")
            return
        logger.debug("Starting periodic scheduler")

        run_now = now()
        state = TowerScheduleState.get_solo()
        last_run = state.schedule_last_run
        logger.debug("Last scheduler run was: %s", last_run)
        # Record this run up-front so a crash mid-loop does not re-fire
        # the same schedules next time.
        state.schedule_last_run = run_now
        state.save()

        # Schedules whose window already passed just get their next_run
        # recomputed; they are not launched.
        old_schedules = Schedule.objects.enabled().before(last_run)
        for schedule in old_schedules:
            schedule.update_computed_fields()
        schedules = Schedule.objects.enabled().between(last_run, run_now)

        invalid_license = False
        try:
            access_registry[Job](None).check_license(quiet=True)
        except PermissionDenied as e:
            invalid_license = e

        for schedule in schedules:
            template = schedule.unified_job_template
            schedule.update_computed_fields()  # To update next_run timestamp.
            if template.cache_timeout_blocked:
                logger.warn("Cache timeout is in the future, bypassing schedule for template %s" % str(template.id))
                continue
            try:
                job_kwargs = schedule.get_job_kwargs()
                new_unified_job = schedule.unified_job_template.create_unified_job(**job_kwargs)
                logger.debug('Spawned {} from schedule {}-{}.'.format(new_unified_job.log_format, schedule.name, schedule.pk))
                if invalid_license:
                    # Job is created but immediately failed so the license
                    # problem is visible in the job list.
                    new_unified_job.status = 'failed'
                    new_unified_job.job_explanation = str(invalid_license)
                    new_unified_job.save(update_fields=['status', 'job_explanation'])
                    new_unified_job.websocket_emit_status("failed")
                    raise invalid_license
                can_start = new_unified_job.signal_start()
            except Exception:
                logger.exception('Error spawning scheduled job.')
                continue
            if not can_start:
                new_unified_job.status = 'failed'
                new_unified_job.job_explanation = gettext_noop(
                    "Scheduled job could not start because it \
                was not in the right state or required manual credentials"
                )
                new_unified_job.save(update_fields=['status', 'job_explanation'])
                new_unified_job.websocket_emit_status("failed")
            emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
        state.save()
@task(queue=get_local_queuename)
def handle_work_success(task_actual):
    """
    Callback run after a unified job finishes successfully: nudge the task
    manager so dependent work can be scheduled. Missing jobs are logged
    and ignored.

    task_actual: dict with 'type' and 'id' identifying the finished job.
    """
    try:
        instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
    except ObjectDoesNotExist:
        logger.warning('Missing {} `{}` in success callback.'.format(task_actual['type'], task_actual['id']))
        return
    if instance:
        schedule_task_manager()
@task(queue=get_local_queuename)
def handle_work_error(task_id, *args, **kwargs):
    """
    Callback run when a job in a dependency chain fails: mark every other
    job in the chain as failed (unless it was cancelled, already succeeded,
    or is the failing job itself), pointing its job_explanation at the
    first job in the chain, then nudge the task manager.

    task_id: celery task id of the job that failed.
    kwargs['subtasks']: optional list of {'type': ..., 'id': ...} dicts
    describing the jobs in the dependency chain.
    """
    subtasks = kwargs.get('subtasks', None)
    logger.debug('Executing error task id %s, subtasks: %s' % (task_id, str(subtasks)))
    first_instance = None
    first_instance_type = ''
    if subtasks is not None:
        for each_task in subtasks:
            try:
                instance = UnifiedJob.get_instance_by_type(each_task['type'], each_task['id'])
                if not instance:
                    # Unknown task type
                    # (logger.warn is a deprecated alias; use warning)
                    logger.warning("Unknown task type: {}".format(each_task['type']))
                    continue
            except ObjectDoesNotExist:
                logger.warning('Missing {} `{}` in error callback.'.format(each_task['type'], each_task['id']))
                continue
            if first_instance is None:
                first_instance = instance
                first_instance_type = each_task['type']
            # Skip the failing job itself, cancelled jobs, and jobs that
            # already completed successfully.
            if instance.celery_task_id != task_id and not instance.cancel_flag and not instance.status == 'successful':
                instance.status = 'failed'
                instance.failed = True
                if not instance.job_explanation:
                    instance.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
                        first_instance_type,
                        first_instance.name,
                        first_instance.id,
                    )
                instance.save()
                instance.websocket_emit_status("failed")
    # We only send 1 job complete message since all the job completion message
    # handling does is trigger the scheduler. If we extend the functionality of
    # what the job complete message handler does then we may want to send a
    # completion event for each job here.
    if first_instance:
        schedule_task_manager()
@task(queue=get_local_queuename)
def handle_success_and_failure_notifications(job_id):
    """
    Send 'succeeded'/'failed' notification templates for a unified job,
    polling up to 5 times (1s apart) for the job to reach a finished state
    first. Gives up with a warning if the job never finishes in time.
    """
    uj = UnifiedJob.objects.get(pk=job_id)
    retries = 0
    while retries < 5:
        if uj.finished:
            uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
            return
        # wait a few seconds to avoid a race where the
        # events are persisted _before_ the UJ.status
        # changes from running -> successful
        retries += 1
        time.sleep(1)
        uj = UnifiedJob.objects.get(pk=job_id)

    # logger.warn is a deprecated alias for logger.warning
    logger.warning(f"Failed to even try to send notifications for job '{uj}' due to job not being in finished state.")
@task(queue=get_local_queuename)
def update_inventory_computed_fields(inventory_id):
    """
    Signal handler and wrapper around inventory.update_computed_fields to
    prevent unnecessary recursive calls.
    """
    i = Inventory.objects.filter(id=inventory_id)
    if not i.exists():
        logger.error("Update Inventory Computed Fields failed due to missing inventory: " + str(inventory_id))
        return
    i = i[0]
    try:
        i.update_computed_fields()
    except DatabaseError as e:
        # 'did not affect any rows' means another process performed this
        # same update concurrently -- treat as a benign duplicate.
        if 'did not affect any rows' in str(e):
            logger.debug('Exiting duplicate update_inventory_computed_fields task.')
            return
        raise
def update_smart_memberships_for_inventory(smart_inventory):
    """
    Reconcile the cached SmartInventoryMembership rows for one smart
    inventory with the hosts its host_filter currently matches.

    Returns True when any membership row was added or removed, False when
    the cache was already up to date.
    """
    existing = set(SmartInventoryMembership.objects.filter(inventory=smart_inventory).values_list('host_id', flat=True))
    desired = set(smart_inventory.hosts.values_list('id', flat=True))
    additions = desired - existing
    removals = existing - desired
    if not additions and not removals:
        return False
    # Apply both deltas in a single transaction so readers never observe a
    # half-updated membership cache.
    with transaction.atomic():
        if removals:
            SmartInventoryMembership.objects.filter(inventory=smart_inventory, host_id__in=removals).delete()
        if additions:
            memberships = [SmartInventoryMembership(inventory_id=smart_inventory.id, host_id=host_id) for host_id in additions]
            SmartInventoryMembership.objects.bulk_create(memberships, ignore_conflicts=True)
    logger.debug(
        'Smart host membership cached for {}, {} additions, {} removals, {} total count.'.format(
            smart_inventory.pk, len(additions), len(removals), len(desired)
        )
    )
    return True  # changed
@task(queue=get_local_queuename)
def update_host_smart_inventory_memberships():
    """
    Periodic task: refresh the cached host membership of every active smart
    inventory, then recompute computed fields for the ones that changed.
    """
    changed_inventories = set()
    candidates = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
    for smart_inventory in candidates:
        try:
            if update_smart_memberships_for_inventory(smart_inventory):
                changed_inventories.add(smart_inventory)
        except IntegrityError:
            logger.exception('Failed to update smart inventory memberships for {}'.format(smart_inventory.pk))
    # Update computed fields for changed inventories outside atomic action
    for smart_inventory in changed_inventories:
        smart_inventory.update_computed_fields()
@task(queue=get_local_queuename)
def delete_inventory(inventory_id, user_id, retries=5):
    """
    Delete an inventory on behalf of a user (activity stream attribution
    via impersonate), retrying up to `retries` more times, 10 seconds
    apart, on transient database errors.

    inventory_id: pk of the Inventory to delete.
    user_id: pk of the requesting User, or None for an anonymous delete.
    """
    # Delete inventory as user
    if user_id is None:
        user = None
    else:
        try:
            user = User.objects.get(id=user_id)
        except Exception:
            user = None
    with ignore_inventory_computed_fields(), ignore_inventory_group_removal(), impersonate(user):
        try:
            i = Inventory.objects.get(id=inventory_id)
            # Detach job events from hosts first so the delete does not
            # cascade through the (potentially huge) event tables.
            for host in i.hosts.iterator():
                host.job_events_as_primary_host.update(host=None)
            i.delete()
            emit_channel_notification('inventories-status_changed', {'group_name': 'inventories', 'inventory_id': inventory_id, 'status': 'deleted'})
            logger.debug('Deleted inventory {} as user {}.'.format(inventory_id, user_id))
        except Inventory.DoesNotExist:
            logger.exception("Delete Inventory failed due to missing inventory: " + str(inventory_id))
            return
        except DatabaseError:
            logger.exception('Database error deleting inventory {}, but will retry.'.format(inventory_id))
            if retries > 0:
                time.sleep(10)
                # Recursive retry with a decremented budget.
                delete_inventory(inventory_id, user_id, retries=retries - 1)
def with_path_cleanup(f):
    """
    Decorator for task-run methods: after the wrapped call returns or
    raises, best-effort delete every filesystem path queued on
    self.cleanup_paths, then reset the list.
    """
    def _remove_paths(paths):
        # Directories are removed recursively; plain files unlinked.
        for path in paths:
            try:
                if os.path.isdir(path):
                    shutil.rmtree(path, ignore_errors=True)
                elif os.path.exists(path):
                    os.remove(path)
            except OSError:
                logger.exception("Failed to remove tmp file: {}".format(path))

    @functools.wraps(f)
    def _wrapped(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        finally:
            _remove_paths(self.cleanup_paths)
            self.cleanup_paths = []
    return _wrapped
class BaseTask(object):
    """Abstract base for task runners (jobs, project updates, inventory
    updates, ...).

    Subclasses set ``model`` / ``event_model`` / ``event_data_key`` and
    override the ``build_*`` hooks; :meth:`run` drives the full lifecycle:
    prepare a private data dir, build args/env/inventory, hand off to
    ansible-runner (or receptor), and persist final status.
    """

    # Filled in by concrete subclasses (e.g. Job / JobEvent).
    model = None
    event_model = None
    abstract = True

    def __init__(self):
        # Paths removed by the @with_path_cleanup decorator after run().
        self.cleanup_paths = []
        self.parent_workflow_job_id = None
        # host name -> host id, populated by build_inventory(); used by
        # event_handler() to attach events to Host records.
        self.host_map = {}
        self.guid = GuidMiddleware.get_guid()
        self.job_created = None
        # Sliding window of websocket emit timestamps used for rate limiting.
        self.recent_event_timings = deque(maxlen=settings.MAX_WEBSOCKET_EVENT_RATE)

    def update_model(self, pk, _attempt=0, **updates):
        """Reload the model instance from the database and update the
        given fields.

        Retries up to 5 times (5s apart) on DatabaseError.  Setting
        ``status`` also forces the ``failed`` field into the update set.
        Returns the refreshed instance, or None if all retries fail.
        """
        try:
            with transaction.atomic():
                # Retrieve the model instance.
                instance = self.model.objects.get(pk=pk)

                # Update the appropriate fields and save the model
                # instance, then return the new instance.
                if updates:
                    update_fields = ['modified']
                    for field, value in updates.items():
                        setattr(instance, field, value)
                        update_fields.append(field)
                        if field == 'status':
                            update_fields.append('failed')
                    instance.save(update_fields=update_fields)
                return instance
        except DatabaseError as e:
            # Log out the error to the debug logger.
            logger.debug('Database error updating %s, retrying in 5 ' 'seconds (retry #%d): %s', self.model._meta.object_name, _attempt + 1, e)

            # Attempt to retry the update, assuming we haven't already
            # tried too many times.
            if _attempt < 5:
                time.sleep(5)
                return self.update_model(pk, _attempt=_attempt + 1, **updates)
            else:
                logger.error('Failed to update %s after %d retries.', self.model._meta.object_name, _attempt)

    def get_path_to(self, *args):
        """
        Return absolute path relative to this file.
        """
        return os.path.abspath(os.path.join(os.path.dirname(__file__), *args))

    def build_execution_environment_params(self, instance, private_data_dir):
        """Build ansible-runner container parameters for the instance's
        execution environment (image, registry auth file, pull policy,
        volume mounts).  Returns {} on Kubernetes, where the container is
        managed externally."""
        if settings.IS_K8S:
            return {}

        image = instance.execution_environment.image
        params = {
            "container_image": image,
            "process_isolation": True,
            "container_options": ['--user=root'],
        }

        if instance.execution_environment.credential:
            cred = instance.execution_environment.credential
            if cred.has_inputs(field_names=('host', 'username', 'password')):
                # Write a podman-style auth.json next to the private data dir
                # and pass it via --authfile so the image pull can authenticate.
                path = os.path.split(private_data_dir)[0]
                with open(path + '/auth.json', 'w') as authfile:
                    os.chmod(authfile.name, stat.S_IRUSR | stat.S_IWUSR)
                    host = cred.get_input('host')
                    username = cred.get_input('username')
                    password = cred.get_input('password')
                    token = "{}:{}".format(username, password)
                    auth_data = {'auths': {host: {'auth': b64encode(token.encode('UTF-8')).decode('UTF-8')}}}
                    authfile.write(json.dumps(auth_data, indent=4))
                params["container_options"].append(f'--authfile={authfile.name}')
            else:
                raise RuntimeError('Please recheck that your host, username, and password fields are all filled.')

        pull = instance.execution_environment.pull
        if pull:
            params['container_options'].append(f'--pull={pull}')

        if settings.AWX_ISOLATION_SHOW_PATHS:
            params['container_volume_mounts'] = []
            for this_path in settings.AWX_ISOLATION_SHOW_PATHS:
                # Using z allows the dir to mounted by multiple containers
                # Uppercase Z restricts access (in weird ways) to 1 container at a time
                params['container_volume_mounts'].append(f'{this_path}:{this_path}:z')
        return params

    def build_private_data(self, instance, private_data_dir):
        """
        Return SSH private key data (only if stored in DB as ssh_key_data).
        Return structure is a dict of the form:

        Base implementation returns None (no private data); see subclass
        overrides for the expected 'credentials'/'certificates' layout.
        """

    def build_private_data_dir(self, instance):
        """
        Create a temporary directory for job-related files.

        A wrapper dir (pdd_wrapper_<pk>_*) holds the actual runner dir
        (awx_<pk>_*) plus side files like auth.json; the wrapper is
        registered for cleanup when AWX_CLEANUP_PATHS is set.  Returns the
        inner runner directory path.
        """
        pdd_wrapper_path = tempfile.mkdtemp(prefix=f'pdd_wrapper_{instance.pk}_', dir=settings.AWX_ISOLATION_BASE_PATH)
        os.chmod(pdd_wrapper_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
        if settings.AWX_CLEANUP_PATHS:
            self.cleanup_paths.append(pdd_wrapper_path)

        path = tempfile.mkdtemp(prefix='awx_%s_' % instance.pk, dir=pdd_wrapper_path)
        os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)

        # Ansible runner requires that project exists,
        # and we will write files in the other folders without pre-creating the folder
        for subfolder in ('project', 'inventory', 'env'):
            runner_subfolder = os.path.join(path, subfolder)
            if not os.path.exists(runner_subfolder):
                os.mkdir(runner_subfolder)
        return path

    def build_private_data_files(self, instance, private_data_dir):
        """
        Creates temporary files containing the private data.
        Returns a dictionary i.e.,

        {
            'credentials': {
                <awx.main.models.Credential>: '/path/to/decrypted/data',
                <awx.main.models.Credential>: '/path/to/decrypted/data',
                ...
            },
            'certificates': {
                <awx.main.models.Credential>: /path/to/signed/ssh/certificate,
                <awx.main.models.Credential>: /path/to/signed/ssh/certificate,
                ...
            }
        }
        """
        private_data = self.build_private_data(instance, private_data_dir)
        private_data_files = {'credentials': {}}
        if private_data is not None:
            for credential, data in private_data.get('credentials', {}).items():
                # OpenSSH formatted keys must have a trailing newline to be
                # accepted by ssh-add.
                if 'OPENSSH PRIVATE KEY' in data and not data.endswith('\n'):
                    data += '\n'
                # For credentials used with ssh-add, write to a named pipe which
                # will be read then closed, instead of leaving the SSH key on disk.
                if credential and credential.credential_type.namespace in ('ssh', 'scm'):
                    try:
                        os.mkdir(os.path.join(private_data_dir, 'env'))
                    except OSError as e:
                        if e.errno != errno.EEXIST:
                            raise
                    path = os.path.join(private_data_dir, 'env', 'ssh_key')
                    ansible_runner.utils.open_fifo_write(path, data.encode())
                    private_data_files['credentials']['ssh'] = path
                # Ansible network modules do not yet support ssh-agent.
                # Instead, ssh private key file is explicitly passed via an
                # env variable.
                else:
                    handle, path = tempfile.mkstemp(dir=os.path.join(private_data_dir, 'env'))
                    f = os.fdopen(handle, 'w')
                    f.write(data)
                    f.close()
                    os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
                    private_data_files['credentials'][credential] = path
            for credential, data in private_data.get('certificates', {}).items():
                artifact_dir = os.path.join(private_data_dir, 'artifacts', str(self.instance.id))
                if not os.path.exists(artifact_dir):
                    os.makedirs(artifact_dir, mode=0o700)
                path = os.path.join(artifact_dir, 'ssh_key_data-cert.pub')
                with open(path, 'w') as f:
                    f.write(data)
                    f.close()
                os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
        return private_data_files

    def build_passwords(self, instance, runtime_passwords):
        """
        Build a dictionary of passwords for responding to prompts.
        """
        return {
            'yes': 'yes',
            'no': 'no',
            '': '',
        }

    def build_extra_vars_file(self, instance, private_data_dir):
        """
        Build ansible yaml file filled with extra vars to be passed via -e@file.yml

        Base implementation is a no-op; subclasses call
        _write_extra_vars_file() with their own vars.
        """

    def _write_extra_vars_file(self, private_data_dir, vars, safe_dict={}):
        """Serialize *vars* to <private_data_dir>/env/extravars and return the
        path.  Unless ALLOW_JINJA_IN_EXTRA_VARS == 'always', values are dumped
        through safe_dump() which sanitizes Jinja except for keys whitelisted
        in *safe_dict*."""
        env_path = os.path.join(private_data_dir, 'env')
        try:
            os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        path = os.path.join(env_path, 'extravars')
        handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
        f = os.fdopen(handle, 'w')
        if settings.ALLOW_JINJA_IN_EXTRA_VARS == 'always':
            f.write(yaml.safe_dump(vars))
        else:
            f.write(safe_dump(vars, safe_dict))
        f.close()
        os.chmod(path, stat.S_IRUSR)
        return path

    def add_awx_venv(self, env):
        """Point VIRTUAL_ENV/PATH in *env* at the AWX virtualenv (in place)."""
        env['VIRTUAL_ENV'] = settings.AWX_VENV_PATH
        if 'PATH' in env:
            env['PATH'] = os.path.join(settings.AWX_VENV_PATH, "bin") + ":" + env['PATH']
        else:
            env['PATH'] = os.path.join(settings.AWX_VENV_PATH, "bin")

    def build_env(self, instance, private_data_dir, private_data_files=None):
        """
        Build environment dictionary for ansible-playbook.

        Includes all ANSIBLE_* settings, AWX_TASK_ENV overrides, and — when
        the execution environment credential disables SSL verification — a
        registries.conf marking the registry insecure.
        """
        env = {}
        # Add ANSIBLE_* settings to the subprocess environment.
        for attr in dir(settings):
            if attr == attr.upper() and attr.startswith('ANSIBLE_'):
                env[attr] = str(getattr(settings, attr))
        # Also set environment variables configured in AWX_TASK_ENV setting.
        for key, value in settings.AWX_TASK_ENV.items():
            env[key] = str(value)

        env['AWX_PRIVATE_DATA_DIR'] = private_data_dir

        if self.instance.execution_environment is None:
            raise RuntimeError('The project could not sync because there is no Execution Environment.')

        ee_cred = self.instance.execution_environment.credential
        if ee_cred:
            verify_ssl = ee_cred.get_input('verify_ssl')
            if not verify_ssl:
                # Write registries.conf in the wrapper dir (sibling of the
                # runner dir) so podman trusts the insecure registry.
                pdd_wrapper_path = os.path.split(private_data_dir)[0]
                registries_conf_path = os.path.join(pdd_wrapper_path, 'registries.conf')

                host = ee_cred.get_input('host')

                with open(registries_conf_path, 'w') as registries_conf:
                    os.chmod(registries_conf.name, stat.S_IRUSR | stat.S_IWUSR)

                    lines = [
                        '[[registry]]',
                        'location = "{}"'.format(host),
                        'insecure = true',
                    ]

                    registries_conf.write('\n'.join(lines))

                # Podman >= 3.1.0
                env['CONTAINERS_REGISTRIES_CONF'] = registries_conf_path
                # Podman < 3.1.0
                env['REGISTRIES_CONFIG_PATH'] = registries_conf_path

        return env

    def build_inventory(self, instance, private_data_dir):
        """Write the inventory as an executable script that prints its JSON
        data, and record the host name -> id map for event association.
        Returns the script path."""
        script_params = dict(hostvars=True, towervars=True)
        if hasattr(instance, 'job_slice_number'):
            script_params['slice_number'] = instance.job_slice_number
            script_params['slice_count'] = instance.job_slice_count
        script_data = instance.inventory.get_script_data(**script_params)
        # maintain a list of host_name --> host_id
        # so we can associate emitted events to Host objects
        self.host_map = {hostname: hv.pop('remote_tower_id', '') for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items()}
        json_data = json.dumps(script_data)
        path = os.path.join(private_data_dir, 'inventory')
        fn = os.path.join(path, 'hosts')
        with open(fn, 'w') as f:
            os.chmod(fn, stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR)
            f.write('#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json_data)
        return fn

    def build_args(self, instance, private_data_dir, passwords):
        """Build the command line argument list; must be implemented by
        concrete subclasses."""
        raise NotImplementedError

    def write_args_file(self, private_data_dir, args):
        """Persist *args* to <private_data_dir>/env/cmdline for ansible-runner
        and return the file path (owner read-only)."""
        env_path = os.path.join(private_data_dir, 'env')
        try:
            os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        path = os.path.join(env_path, 'cmdline')
        handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
        f = os.fdopen(handle, 'w')
        f.write(ansible_runner.utils.args2cmdline(*args))
        f.close()
        os.chmod(path, stat.S_IRUSR)
        return path

    def build_credentials_list(self, instance):
        """Return credentials to inject into the job environment; base
        implementation has none."""
        return []

    def get_instance_timeout(self, instance):
        """Resolve the effective job timeout: the instance's own timeout wins
        over the global setting; negative local timeout disables (0)."""
        global_timeout_setting_name = instance._global_timeout_setting()
        if global_timeout_setting_name:
            global_timeout = getattr(settings, global_timeout_setting_name, 0)
            local_timeout = getattr(instance, 'timeout', 0)
            job_timeout = global_timeout if local_timeout == 0 else local_timeout
            job_timeout = 0 if local_timeout < 0 else job_timeout
        else:
            job_timeout = 0
        return job_timeout

    def get_password_prompts(self, passwords={}):
        """
        Return a dictionary where keys are strings or regular expressions for
        prompts, and values are password lookup keys (keys that are returned
        from build_passwords).
        """
        return OrderedDict()

    def create_expect_passwords_data_struct(self, password_prompts, passwords):
        """Map each prompt pattern to its concrete password value (empty
        string when the lookup key is absent)."""
        expect_passwords = {}
        for k, v in password_prompts.items():
            expect_passwords[k] = passwords.get(v, '') or ''
        return expect_passwords

    def pre_run_hook(self, instance, private_data_dir):
        """
        Hook for any steps to run before the job/task starts
        """
        instance.log_lifecycle("pre_run")

    def post_run_hook(self, instance, status):
        """
        Hook for any steps to run before job/task is marked as complete.
        """
        instance.log_lifecycle("post_run")

    def final_run_hook(self, instance, status, private_data_dir, fact_modification_times):
        """
        Hook for any steps to run after job/task is marked as complete.

        Copies playbook profiling data to the tower log dir and records
        collection/ansible-version info emitted by the run, when present.
        """
        instance.log_lifecycle("finalize_run")
        job_profiling_dir = os.path.join(private_data_dir, 'artifacts/playbook_profiling')
        awx_profiling_dir = '/var/log/tower/playbook_profiling/'
        collections_info = os.path.join(private_data_dir, 'artifacts/', 'collections.json')
        ansible_version_file = os.path.join(private_data_dir, 'artifacts/', 'ansible_version.txt')
        if not os.path.exists(awx_profiling_dir):
            os.mkdir(awx_profiling_dir)
        if os.path.isdir(job_profiling_dir):
            shutil.copytree(job_profiling_dir, os.path.join(awx_profiling_dir, str(instance.pk)))
        if os.path.exists(collections_info):
            with open(collections_info) as ee_json_info:
                ee_collections_info = json.loads(ee_json_info.read())
                instance.installed_collections = ee_collections_info
                instance.save(update_fields=['installed_collections'])
        if os.path.exists(ansible_version_file):
            with open(ansible_version_file) as ee_ansible_info:
                ansible_version_info = ee_ansible_info.readline()
                instance.ansible_version = ansible_version_info
                instance.save(update_fields=['ansible_version'])

    def event_handler(self, event_data):
        #
        # ⚠️ D-D-D-DANGER ZONE ⚠️
        # This method is called once for *every event* emitted by Ansible
        # Runner as a playbook runs. That means that changes to the code in
        # this method are _very_ likely to introduce performance regressions.
        #
        # Even if this function is made on average .05s slower, it can have
        # devastating performance implications for playbooks that emit
        # tens or hundreds of thousands of events.
        #
        # Proceed with caution!
        #
        """
        Ansible runner puts a parent_uuid on each event, no matter what the type.
        AWX only saves the parent_uuid if the event is for a Job.
        """
        # cache end_line locally for RunInventoryUpdate tasks
        # which generate job events from two 'streams':
        # ansible-inventory and the awx.main.commands.inventory_import
        # logger
        if isinstance(self, RunInventoryUpdate):
            self.end_line = event_data['end_line']

        if event_data.get(self.event_data_key, None):
            if self.event_data_key != 'job_id':
                event_data.pop('parent_uuid', None)
        if self.parent_workflow_job_id:
            event_data['workflow_job_id'] = self.parent_workflow_job_id
        event_data['job_created'] = self.job_created
        if self.host_map:
            host = event_data.get('event_data', {}).get('host', '').strip()
            if host:
                event_data['host_name'] = host
                if host in self.host_map:
                    event_data['host_id'] = self.host_map[host]
            else:
                event_data['host_name'] = ''
                event_data['host_id'] = ''
            if event_data.get('event') == 'playbook_on_stats':
                event_data['host_map'] = self.host_map

        if isinstance(self, RunProjectUpdate):
            # it's common for Ansible's SCM modules to print
            # error messages on failure that contain the plaintext
            # basic auth credentials (username + password)
            # it's also common for the nested event data itself (['res']['...'])
            # to contain unredacted text on failure
            # this is a _little_ expensive to filter
            # with regex, but project updates don't have many events,
            # so it *should* have a negligible performance impact
            task = event_data.get('event_data', {}).get('task_action')
            try:
                if task in ('git', 'svn'):
                    event_data_json = json.dumps(event_data)
                    event_data_json = UriCleaner.remove_sensitive(event_data_json)
                    event_data = json.loads(event_data_json)
            except json.JSONDecodeError:
                pass

        if 'event_data' in event_data:
            event_data['event_data']['guid'] = self.guid

        # To prevent overwhelming the broadcast queue, skip some websocket messages
        if self.recent_event_timings:
            cpu_time = time.time()
            first_window_time = self.recent_event_timings[0]
            last_window_time = self.recent_event_timings[-1]

            if event_data.get('event') in MINIMAL_EVENTS:
                should_emit = True  # always send some types like playbook_on_stats
            elif event_data.get('stdout') == '' and event_data['start_line'] == event_data['end_line']:
                should_emit = False  # exclude events with no output
            else:
                should_emit = any(
                    [
                        # if the most recent websocket message was sent over 1 second ago
                        cpu_time - first_window_time > 1.0,
                        # if the very last websocket message came in over 1/30 seconds ago
                        self.recent_event_timings.maxlen * (cpu_time - last_window_time) > 1.0,
                        # if the queue is not yet full
                        len(self.recent_event_timings) != self.recent_event_timings.maxlen,
                    ]
                )

            if should_emit:
                self.recent_event_timings.append(cpu_time)
            else:
                event_data.setdefault('event_data', {})
                event_data['skip_websocket_message'] = True
        elif self.recent_event_timings.maxlen:
            self.recent_event_timings.append(time.time())

        event_data.setdefault(self.event_data_key, self.instance.id)
        self.dispatcher.dispatch(event_data)
        self.event_ct += 1

        '''
        Handle artifacts
        '''
        if event_data.get('event_data', {}).get('artifact_data', {}):
            self.instance.artifacts = event_data['event_data']['artifact_data']
            self.instance.save(update_fields=['artifacts'])

        return False

    def cancel_callback(self):
        """
        Ansible runner callback to tell the job when/if it is canceled

        Returns True to cancel (instance deleted, cancel_flag set, or status
        already 'canceled'); False to keep running.
        """
        unified_job_id = self.instance.pk
        self.instance = self.update_model(unified_job_id)
        if not self.instance:
            logger.error('unified job {} was deleted while running, canceling'.format(unified_job_id))
            return True
        if self.instance.cancel_flag or self.instance.status == 'canceled':
            cancel_wait = (now() - self.instance.modified).seconds if self.instance.modified else 0
            if cancel_wait > 5:
                # NOTE(review): logger.warn is deprecated in favor of logger.warning
                logger.warn('Request to cancel {} took {} seconds to complete.'.format(self.instance.log_format, cancel_wait))
            return True
        return False

    def finished_callback(self, runner_obj):
        """
        Ansible runner callback triggered on finished run

        Emits a terminal EOF event carrying the total event count so event
        consumers know the stream is complete.
        """
        event_data = {
            'event': 'EOF',
            'final_counter': self.event_ct,
            'guid': self.guid,
        }
        event_data.setdefault(self.event_data_key, self.instance.id)
        self.dispatcher.dispatch(event_data)

    def status_handler(self, status_data, runner_config):
        """
        Ansible runner callback triggered on status transition

        On 'starting', records the (sanitized) command/cwd/env on the model;
        on 'error', records the traceback reported by runner.
        """
        if status_data['status'] == 'starting':
            job_env = dict(runner_config.env)
            '''
            Take the safe environment variables and overwrite
            '''
            for k, v in self.safe_env.items():
                if k in job_env:
                    job_env[k] = v
            self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env)
        elif status_data['status'] == 'error':
            result_traceback = status_data.get('result_traceback', None)
            if result_traceback:
                self.instance = self.update_model(self.instance.pk, result_traceback=result_traceback)

    @with_path_cleanup
    def run(self, pk, **kwargs):
        """
        Run the job/task and capture its output.

        Full lifecycle driver: marks the instance running, builds the
        private data dir / args / env / inventory, dispatches to
        ansible-runner (SystemJob) or receptor (everything else), then
        persists final status and raises on non-successful outcomes.
        """
        self.instance = self.model.objects.get(pk=pk)
        if self.instance.execution_environment_id is None:
            from awx.main.signals import disable_activity_stream

            with disable_activity_stream():
                self.instance = self.update_model(self.instance.pk, execution_environment=self.instance.resolve_execution_environment())

        # self.instance because of the update_model pattern and when it's used in callback handlers
        self.instance = self.update_model(pk, status='running', start_args='')  # blank field to remove encrypted passwords
        self.instance.websocket_emit_status("running")
        status, rc = 'error', None
        extra_update_fields = {}
        fact_modification_times = {}
        self.event_ct = 0

        '''
        Needs to be an object property because status_handler uses it in a callback context
        '''
        self.safe_env = {}
        self.safe_cred_env = {}
        private_data_dir = None

        # store a reference to the parent workflow job (if any) so we can include
        # it in event data JSON
        if self.instance.spawned_by_workflow:
            self.parent_workflow_job_id = self.instance.get_workflow_job().id

        self.job_created = str(self.instance.created)
        try:
            self.instance.send_notification_templates("running")
            private_data_dir = self.build_private_data_dir(self.instance)
            self.pre_run_hook(self.instance, private_data_dir)
            self.instance.log_lifecycle("preparing_playbook")
            if self.instance.cancel_flag:
                self.instance = self.update_model(self.instance.pk, status='canceled')
            if self.instance.status != 'running':
                # Stop the task chain and prevent starting the job if it has
                # already been canceled.
                self.instance = self.update_model(pk)
                status = self.instance.status
                raise RuntimeError('not starting %s task' % self.instance.status)

            if not os.path.exists(settings.AWX_ISOLATION_BASE_PATH):
                raise RuntimeError('AWX_ISOLATION_BASE_PATH=%s does not exist' % settings.AWX_ISOLATION_BASE_PATH)

            # Fetch "cached" fact data from prior runs and put on the disk
            # where ansible expects to find it
            if getattr(self.instance, 'use_fact_cache', False):
                self.instance.start_job_fact_cache(
                    os.path.join(private_data_dir, 'artifacts', str(self.instance.id), 'fact_cache'),
                    fact_modification_times,
                )

            # May have to serialize the value
            private_data_files = self.build_private_data_files(self.instance, private_data_dir)
            passwords = self.build_passwords(self.instance, kwargs)
            self.build_extra_vars_file(self.instance, private_data_dir)
            args = self.build_args(self.instance, private_data_dir, passwords)
            env = self.build_env(self.instance, private_data_dir, private_data_files=private_data_files)
            self.safe_env = build_safe_env(env)

            credentials = self.build_credentials_list(self.instance)

            for credential in credentials:
                if credential:
                    credential.credential_type.inject_credential(credential, env, self.safe_cred_env, args, private_data_dir)

            self.safe_env.update(self.safe_cred_env)

            self.write_args_file(private_data_dir, args)

            password_prompts = self.get_password_prompts(passwords)
            expect_passwords = self.create_expect_passwords_data_struct(password_prompts, passwords)

            params = {
                'ident': self.instance.id,
                'private_data_dir': private_data_dir,
                'playbook': self.build_playbook_path_relative_to_cwd(self.instance, private_data_dir),
                'inventory': self.build_inventory(self.instance, private_data_dir),
                'passwords': expect_passwords,
                'envvars': env,
                'settings': {
                    'job_timeout': self.get_instance_timeout(self.instance),
                    'suppress_ansible_output': True,
                },
            }

            if isinstance(self.instance, AdHocCommand):
                params['module'] = self.build_module_name(self.instance)
                params['module_args'] = self.build_module_args(self.instance)

            if getattr(self.instance, 'use_fact_cache', False):
                # Enable Ansible fact cache.
                params['fact_cache_type'] = 'jsonfile'
            else:
                # Disable Ansible fact cache.
                params['fact_cache_type'] = ''

            if self.instance.is_container_group_task or settings.IS_K8S:
                params['envvars'].pop('HOME', None)

            '''
            Delete parameters if the values are None or empty array
            '''
            for v in ['passwords', 'playbook', 'inventory']:
                if not params[v]:
                    del params[v]

            self.dispatcher = CallbackQueueDispatcher()

            self.instance.log_lifecycle("running_playbook")
            if isinstance(self.instance, SystemJob):
                res = ansible_runner.interface.run(
                    project_dir=settings.BASE_DIR,
                    event_handler=self.event_handler,
                    finished_callback=self.finished_callback,
                    status_handler=self.status_handler,
                    **params,
                )
            else:
                receptor_job = AWXReceptorJob(self, params)
                self.unit_id = receptor_job.unit_id
                res = receptor_job.run()

                if not res:
                    return

            status = res.status
            rc = res.rc

            if status == 'timeout':
                self.instance.job_explanation = "Job terminated due to timeout"
                status = 'failed'
                extra_update_fields['job_explanation'] = self.instance.job_explanation
                # ensure failure notification sends even if playbook_on_stats event is not triggered
                handle_success_and_failure_notifications.apply_async([self.instance.job.id])

        except InvalidVirtualenvError as e:
            extra_update_fields['job_explanation'] = e.message
            logger.error('{} {}'.format(self.instance.log_format, e.message))
        except Exception:
            # this could catch programming or file system errors
            extra_update_fields['result_traceback'] = traceback.format_exc()
            logger.exception('%s Exception occurred while running task', self.instance.log_format)
        finally:
            logger.debug('%s finished running, producing %s events.', self.instance.log_format, self.event_ct)

        try:
            self.post_run_hook(self.instance, status)
        except PostRunError as exc:
            if status == 'successful':
                status = exc.status
                extra_update_fields['job_explanation'] = exc.args[0]
                if exc.tb:
                    extra_update_fields['result_traceback'] = exc.tb
        except Exception:
            logger.exception('{} Post run hook errored.'.format(self.instance.log_format))

        self.instance = self.update_model(pk)
        self.instance = self.update_model(pk, status=status, emitted_events=self.event_ct, **extra_update_fields)

        try:
            self.final_run_hook(self.instance, status, private_data_dir, fact_modification_times)
        except Exception:
            logger.exception('{} Final run hook errored.'.format(self.instance.log_format))

        self.instance.websocket_emit_status(status)
        if status != 'successful':
            if status == 'canceled':
                raise AwxTaskError.TaskCancel(self.instance, rc)
            else:
                raise AwxTaskError.TaskError(self.instance, rc)
@task(queue=get_local_queuename)
class RunJob(BaseTask):
"""
Run a job using ansible-playbook.
"""
model = Job
event_model = JobEvent
event_data_key = 'job_id'
def build_private_data(self, job, private_data_dir):
"""
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
...
},
'certificates': {
<awx.main.models.Credential>: <signed SSH certificate data>,
<awx.main.models.Credential>: <signed SSH certificate data>,
...
}
}
"""
private_data = {'credentials': {}}
for credential in job.credentials.prefetch_related('input_sources__source_credential').all():
# If we were sent SSH credentials, decrypt them and send them
# back (they will be written to a temporary file).
if credential.has_input('ssh_key_data'):
private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
if credential.has_input('ssh_public_key_data'):
private_data.setdefault('certificates', {})[credential] = credential.get_input('ssh_public_key_data', default='')
return private_data
def build_passwords(self, job, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key, SSH user, sudo/su
and ansible-vault.
"""
passwords = super(RunJob, self).build_passwords(job, runtime_passwords)
cred = job.machine_credential
if cred:
for field in ('ssh_key_unlock', 'ssh_password', 'become_password', 'vault_password'):
value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
if value not in ('', 'ASK'):
passwords[field] = value
for cred in job.vault_credentials:
field = 'vault_password'
vault_id = cred.get_input('vault_id', default=None)
if vault_id:
field = 'vault_password.{}'.format(vault_id)
if field in passwords:
raise RuntimeError('multiple vault credentials were specified with --vault-id {}@prompt'.format(vault_id))
value = runtime_passwords.get(field, cred.get_input('vault_password', default=''))
if value not in ('', 'ASK'):
passwords[field] = value
'''
Only 1 value can be provided for a unique prompt string. Prefer ssh
key unlock over network key unlock.
'''
if 'ssh_key_unlock' not in passwords:
for cred in job.network_credentials:
if cred.inputs.get('ssh_key_unlock'):
passwords['ssh_key_unlock'] = runtime_passwords.get('ssh_key_unlock', cred.get_input('ssh_key_unlock', default=''))
break
return passwords
def build_env(self, job, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = super(RunJob, self).build_env(job, private_data_dir, private_data_files=private_data_files)
if private_data_files is None:
private_data_files = {}
# Set environment variables needed for inventory and job event
# callbacks to work.
env['JOB_ID'] = str(job.pk)
env['INVENTORY_ID'] = str(job.inventory.pk)
if job.project:
env['PROJECT_REVISION'] = job.project.scm_revision
env['ANSIBLE_RETRY_FILES_ENABLED'] = "False"
env['MAX_EVENT_RES'] = str(settings.MAX_EVENT_RES_DATA)
if hasattr(settings, 'AWX_ANSIBLE_CALLBACK_PLUGINS') and settings.AWX_ANSIBLE_CALLBACK_PLUGINS:
env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(settings.AWX_ANSIBLE_CALLBACK_PLUGINS)
env['AWX_HOST'] = settings.TOWER_URL_BASE
# Create a directory for ControlPath sockets that is unique to each job
cp_dir = os.path.join(private_data_dir, 'cp')
if not os.path.exists(cp_dir):
os.mkdir(cp_dir, 0o700)
# FIXME: more elegant way to manage this path in container
env['ANSIBLE_SSH_CONTROL_PATH_DIR'] = '/runner/cp'
# Set environment variables for cloud credentials.
cred_files = private_data_files.get('credentials', {})
for cloud_cred in job.cloud_credentials:
if cloud_cred and cloud_cred.credential_type.namespace == 'openstack' and cred_files.get(cloud_cred, ''):
env['OS_CLIENT_CONFIG_FILE'] = to_container_path(cred_files.get(cloud_cred, ''), private_data_dir)
for network_cred in job.network_credentials:
env['ANSIBLE_NET_USERNAME'] = network_cred.get_input('username', default='')
env['ANSIBLE_NET_PASSWORD'] = network_cred.get_input('password', default='')
ssh_keyfile = cred_files.get(network_cred, '')
if ssh_keyfile:
env['ANSIBLE_NET_SSH_KEYFILE'] = ssh_keyfile
authorize = network_cred.get_input('authorize', default=False)
env['ANSIBLE_NET_AUTHORIZE'] = str(int(authorize))
if authorize:
env['ANSIBLE_NET_AUTH_PASS'] = network_cred.get_input('authorize_password', default='')
path_vars = (
('ANSIBLE_COLLECTIONS_PATHS', 'collections_paths', 'requirements_collections', '~/.ansible/collections:/usr/share/ansible/collections'),
('ANSIBLE_ROLES_PATH', 'roles_path', 'requirements_roles', '~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles'),
)
config_values = read_ansible_config(job.project.get_project_path(), list(map(lambda x: x[1], path_vars)))
for env_key, config_setting, folder, default in path_vars:
paths = default.split(':')
if env_key in env:
for path in env[env_key].split(':'):
if path not in paths:
paths = [env[env_key]] + paths
elif config_setting in config_values:
for path in config_values[config_setting].split(':'):
if path not in paths:
paths = [config_values[config_setting]] + paths
paths = [os.path.join(CONTAINER_ROOT, folder)] + paths
env[env_key] = os.pathsep.join(paths)
return env
def build_args(self, job, private_data_dir, passwords):
"""
Build command line argument list for running ansible-playbook,
optionally using ssh-agent for public/private key authentication.
"""
creds = job.machine_credential
ssh_username, become_username, become_method = '', '', ''
if creds:
ssh_username = creds.get_input('username', default='')
become_method = creds.get_input('become_method', default='')
become_username = creds.get_input('become_username', default='')
else:
become_method = None
become_username = ""
# Always specify the normal SSH user as root by default. Since this
# task is normally running in the background under a service account,
# it doesn't make sense to rely on ansible-playbook's default of using
# the current user.
ssh_username = ssh_username or 'root'
args = []
if job.job_type == 'check':
args.append('--check')
args.extend(['-u', sanitize_jinja(ssh_username)])
if 'ssh_password' in passwords:
args.append('--ask-pass')
if job.become_enabled:
args.append('--become')
if job.diff_mode:
args.append('--diff')
if become_method:
args.extend(['--become-method', sanitize_jinja(become_method)])
if become_username:
args.extend(['--become-user', sanitize_jinja(become_username)])
if 'become_password' in passwords:
args.append('--ask-become-pass')
# Support prompting for multiple vault passwords
for k, v in passwords.items():
if k.startswith('vault_password'):
if k == 'vault_password':
args.append('--ask-vault-pass')
else:
# split only on the first dot in case the vault ID itself contains a dot
vault_id = k.split('.', 1)[1]
args.append('--vault-id')
args.append('{}@prompt'.format(vault_id))
if job.forks:
if settings.MAX_FORKS > 0 and job.forks > settings.MAX_FORKS:
logger.warning(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.')
args.append('--forks=%d' % settings.MAX_FORKS)
else:
args.append('--forks=%d' % job.forks)
if job.force_handlers:
args.append('--force-handlers')
if job.limit:
args.extend(['-l', job.limit])
if job.verbosity:
args.append('-%s' % ('v' * min(5, job.verbosity)))
if job.job_tags:
args.extend(['-t', job.job_tags])
if job.skip_tags:
args.append('--skip-tags=%s' % job.skip_tags)
if job.start_at_task:
args.append('--start-at-task=%s' % job.start_at_task)
return args
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return job.playbook
def build_extra_vars_file(self, job, private_data_dir):
# Define special extra_vars for AWX, combine with job.extra_vars.
extra_vars = job.awx_meta_vars()
if job.extra_vars_dict:
extra_vars.update(json.loads(job.decrypted_extra_vars()))
# By default, all extra vars disallow Jinja2 template usage for
# security reasons; top level key-values defined in JT.extra_vars, however,
# are allowed as "safe" (because they can only be set by users with
# higher levels of privilege - those that have the ability create and
# edit Job Templates)
safe_dict = {}
if job.job_template and settings.ALLOW_JINJA_IN_EXTRA_VARS == 'template':
safe_dict = job.job_template.extra_vars_dict
return self._write_extra_vars_file(private_data_dir, extra_vars, safe_dict)
def build_credentials_list(self, job):
    """Return all of the job's credentials, prefetching their input sources."""
    queryset = job.credentials.prefetch_related('input_sources__source_credential')
    return queryset.all()
def get_password_prompts(self, passwords=None):
    """
    Return a mapping of expected interactive-prompt regexes to the password
    lookup keys used to answer them.

    :param dict passwords: optional mapping of password names to values; only
        the keys are inspected here, to register per-vault-ID prompts.
    :returns: dict of regex pattern -> password key (or literal answer)
    """
    # Fix: the previous signature used a mutable default (``passwords={}``),
    # a classic Python pitfall; normalize from ``None`` instead.
    if passwords is None:
        passwords = {}
    d = super(RunJob, self).get_password_prompts(passwords)
    d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
    d[r'Bad passphrase, try again for .*:\s*?$'] = ''
    for method in PRIVILEGE_ESCALATION_METHODS:
        d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
        d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
    d[r'BECOME password.*:\s*?$'] = 'become_password'
    d[r'SSH password:\s*?$'] = 'ssh_password'
    d[r'Password:\s*?$'] = 'ssh_password'
    d[r'Vault password:\s*?$'] = 'vault_password'
    # Only the key names are needed to recognize per-vault-ID prompts.
    for k in passwords:
        if k.startswith('vault_password.'):
            # split only on the first dot in case the vault ID itself contains a dot
            vault_id = k.split('.', 1)[1]
            d[r'Vault password \({}\):\s*?$'.format(vault_id)] = k
    return d
def build_execution_environment_params(self, instance, private_data_dir):
    """
    Extend the base execution-environment container parameters with a
    bind-mount for the Insights system-id directory when the job uses
    the fact cache and that directory exists on this host.

    Returns an empty dict on Kubernetes, where local volume mounts
    do not apply.
    """
    if settings.IS_K8S:
        return {}
    params = super(RunJob, self).build_execution_environment_params(instance, private_data_dir)
    # If this has an insights agent and it is not already mounted then show it
    insights_dir = os.path.dirname(settings.INSIGHTS_SYSTEM_ID_FILE)
    if instance.use_fact_cache and os.path.exists(insights_dir):
        # Fix: removed a stray `logger.info('not parent of others')` debug
        # statement that logged a meaningless message on every fact-cache run.
        params.setdefault('container_volume_mounts', [])
        params['container_volume_mounts'].extend(
            [
                f"{insights_dir}:{insights_dir}:Z",
            ]
        )
    return params
def pre_run_hook(self, job, private_data_dir):
    """
    Validate the job's prerequisites and, when needed, run a blocking
    project sync before the job itself starts.

    Marks the job failed (and raises RuntimeError) when the inventory,
    project, or execution environment is missing, or the project's last
    update failed. Otherwise decides whether the local project checkout
    must be synced from the remote, runs that sync inline, and finally
    copies project content into the job's private data dir.
    """
    super(RunJob, self).pre_run_hook(job, private_data_dir)
    # --- hard prerequisites: fail the job early with a clear explanation ---
    if job.inventory is None:
        error = _('Job could not start because it does not have a valid inventory.')
        self.update_model(job.pk, status='failed', job_explanation=error)
        raise RuntimeError(error)
    elif job.project is None:
        error = _('Job could not start because it does not have a valid project.')
        self.update_model(job.pk, status='failed', job_explanation=error)
        raise RuntimeError(error)
    elif job.execution_environment is None:
        error = _('Job could not start because no Execution Environment could be found.')
        self.update_model(job.pk, status='error', job_explanation=error)
        raise RuntimeError(error)
    elif job.project.status in ('error', 'failed'):
        msg = _('The project revision for this job template is unknown due to a failed update.')
        job = self.update_model(job.pk, status='failed', job_explanation=msg)
        raise RuntimeError(msg)
    project_path = job.project.get_project_path(check_if_exists=False)
    job_revision = job.project.scm_revision
    sync_needs = []
    source_update_tag = 'update_{}'.format(job.project.scm_type)
    # a job-level scm_branch differing from the project's branch forces a fresh sync
    branch_override = bool(job.scm_branch and job.scm_branch != job.project.scm_branch)
    # --- decide whether a project sync is required ---
    if not job.project.scm_type:
        pass  # manual projects are not synced, user has responsibility for that
    elif not os.path.exists(project_path):
        logger.debug('Performing fresh clone of {} on this instance.'.format(job.project))
        sync_needs.append(source_update_tag)
    elif job.project.scm_type == 'git' and job.project.scm_revision and (not branch_override):
        # git: we can skip the sync if the wanted commit is already present locally
        try:
            git_repo = git.Repo(project_path)
            if job_revision == git_repo.head.commit.hexsha:
                logger.debug('Skipping project sync for {} because commit is locally available'.format(job.log_format))
            else:
                sync_needs.append(source_update_tag)
        except (ValueError, BadGitName, git.exc.InvalidGitRepositoryError):
            logger.debug('Needed commit for {} not in local source tree, will sync with remote'.format(job.log_format))
            sync_needs.append(source_update_tag)
    else:
        logger.debug('Project not available locally, {} will sync with remote'.format(job.log_format))
        sync_needs.append(source_update_tag)
    has_cache = os.path.exists(os.path.join(job.project.get_cache_path(), job.project.cache_id))
    # Galaxy requirements are not supported for manual projects
    if job.project.scm_type and ((not has_cache) or branch_override):
        # roles/collections must be (re)installed when there is no cache or the branch differs
        sync_needs.extend(['install_roles', 'install_collections'])
    if sync_needs:
        pu_ig = job.instance_group
        pu_en = job.execution_node
        sync_metafields = dict(
            launch_type="sync",
            job_type='run',
            job_tags=','.join(sync_needs),
            status='running',
            instance_group=pu_ig,
            execution_node=pu_en,
            celery_task_id=job.celery_task_id,
        )
        if branch_override:
            sync_metafields['scm_branch'] = job.scm_branch
            sync_metafields['scm_clean'] = True  # to accommodate force pushes
        if 'update_' not in sync_metafields['job_tags']:
            # not fetching from remote: pin the sync to the job's known revision
            sync_metafields['scm_revision'] = job_revision
        local_project_sync = job.project.create_project_update(_eager_fields=sync_metafields)
        # save the associated job before calling run() so that a
        # cancel() call on the job can cancel the project update
        job = self.update_model(job.pk, project_update=local_project_sync)
        project_update_task = local_project_sync._get_task_class()
        try:
            # the job private_data_dir is passed so sync can download roles and collections there
            sync_task = project_update_task(job_private_data_dir=private_data_dir)
            sync_task.run(local_project_sync.id)
            local_project_sync.refresh_from_db()
            job = self.update_model(job.pk, scm_revision=local_project_sync.scm_revision)
        except Exception:
            local_project_sync.refresh_from_db()
            # a canceled sync is not an error of its own; other failures mark the job failed
            if local_project_sync.status != 'canceled':
                job = self.update_model(
                    job.pk,
                    status='failed',
                    job_explanation=(
                        'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
                        % ('project_update', local_project_sync.name, local_project_sync.id)
                    ),
                )
            raise
        job.refresh_from_db()
        if job.cancel_flag:
            return
    else:
        # Case where a local sync is not needed, meaning that local tree is
        # up-to-date with project, job is running project current version
        if job_revision:
            job = self.update_model(job.pk, scm_revision=job_revision)
        # Project update does not copy the folder, so copy here
        RunProjectUpdate.make_local_copy(job.project, private_data_dir, scm_revision=job_revision)
    if job.inventory.kind == 'smart':
        # cache smart inventory memberships so that the host_filter query is not
        # ran inside of the event saving code
        update_smart_memberships_for_inventory(job.inventory)
def final_run_hook(self, job, status, private_data_dir, fact_modification_times):
    """
    Post-run cleanup: persist changed facts back into the fact cache and
    trigger an async recomputation of the inventory's computed fields.
    """
    super(RunJob, self).final_run_hook(job, status, private_data_dir, fact_modification_times)
    if not private_data_dir:
        # No private data dir means run() was never reached — usually a
        # failure inside pre_run_hook. Nothing to clean up.
        return
    if job.use_fact_cache:
        fact_cache_path = os.path.join(private_data_dir, 'artifacts', 'fact_cache')
        job.finish_job_fact_cache(fact_cache_path, fact_modification_times)
    try:
        inv = job.inventory
    except Inventory.DoesNotExist:
        return
    if inv is not None:
        update_inventory_computed_fields.delay(inv.id)
@task(queue=get_local_queuename)
class RunProjectUpdate(BaseTask):
    """Dispatched task that performs a project update (SCM sync) via the runner."""

    model = ProjectUpdate  # model instances this task operates on
    event_model = ProjectUpdateEvent  # model used to persist runner events
    event_data_key = 'project_update_id'  # FK field stamped onto each event

    def __init__(self, *args, job_private_data_dir=None, **kwargs):
        """
        :param str job_private_data_dir: when set, this update runs as a
            dependency of a job and project content/roles/collections are
            copied into that job's private data dir after the sync.
        """
        super(RunProjectUpdate, self).__init__(*args, **kwargs)
        # filled in from the runner's set_fact event stream ('scm_version')
        self.playbook_new_revision = None
        # branch the repo was on before a branch-override run; restored in post_run_hook
        self.original_branch = None
        self.job_private_data_dir = job_private_data_dir
def event_handler(self, event_data):
    """
    Inspect runner events for the set_fact that reports the new SCM
    revision, and remember it on the task instance.
    """
    super(RunProjectUpdate, self).event_handler(event_data)
    payload = event_data.get('event_data', {})
    if payload.get('task_action', '') != 'set_fact':
        return
    facts = payload.get('res', {}).get('ansible_facts', {})
    if 'scm_version' in facts:
        self.playbook_new_revision = facts['scm_version']
def build_private_data(self, project_update, private_data_dir):
    """
    Return SSH private key data needed for this project update.
    Returns a dict of the form
    {
        'credentials': {
            <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
            ...
        }
    }
    """
    private_data = {'credentials': {}}
    cred = project_update.credential
    if cred and cred.has_input('ssh_key_data'):
        private_data['credentials'][cred] = cred.get_input('ssh_key_data', default='')
    return private_data
def build_passwords(self, project_update, runtime_passwords):
    """
    Build a dictionary of passwords for SSH private key unlock and SCM
    username/password.
    """
    passwords = super(RunProjectUpdate, self).build_passwords(project_update, runtime_passwords)
    cred = project_update.credential
    if cred:
        for pw_key, input_name in (
            ('scm_key_unlock', 'ssh_key_unlock'),
            ('scm_username', 'username'),
            ('scm_password', 'password'),
        ):
            passwords[pw_key] = cred.get_input(input_name, default='')
    return passwords
def build_env(self, project_update, private_data_dir, private_data_files=None):
    """
    Build environment dictionary for ansible-playbook.

    Disables retry files and interactive password prompting, then layers
    in Galaxy server configuration from the project's organization
    credentials (one ANSIBLE_GALAXY_SERVER_SERVER<i>_* group per credential,
    in order).
    """
    env = super(RunProjectUpdate, self).build_env(project_update, private_data_dir, private_data_files=private_data_files)
    env['ANSIBLE_RETRY_FILES_ENABLED'] = str(False)
    env['ANSIBLE_ASK_PASS'] = str(False)
    env['ANSIBLE_BECOME_ASK_PASS'] = str(False)
    env['DISPLAY'] = ''  # Prevent stupid password popup when running tests.
    # give ansible a hint about the intended tmpdir to work around issues
    # like https://github.com/ansible/ansible/issues/30064
    env['TMP'] = settings.AWX_ISOLATION_BASE_PATH
    env['PROJECT_UPDATE_ID'] = str(project_update.pk)
    if settings.GALAXY_IGNORE_CERTS:
        # NOTE(review): bool value, unlike the str(False) values above —
        # confirm the downstream env serialization stringifies it.
        env['ANSIBLE_GALAXY_IGNORE'] = True
    # build out env vars for Galaxy credentials (in order)
    galaxy_server_list = []
    if project_update.project.organization:
        for i, cred in enumerate(project_update.project.organization.galaxy_credentials.all()):
            env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_URL'] = cred.get_input('url')
            auth_url = cred.get_input('auth_url', default=None)
            token = cred.get_input('token', default=None)
            if token:
                env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_TOKEN'] = token
            if auth_url:
                env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_AUTH_URL'] = auth_url
            galaxy_server_list.append(f'server{i}')
    if galaxy_server_list:
        env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join(galaxy_server_list)
    return env
def _build_scm_url_extra_vars(self, project_update):
    """
    Helper method to build SCM url and extra vars with parameters needed
    for authentication.

    Returns (scm_url, extra_vars). Credentials embedded in the URL take
    precedence over the attached credential. Depending on the SCM type,
    username/password are either passed as extra vars (svn, insights,
    archive) or folded back into the URL; setting them to False tells
    update_scm_url() to strip them from the URL.
    """
    extra_vars = {}
    if project_update.credential:
        scm_username = project_update.credential.get_input('username', default='')
        scm_password = project_update.credential.get_input('password', default='')
    else:
        scm_username = ''
        scm_password = ''
    scm_type = project_update.scm_type
    scm_url = update_scm_url(scm_type, project_update.scm_url, check_special_cases=False)
    scm_url_parts = urlparse.urlsplit(scm_url)
    # Prefer the username/password in the URL, if provided.
    scm_username = scm_url_parts.username or scm_username
    scm_password = scm_url_parts.password or scm_password
    if scm_username:
        if scm_type == 'svn':
            # svn auth goes through extra vars, not the URL
            extra_vars['scm_username'] = scm_username
            extra_vars['scm_password'] = scm_password
            scm_password = False
            if scm_url_parts.scheme != 'svn+ssh':
                scm_username = False
        elif scm_url_parts.scheme.endswith('ssh'):
            # ssh auth uses keys; never embed a password in the URL
            scm_password = False
        elif scm_type in ('insights', 'archive'):
            extra_vars['scm_username'] = scm_username
            extra_vars['scm_password'] = scm_password
        scm_url = update_scm_url(scm_type, scm_url, scm_username, scm_password, scp_format=True)
    else:
        scm_url = update_scm_url(scm_type, scm_url, scp_format=True)
    # Pass the extra accept_hostkey parameter to the git module.
    if scm_type == 'git' and scm_url_parts.scheme.endswith('ssh'):
        extra_vars['scm_accept_hostkey'] = 'true'
    return scm_url, extra_vars
def build_inventory(self, instance, private_data_dir):
    """Project updates always run against localhost only."""
    return 'localhost,'
def build_args(self, project_update, private_data_dir, passwords):
    """
    Build command line argument list for running ansible-playbook,
    optionally using ssh-agent for public/private key authentication.
    """
    cli_args = []
    # opt-in verbose project updates via the PROJECT_UPDATE_VVV setting
    if getattr(settings, 'PROJECT_UPDATE_VVV', False):
        cli_args.append('-vvv')
    if project_update.job_tags:
        cli_args += ['-t', project_update.job_tags]
    return cli_args
def build_extra_vars_file(self, project_update, private_data_dir):
    """
    Write the extra_vars file driving the project_update.yml playbook:
    SCM url/branch/auth vars, filesystem paths, and role/collection
    install toggles.
    """
    extra_vars = {}
    scm_url, extra_vars_new = self._build_scm_url_extra_vars(project_update)
    extra_vars.update(extra_vars_new)
    scm_branch = project_update.scm_branch
    # for job-dependency syncs without a branch override, pin to the
    # project's known revision rather than a branch name
    if project_update.job_type == 'run' and (not project_update.branch_override):
        if project_update.project.scm_revision:
            scm_branch = project_update.project.scm_revision
        elif not scm_branch:
            raise RuntimeError('Could not determine a revision to run from project.')
    elif not scm_branch:
        scm_branch = 'HEAD'
    galaxy_creds_are_defined = project_update.project.organization and project_update.project.organization.galaxy_credentials.exists()
    if not galaxy_creds_are_defined and (settings.AWX_ROLES_ENABLED or settings.AWX_COLLECTIONS_ENABLED):
        logger.warning('Galaxy role/collection syncing is enabled, but no ' f'credentials are configured for {project_update.project.organization}.')
    extra_vars.update(
        {
            'projects_root': settings.PROJECTS_ROOT.rstrip('/'),
            'local_path': os.path.basename(project_update.project.local_path),
            'project_path': project_update.get_project_path(check_if_exists=False),  # deprecated
            'insights_url': settings.INSIGHTS_URL_BASE,
            'awx_license_type': get_license().get('license_type', 'UNLICENSED'),
            'awx_version': get_awx_version(),
            'scm_url': scm_url,
            'scm_branch': scm_branch,
            'scm_clean': project_update.scm_clean,
            'scm_track_submodules': project_update.scm_track_submodules,
            # role/collection install only makes sense with Galaxy credentials
            'roles_enabled': galaxy_creds_are_defined and settings.AWX_ROLES_ENABLED,
            'collections_enabled': galaxy_creds_are_defined and settings.AWX_COLLECTIONS_ENABLED,
        }
    )
    # apply custom refspec from user for PR refs and the like
    if project_update.scm_refspec:
        extra_vars['scm_refspec'] = project_update.scm_refspec
    elif project_update.project.allow_override:
        # If branch is override-able, do extra fetch for all branches
        extra_vars['scm_refspec'] = 'refs/heads/*:refs/remotes/origin/*'
    if project_update.scm_type == 'archive':
        # for raw archive, prevent error moving files between volumes
        extra_vars['ansible_remote_tmp'] = os.path.join(project_update.get_project_path(check_if_exists=False), '.ansible_awx', 'tmp')
    # NOTE(review): unlike RunJob.build_extra_vars_file, the result of
    # _write_extra_vars_file is not returned here — confirm callers ignore it.
    self._write_extra_vars_file(private_data_dir, extra_vars)
def build_playbook_path_relative_to_cwd(self, project_update, private_data_dir):
    """Return the runner-relative path of the bundled project-update playbook."""
    # Fix: os.path.join() with a single argument was a no-op; return the literal.
    return 'project_update.yml'
def get_password_prompts(self, passwords=None):
    """
    Return a mapping of SCM-related interactive-prompt regexes to the
    password lookup keys (or literal answers) used to respond to them.
    """
    # Fix: avoid the mutable default argument (``passwords={}``) pitfall.
    if passwords is None:
        passwords = {}
    d = super(RunProjectUpdate, self).get_password_prompts(passwords)
    d[r'Username for.*:\s*?$'] = 'scm_username'
    d[r'Password for.*:\s*?$'] = 'scm_password'
    d[r'Password:\s*?$'] = 'scm_password'
    d[r'\S+?@\S+?\'s\s+?password:\s*?$'] = 'scm_password'
    d[r'Enter passphrase for .*:\s*?$'] = 'scm_key_unlock'
    d[r'Bad passphrase, try again for .*:\s*?$'] = ''
    # FIXME: Configure whether we should auto accept host keys?
    d[r'^Are you sure you want to continue connecting \(yes/no\)\?\s*?$'] = 'yes'
    return d
def _update_dependent_inventories(self, project_update, dependent_inventory_sources):
    """
    Run inventory updates, inline and sequentially, for each SCM inventory
    source that depends on this project and has not yet seen its revision.

    Each update runs even if a previous one failed; the loop only breaks
    when the project update itself disappears or is canceled.
    """
    scm_revision = project_update.project.scm_revision
    inv_update_class = InventoryUpdate._get_task_class()
    for inv_src in dependent_inventory_sources:
        if not inv_src.update_on_project_update:
            continue
        if inv_src.scm_last_revision == scm_revision:
            logger.debug('Skipping SCM inventory update for `{}` because ' 'project has not changed.'.format(inv_src.name))
            continue
        logger.debug('Local dependent inventory update for `{}`.'.format(inv_src.name))
        # create the update atomically so a concurrent active update is detected
        with transaction.atomic():
            if InventoryUpdate.objects.filter(inventory_source=inv_src, status__in=ACTIVE_STATES).exists():
                logger.debug('Skipping SCM inventory update for `{}` because ' 'another update is already active.'.format(inv_src.name))
                continue
            local_inv_update = inv_src.create_inventory_update(
                _eager_fields=dict(
                    launch_type='scm',
                    status='running',
                    instance_group=project_update.instance_group,
                    execution_node=project_update.execution_node,
                    source_project_update=project_update,
                    celery_task_id=project_update.celery_task_id,
                )
            )
        try:
            # run inline (not dispatched) on this node
            inv_update_class().run(local_inv_update.id)
        except Exception:
            logger.exception('{} Unhandled exception updating dependent SCM inventory sources.'.format(project_update.log_format))
        try:
            project_update.refresh_from_db()
        except ProjectUpdate.DoesNotExist:
            # parent update deleted out from under us: stop processing entirely
            logger.warning('Project update deleted during updates of dependent SCM inventory sources.')
            break
        try:
            local_inv_update.refresh_from_db()
        except InventoryUpdate.DoesNotExist:
            logger.warning('%s Dependent inventory update deleted during execution.', project_update.log_format)
            continue
        if project_update.cancel_flag:
            logger.info('Project update {} was canceled while updating dependent inventories.'.format(project_update.log_format))
            break
        if local_inv_update.cancel_flag:
            # a canceled child does not stop the remaining updates
            logger.info('Continuing to process project dependencies after {} was canceled'.format(local_inv_update.log_format))
        if local_inv_update.status == 'successful':
            inv_src.scm_last_revision = scm_revision
            inv_src.save(update_fields=['scm_last_revision'])
def release_lock(self, instance):
    """
    Unlock and close the project lock file acquired by acquire_lock().

    On unlock failure the descriptor is still closed before re-raising,
    but self.lock_fd is intentionally left set.
    """
    try:
        fcntl.lockf(self.lock_fd, fcntl.LOCK_UN)
    except IOError as err:
        logger.error("I/O error({0}) while trying to release lock file [{1}]: {2}".format(err.errno, instance.get_lock_file(), err.strerror))
        os.close(self.lock_fd)
        raise
    os.close(self.lock_fd)
    self.lock_fd = None
def acquire_lock(self, instance, blocking=True):
    """
    Acquire an exclusive advisory lock on the project's lock file,
    polling once per second until it is obtained or the update is
    canceled.

    Note: We don't support blocking=False.

    :param instance: the ProjectUpdate being locked
    :param bool blocking: accepted for interface compatibility only
    """
    lock_path = instance.get_lock_file()
    if lock_path is None:
        # If from migration or someone blanked local_path for any other reason, recoverable by save
        instance.save()
        lock_path = instance.get_lock_file()
        if lock_path is None:
            raise RuntimeError(u'Invalid lock file path')
    try:
        self.lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT)
    except OSError as e:
        logger.error("I/O error({0}) while trying to open lock file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
        raise
    start_time = time.time()
    while True:
        try:
            # bail out promptly if the update was canceled while waiting
            instance.refresh_from_db(fields=['cancel_flag'])
            if instance.cancel_flag:
                logger.debug("ProjectUpdate({0}) was canceled".format(instance.pk))
                return
            fcntl.lockf(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            break
        except IOError as e:
            if e.errno not in (errno.EAGAIN, errno.EACCES):
                os.close(self.lock_fd)
                # Fix: corrected "aquire" typo in the log message.
                logger.error("I/O error({0}) while trying to acquire lock on file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
                raise
            else:
                time.sleep(1.0)
    waiting_time = time.time() - start_time
    if waiting_time > 1.0:
        logger.info('{} spent {} waiting to acquire lock for local source tree ' 'for path {}.'.format(instance.log_format, waiting_time, lock_path))
def pre_run_hook(self, instance, private_data_dir):
    """
    Prepare the filesystem before the project-update playbook runs:
    ensure the project directory exists, take the project lock, remember
    the pre-update git branch for branch-override runs, reset the cache
    staging dir, and copy the bundled playbooks into the runner dir.
    """
    super(RunProjectUpdate, self).pre_run_hook(instance, private_data_dir)
    # re-create root project folder if a natural disaster has destroyed it
    if not os.path.exists(settings.PROJECTS_ROOT):
        os.mkdir(settings.PROJECTS_ROOT)
    project_path = instance.project.get_project_path(check_if_exists=False)
    if not os.path.exists(project_path):
        os.makedirs(project_path)  # used as container mount
    # hold the per-project lock for the duration of the update (released in post_run_hook)
    self.acquire_lock(instance)
    self.original_branch = None
    if instance.scm_type == 'git' and instance.branch_override:
        if os.path.exists(project_path):
            git_repo = git.Repo(project_path)
            # remember where the repo was, so post_run_hook can restore it
            if git_repo.head.is_detached:
                self.original_branch = git_repo.head.commit
            else:
                self.original_branch = git_repo.active_branch
    stage_path = os.path.join(instance.get_cache_path(), 'stage')
    if os.path.exists(stage_path):
        logger.warning('{0} unexpectedly existed before update'.format(stage_path))
        shutil.rmtree(stage_path)
    os.makedirs(stage_path)  # presence of empty cache indicates lack of roles or collections
    # the project update playbook is not in a git repo, but uses a vendoring directory
    # to be consistent with the ansible-runner model,
    # that is moved into the runner project folder here
    awx_playbooks = self.get_path_to('..', 'playbooks')
    copy_tree(awx_playbooks, os.path.join(private_data_dir, 'project'))
@staticmethod
def clear_project_cache(cache_dir, keep_value):
    """
    Remove every cache entry under cache_dir except keep_value and the
    in-progress 'stage' dir. Entries are renamed first so a partially
    deleted cache is never mistaken for a valid one.
    """
    if not os.path.isdir(cache_dir):
        return
    for entry in os.listdir(cache_dir):
        if entry in (keep_value, 'stage'):
            continue
        stale_path = os.path.join(cache_dir, entry)
        # invalidate, then delete
        doomed_path = os.path.join(cache_dir, '.~~delete~~' + entry)
        try:
            os.rename(stale_path, doomed_path)
            shutil.rmtree(doomed_path)
        except OSError:
            logger.warning(f"Could not remove cache directory {stale_path}")
@staticmethod
def make_local_copy(p, job_private_data_dir, scm_revision=None):
    """Copy project content (roles and collections) to a job private_data_dir

    :param object p: Either a project or a project update
    :param str job_private_data_dir: The root of the target ansible-runner folder
    :param str scm_revision: For branch_override cases, the git revision to copy
    """
    project_path = p.get_project_path(check_if_exists=False)
    destination_folder = os.path.join(job_private_data_dir, 'project')
    if not scm_revision:
        scm_revision = p.scm_revision
    if p.scm_type == 'git':
        git_repo = git.Repo(project_path)
        if not os.path.exists(destination_folder):
            # owner read/write/execute only
            os.mkdir(destination_folder, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
        # a throwaway branch pointing at the exact revision to be cloned
        tmp_branch_name = 'awx_internal/{}'.format(uuid4())
        # always clone based on specific job revision
        if not p.scm_revision:
            raise RuntimeError('Unexpectedly could not determine a revision to run from project.')
        source_branch = git_repo.create_head(tmp_branch_name, p.scm_revision)
        # git clone must take file:// syntax for source repo or else options like depth will be ignored
        source_as_uri = Path(project_path).as_uri()
        git.Repo.clone_from(
            source_as_uri,
            destination_folder,
            branch=source_branch,
            depth=1,
            single_branch=True,  # shallow, do not copy full history
        )
        # submodules copied in loop because shallow copies from local HEADs are ideal
        # and no git clone submodule options are compatible with minimum requirements
        for submodule in git_repo.submodules:
            subrepo_path = os.path.abspath(os.path.join(project_path, submodule.path))
            subrepo_destination_folder = os.path.abspath(os.path.join(destination_folder, submodule.path))
            subrepo_uri = Path(subrepo_path).as_uri()
            git.Repo.clone_from(subrepo_uri, subrepo_destination_folder, depth=1, single_branch=True)
        # force option is necessary because remote refs are not counted, although no information is lost
        git_repo.delete_head(tmp_branch_name, force=True)
    else:
        # non-git projects: plain recursive copy, keeping symlinks as links
        copy_tree(project_path, destination_folder, preserve_symlinks=1)
    # copy over the roles and collection cache to job folder
    cache_path = os.path.join(p.get_cache_path(), p.cache_id)
    subfolders = []
    if settings.AWX_COLLECTIONS_ENABLED:
        subfolders.append('requirements_collections')
    if settings.AWX_ROLES_ENABLED:
        subfolders.append('requirements_roles')
    for subfolder in subfolders:
        cache_subpath = os.path.join(cache_path, subfolder)
        if os.path.exists(cache_subpath):
            dest_subpath = os.path.join(job_private_data_dir, subfolder)
            copy_tree(cache_subpath, dest_subpath, preserve_symlinks=1)
            logger.debug('{0} {1} prepared {2} from cache'.format(type(p).__name__, p.pk, dest_subpath))
def post_run_hook(self, instance, status):
    """
    Finalize a project update: record the new revision, promote the
    staged role/collection cache, copy content into a dependent job's
    dir, restore the original git branch, release the project lock, and
    propagate revision/playbook info onto the Project for check runs.
    """
    super(RunProjectUpdate, self).post_run_hook(instance, status)
    # To avoid hangs, very important to release lock even if errors happen here
    try:
        if self.playbook_new_revision:
            # revision reported by the playbook via set_fact (see event_handler)
            instance.scm_revision = self.playbook_new_revision
            instance.save(update_fields=['scm_revision'])
        # Roles and collection folders copy to durable cache
        base_path = instance.get_cache_path()
        stage_path = os.path.join(base_path, 'stage')
        if status == 'successful' and 'install_' in instance.job_tags:
            # Clear other caches before saving this one, and if branch is overridden
            # do not clear cache for main branch, but do clear it for other branches
            self.clear_project_cache(base_path, keep_value=instance.project.cache_id)
            cache_path = os.path.join(base_path, instance.cache_id)
            if os.path.exists(stage_path):
                if os.path.exists(cache_path):
                    logger.warning('Rewriting cache at {0}, performance may suffer'.format(cache_path))
                    shutil.rmtree(cache_path)
                # atomic promotion of the staged cache
                os.rename(stage_path, cache_path)
                logger.debug('{0} wrote to cache at {1}'.format(instance.log_format, cache_path))
        elif os.path.exists(stage_path):
            shutil.rmtree(stage_path)  # cannot trust content update produced
        if self.job_private_data_dir:
            if status == 'successful':
                # copy project folder before resetting to default branch
                # because some git-tree-specific resources (like submodules) might matter
                self.make_local_copy(instance, self.job_private_data_dir)
            if self.original_branch:
                # for git project syncs, non-default branches can be problems
                # restore to branch the repo was on before this run
                try:
                    self.original_branch.checkout()
                except Exception:
                    # this could have failed due to dirty tree, but difficult to predict all cases
                    logger.exception('Failed to restore project repo to prior state after {}'.format(instance.log_format))
    finally:
        self.release_lock(instance)
    p = instance.project
    if instance.job_type == 'check' and status not in (
        'failed',
        'canceled',
    ):
        if self.playbook_new_revision:
            p.scm_revision = self.playbook_new_revision
        else:
            if status == 'successful':
                logger.error("{} Could not find scm revision in check".format(instance.log_format))
        p.playbook_files = p.playbooks
        p.inventory_files = p.inventories
        p.save(update_fields=['scm_revision', 'playbook_files', 'inventory_files'])
    # Update any inventories that depend on this project
    dependent_inventory_sources = p.scm_inventory_sources.filter(update_on_project_update=True)
    if len(dependent_inventory_sources) > 0:
        if status == 'successful' and instance.launch_type != 'sync':
            self._update_dependent_inventories(instance, dependent_inventory_sources)
def build_execution_environment_params(self, instance, private_data_dir):
    """
    Extend the base container parameters with bind-mounts for the
    project checkout and its roles/collections cache. Returns an empty
    dict on Kubernetes, where local mounts do not apply.
    """
    if settings.IS_K8S:
        return {}
    params = super(RunProjectUpdate, self).build_execution_environment_params(instance, private_data_dir)
    project_path = instance.get_project_path(check_if_exists=False)
    cache_path = instance.get_cache_path()
    mounts = params.setdefault('container_volume_mounts', [])
    mounts.extend(
        [
            f"{project_path}:{project_path}:Z",
            f"{cache_path}:{cache_path}:Z",
        ]
    )
    return params
@task(queue=get_local_queuename)
class RunInventoryUpdate(BaseTask):
    """Dispatched task that runs an inventory update via ansible-inventory."""

    model = InventoryUpdate  # model instances this task operates on
    event_model = InventoryUpdateEvent  # model used to persist runner events
    event_data_key = 'inventory_update_id'  # FK field stamped onto each event
def build_private_data(self, inventory_update, private_data_dir):
    """
    Return private data needed for inventory update.
    Returns a dict of the form
    {
        'credentials': {
            <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
            ...
        }
    }
    If no private data is needed, return None.
    """
    injector_cls = InventorySource.injectors.get(inventory_update.source)
    if injector_cls is None:
        return None
    return injector_cls().build_private_data(inventory_update, private_data_dir)
def build_env(self, inventory_update, private_data_dir, private_data_files=None):
    """Build environment dictionary for ansible-inventory.

    Most environment variables related to credentials or configuration
    are accomplished by the inventory source injectors (in this method)
    or custom credential type injectors (in main run method).
    """
    env = super(RunInventoryUpdate, self).build_env(inventory_update, private_data_dir, private_data_files=private_data_files)
    if private_data_files is None:
        private_data_files = {}
    # Pass inventory source ID to inventory script.
    env['INVENTORY_SOURCE_ID'] = str(inventory_update.inventory_source_id)
    env['INVENTORY_UPDATE_ID'] = str(inventory_update.pk)
    env.update(STANDARD_INVENTORY_UPDATE_ENV)
    injector = None
    if inventory_update.source in InventorySource.injectors:
        injector = InventorySource.injectors[inventory_update.source]()
    if injector is not None:
        env = injector.build_env(inventory_update, env, private_data_dir, private_data_files)
        # All CLOUD_PROVIDERS sources implement as inventory plugin from collection
        env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
    if inventory_update.source == 'scm':
        # SCM source_vars become env vars, except those explicitly blocked
        for env_k in inventory_update.source_vars_dict:
            if str(env_k) not in env and str(env_k) not in settings.INV_ENV_VARIABLE_BLOCKED:
                env[str(env_k)] = str(inventory_update.source_vars_dict[env_k])
    elif inventory_update.source == 'file':
        raise NotImplementedError('Cannot update file sources through the task system.')
    if inventory_update.source == 'scm' and inventory_update.source_project_update:
        # prepend the synced project's requirements_collections folder to the
        # collections path, preserving any env- or config-provided paths
        env_key = 'ANSIBLE_COLLECTIONS_PATHS'
        config_setting = 'collections_paths'
        folder = 'requirements_collections'
        default = '~/.ansible/collections:/usr/share/ansible/collections'
        config_values = read_ansible_config(os.path.join(private_data_dir, 'project'), [config_setting])
        paths = default.split(':')
        if env_key in env:
            for path in env[env_key].split(':'):
                if path not in paths:
                    paths = [env[env_key]] + paths
        elif config_setting in config_values:
            for path in config_values[config_setting].split(':'):
                if path not in paths:
                    paths = [config_values[config_setting]] + paths
        paths = [os.path.join(CONTAINER_ROOT, folder)] + paths
        env[env_key] = os.pathsep.join(paths)
    # always make the bundled controller collections visible last
    if 'ANSIBLE_COLLECTIONS_PATHS' in env:
        paths = env['ANSIBLE_COLLECTIONS_PATHS'].split(':')
    else:
        paths = ['~/.ansible/collections', '/usr/share/ansible/collections']
    paths.append('/usr/share/automation-controller/collections')
    env['ANSIBLE_COLLECTIONS_PATHS'] = os.pathsep.join(paths)
    return env
def write_args_file(self, private_data_dir, args):
path = os.path.join(private_data_dir, 'args')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(' '.join(args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_args(self, inventory_update, private_data_dir, passwords):
    """Build the command line argument list for running an inventory
    import.
    """
    # Get the inventory source and inventory.
    inventory_source = inventory_update.inventory_source
    if inventory_source.inventory is None:
        raise RuntimeError('Inventory Source is not associated with an Inventory.')
    args = ['ansible-inventory', '--list', '--export']
    # Add arguments for the source inventory file/script/thing
    rel_path = self.pseudo_build_inventory(inventory_update, private_data_dir)
    container_location = os.path.join(CONTAINER_ROOT, rel_path)
    source_location = os.path.join(private_data_dir, rel_path)
    args.extend(['-i', container_location])
    output_path = os.path.join(CONTAINER_ROOT, 'artifacts', str(inventory_update.id), 'output.json')
    args.extend(['--output', output_path])
    # a directory source is its own playbook dir; a file's parent dir otherwise
    playbook_dir = container_location if os.path.isdir(source_location) else os.path.dirname(container_location)
    args.extend(['--playbook-dir', playbook_dir])
    if inventory_update.verbosity:
        args.append('-' + 'v' * min(5, inventory_update.verbosity * 2 + 1))
    return args
def build_inventory(self, inventory_update, private_data_dir):
    """Return None — what runner expects in order to not deal with inventory."""
    return None
def pseudo_build_inventory(self, inventory_update, private_data_dir):
    """Inventory imports are ran through a management command
    we pass the inventory in args to that command, so this is not considered
    to be "Ansible" inventory (by runner) even though it is
    Eventually, we would like to cut out the management command,
    and thus use this as the real inventory
    """
    src = inventory_update.source
    injector_cls = InventorySource.injectors.get(src)
    if injector_cls is not None:
        injector = injector_cls()
        content = injector.inventory_contents(inventory_update, private_data_dir)
        # must be a statically named file
        inventory_path = os.path.join(private_data_dir, 'inventory', injector.filename)
        with open(inventory_path, 'w') as inv_file:
            inv_file.write(content)
        os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
        rel_path = os.path.join('inventory', injector.filename)
    elif src == 'scm':
        rel_path = os.path.join('project', inventory_update.source_path)
    # NOTE(review): sources with no injector that are not 'scm' leave rel_path
    # unbound here, same as the original — presumably unreachable; confirm.
    return rel_path
def build_playbook_path_relative_to_cwd(self, inventory_update, private_data_dir):
    """Inventory updates run no playbook."""
    return None
def build_credentials_list(self, inventory_update):
    """Return the credentials not consumed by the inventory source injector."""
    return inventory_update.get_extra_credentials()
def pre_run_hook(self, inventory_update, private_data_dir):
    """
    Prepare an SCM-backed inventory update.

    For SCM-sourced updates launched directly (launch_type != 'scm'), run a
    blocking project "sync" update first so the checkout is current; if the
    sync fails, mark this inventory update failed and re-raise. For updates
    launched *by* a project update (launch_type == 'scm'), only copy the
    already-updated project into the private data dir.
    """
    super(RunInventoryUpdate, self).pre_run_hook(inventory_update, private_data_dir)
    source_project = None
    if inventory_update.inventory_source:
        source_project = inventory_update.inventory_source.source_project
    if (
        inventory_update.source == 'scm' and inventory_update.launch_type != 'scm' and source_project and source_project.scm_type
    ):  # never ever update manual projects
        # Check if the content cache exists, so that we do not unnecessarily re-download roles
        sync_needs = ['update_{}'.format(source_project.scm_type)]
        has_cache = os.path.exists(os.path.join(source_project.get_cache_path(), source_project.cache_id))
        # Galaxy requirements are not supported for manual projects
        if not has_cache:
            sync_needs.extend(['install_roles', 'install_collections'])
        # Spawn a dependent project update record; status='running' because
        # we execute it synchronously right below rather than dispatching it.
        local_project_sync = source_project.create_project_update(
            _eager_fields=dict(
                launch_type="sync",
                job_type='run',
                job_tags=','.join(sync_needs),
                status='running',
                execution_node=inventory_update.execution_node,
                instance_group=inventory_update.instance_group,
                celery_task_id=inventory_update.celery_task_id,
            )
        )
        # associate the inventory update before calling run() so that a
        # cancel() call on the inventory update can cancel the project update
        local_project_sync.scm_inventory_updates.add(inventory_update)

        project_update_task = local_project_sync._get_task_class()
        try:
            # Run the project sync in-process, sharing our private data dir.
            sync_task = project_update_task(job_private_data_dir=private_data_dir)
            sync_task.run(local_project_sync.id)
            local_project_sync.refresh_from_db()
            # Record the revision the inventory was generated from.
            inventory_update.inventory_source.scm_last_revision = local_project_sync.scm_revision
            inventory_update.inventory_source.save(update_fields=['scm_last_revision'])
        except Exception:
            # Surface the failed dependency in the inventory update's
            # job_explanation, then propagate so the update itself errors out.
            inventory_update = self.update_model(
                inventory_update.pk,
                status='failed',
                job_explanation=(
                    'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
                    % ('project_update', local_project_sync.name, local_project_sync.id)
                ),
            )
            raise
    elif inventory_update.source == 'scm' and inventory_update.launch_type == 'scm' and source_project:
        # This follows update, not sync, so make copy here
        RunProjectUpdate.make_local_copy(source_project, private_data_dir)
def post_run_hook(self, inventory_update, status):
    """
    After a successful ansible-inventory run, import its output.json into
    the database via the inventory_import management command.

    Builds the save options (overwrite flags, enabled/instance-id variables
    taken from the update or from per-source settings, host filter,
    verbosity), swaps the inventory_import logger's handler for one that
    forwards lines as job events, then calls Command.perform_update().
    Save errors are converted into PostRunError.
    """
    super(RunInventoryUpdate, self).post_run_hook(inventory_update, status)
    if status != 'successful':
        return  # nothing to save, step out of the way to allow error reporting

    private_data_dir = inventory_update.job_env['AWX_PRIVATE_DATA_DIR']
    expected_output = os.path.join(private_data_dir, 'artifacts', 'output.json')
    with open(expected_output) as f:
        data = json.load(f)

    # build inventory save options
    options = dict(
        overwrite=inventory_update.overwrite,
        overwrite_vars=inventory_update.overwrite_vars,
    )
    src = inventory_update.source

    if inventory_update.enabled_var:
        options['enabled_var'] = inventory_update.enabled_var
        options['enabled_value'] = inventory_update.enabled_value
    else:
        # fall back to per-source settings, e.g. EC2_ENABLED_VAR
        if getattr(settings, '%s_ENABLED_VAR' % src.upper(), False):
            options['enabled_var'] = getattr(settings, '%s_ENABLED_VAR' % src.upper())
        if getattr(settings, '%s_ENABLED_VALUE' % src.upper(), False):
            options['enabled_value'] = getattr(settings, '%s_ENABLED_VALUE' % src.upper())

    if inventory_update.host_filter:
        options['host_filter'] = inventory_update.host_filter

    # NOTE(review): unlike the other lookups this one has no default, so a
    # source without an _EXCLUDE_EMPTY_GROUPS setting would raise — confirm
    # all sources define it.
    if getattr(settings, '%s_EXCLUDE_EMPTY_GROUPS' % src.upper()):
        options['exclude_empty_groups'] = True
    if getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper(), False):
        options['instance_id_var'] = getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper())

    # Verbosity is applied to saving process, as well as ansible-inventory CLI option
    if inventory_update.verbosity:
        options['verbosity'] = inventory_update.verbosity

    # Route the import command's log output into this task's event stream.
    handler = SpecialInventoryHandler(
        self.event_handler,
        self.cancel_callback,
        verbosity=inventory_update.verbosity,
        job_timeout=self.get_instance_timeout(self.instance),
        start_time=inventory_update.started,
        counter=self.event_ct,
        initial_line=self.end_line,
    )
    inv_logger = logging.getLogger('awx.main.commands.inventory_import')
    formatter = inv_logger.handlers[0].formatter
    formatter.job_start = inventory_update.started
    handler.formatter = formatter
    inv_logger.handlers[0] = handler

    from awx.main.management.commands.inventory_import import Command as InventoryImportCommand

    cmd = InventoryImportCommand()
    try:
        # save the inventory data to database.
        # canceling exceptions will be handled in the global post_run_hook
        cmd.perform_update(options, data, inventory_update)
    except PermissionDenied as exc:
        logger.exception('License error saving {} content'.format(inventory_update.log_format))
        raise PostRunError(str(exc), status='error')
    except PostRunError:
        logger.exception('Error saving {} content, rolling back changes'.format(inventory_update.log_format))
        raise
    except Exception:
        logger.exception('Exception saving {} content, rolling back changes.'.format(inventory_update.log_format))
        raise PostRunError('Error occured while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc())
@task(queue=get_local_queuename)
class RunAdHocCommand(BaseTask):
    """
    Run an ad hoc command using ansible.
    """

    model = AdHocCommand
    event_model = AdHocCommandEvent
    event_data_key = 'ad_hoc_command_id'

    def build_private_data(self, ad_hoc_command, private_data_dir):
        """
        Return SSH private key data needed for this ad hoc command (only if
        stored in DB as ssh_key_data).
        Returns a dict of the form
        {
            'credentials': {
                <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
                <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
                ...
            },
            'certificates': {
                <awx.main.models.Credential>: <signed SSH certificate data>,
                <awx.main.models.Credential>: <signed SSH certificate data>,
                ...
            }
        }
        """
        # If we were sent SSH credentials, decrypt them and send them
        # back (they will be written to a temporary file).
        creds = ad_hoc_command.credential
        private_data = {'credentials': {}}
        if creds and creds.has_input('ssh_key_data'):
            private_data['credentials'][creds] = creds.get_input('ssh_key_data', default='')
        if creds and creds.has_input('ssh_public_key_data'):
            private_data.setdefault('certificates', {})[creds] = creds.get_input('ssh_public_key_data', default='')
        return private_data

    def build_passwords(self, ad_hoc_command, runtime_passwords):
        """
        Build a dictionary of passwords for SSH private key, SSH user and
        sudo/su.
        """
        passwords = super(RunAdHocCommand, self).build_passwords(ad_hoc_command, runtime_passwords)
        cred = ad_hoc_command.credential
        if cred:
            for field in ('ssh_key_unlock', 'ssh_password', 'become_password'):
                # runtime-supplied value wins; the credential stores the SSH
                # password under the input name 'password'
                value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
                if value not in ('', 'ASK'):
                    passwords[field] = value
        return passwords

    def build_env(self, ad_hoc_command, private_data_dir, private_data_files=None):
        """
        Build environment dictionary for ansible.
        """
        env = super(RunAdHocCommand, self).build_env(ad_hoc_command, private_data_dir, private_data_files=private_data_files)
        # Set environment variables needed for inventory and ad hoc event
        # callbacks to work.
        env['AD_HOC_COMMAND_ID'] = str(ad_hoc_command.pk)
        env['INVENTORY_ID'] = str(ad_hoc_command.inventory.pk)
        env['INVENTORY_HOSTVARS'] = str(True)
        env['ANSIBLE_LOAD_CALLBACK_PLUGINS'] = '1'
        env['ANSIBLE_SFTP_BATCH_MODE'] = 'False'
        return env

    def build_args(self, ad_hoc_command, private_data_dir, passwords):
        """
        Build command line argument list for running ansible, optionally using
        ssh-agent for public/private key authentication.
        """
        creds = ad_hoc_command.credential
        ssh_username, become_username, become_method = '', '', ''
        if creds:
            ssh_username = creds.get_input('username', default='')
            become_method = creds.get_input('become_method', default='')
            become_username = creds.get_input('become_username', default='')
        else:
            become_method = None
            become_username = ""
        # Always specify the normal SSH user as root by default.  Since this
        # task is normally running in the background under a service account,
        # it doesn't make sense to rely on ansible's default of using the
        # current user.
        ssh_username = ssh_username or 'root'
        args = []
        if ad_hoc_command.job_type == 'check':
            args.append('--check')
        args.extend(['-u', sanitize_jinja(ssh_username)])
        if 'ssh_password' in passwords:
            args.append('--ask-pass')
        # We only specify sudo/su user and password if explicitly given by the
        # credential.  Credential should never specify both sudo and su.
        if ad_hoc_command.become_enabled:
            args.append('--become')
        if become_method:
            args.extend(['--become-method', sanitize_jinja(become_method)])
        if become_username:
            args.extend(['--become-user', sanitize_jinja(become_username)])
        if 'become_password' in passwords:
            args.append('--ask-become-pass')

        if ad_hoc_command.forks:  # FIXME: Max limit?
            args.append('--forks=%d' % ad_hoc_command.forks)
        if ad_hoc_command.diff_mode:
            args.append('--diff')
        if ad_hoc_command.verbosity:
            args.append('-%s' % ('v' * min(5, ad_hoc_command.verbosity)))

        # NOTE(review): extra_vars computed here are never appended to args;
        # the identical computation in build_extra_vars_file() is what
        # actually reaches ansible.  This appears to be leftover dead code —
        # only the removed_vars validation has any effect here.
        extra_vars = ad_hoc_command.awx_meta_vars()

        if ad_hoc_command.extra_vars_dict:
            redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
            if removed_vars:
                raise ValueError(_("{} are prohibited from use in ad hoc commands.").format(", ".join(removed_vars)))
            extra_vars.update(ad_hoc_command.extra_vars_dict)

        # Positional host pattern comes last.
        if ad_hoc_command.limit:
            args.append(ad_hoc_command.limit)
        else:
            args.append('all')

        return args

    def build_extra_vars_file(self, ad_hoc_command, private_data_dir):
        """Write awx meta vars plus user extra_vars (after validating that no
        prohibited ansible_* vars are present) to the extra vars file."""
        extra_vars = ad_hoc_command.awx_meta_vars()

        if ad_hoc_command.extra_vars_dict:
            redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
            if removed_vars:
                raise ValueError(_("{} are prohibited from use in ad hoc commands.").format(", ".join(removed_vars)))
            extra_vars.update(ad_hoc_command.extra_vars_dict)
        self._write_extra_vars_file(private_data_dir, extra_vars)

    def build_module_name(self, ad_hoc_command):
        return ad_hoc_command.module_name

    def build_module_args(self, ad_hoc_command):
        # Reject Jinja in module args unless the installation explicitly allows it.
        module_args = ad_hoc_command.module_args
        if settings.ALLOW_JINJA_IN_EXTRA_VARS != 'always':
            module_args = sanitize_jinja(module_args)
        return module_args

    def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
        # Ad hoc commands run no playbook.
        return None

    def get_password_prompts(self, passwords={}):
        """Map interactive prompt regexes to the password fields that answer them."""
        d = super(RunAdHocCommand, self).get_password_prompts()
        d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
        d[r'Bad passphrase, try again for .*:\s*?$'] = ''
        for method in PRIVILEGE_ESCALATION_METHODS:
            d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
            d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
        d[r'BECOME password.*:\s*?$'] = 'become_password'
        d[r'SSH password:\s*?$'] = 'ssh_password'
        d[r'Password:\s*?$'] = 'ssh_password'
        return d
@task(queue=get_local_queuename)
class RunSystemJob(BaseTask):
    """Run an awx-manage system job (e.g. cleanup_jobs, cleanup_activitystream)."""

    model = SystemJob
    event_model = SystemJobEvent
    event_data_key = 'system_job_id'

    def build_execution_environment_params(self, system_job, private_data_dir):
        # System jobs are management commands, not containerized workloads.
        return {}

    def build_args(self, system_job, private_data_dir, passwords):
        """
        Build the awx-manage command line for this system job.

        extra_vars must be blank or valid JSON; recognized keys are 'days'
        and 'dry_run' for the cleanup job types. Parse errors are logged and
        the bare command is returned rather than failing the job.
        """
        args = ['awx-manage', system_job.job_type]
        try:
            # System Job extra_vars can be blank, must be JSON if not blank
            if system_job.extra_vars == '':
                json_vars = {}
            else:
                json_vars = json.loads(system_job.extra_vars)
            if system_job.job_type in ('cleanup_jobs', 'cleanup_activitystream'):
                if 'days' in json_vars:
                    args.extend(['--days', str(json_vars.get('days', 60))])
                if 'dry_run' in json_vars and json_vars['dry_run']:
                    args.extend(['--dry-run'])
            if system_job.job_type == 'cleanup_jobs':
                args.extend(
                    ['--jobs', '--project-updates', '--inventory-updates', '--management-jobs', '--ad-hoc-commands', '--workflow-jobs', '--notifications']
                )
        except Exception:
            logger.exception("{} Failed to parse system job".format(system_job.log_format))
        return args

    def write_args_file(self, private_data_dir, args):
        """
        Persist the args as a space-joined file in the private data dir.

        Uses a context manager so the file descriptor is closed even if the
        write raises (the previous version leaked it on error); the file is
        then made read-only for the owner.
        """
        path = os.path.join(private_data_dir, 'args')
        handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
        with os.fdopen(handle, 'w') as f:
            f.write(' '.join(args))
        os.chmod(path, stat.S_IRUSR)
        return path

    def build_env(self, instance, private_data_dir, private_data_files=None):
        base_env = super(RunSystemJob, self).build_env(instance, private_data_dir, private_data_files=private_data_files)
        # TODO: this is able to run by turning off isolation
        # the goal is to run it a container instead
        env = dict(os.environ.items())
        env.update(base_env)
        return env

    def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
        # System jobs run no playbook.
        return None

    def build_inventory(self, instance, private_data_dir):
        # System jobs need no inventory.
        return None
def _reconstruct_relationships(copy_mapping):
    """Re-wire preserved relationships on deep-copied objects.

    For every (original, clone) pair, each field named in the model's
    FIELDS_TO_PRESERVE_AT_COPY is carried over: FK targets are remapped
    through copy_mapping when the target itself was copied, and M2M members
    are added likewise. Each clone is saved afterwards.
    """
    for source_obj, clone in copy_mapping.items():
        cls = type(source_obj)
        for attr in getattr(cls, 'FIELDS_TO_PRESERVE_AT_COPY', []):
            field = cls._meta.get_field(attr)
            if isinstance(field, ForeignKey):
                # keep an FK already set on the clone
                if getattr(clone, attr, None):
                    continue
                related = getattr(source_obj, attr)
                # point at the copied counterpart when one exists
                setattr(clone, attr, copy_mapping.get(related, related))
            elif field.many_to_many:
                for related in getattr(source_obj, attr).all():
                    logger.debug('Deep copy: Adding {} to {}({}).{} relationship'.format(related, clone, cls, attr))
                    getattr(clone, attr).add(copy_mapping.get(related, related))
        clone.save()
@task(queue=get_local_queuename)
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, uuid, permission_check_func=None):
    """
    Background half of a deep copy started by CopyAPIView.

    The list of sub-objects to copy is stashed in the cache under ``uuid``;
    each one is copied via CopyAPIView.copy_model_obj, relationships are then
    reconstructed from the old->new mapping, and an optional permission check
    callback (given as a (module, class, attr) path) is applied to the copies.
    """
    sub_obj_list = cache.get(uuid)
    if sub_obj_list is None:
        # cache entry expired or was never written — nothing we can do
        logger.error('Deep copy {} from {} to {} failed unexpectedly.'.format(model_name, obj_pk, new_obj_pk))
        return

    logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
    # imported here, not at module level (CopyAPIView pulls in the API layer)
    from awx.api.generics import CopyAPIView
    from awx.main.signals import disable_activity_stream

    model = getattr(importlib.import_module(model_module), model_name, None)
    if model is None:
        return
    try:
        obj = model.objects.get(pk=obj_pk)
        new_obj = model.objects.get(pk=new_obj_pk)
        creater = User.objects.get(pk=user_pk)
    except ObjectDoesNotExist:
        logger.warning("Object or user no longer exists.")
        return
    # Copy everything in one transaction, with activity stream and inventory
    # computed-field updates suppressed until the copy is complete.
    with transaction.atomic(), ignore_inventory_computed_fields(), disable_activity_stream():
        copy_mapping = {}
        for sub_obj_setup in sub_obj_list:
            # each entry is (module_name, class_name, pk)
            sub_model = getattr(importlib.import_module(sub_obj_setup[0]), sub_obj_setup[1], None)
            if sub_model is None:
                continue
            try:
                sub_obj = sub_model.objects.get(pk=sub_obj_setup[2])
            except ObjectDoesNotExist:
                continue
            copy_mapping.update(CopyAPIView.copy_model_obj(obj, new_obj, sub_model, sub_obj, creater))
        _reconstruct_relationships(copy_mapping)
        if permission_check_func:
            # resolve the dotted (module, class, attr) path into a callable
            permission_check_func = getattr(getattr(importlib.import_module(permission_check_func[0]), permission_check_func[1]), permission_check_func[2])
            permission_check_func(creater, copy_mapping.values())
    if isinstance(new_obj, Inventory):
        # recompute inventory aggregates now that the suppression is lifted
        update_inventory_computed_fields.delay(new_obj.id)
class TransmitterThread(threading.Thread):
    """Thread wrapper that captures any Exception raised by its target.

    After join(), ``self.exc`` is None on success, or the ``sys.exc_info()``
    triple of whatever the target raised, so the spawning thread can
    re-raise it with the original traceback.
    """

    def run(self):
        self.exc = None
        try:
            threading.Thread.run(self)
        except Exception:
            self.exc = sys.exc_info()
class AWXReceptorJob:
    """
    Submit a task's ansible-runner payload to Receptor as a work unit and
    shepherd it to completion.

    The payload is streamed over one half of a socketpair while Receptor
    reads the other half; results are processed concurrently with a
    cancellation watcher. Container-group tasks run as Kubernetes work
    units (pod spec / kubeconfig built here), everything else as 'local'.
    """

    def __init__(self, task, runner_params=None):
        self.task = task
        self.runner_params = runner_params
        # set once Receptor acknowledges the submitted work unit
        self.unit_id = None

        if self.task and not self.task.instance.is_container_group_task:
            execution_environment_params = self.task.build_execution_environment_params(self.task.instance, runner_params['private_data_dir'])
            self.runner_params['settings'].update(execution_environment_params)

    def run(self):
        # We establish a connection to the Receptor socket
        receptor_ctl = ReceptorControl('/var/run/receptor/receptor.sock')

        try:
            return self._run_internal(receptor_ctl)
        finally:
            # Make sure to always release the work unit if we established it
            if self.unit_id is not None and settings.RECEPTOR_RELEASE_WORK:
                receptor_ctl.simple_command(f"work release {self.unit_id}")

    def _run_internal(self, receptor_ctl):
        # Create a socketpair. Where the left side will be used for writing our payload
        # (private data dir, kwargs). The right side will be passed to Receptor for
        # reading.
        sockin, sockout = socket.socketpair()

        transmitter_thread = TransmitterThread(target=self.transmit, args=[sockin])
        transmitter_thread.start()

        # submit our work, passing
        # in the right side of our socketpair for reading.
        result = receptor_ctl.submit_work(worktype=self.work_type, payload=sockout.makefile('rb'), params=self.receptor_params)
        self.unit_id = result['unitid']

        sockin.close()
        sockout.close()

        # Re-raise any failure captured by the transmitter thread with its
        # original traceback before waiting on results.
        if transmitter_thread.exc:
            raise transmitter_thread.exc[1].with_traceback(transmitter_thread.exc[2])
        transmitter_thread.join()

        resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, return_socket=True, return_sockfile=True)
        # Both "processor" and "cancel_watcher" are spawned in separate threads.
        # We wait for the first one to return. If cancel_watcher returns first,
        # we yank the socket out from underneath the processor, which will cause it
        # to exit. A reference to the processor_future is passed into the cancel_watcher_future,
        # Which exits if the job has finished normally. The context manager ensures we do not
        # leave any threads laying around.
        with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
            processor_future = executor.submit(self.processor, resultfile)
            cancel_watcher_future = executor.submit(self.cancel_watcher, processor_future)
            futures = [processor_future, cancel_watcher_future]
            first_future = concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_COMPLETED)

            res = list(first_future.done)[0].result()
            if res.status == 'canceled':
                receptor_ctl.simple_command(f"work cancel {self.unit_id}")
                # closing the result socket forces the processor thread out
                resultsock.shutdown(socket.SHUT_RDWR)
                resultfile.close()
            elif res.status == 'error':
                # TODO: There should be a more efficient way of getting this information
                receptor_work_list = receptor_ctl.simple_command("work list")
                detail = receptor_work_list[self.unit_id]['Detail']
                state_name = receptor_work_list[self.unit_id]['StateName']

                if 'exceeded quota' in detail:
                    # pod could not be scheduled; requeue the job instead of failing
                    logger.warn(detail)
                    log_name = self.task.instance.log_format
                    logger.warn(f"Could not launch pod for {log_name}. Exceeded quota.")
                    self.task.update_model(self.task.instance.pk, status='pending')
                    return

                # If ansible-runner ran, but an error occured at runtime, the traceback information
                # is saved via the status_handler passed in to the processor.
                if state_name == 'Succeeded':
                    return res

                raise RuntimeError(detail)

        return res

    # Spawned in a thread so Receptor can start reading before we finish writing, we
    # write our payload to the left side of our socketpair.
    @cleanup_new_process
    def transmit(self, _socket):
        if not settings.IS_K8S and self.work_type == 'local':
            self.runner_params['only_transmit_kwargs'] = True

        try:
            ansible_runner.interface.run(streamer='transmit', _output=_socket.makefile('wb'), **self.runner_params)
        finally:
            # Socket must be shutdown here, or the reader will hang forever.
            _socket.shutdown(socket.SHUT_WR)

    @cleanup_new_process
    def processor(self, resultfile):
        # Feed Receptor's result stream back through ansible-runner so the
        # task's normal event/status callbacks fire.
        return ansible_runner.interface.run(
            streamer='process',
            quiet=True,
            _input=resultfile,
            event_handler=self.task.event_handler,
            finished_callback=self.task.finished_callback,
            status_handler=self.task.status_handler,
            **self.runner_params,
        )

    @property
    def receptor_params(self):
        """Receptor work-submission parameters for this task's work type."""
        if self.task.instance.is_container_group_task:
            spec_yaml = yaml.dump(self.pod_definition, explicit_start=True)

            receptor_params = {
                "secret_kube_pod": spec_yaml,
                "pod_pending_timeout": getattr(settings, 'AWX_CONTAINER_GROUP_POD_PENDING_TIMEOUT', "5m"),
            }

            if self.credential:
                kubeconfig_yaml = yaml.dump(self.kube_config, explicit_start=True)
                receptor_params["secret_kube_config"] = kubeconfig_yaml
        else:
            private_data_dir = self.runner_params['private_data_dir']
            receptor_params = {"params": f"--private-data-dir={private_data_dir}"}

        return receptor_params

    @property
    def work_type(self):
        """'local' for ordinary tasks; a kubernetes work type (runtime vs
        in-cluster auth, depending on whether a credential is set) for
        container-group tasks."""
        if self.task.instance.is_container_group_task:
            if self.credential:
                work_type = 'kubernetes-runtime-auth'
            else:
                work_type = 'kubernetes-incluster-auth'
        else:
            work_type = 'local'

        return work_type

    @cleanup_new_process
    def cancel_watcher(self, processor_future):
        """Poll once a second for either processor completion or a cancel
        request; also record the Receptor unit id on the job_env once known."""
        while True:
            if processor_future.done():
                return processor_future.result()

            if self.task.cancel_callback():
                result = namedtuple('result', ['status', 'rc'])
                return result('canceled', 1)

            if hasattr(self, 'unit_id') and 'RECEPTOR_UNIT_ID' not in self.task.instance.job_env:
                self.task.instance.job_env['RECEPTOR_UNIT_ID'] = self.unit_id
                self.task.update_model(self.task.instance.pk, job_env=self.task.instance.job_env)

            time.sleep(1)

    @property
    def pod_definition(self):
        """Kubernetes pod spec: default spec merged with any instance-group
        override, pointed at the task's execution environment image."""
        ee = self.task.instance.execution_environment

        default_pod_spec = get_default_pod_spec()

        pod_spec_override = {}
        if self.task and self.task.instance.instance_group.pod_spec_override:
            pod_spec_override = parse_yaml_or_json(self.task.instance.instance_group.pod_spec_override)
        pod_spec = {**default_pod_spec, **pod_spec_override}

        pod_spec['spec']['containers'][0]['image'] = ee.image
        pod_spec['spec']['containers'][0]['args'] = ['ansible-runner', 'worker', '--private-data-dir=/runner']

        # Enforce EE Pull Policy
        pull_options = {"always": "Always", "missing": "IfNotPresent", "never": "Never"}
        if self.task and self.task.instance.execution_environment:
            if self.task.instance.execution_environment.pull:
                pod_spec['spec']['containers'][0]['imagePullPolicy'] = pull_options[self.task.instance.execution_environment.pull]

        if self.task and self.task.instance.is_container_group_task:
            # If EE credential is passed, create an imagePullSecret
            if self.task.instance.execution_environment and self.task.instance.execution_environment.credential:
                # Create pull secret in k8s cluster based on ee cred
                from awx.main.scheduler.kubernetes import PodManager  # prevent circular import

                pm = PodManager(self.task.instance)
                secret_name = pm.create_secret(job=self.task.instance)

                # Inject secret name into podspec
                pod_spec['spec']['imagePullSecrets'] = [{"name": secret_name}]

        if self.task:
            pod_spec['metadata'] = deepmerge(
                pod_spec.get('metadata', {}),
                dict(name=self.pod_name, labels={'ansible-awx': settings.INSTALL_UUID, 'ansible-awx-job-id': str(self.task.instance.id)}),
            )

        return pod_spec

    @property
    def pod_name(self):
        return f"automation-job-{self.task.instance.id}"

    @property
    def credential(self):
        # instance group credential used to reach the container-group cluster
        return self.task.instance.instance_group.credential

    @property
    def namespace(self):
        return self.pod_definition['metadata']['namespace']

    @property
    def kube_config(self):
        """Minimal kubeconfig dict built from the instance-group credential."""
        host_input = self.credential.get_input('host')
        config = {
            "apiVersion": "v1",
            "kind": "Config",
            "preferences": {},
            "clusters": [{"name": host_input, "cluster": {"server": host_input}}],
            "users": [{"name": host_input, "user": {"token": self.credential.get_input('bearer_token')}}],
            "contexts": [{"name": host_input, "context": {"cluster": host_input, "user": host_input, "namespace": self.namespace}}],
            "current-context": host_input,
        }

        if self.credential.get_input('verify_ssl') and 'ssl_ca_cert' in self.credential.inputs:
            config["clusters"][0]["cluster"]["certificate-authority-data"] = b64encode(
                self.credential.get_input('ssl_ca_cert').encode()  # encode to bytes
            ).decode()  # decode the base64 data into a str
        else:
            config["clusters"][0]["cluster"]["insecure-skip-tls-verify"] = True

        return config
|
compute.py | # Copyright (c) 2015 SONATA-NFV and Paderborn University
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, Paderborn University
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
from mininet.link import Link
from emuvim.api.openstack.resources.instance_flavor import InstanceFlavor
from emuvim.api.openstack.resources.net import Net
from emuvim.api.openstack.resources.port import Port
from emuvim.api.openstack.resources.port_pair import PortPair
from emuvim.api.openstack.resources.port_pair_group import PortPairGroup
from emuvim.api.openstack.resources.flow_classifier import FlowClassifier
from emuvim.api.openstack.resources.port_chain import PortChain
from emuvim.api.openstack.resources.server import Server
from emuvim.api.openstack.resources.image import Image
from docker import DockerClient
import logging
import threading
import uuid
import time
import emuvim.api.openstack.ip_handler as IP
import hashlib
# Module-wide logger for the emulated OpenStack compute API.
LOG = logging.getLogger("api.openstack.compute")
class HeatApiStackInvalidException(Exception):
    """
    Exception thrown when a submitted stack is invalid.

    The offending message is kept on ``value`` for inspection by handlers.
    """

    def __init__(self, value):
        self.value = value

    def __str__(self):
        # show the message in repr form, matching the historical output
        return "{!r}".format(self.value)
class OpenstackCompute(object):
"""
This class is a datacenter specific compute object that tracks all containers that are running in a datacenter,
as well as networks and configured ports.
It has some stack dependet logic and can check if a received stack is valid.
It also handles start and stop of containers.
"""
def __init__(self):
    """Create an empty compute tracker; a datacenter is attached later via `self.dc`."""
    self.dc = None  # datacenter this compute node belongs to (set externally)
    self.stacks = dict()  # stack.id -> stack (see add_stack)
    self.computeUnits = dict()  # server.id -> server
    self.routers = dict()
    self.flavors = dict()  # flavor name -> InstanceFlavor (see add_flavor)
    self._images = dict()  # docker tag -> Image; refreshed by the `images` property
    self.nets = dict()  # net.id -> network
    self.ports = dict()  # port.id -> port
    self.port_pairs = dict()
    self.port_pair_groups = dict()
    self.flow_classifiers = dict()
    self.port_chains = dict()
    self.compute_nets = dict()
    # client for the local Docker daemon; used to discover available images
    self.dcli = DockerClient(base_url='unix://var/run/docker.sock')
@property
def images(self):
    """
    Refresh and return the known images.

    Queries the docker daemon for all tagged images and registers any tag
    not seen before, then returns the (cumulative) image dictionary.

    :return: Returns the new image dictionary.
    :rtype: ``dict``
    """
    for docker_image in self.dcli.images.list():
        for tag in docker_image.tags:
            if tag not in self._images:
                self._images[tag] = Image(tag)
    return self._images
def add_stack(self, stack):
    """
    Validate and register a stack with this compute node.

    :param stack: Stack dictionary.
    :type stack: :class:`heat.resources.stack`
    :raises HeatApiStackInvalidException: if the stack fails validation;
        any partially registered components are cleaned up first.
    """
    if self.check_stack(stack):
        self.stacks[stack.id] = stack
        return
    self.clean_broken_stack(stack)
    raise HeatApiStackInvalidException(
        "Stack did not pass validity checks")
def clean_broken_stack(self, stack):
    """Remove any components of a rejected stack that were already
    registered in the node-wide dictionaries."""
    for port in stack.ports.values():
        self.ports.pop(port.id, None)
    for server in stack.servers.values():
        self.computeUnits.pop(server.id, None)
    for net in stack.nets.values():
        self.nets.pop(net.id, None)
def check_stack(self, stack):
    """
    Checks all dependencies of all servers, ports and routers and their most important parameters.

    :param stack: A reference of the stack that should be checked.
    :type stack: :class:`heat.resources.stack`
    :return: * *True*: If the stack is completely fine.
     * *False*: Else
    :rtype: ``bool``
    """
    # Collect ALL problems (and log each) rather than failing fast.
    everything_ok = True
    for server in stack.servers.values():
        # every port a server references must exist in the stack
        for port_name in server.port_names:
            if port_name not in stack.ports:
                LOG.warning("Server %s of stack %s has a port named %s that is not known." %
                            (server.name, stack.stack_name, port_name))
                everything_ok = False
        if server.image is None:
            LOG.warning("Server %s holds no image." % (server.name))
            everything_ok = False
        if server.command is None:
            LOG.warning("Server %s holds no command." % (server.name))
            everything_ok = False
    for port in stack.ports.values():
        # each port must point at a known network and be fully configured
        if port.net_name not in stack.nets:
            LOG.warning("Port %s of stack %s has a network named %s that is not known." %
                        (port.name, stack.stack_name, port.net_name))
            everything_ok = False
        if port.intf_name is None:
            LOG.warning("Port %s has no interface name." % (port.name))
            everything_ok = False
        if port.ip_address is None:
            LOG.warning("Port %s has no IP address." % (port.name))
            everything_ok = False
    for router in stack.routers.values():
        # each router subnet must match some network's subnet name
        for subnet_name in router.subnet_names:
            found = False
            for net in stack.nets.values():
                if net.subnet_name == subnet_name:
                    found = True
                    break
            if not found:
                LOG.warning("Router %s of stack %s has a network named %s that is not known." %
                            (router.name, stack.stack_name, subnet_name))
                everything_ok = False
    return everything_ok
def add_flavor(self, name, cpu, memory,
               memory_unit, storage, storage_unit):
    """
    Create a new instance flavor and register it under its name.

    :param name: Specifies the name of the flavor.
    :type name: ``str``
    :param cpu:
    :type cpu: ``str``
    :param memory:
    :type memory: ``str``
    :param memory_unit:
    :type memory_unit: ``str``
    :param storage:
    :type storage: ``str``
    :param storage_unit:
    :type storage_unit: ``str``
    :return: the created :class:`InstanceFlavor`
    """
    new_flavor = InstanceFlavor(
        name, cpu, memory, memory_unit, storage, storage_unit)
    self.flavors[new_flavor.name] = new_flavor
    return new_flavor
def deploy_stack(self, stackid):
    """
    Deploys the stack and starts the emulation.

    :param stackid: An UUID str of the stack
    :type stackid: ``str``
    :return: * *False*: If the Datacenter is None
     * *True*: Else
    :rtype: ``bool``
    """
    if self.dc is None:
        return False

    target_stack = self.stacks[stackid]
    self.update_compute_dicts(target_stack)

    # Start every server of the stack
    for server in target_stack.servers.values():
        self._start_compute(server)
    return True
def delete_stack(self, stack_id):
    """
    Delete a stack and all its components.

    :param stack_id: An UUID str of the stack
    :type stack_id: ``str``
    :return: * *False*: If the Datacenter is None
     * *True*: Else
    :rtype: ``bool``
    """
    if self.dc is None:
        return False

    stack = self.stacks[stack_id]
    # Stop all servers and their links of this stack
    for server in stack.servers.values():
        self.stop_compute(server)
        self.delete_server(server)
    # Snapshot the net ids before deleting: delete_network mutates the
    # underlying net collection, so we must not iterate it directly.
    # (Replaces the previous destructive while/pop over a copy and the
    # commented-out direct loop left behind by an earlier refactor.)
    for net_id in [net.id for net in stack.nets.values()]:
        self.delete_network(net_id)
    for port in stack.ports.values():
        self.delete_port(port.id)

    del self.stacks[stack_id]
    return True
def update_stack(self, old_stack_id, new_stack):
    """
    Determines differences within the old and the new stack and deletes, create or changes only parts that
    differ between the two stacks.

    :param old_stack_id: The ID of the old stack.
    :type old_stack_id: ``str``
    :param new_stack: A reference of the new stack.
    :type new_stack: :class:`heat.resources.stack`
    :return: * *True*: if the old stack could be updated to the new stack without any error.
     * *False*: else
    :rtype: ``bool``
    """
    LOG.debug("updating stack {} with new_stack {}".format(
        old_stack_id, new_stack))
    if old_stack_id not in self.stacks:
        return False
    old_stack = self.stacks[old_stack_id]

    # Update Stack IDs: components that exist in both stacks (matched by
    # name) keep their old UUIDs in the new stack.
    for server in old_stack.servers.values():
        if server.name in new_stack.servers:
            new_stack.servers[server.name].id = server.id
    for net in old_stack.nets.values():
        if net.name in new_stack.nets:
            new_stack.nets[net.name].id = net.id
            for subnet in new_stack.nets.values():
                if subnet.subnet_name == net.subnet_name:
                    subnet.subnet_id = net.subnet_id
                    break
    for port in old_stack.ports.values():
        if port.name in new_stack.ports:
            new_stack.ports[port.name].id = port.id
    for router in old_stack.routers.values():
        if router.name in new_stack.routers:
            new_stack.routers[router.name].id = router.id

    # Update the compute dicts to now contain the new_stack components
    self.update_compute_dicts(new_stack)

    self.update_ip_addresses(old_stack, new_stack)

    # Update all interface names - after each port has the correct UUID!!
    for port in new_stack.ports.values():
        port.create_intf_name()

    if not self.check_stack(new_stack):
        return False

    # Remove unnecessary networks
    for net in old_stack.nets.values():
        if net.name not in new_stack.nets:
            self.delete_network(net.id)

    # Remove all unnecessary servers
    for server in old_stack.servers.values():
        if server.name in new_stack.servers:
            if not server.compare_attributes(
                    new_stack.servers[server.name]):
                # server config changed -> restart it from scratch
                self.stop_compute(server)
            else:
                # Delete unused and changed links
                for port_name in server.port_names:
                    if port_name in old_stack.ports and port_name in new_stack.ports:
                        if not old_stack.ports.get(
                                port_name) == new_stack.ports.get(port_name):
                            # port changed: find the emulated link by
                            # interface name + IP, replace it in place
                            my_links = self.dc.net.links
                            for link in my_links:
                                if str(link.intf1) == old_stack.ports[port_name].intf_name and \
                                        str(link.intf1.ip) == \
                                        old_stack.ports[port_name].ip_address.split('/')[0]:
                                    self._remove_link(server.name, link)

                                    # Add changed link
                                    self._add_link(server.name,
                                                   new_stack.ports[port_name].ip_address,
                                                   new_stack.ports[port_name].intf_name,
                                                   new_stack.ports[port_name].net_name)
                                    break
                    else:
                        # port no longer present in both stacks: drop its link
                        my_links = self.dc.net.links
                        for link in my_links:
                            if str(link.intf1) == old_stack.ports[port_name].intf_name and \
                                    str(link.intf1.ip) == old_stack.ports[port_name].ip_address.split('/')[0]:
                                self._remove_link(server.name, link)
                                break

                # Create new links
                for port_name in new_stack.servers[server.name].port_names:
                    if port_name not in server.port_names:
                        self._add_link(server.name,
                                       new_stack.ports[port_name].ip_address,
                                       new_stack.ports[port_name].intf_name,
                                       new_stack.ports[port_name].net_name)
        else:
            # server removed in the new stack
            self.stop_compute(server)

    # Start all new servers
    for server in new_stack.servers.values():
        if server.name not in self.dc.containers:
            self._start_compute(server)
        else:
            # already running: re-attach the existing emulator container
            server.emulator_compute = self.dc.containers.get(server.name)

    del self.stacks[old_stack_id]
    self.stacks[new_stack.id] = new_stack
    return True
def update_ip_addresses(self, old_stack, new_stack):
"""
Updates the subnet and the port IP addresses - which should always be in this order!
:param old_stack: The currently running stack
:type old_stack: :class:`heat.resources.stack`
:param new_stack: The new created stack
:type new_stack: :class:`heat.resources.stack`
"""
self.update_subnet_cidr(old_stack, new_stack)
self.update_port_addresses(old_stack, new_stack)
def update_port_addresses(self, old_stack, new_stack):
"""
Updates the port IP addresses. First resets all issued addresses. Then get all IP addresses from the old
stack and sets them to the same ports in the new stack. Finally all new or changed instances will get new
IP addresses.
:param old_stack: The currently running stack
:type old_stack: :class:`heat.resources.stack`
:param new_stack: The new created stack
:type new_stack: :class:`heat.resources.stack`
"""
for net in new_stack.nets.values():
net.reset_issued_ip_addresses()
for old_port in old_stack.ports.values():
for port in new_stack.ports.values():
if port.compare_attributes(old_port):
for net in new_stack.nets.values():
if net.name == port.net_name:
if net.assign_ip_address(
old_port.ip_address, port.name):
port.ip_address = old_port.ip_address
port.mac_address = old_port.mac_address
else:
port.ip_address = net.get_new_ip_address(
port.name)
for port in new_stack.ports.values():
for net in new_stack.nets.values():
if port.net_name == net.name and not net.is_my_ip(
port.ip_address, port.name):
port.ip_address = net.get_new_ip_address(port.name)
def update_subnet_cidr(self, old_stack, new_stack):
"""
Updates the subnet IP addresses. If the new stack contains subnets from the old stack it will take those
IP addresses. Otherwise it will create new IP addresses for the subnet.
:param old_stack: The currently running stack
:type old_stack: :class:`heat.resources.stack`
:param new_stack: The new created stack
:type new_stack: :class:`heat.resources.stack`
"""
for old_subnet in old_stack.nets.values():
IP.free_cidr(old_subnet.get_cidr(), old_subnet.subnet_id)
for subnet in new_stack.nets.values():
subnet.clear_cidr()
for old_subnet in old_stack.nets.values():
if subnet.subnet_name == old_subnet.subnet_name:
if IP.assign_cidr(old_subnet.get_cidr(), subnet.subnet_id):
subnet.set_cidr(old_subnet.get_cidr())
for subnet in new_stack.nets.values():
if IP.is_cidr_issued(subnet.get_cidr()):
continue
cird = IP.get_new_cidr(subnet.subnet_id)
subnet.set_cidr(cird)
return
def update_compute_dicts(self, stack):
"""
Update and add all stack components tho the compute dictionaries.
:param stack: A stack reference, to get all required components.
:type stack: :class:`heat.resources.stack`
"""
for server in stack.servers.values():
self.computeUnits[server.id] = server
if isinstance(server.flavor, dict):
self.add_flavor(server.flavor['flavorName'],
server.flavor['vcpu'],
server.flavor['ram'], 'MB',
server.flavor['storage'], 'GB')
server.flavor = server.flavor['flavorName']
for router in stack.routers.values():
self.routers[router.id] = router
for net in stack.nets.values():
self.nets[net.id] = net
for port in stack.ports.values():
self.ports[port.id] = port
    def _start_compute(self, server):
        """
        Starts a new compute object (docker container) inside the emulator.
        Should only be called by stack modifications and not directly.

        :param server: Specifies the compute resource.
        :type server: :class:`heat.resources.server`
        """
        LOG.debug("Starting new compute resources %s" % server.name)
        network = list()
        network_dict = dict()
        # vim-mocker
        # NOTE(review): this early return stubs out container start-up, so
        # everything below is currently unreachable dead code — presumably a
        # deliberate mock for running without a real emulator backend;
        # confirm intent before removing.
        return
        # Build one network dict per resolvable port: interface name, IP,
        # and the owning network's name keyed by the interface name.
        for port_name in server.port_names:
            network_dict = dict()
            port = self.find_port_by_name_or_id(port_name)
            if port is not None:
                network_dict['id'] = port.intf_name
                network_dict['ip'] = port.ip_address
                network_dict[network_dict['id']] = self.find_network_by_name_or_id(
                    port.net_name).name
                network.append(network_dict)
        # default network dict
        if len(network) < 1:
            network_dict['id'] = server.name + "-eth0"
            network_dict[network_dict['id']] = network_dict['id']
            network.append(network_dict)
        self.compute_nets[server.name] = network
        LOG.debug("Network dict: {}".format(network))
        c = self.dc.startCompute(server.name, image=server.image, command=server.command,
                                 network=network, flavor_name=server.flavor,
                                 properties=server.properties)
        server.emulator_compute = c
        # Sync MAC addresses between the container interfaces and our ports:
        # push a known MAC onto the interface, or record the generated one.
        for intf in c.intfs.values():
            for port_name in server.port_names:
                port = self.find_port_by_name_or_id(port_name)
                if port is not None:
                    if intf.name == port.intf_name:
                        # wait up to one second for the intf to come up
                        self.timeout_sleep(intf.isUp, 1)
                        if port.mac_address is not None:
                            intf.setMAC(port.mac_address)
                        else:
                            port.mac_address = intf.MAC()
                        port.assigned_container = c
        # Start the real emulator command now as specified in the dockerfile
        config = c.dcinfo.get("Config", dict())
        env = config.get("Env", list())
        legacy_command_execution = False
        for env_var in env:
            var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
            if var == "SON_EMU_CMD" or var == "VIM_EMU_CMD":
                LOG.info("Executing script in '{}': {}={}"
                         .format(server.name, var, cmd))
                # execute command in new thread to ensure that GK is not
                # blocked by VNF
                t = threading.Thread(target=c.cmdPrint, args=(cmd,))
                t.daemon = True
                t.start()
                legacy_command_execution = True
                break  # only execute one command
        if not legacy_command_execution:
            c.start()
def stop_compute(self, server):
"""
Determines which links should be removed before removing the server itself.
:param server: The server that should be removed
:type server: ``heat.resources.server``
"""
LOG.debug("Stopping container %s with full name %s" %
(server.name, server.full_name))
link_names = list()
for port_name in server.port_names:
prt = self.find_port_by_name_or_id(port_name)
if prt is not None:
link_names.append(prt.intf_name)
my_links = self.dc.net.links
for link in my_links:
if str(link.intf1) in link_names:
# Remove all self created links that connect the server to the
# main switch
self._remove_link(server.name, link)
# Stop the server and the remaining connection to the datacenter switch
# self.dc.stopCompute(server.name)
# Only now delete all its ports and the server itself
for port_name in server.port_names:
self.delete_port(port_name)
self.delete_server(server)
def find_server_by_name_or_id(self, name_or_id):
"""
Tries to find the server by ID and if this does not succeed then tries to find it via name.
:param name_or_id: UUID or name of the server.
:type name_or_id: ``str``
:return: Returns the server reference if it was found or None
:rtype: :class:`heat.resources.server`
"""
if name_or_id in self.computeUnits:
return self.computeUnits[name_or_id]
if self._shorten_server_name(name_or_id) in self.computeUnits:
return self.computeUnits[name_or_id]
for server in self.computeUnits.values():
if (server.name == name_or_id or
server.template_name == name_or_id or
server.full_name == name_or_id):
return server
if (server.name == self._shorten_server_name(name_or_id) or
server.template_name == self._shorten_server_name(name_or_id) or
server.full_name == self._shorten_server_name(name_or_id)):
return server
return None
def create_server(self, name, stack_operation=False):
"""
Creates a server with the specified name. Raises an exception when a server with the given name already
exists!
:param name: Name of the new server.
:type name: ``str``
:param stack_operation: Allows the heat parser to create modules without adapting the current emulation.
:type stack_operation: ``bool``
:return: Returns the created server.
:rtype: :class:`heat.resources.server`
"""
if self.find_server_by_name_or_id(
name) is not None and not stack_operation:
raise Exception("Server with name %s already exists." % name)
safe_name = self._shorten_server_name(name)
server = Server(safe_name)
server.id = str(uuid.uuid4())
if not stack_operation:
self.computeUnits[server.id] = server
return server
def _shorten_server_name(self, name, char_limit=9):
"""
Docker does not like too long instance names.
This function provides a shorter name if needed
"""
if len(name) > char_limit:
# construct a short name
h = hashlib.sha224(name.encode()).hexdigest()
h = h[0:char_limit]
LOG.debug("Shortened server name '%s' to '%s'" % (name, h))
return name
def delete_server(self, server):
"""
Deletes the given server from the stack dictionary and the computeUnits dictionary.
:param server: Reference of the server that should be deleted.
:type server: :class:`heat.resources.server`
:return: * *False*: If the server name is not in the correct format ('datacentername_stackname_servername') \
or when no stack with the correct stackname was found.
* *True*: Else
:rtype: ``bool``
"""
if server is None:
return False
name_parts = server.name.split('_')
if len(name_parts) > 1:
for stack in self.stacks.values():
if stack.stack_name == name_parts[1]:
stack.servers.pop(server.id, None)
if self.computeUnits.pop(server.id, None) is None:
return False
return True
def find_network_by_name_or_id(self, name_or_id):
"""
Tries to find the network by ID and if this does not succeed then tries to find it via name.
:param name_or_id: UUID or name of the network.
:type name_or_id: ``str``
:return: Returns the network reference if it was found or None
:rtype: :class:`heat.resources.net`
"""
if name_or_id in self.nets:
return self.nets[name_or_id]
print("name_or_id: ", name_or_id)
for net in self.nets.values():
if net.name == name_or_id:
return net
LOG.warning("Could not find net '{}' in {} or {}"
.format(name_or_id,
self.nets.keys(),
[n.name for n in self.nets.values()]))
return None
def create_network(self, name, stack_operation=False):
"""
Creates a new network with the given name. Raises an exception when a network with the given name already
exists!
:param name: Name of the new network.
:type name: ``str``
:param stack_operation: Allows the heat parser to create modules without adapting the current emulation.
:type stack_operation: ``bool``
:return: :class:`heat.resources.net`
"""
LOG.debug("Creating network with name %s" % name)
if self.find_network_by_name_or_id(
name) is not None and not stack_operation:
LOG.warning(
"Creating network with name %s failed, as it already exists" % name)
raise Exception("Network with name %s already exists." % name)
network = Net(name)
network.id = str(uuid.uuid4())
if not stack_operation:
self.nets[network.id] = network
return network
def delete_network(self, name_or_id):
"""
Deletes the given network.
:param name_or_id: Name or UUID of the network.
:type name_or_id: ``str``
"""
net = self.find_network_by_name_or_id(name_or_id)
if net is None:
raise Exception(
"Network with name or id %s does not exists." % name_or_id)
for stack in self.stacks.values():
stack.nets.pop(net.name, None)
self.nets.pop(net.id, None)
def create_port(self, name, stack_operation=False):
"""
Creates a new port with the given name. Raises an exception when a port with the given name already
exists!
:param name: Name of the new port.
:type name: ``str``
:param stack_operation: Allows the heat parser to create modules without adapting the current emulation.
:type stack_operation: ``bool``
:return: Returns the created port.
:rtype: :class:`heat.resources.port`
"""
port = Port(name)
if not stack_operation:
self.ports[port.id] = port
port.create_intf_name()
return port
def find_port_by_name_or_id(self, name_or_id):
"""
Tries to find the port by ID and if this does not succeed then tries to find it via name.
:param name_or_id: UUID or name of the network.
:type name_or_id: ``str``
:return: Returns the port reference if it was found or None
:rtype: :class:`heat.resources.port`
"""
# find by id
if name_or_id in self.ports:
return self.ports[name_or_id]
# find by name
matching_ports = list(filter(
lambda port: port.name == name_or_id or port.template_name == name_or_id,
self.ports.values()
))
matching_ports_count = len(matching_ports)
if matching_ports_count == 1:
return matching_ports[0]
if matching_ports_count > 1:
raise RuntimeError("Ambiguous port name %s" % name_or_id)
return None
def delete_port(self, name_or_id):
"""
Deletes the given port. Raises an exception when the port was not found!
:param name_or_id: UUID or name of the port.
:type name_or_id: ``str``
"""
port = self.find_port_by_name_or_id(name_or_id)
if port is None:
LOG.warning(
"Port with name or id %s does not exist. Can't delete it." % name_or_id)
return
my_links = self.dc.net.links
for link in my_links:
if str(link.intf1) == port.intf_name:
self._remove_link(link.intf1.node.name, link)
break
self.ports.pop(port.id, None)
for stack in self.stacks.values():
stack.ports.pop(port.name, None)
def create_port_pair(self, name, stack_operation=False):
"""
Creates a new port pair with the given name. Raises an exception when a port pair with the given name already
exists!
:param name: Name of the new port pair.
:type name: ``str``
:param stack_operation: Allows the heat parser to create modules without adapting the current emulation.
:type stack_operation: ``bool``
:return: Returns the created port pair.
:rtype: :class:`openstack.resources.port_pair`
"""
port_pair = self.find_port_pair_by_name_or_id(name)
if port_pair is not None and not stack_operation:
logging.warning(
"Creating port pair with name %s failed, as it already exists" % name)
raise Exception("Port pair with name %s already exists." % name)
logging.debug("Creating port pair with name %s" % name)
port_pair = PortPair(name)
if not stack_operation:
self.port_pairs[port_pair.id] = port_pair
return port_pair
def find_port_pair_by_name_or_id(self, name_or_id):
"""
Tries to find the port pair by ID and if this does not succeed then tries to find it via name.
:param name_or_id: UUID or name of the port pair.
:type name_or_id: ``str``
:return: Returns the port pair reference if it was found or None
:rtype: :class:`openstack.resources.port_pair`
"""
if name_or_id in self.port_pairs:
return self.port_pairs[name_or_id]
for port_pair in self.port_pairs.values():
if port_pair.name == name_or_id:
return port_pair
return None
def delete_port_pair(self, name_or_id):
"""
Deletes the given port pair. Raises an exception when the port pair was not found!
:param name_or_id: UUID or name of the port pair.
:type name_or_id: ``str``
"""
port_pair = self.find_port_pair_by_name_or_id(name_or_id)
if port_pair is None:
raise Exception(
"Port pair with name or id %s does not exists." % name_or_id)
self.port_pairs.pop(port_pair.id, None)
def create_port_pair_group(self, name, stack_operation=False):
"""
Creates a new port pair group with the given name. Raises an exception when a port pair group
with the given name already exists!
:param name: Name of the new port pair group.
:type name: ``str``
:param stack_operation: Allows the heat parser to create modules without adapting the current emulation.
:type stack_operation: ``bool``
:return: Returns the created port pair group .
:rtype: :class:`openstack.resources.port_pair_group`
"""
port_pair_group = self.find_port_pair_group_by_name_or_id(name)
if port_pair_group is not None and not stack_operation:
logging.warning(
"Creating port pair group with name %s failed, as it already exists" % name)
raise Exception(
"Port pair group with name %s already exists." % name)
logging.debug("Creating port pair group with name %s" % name)
port_pair_group = PortPairGroup(name)
if not stack_operation:
self.port_pair_groups[port_pair_group.id] = port_pair_group
return port_pair_group
def find_port_pair_group_by_name_or_id(self, name_or_id):
"""
Tries to find the port pair group by ID and if this does not succeed then tries to find it via name.
:param name_or_id: UUID or name of the port pair group.
:type name_or_id: ``str``
:return: Returns the port pair group reference if it was found or None
:rtype: :class:`openstack.resources.port_pair_group`
"""
if name_or_id in self.port_pair_groups:
return self.port_pair_groups[name_or_id]
for port_pair_group in self.port_pair_groups.values():
if port_pair_group.name == name_or_id:
return port_pair_group
return None
def delete_port_pair_group(self, name_or_id):
"""
Deletes the given port pair group. Raises an exception when the port pair group was not found!
:param name_or_id: UUID or name of the port pair group.
:type name_or_id: ``str``
"""
port_pair_group = self.find_port_pair_group_by_name_or_id(name_or_id)
if port_pair_group is None:
raise Exception(
"Port pair with name or id %s does not exists." % name_or_id)
self.port_pair_groups.pop(port_pair_group.id, None)
def create_port_chain(self, name, stack_operation=False):
"""
Creates a new port chain with the given name. Raises an exception when a port chain with the given name already
exists!
:param name: Name of the new port chain
:type name: ``str``
:param stack_operation: Allows the heat parser to create modules without adapting the current emulation.
:type stack_operation: ``bool``
:return: Returns the created port chain.
:rtype: :class:`openstack.resources.port_chain.PortChain`
"""
port_chain = self.find_port_chain_by_name_or_id(name)
if port_chain is not None and not stack_operation:
logging.warning(
"Creating port chain with name %s failed, as it already exists" % name)
raise Exception("Port chain with name %s already exists." % name)
logging.debug("Creating port chain with name %s" % name)
port_chain = PortChain(name)
if not stack_operation:
self.port_chains[port_chain.id] = port_chain
return port_chain
def find_port_chain_by_name_or_id(self, name_or_id):
"""
Tries to find the port chain by ID and if this does not succeed then tries to find it via name.
:param name_or_id: UUID or name of the port chain.
:type name_or_id: ``str``
:return: Returns the port chain reference if it was found or None
:rtype: :class:`openstack.resources.port_chain.PortChain`
"""
if name_or_id in self.port_chains:
return self.port_chains[name_or_id]
for port_chain in self.port_chains.values():
if port_chain.name == name_or_id:
return port_chain
return None
def delete_port_chain(self, name_or_id):
"""
Deletes the given port chain. Raises an exception when the port chain was not found!
:param name_or_id: UUID or name of the port chain.
:type name_or_id: ``str``
"""
port_chain = self.find_port_chain_by_name_or_id(name_or_id)
port_chain.uninstall(self)
if port_chain is None:
raise Exception(
"Port chain with name or id %s does not exists." % name_or_id)
self.port_chains.pop(port_chain.id, None)
def create_flow_classifier(self, name, stack_operation=False):
"""
Creates a new flow classifier with the given name. Raises an exception when a flow classifier with the given name already
exists!
:param name: Name of the new flow classifier.
:type name: ``str``
:param stack_operation: Allows the heat parser to create modules without adapting the current emulation.
:type stack_operation: ``bool``
:return: Returns the created flow classifier.
:rtype: :class:`openstack.resources.flow_classifier`
"""
flow_classifier = self.find_flow_classifier_by_name_or_id(name)
if flow_classifier is not None and not stack_operation:
logging.warning(
"Creating flow classifier with name %s failed, as it already exists" % name)
raise Exception(
"Flow classifier with name %s already exists." % name)
logging.debug("Creating flow classifier with name %s" % name)
flow_classifier = FlowClassifier(name)
if not stack_operation:
self.flow_classifiers[flow_classifier.id] = flow_classifier
return flow_classifier
def find_flow_classifier_by_name_or_id(self, name_or_id):
"""
Tries to find the flow classifier by ID and if this does not succeed then tries to find it via name.
:param name_or_id: UUID or name of the flow classifier.
:type name_or_id: ``str``
:return: Returns the flow classifier reference if it was found or None
:rtype: :class:`openstack.resources.flow_classifier`
"""
if name_or_id in self.flow_classifiers:
return self.flow_classifiers[name_or_id]
for flow_classifier in self.flow_classifiers.values():
if flow_classifier.name == name_or_id:
return flow_classifier
return None
def delete_flow_classifier(self, name_or_id):
"""
Deletes the given flow classifier. Raises an exception when the flow classifier was not found!
:param name_or_id: UUID or name of the flow classifier.
:type name_or_id: ``str``
"""
flow_classifier = self.find_flow_classifier_by_name_or_id(name_or_id)
if flow_classifier is None:
raise Exception(
"Flow classifier with name or id %s does not exists." % name_or_id)
self.flow_classifiers.pop(flow_classifier.id, None)
    def _add_link(self, node_name, ip_address, link_name, net_name):
        """
        Adds a new link between datacenter switch and the node with the given name.

        :param node_name: Name of the required node.
        :type node_name: ``str``
        :param ip_address: IP-Address of the node.
        :type ip_address: ``str``
        :param link_name: Link name.
        :type link_name: ``str``
        :param net_name: Network name.
        :type net_name: ``str``
        """
        node = self.dc.net.get(node_name)
        # 'params1' configures the node-side interface; the extra
        # {link_name: net_name} entry mirrors the network-dict layout used
        # elsewhere in this class (see _start_compute).
        params = {'params1': {'ip': ip_address,
                              'id': link_name,
                              link_name: net_name},
                  'intfName1': link_name,
                  'cls': Link}
        link = self.dc.net.addLink(node, self.dc.switch, **params)
        # Give the new interface up to one second to come up before returning.
        OpenstackCompute.timeout_sleep(link.intf1.isUp, 1)
def _remove_link(self, server_name, link):
"""
Removes a link between server and datacenter switch.
:param server_name: Specifies the server where the link starts.
:type server_name: ``str``
:param link: A reference of the link which should be removed.
:type link: :class:`mininet.link`
"""
self.dc.switch.detach(link.intf2)
del self.dc.switch.intfs[self.dc.switch.ports[link.intf2]]
del self.dc.switch.ports[link.intf2]
del self.dc.switch.nameToIntf[link.intf2.name]
self.dc.net.removeLink(link=link)
for intf_key in self.dc.net[server_name].intfs.keys():
if self.dc.net[server_name].intfs[intf_key].link == link:
self.dc.net[server_name].intfs[intf_key].delete()
del self.dc.net[server_name].intfs[intf_key]
@staticmethod
def timeout_sleep(function, max_sleep):
"""
This function will execute a function all 0.1 seconds until it successfully returns.
Will return after `max_sleep` seconds if not successful.
:param function: The function to execute. Should return true if done.
:type function: ``function``
:param max_sleep: Max seconds to sleep. 1 equals 1 second.
:type max_sleep: ``float``
"""
current_time = time.time()
stop_time = current_time + max_sleep
while not function() and current_time < stop_time:
current_time = time.time()
time.sleep(0.1)
|
MirrorDevices.py | # This Macro allows sending the state of the robots from this computer to remote computers
# More information about the RoboDK API here:
# https://robodk.com/doc/en/RoboDK-API.html
# For more information visit:
# https://robodk.com/doc/en/PythonAPI/robodk.html#robolink-py
from robodk.robolink import * # API to communicate with RoboDK
import threading
import queue
# Tablet IP:
# 192.168.1.147
# Phone IP
# 192.168.1.171
IP_SOURCE = 'localhost'
IP_REMOTE = ['192.168.1.147', '192.168.1.171']
PROJECT = 'Offline programming - 3 robots simultaneously.rdk'
def MirrorRobots(q, ip_source, ip_destination, project_path=None):
    """Continuously mirror robot joint states between two RoboDK instances.

    Connects to the RoboDK instances at *ip_source* and *ip_destination*,
    optionally opens *project_path* (relative to the destination's library
    folder) on the destination, then loops forever copying the source
    robots' joint values onto the destination robots.

    :param q: queue.Queue used to report back to the main thread.
    :param ip_source: Host running the source RoboDK instance.
    :param ip_destination: Host running the destination RoboDK instance.
    :param project_path: Optional .rdk project to open on the destination.
    """
    # Any interaction with RoboDK must be done through Robolink()
    # The source and destination devices need a Robolink() instance each
    #
    RDK_src = Robolink(ip_source)
    RDK_dest = Robolink(ip_destination)
    # check connection with source device
    if RDK_src.Connect() == 0:
        print("Cannot connect to source %s" % ip_source)
        return
    # check connection with target device
    if RDK_dest.Connect() == 0:
        print("Cannot connect to destination %s" % ip_destination)
        return
    # open the station (if specified)
    if project_path is not None:
        path_lib = RDK_dest.getParam('PATH_LIBRARY')
        RDK_dest.AddFile(path_lib + '/' + project_path)
    # get the robot item identifiers on source device:
    robots_src = RDK_src.ItemList(ITEM_TYPE_ROBOT, False)
    # get the robot items identifiers on destination device:
    robots_dest = RDK_dest.ItemList(ITEM_TYPE_ROBOT, False)
    # Loop forever to update the joints
    while True:
        robot_jointstate = RDK_src.Joints(robots_src)
        RDK_dest.setJoints(robots_dest, robot_jointstate)
    # NOTE(review): unreachable — the loop above never exits, so this
    # completion message is never posted to the queue.
    q.put('Done with %s' % ip_destination)
# --- Script entry: fan mirroring out to every remote device -------------
q = queue.Queue()
RDK = Robolink()
# Open project in source device
if PROJECT is not None:
    path_lib = RDK.getParam('PATH_LIBRARY')
    RDK.AddFile(path_lib + '/' + PROJECT)
# iterate through all devices and start a new thread
for i in range(len(IP_REMOTE)):
    ip_remote = IP_REMOTE[i]
    t = threading.Thread(target=MirrorRobots, args=(q, IP_SOURCE, ip_remote, PROJECT))
    # Daemon threads: do not block interpreter exit on the endless mirrors.
    t.daemon = True
    t.start()
# Block on the first message from any worker. Workers normally loop
# forever, so this effectively keeps the main thread alive (and would
# surface an early connection failure only if a worker posted one).
print(q.get())
|
asyn.py | import asyncio
import asyncio.events
import functools
import inspect
import os
import re
import sys
import threading
from contextlib import contextmanager
from glob import has_magic
from .callbacks import _DEFAULT_CALLBACK
from .exceptions import FSTimeoutError
from .spec import AbstractFileSystem
from .utils import PY36, is_exception, other_paths
private = re.compile("_[^_]")
async def _runner(event, coro, result, timeout=None):
timeout = timeout if timeout else None # convert 0 or 0.0 to None
if timeout is not None:
coro = asyncio.wait_for(coro, timeout=timeout)
try:
result[0] = await coro
except Exception as ex:
result[0] = ex
finally:
event.set()
# Pick the "get running loop" accessor compatible with this interpreter:
# the private asyncio.events._get_running_loop on Python 3.6, the public
# API on later versions.
if PY36:
    grl = asyncio.events._get_running_loop
else:
    grl = asyncio.events.get_running_loop
def sync(loop, func, *args, timeout=None, **kwargs):
    """
    Make loop run coroutine until it returns. Runs in other thread

    Submits ``func(*args, **kwargs)`` to *loop* (running in a different
    thread) and blocks the calling thread until it finishes or *timeout*
    seconds elapse. Exceptions raised by the coroutine are re-raised here;
    ``asyncio.TimeoutError`` is translated to ``FSTimeoutError``.
    """
    timeout = timeout if timeout else None  # convert 0 or 0.0 to None
    # NB: if the loop is not running *yet*, it is OK to submit work
    # and we will wait for it
    if loop is None or loop.is_closed():
        raise RuntimeError("Loop is not running")
    try:
        # Deadlock guard: calling sync() from inside the target loop would
        # block the very thread that has to run the coroutine.
        loop0 = grl()
        if loop0 is loop:
            raise NotImplementedError("Calling sync() from within a running loop")
    except RuntimeError:
        pass
    coro = func(*args, **kwargs)
    result = [None]
    event = threading.Event()
    asyncio.run_coroutine_threadsafe(_runner(event, coro, result, timeout), loop)
    while True:
        # this loops allows thread to get interrupted
        if event.wait(1):
            break
        if timeout is not None:
            # Count down in 1 s steps; _runner also enforces the timeout on
            # the loop side via asyncio.wait_for.
            timeout -= 1
            if timeout < 0:
                raise FSTimeoutError
    return_result = result[0]
    if isinstance(return_result, asyncio.TimeoutError):
        # suppress asyncio.TimeoutError, raise FSTimeoutError
        raise FSTimeoutError from return_result
    elif isinstance(return_result, BaseException):
        raise return_result
    else:
        return return_result
# Shared singletons for the blocking (non-async) API; single-element lists
# so they can be filled in lazily by get_loop().
iothread = [None]  # dedicated fsspec IO thread
loop = [None]  # global event loop for any non-async instance
lock = threading.Lock()  # for setting exactly one thread
def sync_wrapper(func, obj=None):
    """Given a function, make so can be called in async or blocking contexts

    Leave obj=None if defining within a class. Pass the instance if attaching
    as an attribute of the instance.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # With obj unset we are wrapping a method, so args[0] is `self`;
        # either way the owner supplies the .loop to run on.
        instance = obj or args[0]
        return sync(instance.loop, func, *args, **kwargs)

    return wrapper
@contextmanager
def _selector_policy():
original_policy = asyncio.get_event_loop_policy()
try:
if (
sys.version_info >= (3, 8)
and os.name == "nt"
and hasattr(asyncio, "WindowsSelectorEventLoopPolicy")
):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
yield
finally:
asyncio.set_event_loop_policy(original_policy)
def get_running_loop():
    """Return the loop running in the current thread, or raise RuntimeError."""
    getter = getattr(asyncio, "get_running_loop", None)
    if getter is not None:
        return getter()
    # Fallback for Python < 3.7, which lacks the public accessor.
    running = asyncio._get_running_loop()
    if running is None:
        raise RuntimeError("no running event loop")
    return running
def get_loop():
    """Create or return the default fsspec IO loop

    The loop will be running on a separate thread.
    """
    if loop[0] is None:
        # Double-checked locking: cheap unlocked probe first, then re-check
        # under the lock before actually creating the loop.
        with lock:
            # repeat the check just in case the loop got filled between the
            # previous two calls from another thread
            if loop[0] is None:
                with _selector_policy():
                    loop[0] = asyncio.new_event_loop()
                th = threading.Thread(target=loop[0].run_forever, name="fsspecIO")
                # Daemon thread: must not keep the interpreter alive at exit.
                th.daemon = True
                th.start()
                iothread[0] = th
    return loop[0]
@contextmanager
def fsspec_loop():
    """Temporarily switch the current event loop to the fsspec's
    own loop, and then revert it back after the context gets
    terminated.
    """
    try:
        original_loop = get_running_loop()
    except RuntimeError:
        # No loop was running in this thread; restore to "none" on exit.
        original_loop = None
    fsspec_loop = get_loop()
    try:
        # NOTE: _set_running_loop is a private asyncio API; it marks the
        # fsspec loop as "running" for this thread without starting it here.
        asyncio._set_running_loop(fsspec_loop)
        yield fsspec_loop
    finally:
        asyncio._set_running_loop(original_loop)
try:
import resource
except ImportError:
resource = None
ResourceError = OSError
else:
ResourceEror = resource.error
_DEFAULT_BATCH_SIZE = 128
_NOFILES_DEFAULT_BATCH_SIZE = 1280
def _get_batch_size(nofiles=False):
    """Pick a gather batch size.

    A config override wins; otherwise the default is derived from the
    RLIMIT_NOFILE soft limit (one eighth of it), with RLIM_INFINITY mapped
    to -1 (no throttling). *nofiles* selects the larger default used for
    operations that do not hold local file descriptors open.
    """
    from fsspec.config import conf

    key = "nofiles_gather_batch_size" if nofiles else "gather_batch_size"
    if key in conf:
        return conf[key]
    if nofiles:
        return _NOFILES_DEFAULT_BATCH_SIZE
    if resource is None:
        return _DEFAULT_BATCH_SIZE
    try:
        soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
    except (ImportError, ValueError, ResourceError):
        return _DEFAULT_BATCH_SIZE
    if soft_limit == resource.RLIM_INFINITY:
        return -1
    return soft_limit // 8
async def _run_coros_in_chunks(
    coros,
    batch_size=None,
    callback=_DEFAULT_CALLBACK,
    timeout=None,
    return_exceptions=False,
    nofiles=False,
):
    """Run the given coroutines in chunks.

    Parameters
    ----------
    coros: list of coroutines to run
    batch_size: int or None
        Number of coroutines to submit/wait on simultaneously.
        If -1, then it will not be any throttling. If
        None, it will be inferred from _get_batch_size()
    callback: fsspec.callbacks.Callback instance
        Gets a relative_update when each coroutine completes
    timeout: number or None
        If given, each coroutine times out after this time. Note that, since
        there are multiple batches, the total run time of this function will in
        general be longer
    return_exceptions: bool
        Same meaning as in asyncio.gather
    nofiles: bool
        If inferring the batch_size, does this operation involve local files?
        If yes, you normally expect smaller batches.
    """
    if batch_size is None:
        batch_size = _get_batch_size(nofiles=nofiles)
    if batch_size == -1:
        batch_size = len(coros)
    assert batch_size > 0
    results = []
    for start in range(0, len(coros), batch_size):
        chunk = [
            asyncio.Task(asyncio.wait_for(c, timeout=timeout))
            for c in coros[start : start + batch_size]
        ]
        for task in chunk:
            # BUGFIX: add_done_callback always invokes its callback with the
            # finished task as single argument; the previous zero-argument
            # lambda raised TypeError inside the event loop, so the progress
            # callback never fired.
            task.add_done_callback(lambda _: callback.call("relative_update", 1))
        results.extend(
            await asyncio.gather(*chunk, return_exceptions=return_exceptions),
        )
    return results
# these methods should be implemented as async by any async-able backend
async_methods = [
"_ls",
"_cat_file",
"_get_file",
"_put_file",
"_rm_file",
"_cp_file",
"_pipe_file",
"_expand_path",
"_info",
"_isfile",
"_isdir",
"_exists",
"_walk",
"_glob",
"_find",
"_du",
"_size",
"_mkdir",
"_makedirs",
]
class AsyncFileSystem(AbstractFileSystem):
"""Async file operations, default implementations
Passes bulk operations to asyncio.gather for concurrent operation.
Implementations that have concurrent batch operations and/or async methods
should inherit from this class instead of AbstractFileSystem. Docstrings are
copied from the un-underscored method in AbstractFileSystem, if not given.
"""
# note that methods do not have docstring here; they will be copied
# for _* methods and inferred for overridden methods.
async_impl = True
disable_throttling = False
    def __init__(self, *args, asynchronous=False, loop=None, batch_size=None, **kwargs):
        """Set up async state and delegate the rest to AbstractFileSystem.

        asynchronous=True means the caller runs inside a coroutine and will
        await the methods directly, so no sync loop is attached; otherwise
        the given loop (or the shared fsspec loop) is used for blocking calls.
        batch_size is the default chunking for the bulk operations.
        """
        self.asynchronous = asynchronous
        # Remember the creating process: the attached loop is not fork-safe
        # (enforced by the `loop` property).
        self._pid = os.getpid()
        if not asynchronous:
            self._loop = loop or get_loop()
        else:
            self._loop = None
        self.batch_size = batch_size
        super().__init__(*args, **kwargs)
    @property
    def loop(self):
        """The IO loop attached at construction; refuses use after fork."""
        if self._pid != os.getpid():
            raise RuntimeError("This class is not fork-safe")
        return self._loop
    async def _rm_file(self, path, **kwargs):
        """Delete a single file; concrete backends must override this."""
        raise NotImplementedError
async def _rm(self, path, recursive=False, batch_size=None, **kwargs):
# TODO: implement on_error
batch_size = batch_size or self.batch_size
path = await self._expand_path(path, recursive=recursive)
return await _run_coros_in_chunks(
[self._rm_file(p, **kwargs) for p in path],
batch_size=batch_size,
nofiles=True,
)
async def _copy(
self,
path1,
path2,
recursive=False,
on_error=None,
maxdepth=None,
batch_size=None,
**kwargs,
):
if on_error is None and recursive:
on_error = "ignore"
elif on_error is None:
on_error = "raise"
paths = await self._expand_path(path1, maxdepth=maxdepth, recursive=recursive)
path2 = other_paths(paths, path2)
batch_size = batch_size or self.batch_size
coros = [self._cp_file(p1, p2, **kwargs) for p1, p2 in zip(paths, path2)]
result = await _run_coros_in_chunks(
coros, batch_size=batch_size, return_exceptions=True, nofiles=True
)
for ex in filter(is_exception, result):
if on_error == "ignore" and isinstance(ex, FileNotFoundError):
continue
raise ex
async def _pipe(self, path, value=None, batch_size=None, **kwargs):
if isinstance(path, str):
path = {path: value}
batch_size = batch_size or self.batch_size
return await _run_coros_in_chunks(
[self._pipe_file(k, v, **kwargs) for k, v in path.items()],
batch_size=batch_size,
nofiles=True,
)
async def _process_limits(self, url, start, end):
"""Helper for "Range"-based _cat_file"""
size = None
suff = False
if start is not None and start < 0:
# if start is negative and end None, end is the "suffix length"
if end is None:
end = -start
start = ""
suff = True
else:
size = size or (await self._info(url))["size"]
start = size + start
elif start is None:
start = 0
if not suff:
if end is not None and end < 0:
if start is not None:
size = size or (await self._info(url))["size"]
end = size + end
elif end is None:
end = ""
if isinstance(end, int):
end -= 1 # bytes range is inclusive
return "bytes=%s-%s" % (start, end)
async def _cat_file(self, path, start=None, end=None, **kwargs):
raise NotImplementedError
async def _cat(
self, path, recursive=False, on_error="raise", batch_size=None, **kwargs
):
paths = await self._expand_path(path, recursive=recursive)
coros = [self._cat_file(path, **kwargs) for path in paths]
batch_size = batch_size or self.batch_size
out = await _run_coros_in_chunks(
coros, batch_size=batch_size, nofiles=True, return_exceptions=True
)
if on_error == "raise":
ex = next(filter(is_exception, out), False)
if ex:
raise ex
if (
len(paths) > 1
or isinstance(path, list)
or paths[0] != self._strip_protocol(path)
):
return {
k: v
for k, v in zip(paths, out)
if on_error != "omit" or not is_exception(v)
}
else:
return out[0]
async def _cat_ranges(
self, paths, starts, ends, max_gap=None, batch_size=None, **kwargs
):
# TODO: on_error
if max_gap is not None:
# to be implemented in utils
raise NotImplementedError
if not isinstance(paths, list):
raise TypeError
if not isinstance(starts, list):
starts = [starts] * len(paths)
if not isinstance(ends, list):
ends = [starts] * len(paths)
if len(starts) != len(paths) or len(ends) != len(paths):
raise ValueError
coros = [
self._cat_file(p, start=s, end=e, **kwargs)
for p, s, e in zip(paths, starts, ends)
]
batch_size = batch_size or self.batch_size
return await _run_coros_in_chunks(coros, batch_size=batch_size, nofiles=True)
async def _put(
self,
lpath,
rpath,
recursive=False,
callback=_DEFAULT_CALLBACK,
batch_size=None,
**kwargs,
):
"""Copy file(s) from local.
Copies a specific file or tree of files (if recursive=True). If rpath
ends with a "/", it will be assumed to be a directory, and target files
will go within.
The put_file method will be called concurrently on a batch of files. The
batch_size option can configure the amount of futures that can be executed
at the same time. If it is -1, then all the files will be uploaded concurrently.
The default can be set for this instance by passing "batch_size" in the
constructor, or for all instances by setting the "gather_batch_size" key
in ``fsspec.config.conf``, falling back to 1/8th of the system limit .
"""
from .implementations.local import LocalFileSystem, make_path_posix
rpath = self._strip_protocol(rpath)
if isinstance(lpath, str):
lpath = make_path_posix(lpath)
fs = LocalFileSystem()
lpaths = fs.expand_path(lpath, recursive=recursive)
rpaths = other_paths(
lpaths, rpath, exists=isinstance(rpath, str) and await self._isdir(rpath)
)
is_dir = {l: os.path.isdir(l) for l in lpaths}
rdirs = [r for l, r in zip(lpaths, rpaths) if is_dir[l]]
file_pairs = [(l, r) for l, r in zip(lpaths, rpaths) if not is_dir[l]]
await asyncio.gather(*[self._makedirs(d, exist_ok=True) for d in rdirs])
batch_size = batch_size or self.batch_size
coros = []
callback.call("set_size", len(file_pairs))
for lfile, rfile in file_pairs:
callback.branch(lfile, rfile, kwargs)
coros.append(self._put_file(lfile, rfile, **kwargs))
return await _run_coros_in_chunks(
coros, batch_size=batch_size, callback=callback
)
async def _get_file(self, rpath, lpath, **kwargs):
raise NotImplementedError
async def _get(
self, rpath, lpath, recursive=False, callback=_DEFAULT_CALLBACK, **kwargs
):
"""Copy file(s) to local.
Copies a specific file or tree of files (if recursive=True). If lpath
ends with a "/", it will be assumed to be a directory, and target files
will go within. Can submit a list of paths, which may be glob-patterns
and will be expanded.
The get_file method will be called concurrently on a batch of files. The
batch_size option can configure the amount of futures that can be executed
at the same time. If it is -1, then all the files will be uploaded concurrently.
The default can be set for this instance by passing "batch_size" in the
constructor, or for all instances by setting the "gather_batch_size" key
in ``fsspec.config.conf``, falling back to 1/8th of the system limit .
"""
from fsspec.implementations.local import make_path_posix
rpath = self._strip_protocol(rpath)
lpath = make_path_posix(lpath)
rpaths = await self._expand_path(rpath, recursive=recursive)
lpaths = other_paths(rpaths, lpath)
[os.makedirs(os.path.dirname(lp), exist_ok=True) for lp in lpaths]
batch_size = kwargs.pop("batch_size", self.batch_size)
coros = []
callback.lazy_call("set_size", len, lpaths)
for lpath, rpath in zip(lpaths, rpaths):
callback.branch(rpath, lpath, kwargs)
coros.append(self._get_file(rpath, lpath, **kwargs))
return await _run_coros_in_chunks(
coros, batch_size=batch_size, callback=callback
)
async def _isfile(self, path):
try:
return (await self._info(path))["type"] == "file"
except: # noqa: E722
return False
async def _isdir(self, path):
try:
return (await self._info(path))["type"] == "directory"
except IOError:
return False
async def _size(self, path):
return (await self._info(path)).get("size", None)
async def _sizes(self, paths, batch_size=None):
batch_size = batch_size or self.batch_size
return await _run_coros_in_chunks(
[self._size(p) for p in paths], batch_size=batch_size
)
async def _exists(self, path):
try:
await self._info(path)
return True
except FileNotFoundError:
return False
async def _info(self, path, **kwargs):
raise NotImplementedError
async def _ls(self, path, **kwargs):
raise NotImplementedError
async def _walk(self, path, maxdepth=None, **kwargs):
path = self._strip_protocol(path)
full_dirs = {}
dirs = {}
files = {}
detail = kwargs.pop("detail", False)
try:
listing = await self._ls(path, detail=True, **kwargs)
except (FileNotFoundError, IOError):
if detail:
yield path, {}, {}
else:
yield path, [], []
return
for info in listing:
# each info name must be at least [path]/part , but here
# we check also for names like [path]/part/
pathname = info["name"].rstrip("/")
name = pathname.rsplit("/", 1)[-1]
if info["type"] == "directory" and pathname != path:
# do not include "self" path
full_dirs[pathname] = info
dirs[name] = info
elif pathname == path:
# file-like with same name as give path
files[""] = info
else:
files[name] = info
if detail:
yield path, dirs, files
else:
yield path, list(dirs), list(files)
if maxdepth is not None:
maxdepth -= 1
if maxdepth < 1:
return
for d in full_dirs:
async for _ in self._walk(d, maxdepth=maxdepth, detail=detail, **kwargs):
yield _
async def _glob(self, path, **kwargs):
import re
ends = path.endswith("/")
path = self._strip_protocol(path)
indstar = path.find("*") if path.find("*") >= 0 else len(path)
indques = path.find("?") if path.find("?") >= 0 else len(path)
indbrace = path.find("[") if path.find("[") >= 0 else len(path)
ind = min(indstar, indques, indbrace)
detail = kwargs.pop("detail", False)
if not has_magic(path):
root = path
depth = 1
if ends:
path += "/*"
elif await self._exists(path):
if not detail:
return [path]
else:
return {path: await self._info(path)}
else:
if not detail:
return [] # glob of non-existent returns empty
else:
return {}
elif "/" in path[:ind]:
ind2 = path[:ind].rindex("/")
root = path[: ind2 + 1]
depth = None if "**" in path else path[ind2 + 1 :].count("/") + 1
else:
root = ""
depth = None if "**" in path else path[ind + 1 :].count("/") + 1
allpaths = await self._find(
root, maxdepth=depth, withdirs=True, detail=True, **kwargs
)
# Escape characters special to python regex, leaving our supported
# special characters in place.
# See https://www.gnu.org/software/bash/manual/html_node/Pattern-Matching.html
# for shell globbing details.
pattern = (
"^"
+ (
path.replace("\\", r"\\")
.replace(".", r"\.")
.replace("+", r"\+")
.replace("//", "/")
.replace("(", r"\(")
.replace(")", r"\)")
.replace("|", r"\|")
.replace("^", r"\^")
.replace("$", r"\$")
.replace("{", r"\{")
.replace("}", r"\}")
.rstrip("/")
.replace("?", ".")
)
+ "$"
)
pattern = re.sub("[*]{2}", "=PLACEHOLDER=", pattern)
pattern = re.sub("[*]", "[^/]*", pattern)
pattern = re.compile(pattern.replace("=PLACEHOLDER=", ".*"))
out = {
p: allpaths[p]
for p in sorted(allpaths)
if pattern.match(p.replace("//", "/").rstrip("/"))
}
if detail:
return out
else:
return list(out)
async def _du(self, path, total=True, maxdepth=None, **kwargs):
sizes = {}
# async for?
for f in await self._find(path, maxdepth=maxdepth, **kwargs):
info = await self._info(f)
sizes[info["name"]] = info["size"]
if total:
return sum(sizes.values())
else:
return sizes
async def _find(self, path, maxdepth=None, withdirs=False, **kwargs):
path = self._strip_protocol(path)
out = dict()
detail = kwargs.pop("detail", False)
# async for?
async for _, dirs, files in self._walk(path, maxdepth, detail=True, **kwargs):
if withdirs:
files.update(dirs)
out.update({info["name"]: info for name, info in files.items()})
if not out and (await self._isfile(path)):
# walk works on directories, but find should also return [path]
# when path happens to be a file
out[path] = {}
names = sorted(out)
if not detail:
return names
else:
return {name: out[name] for name in names}
async def _expand_path(self, path, recursive=False, maxdepth=None):
if isinstance(path, str):
out = await self._expand_path([path], recursive, maxdepth)
else:
# reduce depth on each recursion level unless None or 0
maxdepth = maxdepth if not maxdepth else maxdepth - 1
out = set()
path = [self._strip_protocol(p) for p in path]
for p in path: # can gather here
if has_magic(p):
bit = set(await self._glob(p))
out |= bit
if recursive:
out |= set(
await self._expand_path(
list(bit), recursive=recursive, maxdepth=maxdepth
)
)
continue
elif recursive:
rec = set(await self._find(p, maxdepth=maxdepth, withdirs=True))
out |= rec
if p not in out and (recursive is False or (await self._exists(p))):
# should only check once, for the root
out.add(p)
if not out:
raise FileNotFoundError(path)
return list(sorted(out))
async def _mkdir(self, path, create_parents=True, **kwargs):
pass # not necessary to implement, may not have directories
async def _makedirs(self, path, exist_ok=False):
pass # not necessary to implement, may not have directories
def mirror_sync_methods(obj):
    """Populate sync and async methods for obj

    For each method will create a sync version if the name refers to an async method
    (coroutine) and there is no override in the child class; will create an async
    method for the corresponding sync method if there is no implementation.

    Uses the methods specified in
    - async_methods: the set that an implementation is expected to provide
    - default_async_methods: that can be derived from their sync version in
      AbstractFileSystem
    - AsyncFileSystem: async-specific default coroutines
    """
    from fsspec import AbstractFileSystem

    # Candidate names: the declared async set plus everything on the default
    # async class (module-level ``async_methods`` and ``private`` regex are
    # defined elsewhere in this file).
    for method in async_methods + dir(AsyncFileSystem):
        if not method.startswith("_"):
            continue
        smethod = method[1:]
        if private.match(method):
            # Only wrap when the child actually defines a coroutine AND the
            # sync name is still the untouched AbstractFileSystem default.
            isco = inspect.iscoroutinefunction(getattr(obj, method, None))
            unsync = getattr(getattr(obj, smethod, False), "__func__", None)
            is_default = unsync is getattr(AbstractFileSystem, smethod, "")
            if isco and is_default:
                mth = sync_wrapper(getattr(obj, method), obj=obj)
                setattr(obj, smethod, mth)
                if not mth.__doc__:
                    # copy documentation from the un-underscored default
                    mth.__doc__ = getattr(
                        getattr(AbstractFileSystem, smethod, None), "__doc__", ""
                    )
class FSSpecCoroutineCancel(Exception):
    """Injected into running fsspec coroutines to force them to exit."""
def _dump_running_tasks(
    printout=True, cancel=True, exc=FSSpecCoroutineCancel, with_task=False
):
    """Debug helper: report (and optionally cancel) tasks still pending on
    the dedicated fsspec event loop.

    Parameters
    ----------
    printout : bool
        If True, print each pending task's stack to stdout.
    cancel : bool
        If True, forcibly cancel each task by injecting ``exc`` into it.
    exc : Exception class
        The exception thrown into each coroutine when cancelling.
    with_task : bool
        If True, include the task object itself in each returned record.

    Returns
    -------
    list of dict describing each pending task (locals, file, line numbers,
    formatted stack, and optionally the task).

    NOTE(review): this reaches into private asyncio internals (``_coro``,
    ``_callbacks``) and presumably only works on the CPython versions it was
    written against — verify before relying on it elsewhere.
    """
    import traceback

    if PY36:
        raise NotImplementedError("Do not call this on Py 3.6")
    # ``loop`` here is the module-level holder for fsspec's dedicated loop.
    tasks = [t for t in asyncio.tasks.all_tasks(loop[0]) if not t.done()]
    if printout:
        [task.print_stack() for task in tasks]
    out = [
        {
            "locals": task._coro.cr_frame.f_locals,
            "file": task._coro.cr_frame.f_code.co_filename,
            "firstline": task._coro.cr_frame.f_code.co_firstlineno,
            "linelo": task._coro.cr_frame.f_lineno,
            "stack": traceback.format_stack(task._coro.cr_frame),
            "task": task if with_task else None,
        }
        for task in tasks
    ]
    if cancel:
        for t in tasks:
            cbs = t._callbacks
            t.cancel()
            asyncio.futures.Future.set_exception(t, exc)
            asyncio.futures.Future.cancel(t)
            [cb[0](t) for cb in cbs]  # cancels any dependent concurrent.futures
            try:
                t._coro.throw(exc)  # exits coro, unless explicitly handled
            except exc:
                pass
    return out
|
responder.py | """
Copyright (c) 2016-2020 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import threading
import datetime
import requests
from flask import Flask, jsonify, request
from programy.utils.logging.ylogger import YLogger
from programy.clients.ping.config import PingResponderConfig
from programy.utils.console.console import outputLog
class PingResponder():
    """Healthcheck/ping endpoint for a programy client.

    Serves a small JSON payload (uptime, client id, question counts, logging
    snapshot) either inline in the client's own REST service or as a separate
    Flask app on its own thread, and can (un)register the client with an
    external healthchecker service.
    """

    def __init__(self, client):
        self._start_time = datetime.datetime.now()
        self._config = client.configuration.client_configuration.responder
        self._client = client

    @property
    def config(self):
        return self._config

    def ping(self):
        """Return the healthcheck payload as a plain dict."""
        payload = {"start_time": "%s" % self._start_time,
                   "client": self._client.id,
                   "questions": self._client.num_questions
                   }
        payload['bots'] = self._client.get_question_counts()
        payload['logging'] = YLogger.snapshot().to_json()
        return payload

    @staticmethod
    def ping_service(ping_app: Flask, config: PingResponderConfig):
        """Run the Flask healthcheck app, in https mode when both an SSL cert
        and key are configured, otherwise plain http. Blocks until the app
        stops; intended to be run on a daemon-style worker thread."""
        if config.ssl_cert_file is not None and \
                config.ssl_key_file is not None:
            context = (config.ssl_cert_file,
                       config.ssl_key_file)
            outputLog(None, "Healthcheck running in https mode")
            try:
                ping_app.run(host=config.host,
                             port=config.port,
                             debug=config.debug,
                             ssl_context=context)
            except Exception as error:
                print("Healthcheck failed to start:", error)
        else:
            outputLog(None, "Healthcheck running in http mode, careful now !")
            try:
                ping_app.run(host=config.host,
                             port=config.port,
                             debug=config.debug)
            except Exception as error:
                print("Healthcheck failed to start:", error)

    def start_ping_service(self, ping_app: Flask):
        """Start ping_service on a new thread and return the thread.

        NOTE(review): the thread is non-daemon, so a live healthcheck keeps
        the process from exiting until shutdown_ping_service is called.
        """
        t = threading.Thread(target=PingResponder.ping_service, args=(ping_app, self.config))
        t.daemon = False
        t.start()
        return t

    def stop_ping_service(self):
        """Ask the embedded Werkzeug server to shut down.

        Must be called from within a request context (the /shutdown route);
        relies on the werkzeug.server.shutdown environ hook.
        """
        func = request.environ.get('werkzeug.server.shutdown')
        if func is None:
            raise RuntimeError('Not running with the Werkzeug Server')
        func()

    def shutdown_ping_service(self):
        """Unregister from the healthchecker, then hit our own shutdown URL
        so the serving thread's Werkzeug server exits."""
        self.unregister_with_healthchecker()
        try:
            url = "http://%s:%d%s" % (self.config.host, self.config.port, self.config.shutdown)
            requests.get(url)
        except Exception:
            YLogger.error(None, "Failed to shutdown ping service")

    def register_with_healthchecker(self):
        """Register this client with the configured healthchecker service,
        falling back to the client's own host/port when the responder config
        does not override them."""
        if self.config.register is not None:
            if self.config.host is not None:
                host = self.config.host
            else:
                host = self._client.configuration.client_configuration.host

            # BUG FIX: this previously re-tested config.host, so a configured
            # port was ignored whenever host was unset (and vice versa).
            if self.config.port is not None:
                port = self.config.port
            else:
                port = self._client.configuration.client_configuration.port

            try:
                url = "%s?name=%s&host=%s&port=%s&url=%s" % (self.config.register, self._client.id,
                                                             host, port, self.config.url)
                requests.get(url)
            except Exception as e:
                YLogger.exception(None, "Unable to register with healthchecker", e)

    def unregister_with_healthchecker(self):
        """Best-effort removal of this client from the healthchecker."""
        if self.config.unregister is not None:
            try:
                url = "%s?name=%s" % (self.config.unregister, self._client.id)
                requests.get(url)
            except Exception as e:
                outputLog(self, e)
                YLogger.error(None, "Unable to unregister with healthchecker")

    @staticmethod
    def init_ping_response(ping_responder):
        """Create and start the standalone healthcheck Flask app, wiring up
        the ping and shutdown routes from the responder's configuration.
        When no host is configured, the healthcheck is assumed to run inside
        the main REST service and nothing is started here."""
        if ping_responder.config.host is None:
            YLogger.info(None, "No REST configuration for ping responder")
            outputLog(None, "Healthcheck now running as part of REST Service...")
            return

        outputLog(None, "Healthcheck now running as separate REST Service...")
        ping_app = Flask(ping_responder.config.name)

        if ping_responder.config.url is not None:
            @ping_app.route(ping_responder.config.url, methods=['GET'])
            def ping():  # pylint: disable=unused-variable
                return jsonify(ping_responder.ping())

        if ping_responder.config.shutdown is not None:
            @ping_app.route(ping_responder.config.shutdown, methods=['GET'])
            def shutdown():  # pylint: disable=unused-variable
                ping_responder.stop_ping_service()
                return 'Server shutting down...'

        ping_responder.start_ping_service(ping_app)
|
chat_client.py | import time
from threading import Thread
import pat
import patl
from enum import Enum, auto
import logging
from chat_protocol import ChatClient, TEXT
from patutils import LENGTH, MIN_VALUE, MAX_VALUE, ReturnCode
# Shared state between the pairing threads and the main input loop.
paired = False  # set once the two machines have exchanged pings
silence = True  # Not needed, playing other frequency
chat_client: ChatClient = None  # created lazily once pairing succeeds
class SearchMode(Enum):
    """Role of this device during pairing: actively searching or listening."""

    LISTEN = auto()
    SEARCH = auto()
def ping_thread():  # The thread that sends the ping
    """Broadcast a pairing ping repeatedly until a peer is found.

    Runs on its own thread; flips the global ``silence`` flag around each
    tone so listeners can distinguish our own output from a peer's.
    """
    global silence
    while not paired:  # While the device is not paired
        silence = False  # There's something coming from the machine
        pat.play_value(MAX_VALUE, LENGTH)  # Send a ping
        silence = True  # There is nothing coming from the machine
        time.sleep(LENGTH)  # and wait
    return
def pair_listener(value):  # This listens for the ping from the other machine
    """Listener callback (SEARCH mode): waits for a response ping.

    When the peer answers with MIN_VALUE, marks the pair as established,
    starts the chat client, and returns ReturnCode.EXIT to stop listening.
    """
    global paired
    global silence
    global chat_client
    if value == MIN_VALUE:  # If it hears a response ping
        paired = True  # It's paired
        print("PAIRED!")  # Outputs paired
        chat_client = ChatClient()  # Creates the client
        chat_client.start()  # Starts up the client
        return ReturnCode.EXIT
def device_listener(value):  # Listens for devices
    """Listener callback (LISTEN mode): waits for a searching device's ping.

    On hearing MAX_VALUE it answers with MIN_VALUE, marks the pair as
    established, starts the chat client, and returns ReturnCode.EXIT to
    stop listening.
    """
    global chat_client
    global paired
    if value == MAX_VALUE:  # If it hears a ping
        pat.play_value(MIN_VALUE, LENGTH)  # Play response ping
        paired = True
        print("PAIRED!")
        chat_client = ChatClient()  # Creates the client
        chat_client.start()  # Starts the client
        return ReturnCode.EXIT
def main(mode: SearchMode):
    """Run the pairing handshake for the given mode, then the chat REPL.

    Returns -1 on an unknown mode; otherwise loops forever reading user
    input. Lines starting with "/" are commands ("/?" prefix marks a
    request); anything else is sent as plain chat text.
    """
    if mode == SearchMode.SEARCH:  # If searching
        patl.start_listener(pair_listener)  # Start the search pair listener
        thread = Thread(target=ping_thread)  # Create the ping thread
        thread.start()  # Start the ping thread
        thread.join()  # Wait for the ping thread to finish (i.e. paired)
    elif mode == SearchMode.LISTEN:  # If listening
        patl.start_listener(device_listener, join=True)  # Start the device listener
    else:
        return -1
    while True:
        # busy-wait until a listener callback has set the global `paired`
        if not paired:
            continue
        message = input("[Sined]: ")  # Take input
        if message.startswith("/"):  # If command
            try:
                raw_command = message[1:].strip()  # The command without the '/'
                if raw_command.startswith("?"):  # If the command is a request
                    raw_command = raw_command[1:].strip()  # The command without the '?'
                    split_command = list(
                        filter(None, raw_command.split(" ")))  # Using filter to filter out the empty parts
                    command = split_command[0]  # Get the command
                    raw_args = ':'.join(split_command[1:])  # Prepare to send
                    chat_client.request(command, raw_args)  # Send the command
                else:
                    split_command = list(filter(None, raw_command.split(" ")))  # Get the command split, to obtain the args
                    command = split_command[0]  # Get the command
                    raw_args = ':'.join(split_command[1:])  # Prepare to send
                    chat_client.command(command, raw_args)  # Send command
            except IndexError:
                # "/" with no command word after it
                logging.error("Malformed command")
        else:
            chat_client.command(TEXT, message)
if __name__ == "__main__":
    # Prompt until the user picks a valid mode ('s' or 'l'); IndexError
    # covers an empty input line.
    chosen_mode = None
    while not chosen_mode:
        try:
            written = input("[S]earch or [L]isten: ").lower()[0]  # Whether searching or listening
            if written == "s":
                chosen_mode = SearchMode.SEARCH  # Searching
            if written == "l":
                chosen_mode = SearchMode.LISTEN  # Listening
        except IndexError:
            continue
    try:
        print(f"Program terminated with exit code: {main(chosen_mode)}")
    except KeyboardInterrupt:
        print("Goodbye!")
|
callbacks.py | # -*- coding: utf8 -*-
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2014, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
This module contains the basic callbacks for handling PRIVMSGs.
"""
import re
import copy
import time
from . import shlex
import codecs
import getopt
import inspect
from . import (conf, ircdb, irclib, ircmsgs, ircutils, log, registry,
utils, world)
from .utils import minisix
from .utils.iter import any, all
from .i18n import PluginInternationalization
_ = PluginInternationalization()
def _addressed(nick, msg, prefixChars=None, nicks=None,
               prefixStrings=None, whenAddressedByNick=None,
               whenAddressedByNickAtEnd=None):
    """Core logic for ``addressed``: return the command portion of a PRIVMSG
    that is addressed to the bot, or '' if it is not addressed to us.

    Any of the keyword arguments left as None is filled in from the registry
    (conf.supybot.reply.whenAddressedBy.*), resolved per-channel when the
    target is a channel. The checks are order-sensitive: prefix strings
    before prefix chars before nick-addressing (see the comment below).
    """
    def get(group):
        # resolve a registry group, using the channel-specific value when
        # the message target is a channel
        if ircutils.isChannel(target):
            group = group.get(target)
        return group()
    def stripPrefixStrings(payload):
        # remove at most one leading occurrence of each configured prefix
        for prefixString in prefixStrings:
            if payload.startswith(prefixString):
                payload = payload[len(prefixString):].lstrip()
        return payload

    assert msg.command == 'PRIVMSG'
    (target, payload) = msg.args
    if not payload:
        return ''
    if prefixChars is None:
        prefixChars = get(conf.supybot.reply.whenAddressedBy.chars)
    if whenAddressedByNick is None:
        whenAddressedByNick = get(conf.supybot.reply.whenAddressedBy.nick)
    if whenAddressedByNickAtEnd is None:
        r = conf.supybot.reply.whenAddressedBy.nick.atEnd
        whenAddressedByNickAtEnd = get(r)
    if prefixStrings is None:
        prefixStrings = get(conf.supybot.reply.whenAddressedBy.strings)
    # We have to check this before nicks -- try "@google supybot" with supybot
    # and whenAddressedBy.nick.atEnd on to see why.
    if any(payload.startswith, prefixStrings):
        return stripPrefixStrings(payload)
    elif payload[0] in prefixChars:
        return payload[1:].strip()
    if nicks is None:
        nicks = get(conf.supybot.reply.whenAddressedBy.nicks)
        nicks = list(map(ircutils.toLower, nicks))
    else:
        nicks = list(nicks) # Just in case.
    nicks.insert(0, ircutils.toLower(nick))
    # Ok, let's see if it's a private message.
    if ircutils.nickEqual(target, nick):
        payload = stripPrefixStrings(payload)
        while payload and payload[0] in prefixChars:
            payload = payload[1:].lstrip()
        return payload
    # Ok, not private. Does it start with our nick?
    elif whenAddressedByNick:
        for nick in nicks:
            lowered = ircutils.toLower(payload)
            if lowered.startswith(nick):
                try:
                    (maybeNick, rest) = payload.split(None, 1)
                    toContinue = False
                    # strip trailing punctuation ("bot:", "bot,") until we
                    # have a valid nick; bail if we hit an alphanumeric
                    while not ircutils.isNick(maybeNick, strictRfc=True):
                        if maybeNick[-1].isalnum():
                            toContinue = True
                            break
                        maybeNick = maybeNick[:-1]
                    if toContinue:
                        continue
                    if ircutils.nickEqual(maybeNick, nick):
                        return rest
                    else:
                        continue
                except ValueError: # split didn't work.
                    continue
            elif whenAddressedByNickAtEnd and lowered.endswith(nick):
                rest = payload[:-len(nick)]
                possiblePayload = rest.rstrip(' \t,;')
                if possiblePayload != rest:
                    # There should be some separator between the nick and the
                    # previous alphanumeric character.
                    return possiblePayload
    if get(conf.supybot.reply.whenNotAddressed):
        return payload
    else:
        return ''
def addressed(nick, msg, **kwargs):
    """If msg is addressed to 'name', returns the portion after the address.
    Otherwise returns the empty string.

    The result is cached on the message via its 'addressed' tag, so repeated
    calls for the same message do not re-run the parsing in _addressed.
    """
    cached = msg.addressed
    if cached is None:
        cached = _addressed(nick, msg, **kwargs)
        msg.tag('addressed', cached)
    return cached
def canonicalName(command, preserve_spaces=False):
    """Turn a command into its canonical form.

    Currently, this makes everything lowercase and removes all dashes and
    underscores (and spaces, unless preserve_spaces is true). A trailing run
    of those special characters is preserved verbatim and re-appended.
    """
    if minisix.PY2 and isinstance(command, unicode):
        command = command.encode('utf-8')
    elif minisix.PY3 and isinstance(command, bytes):
        command = command.decode()
    special = '\t-_' if preserve_spaces else '\t-_ '
    suffix = ''
    while command and command[-1] in special:
        suffix = command[-1] + suffix
        command = command[:-1]
    cleaned = ''.join(ch for ch in command if ch not in special)
    return cleaned.lower() + suffix
def reply(msg, s, prefixNick=None, private=None,
          notice=None, to=None, action=None, error=False):
    """Build the IRC reply message for *msg* carrying payload *s*.

    Any of prefixNick/private/notice left as None is resolved from the
    per-channel registry configuration. ``error=True`` prepends 'Error: '
    and applies the error-specific notice/private settings. Returns an
    ircmsgs message (PRIVMSG, NOTICE, or ACTION) tagged 'inReplyTo'.
    """
    msg.tag('repliedTo')
    # Ok, let's make the target:
    # XXX This isn't entirely right. Consider to=#foo, private=True.
    target = ircutils.replyTo(msg)
    if ircutils.isChannel(to):
        target = to
    if ircutils.isChannel(target):
        channel = target
    else:
        channel = None
    if notice is None:
        notice = conf.get(conf.supybot.reply.withNotice, channel)
    if private is None:
        private = conf.get(conf.supybot.reply.inPrivate, channel)
    if prefixNick is None:
        prefixNick = conf.get(conf.supybot.reply.withNickPrefix, channel)
    if error:
        # error replies may force notice/private regardless of the defaults
        notice =conf.get(conf.supybot.reply.error.withNotice, channel) or notice
        private=conf.get(conf.supybot.reply.error.inPrivate, channel) or private
        s = _('Error: ') + s
    if private:
        prefixNick = False
        if to is None:
            target = msg.nick
        else:
            target = to
    if action:
        prefixNick = False
        if to is None:
            to = msg.nick
    # Ok, now let's make the payload:
    s = ircutils.safeArgument(s)
    if not s and not action:
        s = _('Error: I tried to send you an empty message.')
    if prefixNick and ircutils.isChannel(target):
        # Let's may sure we don't do, "#channel: foo.".
        if not ircutils.isChannel(to):
            s = '%s: %s' % (to, s)
    if not ircutils.isChannel(target):
        if conf.supybot.reply.withNoticeWhenPrivate():
            notice = True
    # And now, let's decide whether it's a PRIVMSG or a NOTICE.
    msgmaker = ircmsgs.privmsg
    if notice:
        msgmaker = ircmsgs.notice
    # We don't use elif here because actions can't be sent as NOTICEs.
    if action:
        msgmaker = ircmsgs.action
    # Finally, we'll return the actual message.
    ret = msgmaker(target, s)
    ret.tag('inReplyTo', msg)
    return ret
def error(msg, s, **kwargs):
    """Makes an error reply to msg with the appropriate error payload."""
    msg.tag('isError')
    kwargs.update(error=True)
    return reply(msg, s, **kwargs)
def getHelp(method, name=None, doc=None):
    """Return the formatted help text for *method*.

    Uses *name*/*doc* when given, otherwise falls back to the method's own
    __name__ and __doc__ (with a canned message when there is no docstring).
    Multi-line docstrings render as '(<bold syntax>) -- <description>'.
    """
    if name is None:
        name = method.__name__
    if doc is not None:
        lines = doc.splitlines()
    elif method.__doc__ is None:
        lines = ['This command has no help. Complain to the author.']
    else:
        lines = method.__doc__.splitlines()
    summary = '%s %s' % (name, lines.pop(0))
    if lines:
        description = ' '.join(lines)
        summary = '(%s) -- %s' % (ircutils.bold(summary), description)
    return utils.str.normalizeWhitespace(summary)
def getSyntax(method, name=None, doc=None):
    """Return the syntax summary for *method*: its name followed by the
    first line of the supplied (or the method's own) docstring."""
    if name is None:
        name = method.__name__
    text = method.__doc__ if doc is None else doc
    first_line = text.splitlines()[0]
    return '%s %s' % (name, first_line)
class Error(Exception):
    """Base class for errors raised by Privmsg callbacks."""
class ArgumentError(Error):
    """Raised when a command gets bad arguments; the bot replies with the
    command's help message."""
class SilentError(Error):
    """An error that should not be reported back to the user."""
class Tokenizer(object):
    """Splits a raw command string into (possibly nested) argument lists,
    honoring configurable quote characters, nesting brackets, and the
    pipe syntax."""
    # This will be used as a global environment to evaluate strings in.
    # Evaluation is, of course, necessary in order to allow escaped
    # characters to be properly handled.
    #
    # These are the characters valid in a token. Everything printable except
    # double-quote, left-bracket, and right-bracket.
    separators = '\x00\r\n \t'

    def __init__(self, brackets='', pipe=False, quotes='"'):
        # brackets: two-character string, e.g. '[]', enabling nested commands
        if brackets:
            self.separators += brackets
            self.left = brackets[0]
            self.right = brackets[1]
        else:
            self.left = ''
            self.right = ''
        self.pipe = pipe
        if self.pipe:
            self.separators += '|'
        self.quotes = quotes
        self.separators += quotes

    def _handleToken(self, token):
        # Unquote and unescape a single token.
        if token[0] == token[-1] and token[0] in self.quotes:
            token = token[1:-1]
            # FIXME: No need to tell you this is a hack.
            # It has to handle both IRC commands and serialized configuration.
            #
            # Whoever you are, if you make a single modification to this
            # code, TEST the code with Python 2 & 3, both with the unit
            # tests and on IRC with this: @echo "好"
            if minisix.PY2:
                try:
                    token = token.encode('utf8').decode('string_escape')
                    token = token.decode('utf8')
                except:
                    token = token.decode('string_escape')
            else:
                token = codecs.getencoder('utf8')(token)[0]
                token = codecs.getdecoder('unicode_escape')(token)[0]
                try:
                    token = token.encode('iso-8859-1').decode()
                except: # Prevent issue with tokens like '"\\x80"'.
                    pass
        return token

    def _insideBrackets(self, lexer):
        # Consume tokens until the matching right bracket, recursing on
        # nested left brackets; returns the collected (nested) list.
        ret = []
        while True:
            token = lexer.get_token()
            if not token:
                raise SyntaxError(_('Missing "%s". You may want to '
                                    'quote your arguments with double '
                                    'quotes in order to prevent extra '
                                    'brackets from being evaluated '
                                    'as nested commands.') % self.right)
            elif token == self.right:
                return ret
            elif token == self.left:
                ret.append(self._insideBrackets(lexer))
            else:
                ret.append(self._handleToken(token))
        return ret  # NOTE: unreachable -- the loop only exits via return/raise

    def tokenize(self, s):
        """Tokenize *s* into a list of arguments; nested commands become
        nested lists, and piped segments are folded right-to-left."""
        lexer = shlex.shlex(minisix.io.StringIO(s))
        lexer.commenters = ''
        lexer.quotes = self.quotes
        lexer.separators = self.separators
        args = []
        ends = []
        while True:
            token = lexer.get_token()
            if not token:
                break
            elif token == '|' and self.pipe:
                # The "and self.pipe" might seem redundant here, but it's there
                # for strings like 'foo | bar', where a pipe stands alone as a
                # token, but shouldn't be treated specially.
                if not args:
                    raise SyntaxError(_('"|" with nothing preceding. I '
                                        'obviously can\'t do a pipe with '
                                        'nothing before the |.'))
                ends.append(args)
                args = []
            elif token == self.left:
                args.append(self._insideBrackets(lexer))
            elif token == self.right:
                raise SyntaxError(_('Spurious "%s". You may want to '
                                    'quote your arguments with double '
                                    'quotes in order to prevent extra '
                                    'brackets from being evaluated '
                                    'as nested commands.') % self.right)
            else:
                args.append(self._handleToken(token))
        if ends:
            # fold the piped segments so each earlier segment becomes the
            # nested last argument of the later one
            if not args:
                raise SyntaxError(_('"|" with nothing following. I '
                                    'obviously can\'t do a pipe with '
                                    'nothing after the |.'))
            args.append(ends.pop())
            while ends:
                args[-1].append(ends.pop())
        return args
def tokenize(s, channel=None):
    """A utility function to create a Tokenizer and tokenize a string."""
    use_pipe = False
    bracket_chars = ''
    nested = conf.supybot.commands.nested
    if nested():
        bracket_chars = conf.get(nested.brackets, channel)
        # Pipe syntax only applies when nesting is enabled at all.
        if conf.get(nested.pipeSyntax, channel):
            use_pipe = True
    quote_chars = conf.get(conf.supybot.commands.quotes, channel)
    try:
        tokenizer = Tokenizer(brackets=bracket_chars,
                              pipe=use_pipe,
                              quotes=quote_chars)
        return tokenizer.tokenize(s)
    except ValueError as e:
        # Surface lexer problems uniformly as SyntaxError.
        raise SyntaxError(str(e))
def formatCommand(command):
    """Return the human-readable form of *command*: its name parts joined
    by single spaces."""
    separator = ' '
    return separator.join(command)
def checkCommandCapability(msg, cb, commandName):
    """Return a false value if msg.prefix is allowed to run *commandName*
    on plugin *cb*, or the name of the blocking capability if not.

    Anti-capabilities are checked for the plugin, the command, and the
    'plugin.command' pair, both globally and (when the message targets a
    channel) per-channel; positive capabilities collected in checkAtEnd
    are then weighed against the default-allow settings.
    """
    if not isinstance(commandName, minisix.string_types):
        commandName = '.'.join(commandName)
    plugin = cb.name().lower()
    pluginCommand = '%s.%s' % (plugin, commandName)
    def checkCapability(capability):
        # Abort via RuntimeError (caught below) as soon as one
        # anti-capability applies; the capability name rides in the
        # exception.
        assert ircdb.isAntiCapability(capability)
        if ircdb.checkCapability(msg.prefix, capability):
            log.info('Preventing %s from calling %s because of %s.',
                     msg.prefix, pluginCommand, capability)
            raise RuntimeError(capability)
    try:
        antiPlugin = ircdb.makeAntiCapability(plugin)
        antiCommand = ircdb.makeAntiCapability(commandName)
        antiPluginCommand = ircdb.makeAntiCapability(pluginCommand)
        checkCapability(antiPlugin)
        checkCapability(antiCommand)
        checkCapability(antiPluginCommand)
        checkAtEnd = [commandName, pluginCommand]
        default = conf.supybot.capabilities.default()
        if ircutils.isChannel(msg.args[0]):
            # Channel-scoped anti-capabilities can also deny the call.
            channel = msg.args[0]
            checkCapability(ircdb.makeChannelCapability(channel, antiCommand))
            checkCapability(ircdb.makeChannelCapability(channel, antiPlugin))
            checkCapability(ircdb.makeChannelCapability(channel,
                                                        antiPluginCommand))
            chanPlugin = ircdb.makeChannelCapability(channel, plugin)
            chanCommand = ircdb.makeChannelCapability(channel, commandName)
            chanPluginCommand = ircdb.makeChannelCapability(channel,
                                                            pluginCommand)
            checkAtEnd += [chanCommand, chanPlugin, chanPluginCommand]
            default &= ircdb.channels.getChannel(channel).defaultAllow
        # NOTE(review): 'any' is called predicate-first here, which is not
        # the builtin's signature -- presumably a supybot utils variant is
        # bound at module level; confirm before refactoring.
        return not (default or \
                    any(lambda x: ircdb.checkCapability(msg.prefix, x),
                        checkAtEnd))
    except RuntimeError as e:
        s = ircdb.unAntiCapability(str(e))
        return s
class RichReplyMethods(object):
    """This is a mixin so these replies need only be defined once. It operates
    under several assumptions, including the fact that 'self' is an Irc object
    of some sort and there is a self.msg that is an IrcMsg."""
    def __makeReply(self, prefix, s):
        # Glue the configured phrase (success/error/etc.) onto s, then apply
        # the standard substitutions ($nick, $channel, ...) for this message.
        if s:
            s = '%s %s' % (prefix, s)
        else:
            s = prefix
        return ircutils.standardSubstitute(self, self.msg, s)
    def _getConfig(self, wrapper):
        # Channel-aware config lookup, keyed on the message's target.
        return conf.get(wrapper, self.msg.args[0])
    def replySuccess(self, s='', **kwargs):
        """Reply with the configured 'success' phrase prefixed to s, or
        stay silent when that phrase is configured empty."""
        v = self._getConfig(conf.supybot.replies.success)
        if v:
            s = self.__makeReply(v, s)
            return self.reply(s, **kwargs)
        else:
            self.noReply()
    def replyError(self, s='', **kwargs):
        """Reply with the generic error phrase; owners get the
        owner-specific phrase instead."""
        v = self._getConfig(conf.supybot.replies.error)
        if 'msg' in kwargs:
            msg = kwargs['msg']
            if ircdb.checkCapability(msg.prefix, 'owner'):
                v = self._getConfig(conf.supybot.replies.errorOwner)
        s = self.__makeReply(v, s)
        return self.reply(s, **kwargs)
    def replies(self, L, prefixer=None, joiner=None,
                onlyPrefixFirst=False, to=None,
                oneToOne=None, **kwargs):
        """Reply with a list of strings L, either joined into a single
        message (oneToOne) or one reply per element."""
        if prefixer is None:
            prefixer = ''
        if joiner is None:
            joiner = utils.str.commaAndify
        # String prefixer/joiner are promoted to callables.
        if isinstance(prefixer, minisix.string_types):
            prefixer = prefixer.__add__
        if isinstance(joiner, minisix.string_types):
            joiner = joiner.join
        if oneToOne is None: # Can be True, False, or None
            if ircutils.isChannel(to):
                oneToOne = conf.get(conf.supybot.reply.oneToOne, to)
            else:
                oneToOne = conf.supybot.reply.oneToOne()
        if oneToOne:
            return self.reply(prefixer(joiner(L)), to=to, **kwargs)
        else:
            msg = None
            first = True
            for s in L:
                if onlyPrefixFirst:
                    if first:
                        first = False
                        msg = self.reply(prefixer(s), to=to, **kwargs)
                    else:
                        msg = self.reply(s, to=to, **kwargs)
                else:
                    msg = self.reply(prefixer(s), to=to, **kwargs)
            # Only the last reply's message object is returned.
            return msg
    def noReply(self):
        # Mark the message as handled without sending anything.
        self.repliedTo = True
    def _error(self, s, Raise=False, **kwargs):
        # Common error funnel: raise when asked, otherwise send a reply.
        if Raise:
            raise Error(s)
        else:
            return self.error(s, **kwargs)
    def errorNoCapability(self, capability, s='', **kwargs):
        """Error out because msg.prefix lacks *capability*; defaults to
        raising (Raise=True) unless the caller overrides."""
        if 'Raise' not in kwargs:
            kwargs['Raise'] = True
        log.warning('Denying %s for lacking %q capability.',
                    self.msg.prefix, capability)
        # noCapability means "don't send a specific capability error
        # message" not "don't send a capability error message at all", like
        # one would think
        if self._getConfig(conf.supybot.reply.error.noCapability) or \
                capability in conf.supybot.capabilities.private():
            v = self._getConfig(conf.supybot.replies.genericNoCapability)
        else:
            v = self._getConfig(conf.supybot.replies.noCapability)
        try:
            v %= capability
        except TypeError: # No %s in string
            pass
        s = self.__makeReply(v, s)
        if s:
            return self._error(s, **kwargs)
    def errorPossibleBug(self, s='', **kwargs):
        v = self._getConfig(conf.supybot.replies.possibleBug)
        if s:
            s += ' (%s)' % v
        else:
            s = v
        return self._error(s, **kwargs)
    def errorNotRegistered(self, s='', **kwargs):
        v = self._getConfig(conf.supybot.replies.notRegistered)
        return self._error(self.__makeReply(v, s), **kwargs)
    def errorNoUser(self, s='', name='that user', **kwargs):
        if 'Raise' not in kwargs:
            kwargs['Raise'] = True
        v = self._getConfig(conf.supybot.replies.noUser)
        try:
            v = v % name
        except TypeError:
            # The configured phrase lacks a %s slot for the user name.
            log.warning('supybot.replies.noUser should have one "%s" in it.')
        return self._error(self.__makeReply(v, s), **kwargs)
    def errorRequiresPrivacy(self, s='', **kwargs):
        v = self._getConfig(conf.supybot.replies.requiresPrivacy)
        return self._error(self.__makeReply(v, s), **kwargs)
    def errorInvalid(self, what, given=None, s='', repr=True, **kwargs):
        """Error out with '<given> is not a valid <what>.'.  The 'repr'
        flag (which shadows the builtin; see module-level _repr) controls
        whether *given* is repr()'d or merely quoted."""
        if given is not None:
            if repr:
                given = _repr(given)
            else:
                given = '"%s"' % given
            v = _('%s is not a valid %s.') % (given, what)
        else:
            v = _('That\'s not a valid %s.') % what
        if 'Raise' not in kwargs:
            kwargs['Raise'] = True
        if s:
            v += ' ' + s
        return self._error(v, **kwargs)
_repr = repr
class ReplyIrcProxy(RichReplyMethods):
    """This class is a thin wrapper around an irclib.Irc object that gives it
    the reply() and error() methods (as well as everything in RichReplyMethods,
    based on those two)."""
    def __init__(self, irc, msg):
        self.irc = irc
        self.msg = msg
    def getRealIrc(self):
        """Returns the real irclib.Irc object underlying this proxy chain."""
        if isinstance(self.irc, irclib.Irc):
            return self.irc
        else:
            return self.irc.getRealIrc()
    # This should make us be considered equal to our irclib.Irc object for
    # hashing; an important thing (no more "too many open files" exceptions :))
    def __hash__(self):
        return hash(self.getRealIrc())
    def __eq__(self, other):
        return self.getRealIrc() == other
    __req__ = __eq__
    def __ne__(self, other):
        return not (self == other)
    __rne__ = __ne__
    def error(self, s, msg=None, **kwargs):
        """Send an error reply for msg (default: the proxied message).
        With Raise=True, raise Error/ArgumentError instead of queueing."""
        if 'Raise' in kwargs and kwargs['Raise']:
            if s:
                raise Error(s)
            else:
                raise ArgumentError
        if msg is None:
            msg = self.msg
        m = error(msg, s, **kwargs)
        self.irc.queueMsg(m)
        return m
    def reply(self, s, msg=None, **kwargs):
        """Queue a reply *s* to msg (default: the proxied message)."""
        if msg is None:
            msg = self.msg
        assert not isinstance(s, ircmsgs.IrcMsg), \
               'Old code alert: there is no longer a "msg" argument to reply.'
        # noLengthCheck is only meaningful to NestedCommandsIrcProxy; drop it.
        kwargs.pop('noLengthCheck', None)
        m = reply(msg, s, **kwargs)
        self.irc.queueMsg(m)
        return m
    def __getattr__(self, attr):
        # Everything else is delegated to the wrapped Irc object.
        return getattr(self.irc, attr)
SimpleProxy = ReplyIrcProxy # Backwards-compatibility
class NestedCommandsIrcProxy(ReplyIrcProxy):
    "A proxy object to allow proper nesting of commands (even threaded ones)."
    # Shared across all proxies: pending '(XX more messages)' continuations.
    _mores = ircutils.IrcDict()
    def __init__(self, irc, msg, args, nested=0):
        assert isinstance(args, list), 'Args should be a list, not a string.'
        self.irc = irc
        self.msg = msg
        self.nested = nested
        self.repliedTo = False
        if not self.nested and isinstance(irc, self.__class__):
            # This means we were given an NestedCommandsIrcProxy instead of an
            # irclib.Irc, and so we're obviously nested. But nested wasn't
            # set! So we take our given Irc's nested value.
            self.nested += irc.nested
        maxNesting = conf.supybot.commands.nested.maximum()
        if maxNesting and self.nested > maxNesting:
            log.warning('%s attempted more than %s levels of nesting.',
                        self.msg.prefix, maxNesting)
            self.error(_('You\'ve attempted more nesting than is '
                         'currently allowed on this bot.'))
            return
        # The deepcopy here is necessary for Scheduler; it re-runs already
        # tokenized commands. There's a possibility a simple copy[:] would
        # work, but we're being careful.
        self.args = copy.deepcopy(args)
        self.counter = 0
        self._resetReplyAttributes()
        if not args:
            self.finalEvaled = True
            self._callInvalidCommands()
        else:
            self.finalEvaled = False
            world.commandsProcessed += 1
            self.evalArgs()
    def __eq__(self, other):
        return other == self.getRealIrc()
    def __hash__(self):
        return hash(self.getRealIrc())
    def _resetReplyAttributes(self):
        # Per-command reply modifiers, set via reply()'s keyword arguments
        # and cleared after each final reply.
        self.to = None
        self.action = None
        self.notice = None
        self.private = None
        self.noLengthCheck = None
        if ircutils.isChannel(self.msg.args[0]):
            self.prefixNick = conf.get(conf.supybot.reply.withNickPrefix,
                                       self.msg.args[0])
        else:
            self.prefixNick = conf.supybot.reply.withNickPrefix()
    def evalArgs(self, withClass=None):
        """Evaluate self.args: strings are left as-is; nested lists are
        handed to a fresh proxy, whose reply() re-enters this loop."""
        while self.counter < len(self.args):
            self.repliedTo = False
            if isinstance(self.args[self.counter], minisix.string_types):
                # If it's a string, just go to the next arg. There is no
                # evaluation to be done for strings. If, at some point,
                # we decided to, say, convert every string using
                # ircutils.standardSubstitute, this would be where we would
                # probably put it.
                self.counter += 1
            else:
                assert isinstance(self.args[self.counter], list)
                # It's a list. So we spawn another NestedCommandsIrcProxy
                # to evaluate its args. When that class has finished
                # evaluating its args, it will call our reply method, which
                # will subsequently call this function again, and we'll
                # pick up where we left off via self.counter.
                cls = withClass or self.__class__
                cls(self, self.msg, self.args[self.counter],
                    nested=self.nested+1)
                # We have to return here because the new NestedCommandsIrcProxy
                # might not have called our reply method instantly, since
                # its command might be threaded. So (obviously) we can't
                # just fall through to self.finalEval.
                return
        # Once all the list args are evaluated, we then evaluate our own
        # list of args, since we're assured that they're all strings now.
        # NOTE(review): 'all' is called predicate-first, not the builtin's
        # single-iterable signature -- presumably a supybot utils variant
        # bound at module level; confirm before refactoring.
        assert all(lambda x: isinstance(x, minisix.string_types), self.args)
        self.finalEval()
    def _callInvalidCommands(self):
        """Give every callback with an invalidCommand() a chance to handle
        an unrecognized command, stopping at the first that replies."""
        log.debug('Calling invalidCommands.')
        threaded = False
        cbs = []
        for cb in self.irc.callbacks:
            if hasattr(cb, 'invalidCommand'):
                cbs.append(cb)
                # Run in a thread if any interested callback is threaded.
                threaded = threaded or cb.threaded
        def callInvalidCommands():
            self.repliedTo = False
            for cb in cbs:
                log.debug('Calling %s.invalidCommand.', cb.name())
                try:
                    cb.invalidCommand(self, self.msg, self.args)
                except Error as e:
                    self.error(str(e))
                except Exception as e:
                    log.exception('Uncaught exception in %s.invalidCommand.',
                                  cb.name())
                log.debug('Finished calling %s.invalidCommand.', cb.name())
                if self.repliedTo:
                    # Stop as soon as one callback has produced a reply.
                    log.debug('Done calling invalidCommands: %s.',cb.name())
                    return
        if threaded:
            name = 'Thread #%s (for invalidCommands)' % world.threadsSpawned
            t = world.SupyThread(target=callInvalidCommands, name=name)
            t.setDaemon(True)
            t.start()
        else:
            callInvalidCommands()
    def findCallbacksForArgs(self, args):
        """Returns a two-tuple of (command, plugins) that has the command
        (a list of strings) and the plugins for which it was a command."""
        assert isinstance(args, list)
        args = list(map(canonicalName, args))
        cbs = []
        maxL = []
        for cb in self.irc.callbacks:
            if not hasattr(cb, 'getCommand'):
                continue
            L = cb.getCommand(args)
            #log.debug('%s.getCommand(%r) returned %r', cb.name(), args, L)
            if L and L >= maxL:
                # Track the longest prefix any callback claims; ties are
                # resolved (or reported as ambiguous) by the caller.
                maxL = L
                cbs.append((cb, L))
                assert isinstance(L, list), \
                       'getCommand now returns a list, not a method.'
                assert utils.iter.startswith(L, args), \
                       'getCommand must return a prefix of the args given. ' \
                       '(args given: %r, returned: %r)' % (args, L)
        log.debug('findCallbacksForArgs: %r', cbs)
        cbs = [cb for (cb, L) in cbs if L == maxL]
        if len(maxL) == 1:
            # Special case: one arg determines the callback. In this case, we
            # have to check, in order:
            # 1. Whether the arg is the same as the name of a callback. This
            #    callback would then win.
            for cb in cbs:
                if cb.canonicalName() == maxL[0]:
                    return (maxL, [cb])
            # 2. Whether a defaultplugin is defined.
            defaultPlugins = conf.supybot.commands.defaultPlugins
            try:
                defaultPlugin = defaultPlugins.get(maxL[0])()
                log.debug('defaultPlugin: %r', defaultPlugin)
                if defaultPlugin:
                    cb = self.irc.getCallback(defaultPlugin)
                    if cb in cbs:
                        # This is just a sanity check, but there's a small
                        # possibility that a default plugin for a command
                        # is configured to point to a plugin that doesn't
                        # actually have that command.
                        return (maxL, [cb])
            except registry.NonExistentRegistryEntry:
                pass
            # 3. Whether an importantPlugin is one of the responses.
            important = defaultPlugins.importantPlugins()
            important = list(map(canonicalName, important))
            importants = []
            for cb in cbs:
                if cb.canonicalName() in important:
                    importants.append(cb)
            if len(importants) == 1:
                return (maxL, importants)
        return (maxL, cbs)
    def finalEval(self):
        # Now that we've already iterated through our args and made sure
        # that any list of args was evaluated (by spawning another
        # NestedCommandsIrcProxy to evaluated it into a string), we can finally
        # evaluated our own list of arguments.
        assert not self.finalEvaled, 'finalEval called twice.'
        self.finalEvaled = True
        # Now, the way we call a command is we iterate over the loaded pluings,
        # asking each one if the list of args we have interests it. The
        # way we do that is by calling getCommand on the plugin.
        # The plugin will return a list of args which it considers to be
        # "interesting." We will then give our args to the plugin which
        # has the *longest* list. The reason we pick the longest list is
        # that it seems reasonable that the longest the list, the more
        # specific the command is. That is, given a list of length X, a list
        # of length X+1 would be even more specific (assuming that both lists
        # used the same prefix. Of course, if two plugins return a list of the
        # same length, we'll just error out with a message about ambiguity.
        (command, cbs) = self.findCallbacksForArgs(self.args)
        if not cbs:
            # We used to handle addressedRegexps here, but I think we'll let
            # them handle themselves in getCommand. They can always just
            # return the full list of args as their "command".
            self._callInvalidCommands()
        elif len(cbs) > 1:
            names = sorted([cb.name() for cb in cbs])
            command = formatCommand(command)
            self.error(format(_('The command %q is available in the %L '
                                'plugins. Please specify the plugin '
                                'whose command you wish to call by using '
                                'its name as a command before %q.'),
                              command, names, command))
        else:
            cb = cbs[0]
            args = self.args[len(command):]
            if world.isMainThread() and \
                    (cb.threaded or conf.supybot.debug.threadAllCommands()):
                t = CommandThread(target=cb._callCommand,
                                  args=(command, self, self.msg, args))
                t.start()
            else:
                cb._callCommand(command, self, self.msg, args)
    def reply(self, s, noLengthCheck=False, prefixNick=None, action=None,
              private=None, notice=None, to=None, msg=None,
              sendImmediately=False):
        """
        Keyword arguments:
        * `noLengthCheck=False`: True if the length shouldn't be checked
          (used for 'more' handling)
        * `prefixNick=True`: False if the nick shouldn't be prefixed to the
          reply.
        * `action=False`: True if the reply should be an action.
        * `private=False`: True if the reply should be in private.
        * `notice=False`: True if the reply should be noticed when the
          bot is configured to do so.
        * `to=<nick|channel>`: The nick or channel the reply should go to.
          Defaults to msg.args[0] (or msg.nick if private)
        * `sendImmediately=False`: True if the reply should use sendMsg() which
          bypasses conf.supybot.protocols.irc.throttleTime
          and gets sent before any queued messages
        """
        # These use and or or based on whether or not they default to True or
        # False. Those that default to True use and; those that default to
        # False use or.
        assert not isinstance(s, ircmsgs.IrcMsg), \
               'Old code alert: there is no longer a "msg" argument to reply.'
        self.repliedTo = True
        if sendImmediately:
            sendMsg = self.irc.sendMsg
        else:
            sendMsg = self.irc.queueMsg
        if msg is None:
            msg = self.msg
        if prefixNick is not None:
            self.prefixNick = prefixNick
        if action is not None:
            self.action = self.action or action
            if action:
                self.prefixNick = False
        if notice is not None:
            self.notice = self.notice or notice
        if private is not None:
            self.private = self.private or private
        if to is not None:
            self.to = self.to or to
        # action=True implies noLengthCheck=True and prefixNick=False
        self.noLengthCheck=noLengthCheck or self.noLengthCheck or self.action
        target = self.private and self.to or self.msg.args[0]
        if not isinstance(s, minisix.string_types): # avoid trying to str() unicode
            s = str(s) # Allow non-string esses.
        if self.finalEvaled:
            try:
                if isinstance(self.irc, self.__class__):
                    # We're a nested command: hand the result string up to
                    # the parent proxy instead of sending it to IRC.
                    s = s[:conf.supybot.reply.maximumLength()]
                    return self.irc.reply(s, to=self.to,
                                          notice=self.notice,
                                          action=self.action,
                                          private=self.private,
                                          prefixNick=self.prefixNick,
                                          noLengthCheck=self.noLengthCheck)
                elif self.noLengthCheck:
                    # noLengthCheck only matters to NestedCommandsIrcProxy, so
                    # it's not used here. Just in case you were wondering.
                    m = reply(msg, s, to=self.to,
                              notice=self.notice,
                              action=self.action,
                              private=self.private,
                              prefixNick=self.prefixNick)
                    sendMsg(m)
                    return m
                else:
                    s = ircutils.safeArgument(s)
                    allowedLength = conf.get(conf.supybot.reply.mores.length,
                                             target)
                    if not allowedLength: # 0 indicates this.
                        allowedLength = 470 - len(self.irc.prefix)
                        allowedLength -= len(msg.nick)
                    # The '(XX more messages)' may have not the same
                    # length in the current locale
                    allowedLength -= len(_('(XX more messages)'))
                    maximumMores = conf.get(conf.supybot.reply.mores.maximum,
                                            target)
                    maximumLength = allowedLength * maximumMores
                    if len(s) > maximumLength:
                        log.warning('Truncating to %s bytes from %s bytes.',
                                    maximumLength, len(s))
                        s = s[:maximumLength]
                    # NOTE(review): despite its name, s_too_long is True when
                    # s *fits* in one message (length strictly below
                    # allowedLength); consider renaming.
                    s_too_long = len(s.encode()) < allowedLength \
                        if minisix.PY3 else len(s) < allowedLength
                    if s_too_long or \
                            not conf.get(conf.supybot.reply.mores, target):
                        # In case we're truncating, we add 20 to allowedLength,
                        # because our allowedLength is shortened for the
                        # "(XX more messages)" trailer.
                        if minisix.PY3:
                            appended = _('(XX more messages)').encode()
                            s = s.encode()[:allowedLength+len(appended)]
                            s = s.decode('utf8', 'ignore')
                        else:
                            appended = _('(XX more messages)')
                            s = s[:allowedLength+len(appended)]
                        # There's no need for action=self.action here because
                        # action implies noLengthCheck, which has already been
                        # handled. Let's stick an assert in here just in case.
                        assert not self.action
                        m = reply(msg, s, to=self.to,
                                  notice=self.notice,
                                  private=self.private,
                                  prefixNick=self.prefixNick)
                        sendMsg(m)
                        return m
                    # Too long for a single message: wrap, send the first
                    # chunk(s), and stash the rest as 'mores'.
                    msgs = ircutils.wrap(s, allowedLength,
                                         break_long_words=True)
                    msgs.reverse()
                    instant = conf.get(conf.supybot.reply.mores.instant,target)
                    while instant > 1 and msgs:
                        instant -= 1
                        response = msgs.pop()
                        m = reply(msg, response, to=self.to,
                                  notice=self.notice,
                                  private=self.private,
                                  prefixNick=self.prefixNick)
                        sendMsg(m)
                        # XXX We should somehow allow these to be returned, but
                        #     until someone complains, we'll be fine :) We
                        #     can't return from here, though, for obvious
                        #     reasons.
                        # return m
                    if not msgs:
                        return
                    response = msgs.pop()
                    if msgs:
                        if len(msgs) == 1:
                            more = _('more message')
                        else:
                            more = _('more messages')
                        n = ircutils.bold('(%i %s)' % (len(msgs), more))
                        response = '%s %s' % (response, n)
                    prefix = msg.prefix
                    if self.to and ircutils.isNick(self.to):
                        try:
                            state = self.getRealIrc().state
                            prefix = state.nickToHostmask(self.to)
                        except KeyError:
                            pass # We'll leave it as it is.
                    mask = prefix.split('!', 1)[1]
                    # Remaining chunks are keyed both by hostmask (a plain
                    # list) and by nick (a (private, msgs) tuple).
                    self._mores[mask] = msgs
                    public = ircutils.isChannel(msg.args[0])
                    private = self.private or not public
                    self._mores[msg.nick] = (private, msgs)
                    m = reply(msg, response, to=self.to,
                              action=self.action,
                              notice=self.notice,
                              private=self.private,
                              prefixNick=self.prefixNick)
                    sendMsg(m)
                    return m
            finally:
                self._resetReplyAttributes()
        else:
            # Not final yet: store this nested command's result back into
            # our arg list and keep evaluating.
            if msg.ignored:
                # Since the final reply string is constructed via
                # ' '.join(self.args), the args index for ignored commands
                # needs to be popped to avoid extra spaces in the final reply.
                self.args.pop(self.counter)
                msg.tag('ignored', False)
            else:
                self.args[self.counter] = s
            self.evalArgs()
    def error(self, s='', Raise=False, **kwargs):
        """Send (or, with Raise=True, raise) an error for this command."""
        self.repliedTo = True
        if Raise:
            if s:
                raise Error(s)
            else:
                raise ArgumentError
        if s:
            if not isinstance(self.irc, irclib.Irc):
                # Nested proxy: propagate the error up the chain.
                return self.irc.error(s, **kwargs)
            else:
                m = error(self.msg, s, **kwargs)
                self.irc.queueMsg(m)
                return m
        else:
            # Neither a message nor Raise: nothing sensible to send.
            raise ArgumentError
    def __getattr__(self, attr):
        # Delegate everything else to the underlying Irc (or parent proxy).
        return getattr(self.irc, attr)
IrcObjectProxy = NestedCommandsIrcProxy
class CommandThread(world.SupyThread):
    """Just does some extra logging and error-recovery for commands that need
    to run in threads.

    While the thread runs, the callback's 'threaded' flag is forced True
    and restored afterwards.
    """
    def __init__(self, target=None, args=(), kwargs=None):
        # None instead of a mutable default dict (shared-default pitfall).
        if kwargs is None:
            kwargs = {}
        # args is (command, irc, msg, commandArgs); target is a bound
        # _callCommand, so its __self__ is the plugin callback.
        self.command = args[0]
        self.cb = target.__self__
        threadName = 'Thread #%s (for %s.%s)' % (world.threadsSpawned,
                                                 self.cb.name(),
                                                 self.command)
        log.debug('Spawning thread %s (args: %r)', threadName, args)
        self.__parent = super(CommandThread, self)
        self.__parent.__init__(target=target, name=threadName,
                               args=args, kwargs=kwargs)
        # 'daemon' attribute instead of setDaemon(), which is deprecated
        # since Python 3.10 (same behavior on 2.6+).
        self.daemon = True
        self.originalThreaded = self.cb.threaded
        self.cb.threaded = True

    def run(self):
        """Run the command, restoring the callback's original 'threaded'
        flag when done (even on error)."""
        try:
            self.__parent.run()
        finally:
            self.cb.threaded = self.originalThreaded
class CommandProcess(world.SupyProcess):
    """Just does some extra logging and error-recovery for commands that need
    to run in processes.
    """
    def __init__(self, target=None, args=(), kwargs=None):
        # None instead of a mutable default dict (shared-default pitfall).
        if kwargs is None:
            kwargs = {}
        # 'pn'/'cn' (plugin/command name, used only for the process name)
        # are ours: pop them so they aren't forwarded to the target.
        pn = kwargs.pop('pn', 'Unknown')
        cn = kwargs.pop('cn', 'unknown')
        procName = 'Process #%s (for %s.%s)' % (world.processesSpawned,
                                                pn,
                                                cn)
        log.debug('Spawning process %s (args: %r)', procName, args)
        self.__parent = super(CommandProcess, self)
        self.__parent.__init__(target=target, name=procName,
                               args=args, kwargs=kwargs)

    def run(self):
        self.__parent.run()
# Registry string value normalized through canonicalName().
class CanonicalString(registry.NormalizedString):
    def normalize(self, s):
        return canonicalName(s)
# Set whose members are stored under their canonical command names.
class CanonicalNameSet(utils.NormalizingSet):
    def normalize(self, s):
        return canonicalName(s)
# Dict keyed by canonical command names (case/punctuation-insensitive).
class CanonicalNameDict(utils.InsensitivePreservingDict):
    def key(self, s):
        return canonicalName(s)
# Registry type for the space-separated list of disabled commands: values
# are canonicalized (CanonicalString) and deduplicated (CanonicalNameSet).
class Disabled(registry.SpaceSeparatedListOf):
    sorted = True
    Value = CanonicalString
    List = CanonicalNameSet
conf.registerGlobalValue(conf.supybot.commands, 'disabled',
Disabled([], _("""Determines what commands are currently disabled. Such
commands will not appear in command lists, etc. They will appear not even
to exist.""")))
class DisabledCommands(object):
    """Tracks disabled commands, seeded from supybot.commands.disabled.

    Internally, self.d maps a command name either to None (disabled in
    every plugin) or to a set of plugin names in which it is disabled.
    """
    def __init__(self):
        self.d = CanonicalNameDict()
        for name in conf.supybot.commands.disabled():
            if '.' in name:
                # 'plugin.command' form: disabled only in that plugin.
                (plugin, command) = name.split('.', 1)
                if command in self.d:
                    if self.d[command] is not None:
                        # Already plugin-scoped; add another plugin.  (When
                        # it's None the command is globally disabled, which
                        # subsumes this entry.)
                        self.d[command].add(plugin)
                else:
                    self.d[command] = CanonicalNameSet([plugin])
            else:
                # Bare command name: disabled everywhere.
                self.d[name] = None
    def disabled(self, command, plugin=None):
        """Return whether *command* is disabled (globally, or in *plugin*)."""
        if command in self.d:
            if self.d[command] is None:
                return True
            elif plugin in self.d[command]:
                return True
        return False
    def add(self, command, plugin=None):
        """Disable *command*: globally when plugin is None, else in *plugin*."""
        if plugin is None:
            self.d[command] = None
        else:
            if command in self.d:
                if self.d[command] is not None:
                    self.d[command].add(plugin)
            else:
                self.d[command] = CanonicalNameSet([plugin])
    def remove(self, command, plugin=None):
        """Re-enable *command*; raises KeyError if it wasn't disabled as
        specified."""
        if plugin is None:
            del self.d[command]
        else:
            if self.d[command] is not None:
                self.d[command].remove(plugin)
class BasePlugin(object):
    """Base for plugin classes: any nested BasePlugin subclasses found as
    class attributes are instantiated, stored back on the instance, and
    collected in self.cbs."""
    def __init__(self, *args, **kwargs):
        self.cbs = []
        for attr in dir(self):
            # Only canonically-named attributes can be nested callbacks.
            if attr != canonicalName(attr):
                continue
            obj = getattr(self, attr)
            if isinstance(obj, type) and issubclass(obj, BasePlugin):
                # Replace the nested class with an instance of itself and
                # give it a logger namespaced under this plugin.
                cb = obj(*args, **kwargs)
                setattr(self, attr, cb)
                self.cbs.append(cb)
                cb.log = log.getPluginLogger('%s.%s' % (self.name(),cb.name()))
        super(BasePlugin, self).__init__()
# Combine the log-firewall metaclass with the synchronization metaclass so a
# single base class can provide both behaviors to Commands subclasses.
class MetaSynchronizedAndFirewalled(log.MetaFirewall, utils.python.MetaSynchronized):
    pass
SynchronizedAndFirewalled = MetaSynchronizedAndFirewalled(
    'SynchronizedAndFirewalled', (), {})
# Command-dispatch base class: discovers command methods by signature,
# resolves (possibly nested) command names, and calls them with uniform
# logging, capability checks, and error handling.  (Comment instead of a
# docstring: plugin help is read from class __doc__.)
class Commands(BasePlugin, SynchronizedAndFirewalled):
    __synchronized__ = (
        '__call__',
        'callCommand',
        'invalidCommand',
    )
    # For a while, a comment stood here to say, "Eventually callCommand." But
    # that's wrong, because we can't do generic error handling in this
    # callCommand -- plugins need to be able to override callCommand and do
    # error handling there (see the Web plugin for an example).
    __firewalled__ = {'isCommand': None,
                      '_callCommand': None}
    # A command method is recognized by exactly this argument list.
    commandArgs = ['self', 'irc', 'msg', 'args']
    # These must be class-scope, so all plugins use the same one.
    _disabled = DisabledCommands()
    pre_command_callbacks = []
    def name(self):
        # The plugin's name is simply its class name.
        return self.__class__.__name__
    def canonicalName(self):
        return canonicalName(self.name())
    def isDisabled(self, command):
        return self._disabled.disabled(command, self.name())
    def isCommandMethod(self, name):
        """Returns whether a given method name is a command in this plugin."""
        # This function is ugly, but I don't want users to call methods like
        # doPrivmsg or __init__ or whatever, and this is good to stop them.
        # Don't normalize this name: consider outFilter(self, irc, msg).
        # name = canonicalName(name)
        if self.isDisabled(name):
            return False
        if name != canonicalName(name):
            return False
        if hasattr(self, name):
            method = getattr(self, name)
            if inspect.ismethod(method):
                code = method.__func__.__code__
                # Must have exactly the (self, irc, msg, args) signature.
                return inspect.getargs(code)[0] == self.commandArgs
            else:
                return False
        else:
            return False
    def isCommand(self, command):
        """Convenience, backwards-compatibility, semi-deprecated."""
        if isinstance(command, minisix.string_types):
            return self.isCommandMethod(command)
        else:
            # Since we're doing a little type dispatching here, let's not be
            # too liberal.
            assert isinstance(command, list)
            return self.getCommand(command) == command
    def getCommand(self, args, stripOwnName=True):
        """Return the longest prefix of *args* naming a command in this
        plugin (possibly via a nested callback), or []."""
        assert args == list(map(canonicalName, args))
        first = args[0]
        for cb in self.cbs:
            if first == cb.canonicalName():
                return cb.getCommand(args)
        if first == self.canonicalName() and len(args) > 1 and \
                stripOwnName:
            # 'pluginname command ...': strip our own name once and retry.
            ret = self.getCommand(args[1:], stripOwnName=False)
            if ret:
                return [first] + ret
        if self.isCommandMethod(first):
            return [first]
        return []
    def getCommandMethod(self, command):
        """Gets the given command from this plugin."""
        #print '*** %s.getCommandMethod(%r)' % (self.name(), command)
        assert not isinstance(command, minisix.string_types)
        assert command == list(map(canonicalName, command))
        assert self.getCommand(command) == command
        for cb in self.cbs:
            if command[0] == cb.canonicalName():
                return cb.getCommandMethod(command)
        if len(command) > 1:
            assert command[0] == self.canonicalName()
            return self.getCommandMethod(command[1:])
        else:
            method = getattr(self, command[0])
            if inspect.ismethod(method):
                code = method.__func__.__code__
                if inspect.getargs(code)[0] == self.commandArgs:
                    return method
                else:
                    raise AttributeError
    def listCommands(self, pluginCommands=[]):
        """Return the sorted list of command names offered by this plugin,
        including nested-callback commands as '<callback> <command>'."""
        # NOTE: the mutable default is only read here, never mutated.
        commands = set(pluginCommands)
        for s in dir(self):
            if self.isCommandMethod(s):
                commands.add(s)
        for cb in self.cbs:
            name = cb.canonicalName()
            for command in cb.listCommands():
                if command == name:
                    commands.add(command)
                else:
                    commands.add(' '.join([name, command]))
        L = list(commands)
        L.sort()
        return L
    def callCommand(self, command, irc, msg, *args, **kwargs):
        # We run all callbacks before checking if one of them returned True
        # NOTE(review): 'any' is called predicate-first here (non-builtin
        # signature); presumably a supybot utils variant -- confirm.
        if any(bool, list(cb(self, command, irc, msg, *args, **kwargs)
                for cb in self.pre_command_callbacks)):
            return
        method = self.getCommandMethod(command)
        method(irc, msg, *args, **kwargs)
    def _callCommand(self, command, irc, msg, *args, **kwargs):
        """Wrapper around callCommand adding logging, capability checks,
        and uniform error handling."""
        if irc.nick == msg.args[0]:
            self.log.info('%s called in private by %q.', formatCommand(command),
                          msg.prefix)
        else:
            self.log.info('%s called on %s by %q.', formatCommand(command),
                          msg.args[0], msg.prefix)
        # XXX I'm being extra-special-careful here, but we need to refactor
        #     this.
        try:
            # Check the full command and each of its name parts.
            cap = checkCommandCapability(msg, self, command)
            if cap:
                irc.errorNoCapability(cap)
                return
            for name in command:
                cap = checkCommandCapability(msg, self, name)
                if cap:
                    irc.errorNoCapability(cap)
                    return
            try:
                self.callingCommand = command
                self.callCommand(command, irc, msg, *args, **kwargs)
            finally:
                self.callingCommand = None
        except SilentError:
            pass
        except (getopt.GetoptError, ArgumentError) as e:
            # Bad arguments: show the command's help instead of a traceback.
            self.log.debug('Got %s, giving argument error.',
                           utils.exnToString(e))
            help = self.getCommandHelp(command)
            if 'command has no help.' in help:
                # Note: this case will never happen, unless 'checkDoc' is set
                # to False.
                irc.error(_('Invalid arguments for %s.') % formatCommand(command))
            else:
                irc.reply(help)
        except (SyntaxError, Error) as e:
            self.log.debug('Error return: %s', utils.exnToString(e))
            irc.error(str(e))
        except Exception as e:
            self.log.exception('Uncaught exception in %s.', command)
            if conf.supybot.reply.error.detailed():
                irc.error(utils.exnToString(e))
            else:
                irc.replyError(msg=msg)
    def getCommandHelp(self, command, simpleSyntax=None):
        """Return the help for *command* (just the syntax line when
        simpleSyntax is configured/requested)."""
        method = self.getCommandMethod(command)
        help = getHelp
        chan = None
        if dynamic.msg is not None:
            chan = dynamic.msg.args[0]
        if simpleSyntax is None:
            simpleSyntax = conf.get(conf.supybot.reply.showSimpleSyntax, chan)
        if simpleSyntax:
            help = getSyntax
        if hasattr(method, '__doc__'):
            return help(method, name=formatCommand(command))
        else:
            return format(_('The %q command has no help.'),
                          formatCommand(command))
# IRC-callback side of a plugin: message filtering (ignores), per-plugin
# registry access, and per-user value storage.  (Comment instead of a
# docstring: plugin help is read from class __doc__.)
class PluginMixin(BasePlugin, irclib.IrcCallback):
    public = True
    alwaysCall = ()
    threaded = False
    noIgnore = False        # may also be a callable(irc, msg) -> bool
    classModule = None
    Proxy = NestedCommandsIrcProxy
    def __init__(self, irc):
        myName = self.name()
        self.log = log.getPluginLogger(myName)
        self.__parent = super(PluginMixin, self)
        self.__parent.__init__(irc)
        # We can't do this because of the specialness that Owner and Misc do.
        # I guess plugin authors will have to get the capitalization right.
        # self.callAfter = map(str.lower, self.callAfter)
        # self.callBefore = map(str.lower, self.callBefore)
    def canonicalName(self):
        return canonicalName(self.name())
    def __call__(self, irc, msg):
        # PRIVMSGs from ignored senders are dropped unless the plugin opts
        # out of ignores; everything else passes straight through.
        irc = SimpleProxy(irc, msg)
        if msg.command == 'PRIVMSG':
            if hasattr(self.noIgnore, '__call__'):
                noIgnore = self.noIgnore(irc, msg)
            else:
                noIgnore = self.noIgnore
            if noIgnore or \
                    not ircdb.checkIgnored(msg.prefix, msg.args[0]) or \
                    not ircutils.isUserHostmask(msg.prefix): # Some services impl.
                self.__parent.__call__(irc, msg)
        else:
            self.__parent.__call__(irc, msg)
    def registryValue(self, name, channel=None, value=True):
        """Return this plugin's registry variable *name* (dotted names
        descend into groups); with value=False, return the group object
        itself rather than its current value."""
        plugin = self.name()
        group = conf.supybot.plugins.get(plugin)
        names = registry.split(name)
        for name in names:
            group = group.get(name)
        if channel is not None:
            if ircutils.isChannel(channel):
                group = group.get(channel)
            else:
                self.log.debug('%s: registryValue got channel=%r', plugin,
                               channel)
        if value:
            return group()
        else:
            return group
    def setRegistryValue(self, name, value, channel=None):
        """Set this plugin's registry variable *name*, optionally only for
        *channel*."""
        plugin = self.name()
        group = conf.supybot.plugins.get(plugin)
        names = registry.split(name)
        for name in names:
            group = group.get(name)
        if channel is None:
            group.setValue(value)
        else:
            group.get(channel).setValue(value)
    def userValue(self, name, prefixOrName, default=None):
        """Return the per-user registry value *name* for the user matching
        *prefixOrName*, or None for unknown users.
        NOTE(review): the 'default' parameter is accepted but never used.
        """
        try:
            id = str(ircdb.users.getUserId(prefixOrName))
        except KeyError:
            return None
        plugin = self.name()
        group = conf.users.plugins.get(plugin)
        names = registry.split(name)
        for name in names:
            group = group.get(name)
        return group.get(id)()
    def setUserValue(self, name, prefixOrName, value,
                     ignoreNoUser=True, setValue=True):
        """Set the per-user registry value *name* for the matching user.
        Unknown users are silently ignored unless ignoreNoUser=False;
        setValue=False uses set() (string form) instead of setValue()."""
        try:
            id = str(ircdb.users.getUserId(prefixOrName))
        except KeyError:
            if ignoreNoUser:
                return
            else:
                raise
        plugin = self.name()
        group = conf.users.plugins.get(plugin)
        names = registry.split(name)
        for name in names:
            group = group.get(name)
        group = group.get(id)
        if setValue:
            group.setValue(value)
        else:
            group.set(value)
    def getPluginHelp(self):
        # Plugin help is simply the (subclass's) class docstring.
        if hasattr(self, '__doc__'):
            return self.__doc__
        else:
            return None
# The standard plugin base class: IRC-callback behavior from PluginMixin
# plus command dispatch from Commands.  (Deliberately no docstring:
# getPluginHelp() returns the class __doc__ of plugin classes.)
class Plugin(PluginMixin, Commands):
    pass
Privmsg = Plugin # Backwards compatibility.
class PluginRegexp(Plugin):
    """Same as Plugin, except allows the user to also include regexp-based
    callbacks.  All regexp-based callbacks must be specified in the set (or
    list) attribute "regexps", "addressedRegexps", or "unaddressedRegexps"
    depending on whether they should always be triggered, triggered only when
    the bot is addressed, or triggered only when the bot isn't addressed.

    Each named callback's docstring is the regexp it matches against.
    """
    flags = re.I
    regexps = ()
    """'regexps' methods are called whether the message is addressed or not."""
    addressedRegexps = ()
    """'addressedRegexps' methods are called only when the message is addressed,
    and then, only with the payload (i.e., what is returned from the
    'addressed' function."""
    unaddressedRegexps = ()
    """'unaddressedRegexps' methods are called only when the message is *not*
    addressed."""
    Proxy = SimpleProxy
    def __init__(self, irc):
        self.__parent = super(PluginRegexp, self)
        self.__parent.__init__(irc)
        self.res = []
        self.addressedRes = []
        self.unaddressedRes = []
        # Compile each callback's docstring-regexp once, up front.  The
        # three lists share identical construction, so build them in one
        # loop instead of three copy-pasted ones.
        for (names, compiled) in ((self.regexps, self.res),
                                  (self.addressedRegexps, self.addressedRes),
                                  (self.unaddressedRegexps,
                                   self.unaddressedRes)):
            for name in names:
                method = getattr(self, name)
                r = re.compile(method.__doc__, self.flags)
                compiled.append((r, name))
    def _callRegexp(self, name, irc, msg, m):
        """Invoke the regexp callback *name* with match *m*, reporting
        command Errors to the user and logging unexpected exceptions."""
        method = getattr(self, name)
        try:
            method(irc, msg, m)
        except Error as e:
            irc.error(str(e))
        except Exception:
            # The bound exception variable was unused; log.exception
            # already records the active traceback.
            self.log.exception('Uncaught exception in _callRegexp:')
    def invalidCommand(self, irc, msg, tokens):
        """Try the addressed regexps against text that wasn't a command."""
        s = ' '.join(tokens)
        for (r, name) in self.addressedRes:
            for m in r.finditer(s):
                self._callRegexp(name, irc, msg, m)
    def doPrivmsg(self, irc, msg):
        """Run unaddressed (when applicable) and always-on regexps."""
        if msg.isError:
            return
        proxy = self.Proxy(irc, msg)
        if not msg.addressed:
            for (r, name) in self.unaddressedRes:
                for m in r.finditer(msg.args[1]):
                    self._callRegexp(name, proxy, msg, m)
        for (r, name) in self.res:
            for m in r.finditer(msg.args[1]):
                self._callRegexp(name, proxy, msg, m)
# Old name kept so third-party code referencing PrivmsgCommandAndRegexp
# keeps working.
PrivmsgCommandAndRegexp = PluginRegexp
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
pimongame.py | #importing necessary library
import RPi.GPIO as GPIO
import threading
import time
import random
import os
from time import sleep
# LED GPIO pins (BCM numbering): green, white, yellow, red
LIGHTS = [13, 26, 19, 6] #in BCM mode
# Button GPIO pins (BCM numbering); BUTTONS[i] is the button for LIGHTS[i]
BUTTONS = [17, 22, 27, 4] #in BCM mode
# values you can change that affect game play (pattern playback delay, s)
speed = 0.25
# Various flags used to signal game status
is_displaying_pattern = False
is_won_current_level = False
is_game_over = False
play = 0
# game state: level == number of colors the player must repeat
current_level = 1
current_step_of_level = 0
# the current sequence of LED indexes (0-3) the player must repeat
pattern = []
# set once the motion sensor has triggered and the game has begun
flag = 0
# Configure every GPIO pin the game uses
def initialize_gpio():
    """Select BCM numbering and configure LED, button and sensor pins."""
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(LIGHTS, GPIO.OUT, initial=GPIO.LOW)
    # Internal pull-ups: a pressed button reads low
    GPIO.setup(BUTTONS, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    GPIO.setup(5, GPIO.IN)   # motion sensor input (see main())
    GPIO.setup(9, GPIO.OUT)  # indicator output raised when motion is seen
# Check whether the button pressed matches the next LED in the pattern
def verify_player_selection(channel):
    """Handle one button press (GPIO channel) and advance or end the game."""
    global current_step_of_level, current_level, is_won_current_level, is_game_over
    # Ignore presses while the pattern is being shown or the round is decided
    if not is_displaying_pattern and not is_won_current_level and not is_game_over:
        # Flash the LED paired with the pressed button as feedback
        flash_led_for_button(channel)
        if channel == BUTTONS[pattern[current_step_of_level]]:
            current_step_of_level += 1
            # Whole sequence repeated correctly: level up and report score
            if current_step_of_level >= current_level:
                current_level += 1
                is_won_current_level = True
                current_score = current_level-1
                print("Your Current Score:------- %d"%current_score)
        else:
            # Wrong button ends the game
            is_game_over = True
def verify_player_auto():
    """Poll every button; on a press, flash its LED and check the guess.

    Replaces four copy-pasted button/LED branches with one loop over the
    BUTTONS/LIGHTS pairing.  The original scanned in reverse BUTTONS
    order (4, 27, 22, 17), so that order is preserved.  Buttons read low
    when pressed (internal pull-ups).
    """
    if not play:
        return
    for button, led in zip(reversed(BUTTONS), reversed(LIGHTS)):
        if not GPIO.input(button):
            GPIO.output(led, True)   # brief visual feedback on the paired LED
            time.sleep(.2)
            GPIO.output(led, False)
            verify_player_selection(button)
# Flash the LED paired with a button press
def flash_led_for_button(button_channel):
    """Briefly light the LED that corresponds to the given button channel."""
    # BUTTONS[i] pairs with LIGHTS[i]
    led = LIGHTS[BUTTONS.index(button_channel)]
    GPIO.output(led, GPIO.HIGH)
    time.sleep(0.2)
    GPIO.output(led, GPIO.LOW)
# Build the LED pattern for the next round (one step per level)
def add_new_color_to_pattern():
    """Reset round flags and rebuild `pattern` with random colors."""
    global is_won_current_level, current_step_of_level
    is_won_current_level = False
    current_step_of_level = 0
    # Replace the list contents in place so every reference to `pattern`
    # observes the new sequence.
    pattern[:] = [random.randint(0, 3) for _ in range(current_level)]
# Play the pattern back to the player on the LEDs
def display_pattern_to_player():
    """Flash each LED of the current pattern in order, `speed` apart."""
    global is_displaying_pattern
    # Button presses are ignored while this flag is set
    is_displaying_pattern = True
    # Turn every LED off first in case one was left lit
    GPIO.output(LIGHTS, GPIO.LOW)
    for i in range(current_level):
        GPIO.output(LIGHTS[pattern[i]], GPIO.HIGH)
        time.sleep(speed)
        GPIO.output(LIGHTS[pattern[i]], GPIO.LOW)
        time.sleep(speed)
    is_displaying_pattern = False
# Wait for the player to repeat the displayed pattern
def wait_for_player_to_repeat_pattern():
    """Block until the player finishes the sequence or makes a mistake."""
    while True:
        if is_won_current_level or is_game_over:
            break
        verify_player_auto()
        time.sleep(0.1)
# Start a new game after losing: reinitialize every piece of game state
def reset_board_for_new_game():
    """Reset all flags, counters and the pattern, and turn the LEDs off."""
    global is_displaying_pattern, is_won_current_level, is_game_over
    global current_level, current_step_of_level, pattern,flag
    is_displaying_pattern = False
    is_won_current_level = False
    is_game_over = False
    current_level = 1
    current_step_of_level = 0
    pattern = []
    flag = 0
    GPIO.output(LIGHTS, GPIO.LOW)
# Main game loop
def start_game():
    """Grow and replay the pattern each round until the player fails.

    On game over the player may restart with 'Y'/'y' or exit with any
    other input.
    """
    # raw_input was renamed to input in Python 3; support both so the
    # game-over prompt doesn't crash with NameError on Python 3.
    try:
        read_line = raw_input
    except NameError:
        read_line = input
    while True:
        add_new_color_to_pattern()
        display_pattern_to_player()
        wait_for_player_to_repeat_pattern()
        if is_game_over:
            print("Game Over! Your max score was {} colors!\n".format(current_level-1))
            play_again = read_line("Enter 'Y' to play again, or just press [ENTER] to exit.\n")
            if play_again == "Y" or play_again == "y":
                reset_board_for_new_game()
                print("Begin new round!\n")
            else:
                print("Thanks for playing!\n")
                break
        time.sleep(2)
# Run the game loop on its own (daemon) thread
def start_game_monitor():
    """Start start_game on a daemon thread and wait for it to finish."""
    worker = threading.Thread(target=start_game)
    worker.daemon = True
    worker.start()
    worker.join()
# Driver function
def main():
    """Wait for the motion sensor to trigger, then run the game."""
    global play,flag
    try:
        initialize_gpio()
        GPIO.output(9,0)
        # Spin until the motion sensor (pin 5) fires for the first time;
        # pin 9 is raised as an indicator once motion is detected.
        while True:
            if flag == 0 :
                if(GPIO.input(5) == 1):
                    GPIO.output(9,True)
                    flag = 1
                    #time.sleep(2)
            else:
                break
        print("Begin new round!\n")
        play=1
        start_game_monitor()
    finally:
        # Always release the GPIO pins, even on Ctrl-C
        GPIO.cleanup()
# Script entry point: only run the game when executed directly
if __name__ == '__main__':
    main()
|
devtools_browser.py | # Copyright 2017 Google Inc. All rights reserved.
# Use of this source code is governed by the Apache 2.0 license that can be
# found in the LICENSE file.
"""Base class support for browsers that speak the dev tools protocol"""
import glob
import gzip
import logging
import os
import re
import shutil
import subprocess
import threading
import time
import monotonic
import ujson as json
from .optimization_checks import OptimizationChecks
class DevtoolsBrowser(object):
    """Devtools Browser base"""
    # Seconds to wait for the devtools endpoint to accept a connection
    CONNECT_TIME_LIMIT = 120
    def __init__(self, options, job, use_devtools_video=True):
        """Store the run options/job and initialize per-test state."""
        self.options = options
        self.job = job
        self.devtools = None              # active DevTools connection (or None)
        self.task = None                  # task dict for the test in progress
        self.event_name = None            # name of the current script step
        self.browser_version = None       # parsed from the UA string on prepare
        self.device_pixel_ratio = None    # native DPR, discovered lazily
        self.use_devtools_video = use_devtools_video
        self.lighthouse_command = None    # shell command built for lighthouse
        self.devtools_screenshot = True
        self.support_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'support')
        self.script_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'js')
def connect(self, task):
"""Connect to the dev tools interface"""
ret = False
from internal.devtools import DevTools
self.devtools = DevTools(self.options, self.job, task, self.use_devtools_video)
if task['running_lighthouse']:
ret = self.devtools.wait_for_available(self.CONNECT_TIME_LIMIT)
else:
if self.devtools.connect(self.CONNECT_TIME_LIMIT):
logging.debug("Devtools connected")
ret = True
else:
task['error'] = "Error connecting to dev tools interface"
logging.critical(task['error'])
self.devtools = None
return ret
def disconnect(self):
"""Disconnect from dev tools"""
if self.devtools is not None:
# Always navigate to about:blank after finishing in case the tab is
# remembered across sessions
if self.task is not None and self.task['error'] is None:
self.devtools.send_command('Page.navigate', {'url': 'about:blank'}, wait=True)
self.devtools.close()
self.devtools = None
    def prepare_browser(self, task):
        """Prepare the running browser (mobile emulation, UA string, etc.)

        Runs once per test before recording starts.  Order matters: the
        native viewport/DPR are probed before any emulation override is
        applied.
        """
        if self.devtools is not None:
            # Figure out the native viewport size
            if not self.options.android:
                size = self.devtools.execute_js("[window.innerWidth, window.innerHeight]")
                if size is not None and len(size) == 2:
                    task['actual_viewport'] = {"width": size[0], "height": size[1]}
            # Get the native device pixel ratio (only once per session)
            if self.device_pixel_ratio is None:
                self.device_pixel_ratio = 1.0
                try:
                    ratio = self.devtools.execute_js('window.devicePixelRatio')
                    if ratio is not None:
                        self.device_pixel_ratio = max(1.0, float(ratio))
                except Exception:
                    pass
            # Clear the caches for first-view (uncached) runs
            if not task['cached']:
                self.devtools.send_command("Network.clearBrowserCache", {},
                                           wait=True)
                self.devtools.send_command("Network.clearBrowserCookies", {},
                                           wait=True)
            # Mobile Emulation (only when not on a real Android device and
            # the job supplies the full width/height/dpr triple)
            if not self.options.android and \
                    'mobile' in self.job and self.job['mobile'] and \
                    'width' in self.job and 'height' in self.job and \
                    'dpr' in self.job:
                # width/height may arrive as strings; extract the digits
                width = int(re.search(r'\d+', str(self.job['width'])).group())
                height = int(re.search(r'\d+', str(self.job['height'])).group())
                self.devtools.send_command("Emulation.setDeviceMetricsOverride",
                                           {"width": width,
                                            "height": height,
                                            "screenWidth": width,
                                            "screenHeight": height,
                                            "scale": 1,
                                            "positionX": 0,
                                            "positionY": 0,
                                            "deviceScaleFactor": float(self.job['dpr']),
                                            "mobile": True,
                                            "fitWindow": False,
                                            "screenOrientation":
                                                {"angle": 0, "type": "portraitPrimary"}},
                                           wait=True)
                self.devtools.send_command("Emulation.setTouchEmulationEnabled",
                                           {"enabled": True,
                                            "configuration": "mobile"},
                                           wait=True)
            # CPU throttling via devtools when not throttled externally
            if not self.options.throttle and 'throttle_cpu' in self.job:
                logging.debug('CPU Throttle target: %0.3fx', self.job['throttle_cpu'])
                if self.job['throttle_cpu'] > 1:
                    self.devtools.send_command("Emulation.setCPUThrottlingRate",
                                               {"rate": self.job['throttle_cpu']},
                                               wait=True)
            # Location override (best-effort; bad values are ignored)
            if 'lat' in self.job and 'lng' in self.job:
                try:
                    lat = float(str(self.job['lat']))
                    lng = float(str(self.job['lng']))
                    self.devtools.send_command(
                        'Emulation.setGeolocationOverride',
                        {'latitude': lat, 'longitude': lng,
                         'accuracy': 0})
                except Exception:
                    pass
            # UA String: record the Chrome version, then apply any job
            # override and optional per-task suffix
            ua_string = self.devtools.execute_js("navigator.userAgent")
            if ua_string is not None:
                match = re.search(r'Chrome\/(\d+\.\d+\.\d+\.\d+)', ua_string)
                if match:
                    self.browser_version = match.group(1)
            if 'uastring' in self.job:
                ua_string = self.job['uastring']
            if ua_string is not None and 'AppendUA' in task:
                ua_string += ' ' + task['AppendUA']
            if ua_string is not None:
                self.job['user_agent_string'] = ua_string
            # Disable js
            if self.job['noscript']:
                self.devtools.send_command("Emulation.setScriptExecutionDisabled",
                                           {"value": True}, wait=True)
            self.devtools.prepare_browser()
def on_start_recording(self, task):
"""Start recording"""
task['page_data'] = {'date': time.time()}
task['page_result'] = None
task['run_start_time'] = monotonic.monotonic()
if self.browser_version is not None and 'browserVersion' not in task['page_data']:
task['page_data']['browserVersion'] = self.browser_version
task['page_data']['browser_version'] = self.browser_version
if not self.options.throttle and 'throttle_cpu' in self.job:
task['page_data']['throttle_cpu_requested'] = self.job['throttle_cpu_requested']
if self.job['throttle_cpu'] > 1:
task['page_data']['throttle_cpu'] = self.job['throttle_cpu']
if self.devtools is not None:
self.devtools.start_recording()
    def on_stop_capture(self, task):
        """Do any quick work to stop things that are capturing data.

        Also grabs hero-element timings (when requested) while the page
        is still live and writes them gzipped next to the other results.
        """
        if self.devtools is not None:
            self.devtools.stop_capture()
        if 'heroElementTimes' in self.job and self.job['heroElementTimes']:
            hero_elements = None
            custom_hero_selectors = {}
            if 'heroElements' in self.job:
                custom_hero_selectors = self.job['heroElements']
            # Append a call with the custom selectors so the script runs
            # in-page (assumes hero_elements.js evaluates to a function --
            # confirm against the support script).
            with open(os.path.join(self.script_dir, 'hero_elements.js'), 'rb') as script_file:
                hero_elements_script = script_file.read()
            script = hero_elements_script + '(' + json.dumps(custom_hero_selectors) + ')'
            hero_elements = self.devtools.execute_js(script)
            if hero_elements is not None:
                path = os.path.join(task['dir'], task['prefix'] + '_hero_elements.json.gz')
                with gzip.open(path, 'wb', 7) as outfile:
                    outfile.write(json.dumps(hero_elements))
def on_stop_recording(self, task):
"""Stop recording"""
if self.devtools is not None:
self.devtools.collect_trace()
if self.devtools_screenshot:
if self.job['pngScreenShot']:
screen_shot = os.path.join(task['dir'],
task['prefix'] + '_screen.png')
self.devtools.grab_screenshot(screen_shot, png=True)
else:
screen_shot = os.path.join(task['dir'],
task['prefix'] + '_screen.jpg')
self.devtools.grab_screenshot(screen_shot, png=False, resize=600)
# Collect end of test data from the browser
self.collect_browser_metrics(task)
# Stop recording dev tools (which also collects the trace)
self.devtools.stop_recording()
    def run_task(self, task):
        """Run an individual test.

        Pops and executes script commands until the script is exhausted,
        an error occurs, or the overall test time limit elapses.
        Recording starts at the first recorded command and is stopped /
        processed at the end of each step (or once at the very end when
        steps are combined).
        """
        if self.devtools is not None:
            self.task = task
            logging.debug("Running test")
            end_time = monotonic.monotonic() + task['test_time_limit']
            task['current_step'] = 1
            recording = False
            while len(task['script']) and task['error'] is None and \
                    monotonic.monotonic() < end_time:
                self.prepare_task(task)
                command = task['script'].pop(0)
                if not recording and command['record']:
                    recording = True
                    self.on_start_recording(task)
                self.process_command(command)
                if command['record']:
                    self.devtools.wait_for_page_load()
                    # Finish the step unless we are combining steps and
                    # more script commands remain
                    if not task['combine_steps'] or not len(task['script']):
                        self.on_stop_capture(task)
                        self.on_stop_recording(task)
                        recording = False
                        self.on_start_processing(task)
                        self.wait_for_processing(task)
                        self.process_devtools_requests(task)
                        self.step_complete(task)
                        if task['log_data']:
                            # Move on to the next step
                            task['current_step'] += 1
                            self.event_name = None
                    task['navigated'] = True
            self.task = None
    def on_start_processing(self, task):
        """Start any processing of the captured data.

        The optimization checks run on a background thread while video
        post-processing and Wappalyzer detection run inline; the thread
        is joined before returning.
        """
        if task['log_data']:
            # Start the processing that can run in a background thread
            optimization = OptimizationChecks(self.job, task, self.get_requests())
            optimization.start()
            # Run the video post-processing
            if self.use_devtools_video and self.job['video']:
                self.process_video()
            self.wappalyzer_detect(task, self.devtools.main_request_headers)
            # wait for the background optimization checks
            optimization.join()
def wait_for_processing(self, task):
"""Wait for the background processing (if any)"""
pass
def execute_js(self, script):
"""Run javascipt"""
ret = None
if self.devtools is not None:
ret = self.devtools.execute_js(script)
return ret
def prepare_task(self, task):
"""Format the file prefixes for multi-step testing"""
if task['current_step'] == 1:
task['prefix'] = task['task_prefix']
task['video_subdirectory'] = task['task_video_prefix']
else:
task['prefix'] = '{0}_{1:d}'.format(task['task_prefix'], task['current_step'])
task['video_subdirectory'] = '{0}_{1:d}'.format(task['task_video_prefix'],
task['current_step'])
if task['video_subdirectory'] not in task['video_directories']:
task['video_directories'].append(task['video_subdirectory'])
if self.event_name is not None:
task['step_name'] = self.event_name
else:
task['step_name'] = 'Step_{0:d}'.format(task['current_step'])
def process_video(self):
"""Post process the video"""
from internal.video_processing import VideoProcessing
video = VideoProcessing(self.options, self.job, self.task)
video.process()
    def process_devtools_requests(self, task):
        """Process the devtools log and pull out the requests information.

        Feeds whichever intermediate artifacts exist (netlog, optimization,
        user timing, coverage, cpu) into DevToolsParser, then deletes the
        intermediates unless the job is in debug mode.
        """
        path_base = os.path.join(self.task['dir'], self.task['prefix'])
        devtools_file = path_base + '_devtools.json.gz'
        if os.path.isfile(devtools_file):
            from internal.support.devtools_parser import DevToolsParser
            out_file = path_base + '_devtools_requests.json.gz'
            options = {'devtools': devtools_file, 'cached': task['cached'], 'out': out_file}
            netlog = path_base + '_netlog_requests.json.gz'
            options['netlog'] = netlog if os.path.isfile(netlog) else None
            optimization = path_base + '_optimization.json.gz'
            options['optimization'] = optimization if os.path.isfile(optimization) else None
            user_timing = path_base + '_user_timing.json.gz'
            options['user'] = user_timing if os.path.isfile(user_timing) else None
            coverage = path_base + '_coverage.json.gz'
            options['coverage'] = coverage if os.path.isfile(coverage) else None
            cpu = path_base + '_timeline_cpu.json.gz'
            options['cpu'] = cpu if os.path.isfile(cpu) else None
            parser = DevToolsParser(options)
            parser.process()
            # Cleanup intermediate files that are not needed
            if 'debug' not in self.job or not self.job['debug']:
                if os.path.isfile(netlog):
                    os.remove(netlog)
                if os.path.isfile(optimization):
                    os.remove(optimization)
                if os.path.isfile(coverage):
                    os.remove(coverage)
                if os.path.isfile(devtools_file):
                    os.remove(devtools_file)
            # Propagate the overall page result code if the parser found one
            if 'page_data' in parser.result and 'result' in parser.result['page_data']:
                self.task['page_result'] = parser.result['page_data']['result']
def run_js_file(self, file_name):
"""Execute one of our js scripts"""
ret = None
script = None
script_file_path = os.path.join(self.script_dir, file_name)
if os.path.isfile(script_file_path):
with open(script_file_path, 'rb') as script_file:
script = script_file.read()
if script is not None:
ret = self.devtools.execute_js(script)
return ret
    def collect_browser_metrics(self, task):
        """Collect all of the in-page browser metrics that we need.

        Writes user timing events and any configured custom metrics as
        gzipped JSON next to the other per-step artifacts, and merges
        page_data.js results into task['page_data'].
        """
        user_timing = self.run_js_file('user_timing.js')
        if user_timing is not None:
            path = os.path.join(task['dir'], task['prefix'] + '_timed_events.json.gz')
            with gzip.open(path, 'wb', 7) as outfile:
                outfile.write(json.dumps(user_timing))
        page_data = self.run_js_file('page_data.js')
        if page_data is not None:
            task['page_data'].update(page_data)
        if 'customMetrics' in self.job:
            custom_metrics = {}
            for name in self.job['customMetrics']:
                # Wrap each custom metric so an in-page error doesn't
                # abort the evaluation
                script = 'var wptCustomMetric = function() {' +\
                         self.job['customMetrics'][name] +\
                         '};try{wptCustomMetric();}catch(e){};'
                custom_metrics[name] = self.devtools.execute_js(script)
            path = os.path.join(task['dir'], task['prefix'] + '_metrics.json.gz')
            with gzip.open(path, 'wb', 7) as outfile:
                outfile.write(json.dumps(custom_metrics))
def process_command(self, command):
"""Process an individual script command"""
logging.debug("Processing script command:")
logging.debug(command)
if command['command'] == 'navigate':
self.task['page_data']['URL'] = command['target']
url = str(command['target']).replace('"', '\"')
script = 'window.location="{0}";'.format(url)
script = self.prepare_script_for_record(script)
self.devtools.start_navigating()
self.devtools.execute_js(script)
elif command['command'] == 'logdata':
self.task['combine_steps'] = False
if int(re.search(r'\d+', str(command['target'])).group()):
logging.debug("Data logging enabled")
self.task['log_data'] = True
else:
logging.debug("Data logging disabled")
self.task['log_data'] = False
elif command['command'] == 'combinesteps':
self.task['log_data'] = True
self.task['combine_steps'] = True
elif command['command'] == 'seteventname':
self.event_name = command['target']
elif command['command'] == 'exec':
script = command['target']
if command['record']:
script = self.prepare_script_for_record(script)
self.devtools.start_navigating()
self.devtools.execute_js(script)
elif command['command'] == 'sleep':
delay = min(60, max(0, int(re.search(r'\d+', str(command['target'])).group())))
if delay > 0:
time.sleep(delay)
elif command['command'] == 'setabm':
self.task['stop_at_onload'] = bool('target' in command and
int(re.search(r'\d+',
str(command['target'])).group()) == 0)
elif command['command'] == 'setactivitytimeout':
if 'target' in command:
milliseconds = int(re.search(r'\d+', str(command['target'])).group())
self.task['activity_time'] = max(0, min(30, float(milliseconds) / 1000.0))
elif command['command'] == 'setuseragent':
self.task['user_agent_string'] = command['target']
elif command['command'] == 'setcookie':
if 'target' in command and 'value' in command:
url = command['target'].strip()
cookie = command['value']
pos = cookie.find(';')
if pos > 0:
cookie = cookie[:pos]
pos = cookie.find('=')
if pos > 0:
name = cookie[:pos].strip()
value = cookie[pos + 1:].strip()
if len(name) and len(value) and len(url):
self.devtools.send_command('Network.setCookie',
{'url': url, 'name': name, 'value': value})
elif command['command'] == 'setlocation':
try:
if 'target' in command and command['target'].find(',') > 0:
accuracy = 0
if 'value' in command and re.match(r'\d+', command['value']):
accuracy = int(re.search(r'\d+', str(command['value'])).group())
parts = command['target'].split(',')
lat = float(parts[0])
lng = float(parts[1])
self.devtools.send_command(
'Emulation.setGeolocationOverride',
{'latitude': lat, 'longitude': lng,
'accuracy': accuracy})
except Exception:
pass
elif command['command'] == 'addheader':
self.devtools.set_header(command['target'])
elif command['command'] == 'setheader':
self.devtools.set_header(command['target'])
elif command['command'] == 'resetheaders':
self.devtools.reset_headers()
elif command['command'] == 'clearcache':
self.devtools.clear_cache()
def navigate(self, url):
"""Navigate to the given URL"""
if self.devtools is not None:
self.devtools.send_command('Page.navigate', {'url': url}, wait=True)
def get_requests(self):
"""Get the request details for running an optimization check"""
requests = None
if self.devtools is not None:
requests = self.devtools.get_requests()
return requests
    def lighthouse_thread(self):
        """Run lighthouse in a thread so we can kill it if it times out.

        Streams lighthouse's stderr into task['lighthouse_log'] as it
        runs; the caller enforces the overall timeout via Thread.join.
        """
        cmd = self.lighthouse_command
        self.task['lighthouse_log'] = cmd + "\n"
        logging.debug(cmd)
        proc = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE)
        for line in iter(proc.stderr.readline, b''):
            try:
                logging.debug(line.rstrip())
                self.task['lighthouse_log'] += line
            except Exception:
                pass
        proc.communicate()
    def run_lighthouse_test(self, task):
        """Run a lighthouse test against the current browser session.

        Builds the lighthouse CLI command, runs it on a watchdog thread
        (10-minute cap), then gzips the JSON/HTML reports, strips raw
        screenshots out of them, and extracts the audit scores (handling
        the v1/v2/v3 lighthouse report formats).
        """
        task['lighthouse_log'] = ''
        if 'url' in self.job and self.job['url'] is not None:
            self.job['shaper'].configure(self.job, task)
            output_path = os.path.join(task['dir'], 'lighthouse.json')
            json_file = os.path.join(task['dir'], 'lighthouse.report.json')
            json_gzip = os.path.join(task['dir'], 'lighthouse.json.gz')
            html_file = os.path.join(task['dir'], 'lighthouse.report.html')
            html_gzip = os.path.join(task['dir'], 'lighthouse.html.gz')
            time_limit = min(int(task['time_limit']), 80)
            command = ['lighthouse',
                       '"{0}"'.format(self.job['url']),
                       '--disable-network-throttling',
                       '--disable-cpu-throttling',
                       '--throttling-method', 'provided',
                       '--enable-error-reporting',
                       '--max-wait-for-load', str(int(time_limit * 1000)),
                       '--port', str(task['port']),
                       '--output', 'html',
                       '--output', 'json',
                       '--output-path', '"{0}"'.format(output_path)]
            if self.job['keep_lighthouse_trace']:
                command.append('--save-assets')
            if self.options.android or 'mobile' not in self.job or not self.job['mobile']:
                command.append('--disable-device-emulation')
            if 'user_agent_string' in self.job:
                # Strip characters that could break the quoted chrome flag
                sanitized_user_agent = re.sub(r'[^a-zA-Z0-9_\-.;:/()\[\] ]+', '', self.job['user_agent_string'])
                command.append('--chrome-flags="--user-agent=\'{0}\'"'.format(sanitized_user_agent))
            if len(task['block']):
                for pattern in task['block']:
                    # Shell-escape single quotes inside the pattern
                    pattern = "'" + pattern.replace("'", "'\\''") + "'"
                    command.extend(['--blocked-url-patterns', pattern])
            if 'headers' in task:
                headers_file = os.path.join(task['dir'], 'lighthouse-headers.json')
                with open(headers_file, 'wb') as f_out:
                    json.dump(task['headers'], f_out)
                command.extend(['--extra-headers', '"{0}"'.format(headers_file)])
            cmd = ' '.join(command)
            self.lighthouse_command = cmd
            # Give lighthouse up to 10 minutes to run all of the audits
            try:
                lh_thread = threading.Thread(target=self.lighthouse_thread)
                lh_thread.start()
                lh_thread.join(600)
            except Exception:
                pass
            from .os_util import kill_all
            kill_all('node', True)
            self.job['shaper'].reset()
            # Rename and compress the trace file, delete the other assets
            if self.job['keep_lighthouse_trace']:
                try:
                    lh_trace_src = os.path.join(task['dir'], 'lighthouse-0.trace.json')
                    if os.path.isfile(lh_trace_src):
                        # read the JSON in and re-write it line by line to match the other traces
                        with open(lh_trace_src, 'rb') as f_in:
                            trace = json.load(f_in)
                            if trace is not None and 'traceEvents' in trace:
                                lighthouse_trace = os.path.join(task['dir'],
                                                                'lighthouse_trace.json.gz')
                                with gzip.open(lighthouse_trace, 'wb', 7) as f_out:
                                    f_out.write('{"traceEvents":[{}')
                                    for trace_event in trace['traceEvents']:
                                        f_out.write(",\n")
                                        f_out.write(json.dumps(trace_event))
                                    f_out.write("\n]}")
                except Exception:
                    pass
            # Delete all the left-over lighthouse assets
            files = glob.glob(os.path.join(task['dir'], 'lighthouse-*'))
            for file_path in files:
                try:
                    os.remove(file_path)
                except Exception:
                    pass
            if os.path.isfile(json_file):
                # Remove the raw screenshots if they were stored with the file
                lh_report = None
                with open(json_file, 'rb') as f_in:
                    lh_report = json.load(f_in)
                modified = False
                if lh_report is not None and 'audits' in lh_report:
                    if 'screenshots' in lh_report['audits']:
                        del lh_report['audits']['screenshots']
                        modified = True
                    if 'screenshot-thumbnails' in lh_report['audits']:
                        del lh_report['audits']['screenshot-thumbnails']
                        modified = True
                if modified:
                    with gzip.open(json_gzip, 'wb', 7) as f_out:
                        json.dump(lh_report, f_out)
                else:
                    # Unmodified report: gzip it as a straight stream copy
                    with open(json_file, 'rb') as f_in:
                        with gzip.open(json_gzip, 'wb', 7) as f_out:
                            shutil.copyfileobj(f_in, f_out)
                try:
                    os.remove(json_file)
                except Exception:
                    pass
                # Extract the audit scores
                if lh_report is not None:
                    audits = {}
                    # v1.x
                    if 'aggregations' in lh_report:
                        for entry in lh_report['aggregations']:
                            if 'name' in entry and 'total' in entry and \
                                    'scored' in entry and entry['scored']:
                                name = entry['name'].replace(' ', '')
                                audits[name] = entry['total']
                    # v2.x
                    elif 'reportCategories' in lh_report:
                        for category in lh_report['reportCategories']:
                            if 'name' in category and 'score' in category:
                                category_name = category['name'].replace(' ', '')
                                score = float(category['score']) / 100.0
                                audits[category_name] = score
                                if category['name'] == 'Performance' and 'audits' in category:
                                    for audit in category['audits']:
                                        if 'id' in audit and 'group' in audit and \
                                                audit['group'] == 'perf-metric' and \
                                                'result' in audit and \
                                                'rawValue' in audit['result']:
                                            name = category_name + '.' + \
                                                audit['id'].replace(' ', '')
                                            audits[name] = audit['result']['rawValue']
                    # v3.x
                    elif 'categories' in lh_report:
                        for categoryId in lh_report['categories']:
                            category = lh_report['categories'][categoryId]
                            if 'title' not in category or 'score' not in category:
                                continue
                            category_title = category['title'].replace(' ', '')
                            audits[category_title] = category['score']
                            if categoryId != 'performance' or 'auditRefs' not in category:
                                continue
                            for auditRef in category['auditRefs']:
                                if auditRef['id'] not in lh_report['audits']:
                                    continue
                                if 'group' not in auditRef or auditRef['group'] != 'metrics':
                                    continue
                                audit = lh_report['audits'][auditRef['id']]
                                name = category_title + '.' + audit['id']
                                audits[name] = audit['rawValue']
                    audits_gzip = os.path.join(task['dir'], 'lighthouse_audits.json.gz')
                    with gzip.open(audits_gzip, 'wb', 7) as f_out:
                        json.dump(audits, f_out)
            if os.path.isfile(html_file):
                # Remove the raw screenshots if they were stored with the file
                with open(html_file, 'rb') as f_in:
                    lh_report = f_in.read()
                start = lh_report.find('\n    "screenshots')
                if start >= 0:
                    # 7 == len('\n    },'): skip past the closing brace
                    end = lh_report.find('\n    },', start)
                    if end >= 0:
                        lh_report = lh_report[:start] + lh_report[end + 7:]
                with gzip.open(html_gzip, 'wb', 7) as f_out:
                    f_out.write(lh_report)
                try:
                    os.remove(html_file)
                except Exception:
                    pass
    def wappalyzer_detect(self, task, request_headers):
        """Run the wappalyzer detection.

        Evaluates the detection script in-page (30 second budget) and
        stores the detected categories/apps in task['page_data']; on any
        failure sets page_data['wappalyzer_failed'] instead.
        """
        # Run the Wappalyzer detection (give it 30 seconds at most)
        completed = False
        if self.devtools is not None:
            try:
                logging.debug('wappalyzer_detect')
                detect_script = self.wappalyzer_script(request_headers)
                response = self.devtools.send_command("Runtime.evaluate",
                                                      {'expression': detect_script,
                                                       'awaitPromise': True,
                                                       'returnByValue': True,
                                                       'timeout': 30000},
                                                      wait=True, timeout=30)
                if response is not None and 'result' in response and\
                        'result' in response['result'] and\
                        'value' in response['result']['result']:
                    result = response['result']['result']['value']
                    if result:
                        completed = True
                        logging.debug(result)
                        # The script returns its findings as a JSON string
                        detected = json.loads(result)
                        if 'categories' in detected:
                            task['page_data']['detected'] = dict(detected['categories'])
                        if 'apps' in detected:
                            task['page_data']['detected_apps'] = dict(detected['apps'])
            except Exception as err:
                logging.exception("Exception running Wappalyzer: %s", err.__str__())
        if not completed:
            task['page_data']['wappalyzer_failed'] = 1
def wappalyzer_script(self, response_headers):
"""Build the wappalyzer script to run in-browser"""
script = None
try:
with open(os.path.join(self.support_path, 'Wappalyzer', 'script.js')) as f_in:
script = f_in.read()
if script is not None:
wappalyzer = None
with open(os.path.join(self.support_path, 'Wappalyzer', 'wappalyzer.js')) as f_in:
wappalyzer = f_in.read()
if wappalyzer is not None:
json_data = None
with open(os.path.join(self.support_path, 'Wappalyzer', 'apps.json')) as f_in:
json_data = f_in.read()
if json is not None:
# Format the headers as a dictionary of lists
headers = {}
if response_headers is not None:
if isinstance(response_headers, dict):
for key in response_headers:
values = []
entry = response_headers[key]
if isinstance(entry, list):
values = entry
elif isinstance(entry, (str, unicode)):
entries = entry.split('\n')
for value in entries:
values.append(value.strip())
if values:
headers[key.lower()] = values
elif isinstance(response_headers, list):
for pair in response_headers:
if isinstance(pair, (str, unicode)):
parts = pair.split(':', 1)
key = parts[0].strip(' :\n\t').lower()
value = parts[1].strip(' :\n\t')
if key not in headers:
headers[key] = []
headers[key].append(value)
script = script.replace('%WAPPALYZER%', wappalyzer)
script = script.replace('%JSON%', json_data)
script = script.replace('%RESPONSE_HEADERS%', json.dumps(headers))
except Exception:
pass
return script
|
utils.py | import signal
import sys
from threading import Thread, Event
# Dict-backed memoization; see the fastest-memoization ActiveState recipe.
def memoize(f):
    """ Memoization decorator for functions taking one or more arguments. """
    class _Memo(dict):
        """Callable cache: missing argument tuples are computed on demand."""
        def __init__(self, fn):
            self.f = fn
        def __call__(self, *args):
            return self[args]
        def __missing__(self, key):
            result = self.f(*key)
            self[key] = result
            return result
    return _Memo(f)
def killable_function(f):
    """Run *f* on a daemon thread and wait for it to complete.

    The daemon flag lets the process exit even if f hangs.  The join is
    done in a timeout loop because a bare join() does not respond to
    signals (see the XXX note in watch_thread_wrapper below), which made
    the original "killable" function effectively unkillable via Ctrl-C.
    """
    thread = Thread(target=f, daemon=True)
    thread.start()
    while thread.is_alive():
        thread.join(60)
def register_exit_signals(sigh):
    """
    Given a signal handler sigh that should run on program exit,
    registers all the cuauv-deemed-appropriate signals to that signal
    handler.
    """
    # Control-C, pkill, and closing of the commanding terminal.
    for signum in (signal.SIGINT, signal.SIGTERM, signal.SIGHUP):
        signal.signal(signum, sigh)
def watch_thread_wrapper(f):
    """
    Calls a function f that can be properly shut down on process
    termination. This function will exit the program on SIGINT or SIGTERM.
    f should take in a watcher and an event and should exit when
    the event is set.
    EXAMPLE:
    def f(watcher, quit_event):
        watcher.watch(group_of_interest)
        while not quit_event.is_set():
            # do things
            watcher.wait()
    watch_thread_wrapper(f) # Begins the loop above.
    """
    import shm
    watcher = shm.watchers.watcher()
    quit_event = Event()
    thread = Thread(target=f, args=(watcher, quit_event))
    def interrupt_handler(_signal, _frame):
        # Tell f to stop, wake it up, wait for it, then exit the process.
        quit_event.set()
        watcher.disable()
        thread.join()
        sys.exit(0)
    register_exit_signals(interrupt_handler)
    thread.start()
    # XXX: Python HACK, join calls without a timeout do not respond to signals
    while thread.is_alive():
        thread.join(60)
|
server.py |
import sys
import socket
import argparse
from threading import Thread, Event
from getch import getch
from time import sleep
MESSAGE = "Hello, World!"

""" Message protocol
(x[7] . x[0:7]) . yyyyzzzzzzzzzzzzzzzzz
x - packet type
y - size of data segment
z - message data
Possible packet types (x[7])::
0: Request
1: Response
Possible packet types (x[0:7])::
0: Heartbeat
2: Screen data
"""

# Python 2 compatibility: alias input() to raw_input() where the latter exists.
try:
    input = raw_input
except NameError:
    # BUG FIX: the original bare `except:` swallowed *every* exception
    # (including KeyboardInterrupt); only NameError is expected here, and on
    # Python 3 the builtin input() already behaves correctly.
    pass
class Server(object):
    """UDP endpoint for the "ASCII Skype" toy protocol documented above.

    Packets are ASCII text: 2 hex digits of packet type, 8 hex digits of
    payload length, then the payload itself (see pack()).
    """

    def __init__(self, dest_ip, send_port, recv_port):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.dest_ip = dest_ip
        self.send_port = send_port
        self.recv_port = recv_port
        # NOTE(review): binding the socket to (dest_ip, send_port) looks
        # suspicious for a sender — presumably both peers use symmetric
        # ports; confirm against the peer implementation.
        self.sock.bind((dest_ip, send_port))
        self.sock.settimeout(0.1)
        self.dest_terminal_size = (100, 100)
        self.last_display = ""
        self.event_stop = Event()

    """ 0 - HEARTBEAT """
    def req_heartbeat(self):
        # Request (response bit clear), subtype 0.
        x = 0x00 | 0
        y = 6
        z = "alive?"
        # BUG FIX: pack() is a method; the original called the bare name.
        self.send_raw(self.pack(x, y, z))

    def resp_heartbeat(self):
        x = 0x80 | 0
        y = 12
        z = "yeah my dude"
        self.send_raw(self.pack(x, y, z))

    """ 2 - DISPLAY """
    def req_display(self):
        """Ask the peer for screen data, advertising our own terminal size."""
        import os  # local import: this module does not import os at top level
        # BUG FIX: stty returns strings, and the original unpacked into
        # `columns` but formatted the undefined name `cols`.
        rows, cols = (int(v) for v in os.popen('stty size', 'r').read().split())
        # BUG FIX: a *request* must have the response bit (0x80) clear per the
        # protocol doc at the top of this file.
        x = 0x00 | 2
        z = ("%x" % rows).zfill(2) + ("%x" % cols).zfill(2)
        y = len(z)
        self.send_raw(self.pack(x, y, z))

    def resp_display(self):
        x = 0x80 | 2
        rows, cols = self.dest_terminal_size
        z = self._dummy_display_data(rows, cols)
        y = len(z)
        self.send_raw(self.pack(x, y, z))

    def _dummy_display_data(self, rows, cols):
        """ Return dummy display data """
        s = ""
        s += "A" * cols + "\n"
        for _ in range(rows - 2):
            s += "B" + " " * (cols - 2) + "C" + "\n"
        s += "D" * cols + "\n"
        return s

    """ Helper functions """
    def pack(self, x, y, z):
        """ Make packet: 2 hex chars of type + 8 hex chars of length + data. """
        assert x <= 0xFF
        assert y <= 0xFFFFFFFF
        assert y == len(z)
        x_ = ("%x" % x).zfill(2)
        y_ = ("%x" % y).zfill(8)
        return x_ + y_ + z

    """ RAW SEND """
    def send_raw(self, packet):
        # BUG FIX: Python 3 sockets require bytes; encode str payloads.
        if isinstance(packet, str):
            packet = packet.encode('ascii')
        self.sock.sendto(packet, (self.dest_ip, self.send_port))

    def recv_raw(self, size=1024):
        """Receive one datagram from the expected peer; return its text or None."""
        data, addr = self.sock.recvfrom(size)
        # BUG FIX: recvfrom returns an (ip, port) tuple; the original compared
        # the tuple against the bare IP string (never equal) and, worse,
        # printed the data instead of returning it.
        if addr[0] == self.dest_ip:
            return data.decode('ascii', errors='replace')
        return None

    """ Actual processing n stuff """
    def handle_recv(self):
        try:
            # BUG FIX: UDP delivers whole datagrams — reading a header and
            # then the payload as two recvfrom() calls (as the original did)
            # consumes two packets and loses data. Also, per pack(), the
            # header is 10 hex chars (2 type + 8 length), not 5 bytes, and
            # Python 3 has no `long`.
            packet = self.recv_raw()
            if packet is None:
                return
            x = int(packet[0:2], 16)
            y = int(packet[2:10], 16)
            z = packet[10:10 + y]
            if x == 0x00 | 0:
                # Heartbeat request
                self.resp_heartbeat()
            elif x == 0x80 | 0:
                # Heartbeat response
                print("[INFO] Heartbeat response received")
            elif x == 0x00 | 2:
                # Display request: payload carries the peer's rows/cols in hex.
                rows = int(z[0:2], 16)
                cols = int(z[2:4], 16)
                self.dest_terminal_size = (rows, cols)
                self.resp_display()
            elif x == 0x80 | 2:
                # Display response
                self.last_display = z
        except (socket.timeout, ValueError):
            # timeout: nothing to read this tick; ValueError: malformed header.
            pass

    def update_loop(self):
        """ Handles incoming packets and updates display """
        while not self.event_stop.is_set():
            self.handle_recv()
            self.req_display()
            sleep(0.01)

    def input_loop(self):
        while not self.event_stop.is_set():
            c = getch()
            if c == "q":
                print("Quitting")
                self.event_stop.set()

    def display_loop(self):
        while not self.event_stop.is_set():
            print(self.last_display)
            sleep(0.001)

    def run(self):
        """ Run the server """
        threads = []
        threads.append(Thread(target=self.update_loop))
        threads.append(Thread(target=self.input_loop))
        threads.append(Thread(target=self.display_loop))
        for t in threads:
            t.daemon = True  # Set all threads as daemon
            t.start()
        try:
            while not self.event_stop.is_set():
                sleep(0.001)
        except KeyboardInterrupt:
            sys.exit(0)
if __name__ == "__main__":
    # Command-line entry point: all three endpoint options are mandatory.
    cli = argparse.ArgumentParser(description="ASCII Skype!")
    cli.add_argument("--dest", "-d",
                     help="Destination IP")
    cli.add_argument("--recv_port", "-r",
                     help="Receive Port")
    cli.add_argument("--send_port", "-s",
                     help="Send Port")
    opts = cli.parse_args()
    if not (opts.dest and opts.send_port and opts.recv_port):
        cli.print_help()
        sys.exit(1)
    server = Server(opts.dest, int(opts.send_port), int(opts.recv_port))
    server.run()
|
lock.py | import threading
class Lock:
    """
    Represents a lock object which has one-to-one correspondence with a MySQL connection.
    A lock is held by a connection.
    """

    def __init__(self, locker, name):
        """
        :param locker: Instance of Locker which will be used to generate new mysql connections.
        :param name: Name of the lock
        """
        self.locker = locker
        self.name = name
        self.acquired = False
        self.conn = None
        self.release_event = threading.Event()

    def acquire(self, timeout=-1, refresh_interval_secs=10):
        """
        Try to obtain lock with the set name.

        :param timeout: Timeout for getting the lock in seconds. Defaults to -1 which will wait for indefinite time.
        :param refresh_interval_secs: Interval at which a background thread pings the mysql connection that's
            holding the lock so it is not dropped as idle.
        :return: True if the lock is acquired successfully, False if not (for example if the timeout expired).
        """
        self.conn = self.locker.connection_factory.new()
        with self.conn.cursor() as cursor:
            cursor.execute("SELECT GET_LOCK(%s, %s)", (self.name, str(timeout)))
            rows = cursor.fetchall()
            ret_value = rows[0][0]
        if isinstance(ret_value, int) and ret_value == 1:
            self.acquired = True
            # clear the release event since this is freshly acquired
            self.release_event.clear()
            # Daemonize so a forgotten lock cannot block interpreter exit.
            threading.Thread(
                target=self.refresh, args=(refresh_interval_secs,), daemon=True
            ).start()
            return True
        # BUG FIX: the original leaked the connection when GET_LOCK timed out
        # or failed (returned 0/NULL) — close it so it isn't held forever.
        self.conn.close()
        self.conn = None
        return False

    def release(self):
        """
        Releases the lock and closes the corresponding mysql connection.
        :return: No return value.
        """
        self.release_event.set()  # set this as released, so that the refresh thread exits.
        with self.conn.cursor() as cursor:
            cursor.execute("DO RELEASE_LOCK(%s)", (self.name,))
        # Closing the connection releases the lock server-side in any case.
        self.conn.close()
        self.conn = None
        self.acquired = False

    def refresh(self, interval_seconds):
        """Keep the lock-holding connection alive until release() is called."""
        # Event.wait() returns True once the release event is set; otherwise it
        # timed out and we ping. (BUG FIX: the original used the deprecated
        # isSet() alias, removed in Python 3.12.)
        while not self.release_event.wait(interval_seconds):
            conn = self.conn
            # BUG FIX: release() may close the connection between the wait
            # timing out and the ping; never ping a closed/None connection.
            if conn is None:
                break
            conn.ping()

    def locked(self):
        """
        Check whether lock is acquired or not.
        :return: True if lock is acquired, False if not.
        """
        return self.acquired
|
test_fx.py | # Owner(s): ["oncall: fx"]
import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import operator
import os
import pickle
import sys
import torch
import traceback
import typing
import types
import warnings
import unittest
from math import sqrt
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH, CodeGen
from torch.fx.node import Target, Argument, _format_arg
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from collections import namedtuple
from torch.fx.proxy import TraceError
from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import AnnotationsTest # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import TypeCheckerTest # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Tuple, Union
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_WINDOWS,
TEST_WITH_ROCM,
find_library_location,
run_tests,
)
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
# torchvision is optional: probe for it once and build a skip decorator so
# vision-model tests are skipped (not errored) when it is absent.
try:
    from torchvision import models as torchvision_models
    HAS_TORCHVISION = True
except ImportError:
    HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
    """Tiny module reused across tests: shift the input by 3, clamp at zero."""

    def forward(self, x):
        shifted = x + 3.0
        return torch.relu(shifted)
def a_non_torch_leaf(a, b):
    """Plain non-torch helper used to test tracing through free functions."""
    total = a + b
    return total
# Used for test_autowrap_function. Autowrapped functions need to be global
def fx_int(x: float) -> int:
    """Truncate *x* to an int (global so FX autowrap can patch it)."""
    return int(x)
def fx_int_x2(x: float) -> int:
    """Truncate *x* to an int and double it."""
    truncated = int(x)
    return truncated * 2
# used in test_pytree. It's all the way out here because pickling a GraphModule
# that uses Point errors out if Point is local to the function
Point = namedtuple('Point', ['x', 'y'])

# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
    # Takes a pair-like first argument; lifted into the traced graph by wrap().
    return a[0] + a[1] + b

wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')

def a_lifted_leaf2(a, b):
    # Same as a_lifted_leaf, but registered by passing the function object.
    return a[0] + a[1] + b

wrap(a_lifted_leaf2)

# Builtins can be wrapped too (exercised by the tracer tests below).
wrap('len')
wrap('getattr')

def wrapped_named_tup(p1, *, p2):
    # Accesses namedtuple fields; p2 is keyword-only on purpose.
    return p1.x + p2.y

wrap(wrapped_named_tup)

@wrap
def wrapped_via_decorator(a):
    return a + 1

wrap('wrapped_with_submodule')

def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
    # Wrapped function that itself invokes a submodule passed as an argument.
    return batchnorm1d(x)

# Keep references to the original (pre-patch) functions so tests can assert
# that wrap() does not replace the module-level bindings.
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt

wrap('wrapper_fn')

def wrapper_fn(x):
    # Intentionally calls a nonexistent attribute; only ever traced, not run.
    return torch.foo(x)
class Pair(NamedTuple):
    # Simple two-tensor record used to exercise NamedTuple handling in FX.
    x : torch.Tensor
    y : torch.Tensor

    def _custom_fx_repr_fn(self) -> str:
        """Custom repr hook consulted by FX when formatting node arguments."""
        return f"Pair(x={_format_arg(self.x)}, y={_format_arg(self.y)})"
# for testing pytrees
class Foo(object):  # noqa: B209
    """Plain two-attribute object (not a registered pytree node)."""

    def __init__(self, a, b):
        self.a, self.b = a, b
class TestFX(JitTestCase):
    def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
        # Enable it in testing but not by default
        self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
        torch.fx.proxy.TracerBase.check_mutable_operations = True

        if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS):
            lib_file_path = find_library_location('libtorchbind_test.so')
            torch.ops.load_library(str(lib_file_path))

    def tearDown(self):
        # Restore the global flag flipped in setUp so other suites see defaults.
        torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag

    def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
        """Check that an nn.Module's results match the GraphModule version
        for a given set of args/kwargs.
        """
        kwargs = kwargs if kwargs else {}
        ref_outs = m(*args, **kwargs)
        gm = symbolic_trace(m)
        gm.graph.lint()
        test_outs = gm(*args, **kwargs)
        self.assertEqual(ref_outs, test_outs)
    def test_graph_module(self):
        # Smoke-test symbolic_trace + torch.jit.script over several module shapes.
        class MySub(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.w = torch.nn.Parameter(torch.rand(4, 3))

            def forward(self, x):
                return self.w + x

        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.lin = torch.nn.Linear(4, 3)
                self.sub_mod = MySub()
                self.w = torch.nn.Parameter(torch.rand(3))

            def forward(self, A, B, c):
                t = torch.sigmoid(A) + self.lin(c)
                return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))

        m = MyModule()
        gm = symbolic_trace(m)
        ms = torch.jit.script(gm)

        class M2(torch.nn.Module):
            def forward(self, A):
                m, idx = torch.max(A, 0)
                return m + 1, idx + 1

        m2 = M2()
        gm2 = symbolic_trace(m2)

        class T(torch.nn.Module):
            def forward(self, A, b=4, *args, c=5, **kwargs):
                x = A + 1 + args[0] + kwargs['3']
                return x

        t = T()
        symbolic_trace(t)

        # test for issue described at https://github.com/pytorch/pytorch/issues/63883
        class M3(torch.nn.Module):
            def forward(self, x):
                return torch.relu(x)

        m3 = M3()
        gm3 = symbolic_trace(m3)
        # Re-initializing a GraphModule via __new__/__init__ must still work.
        new_instance = gm3.__new__(type(gm3))
        new_instance.__init__(gm3, gm3.graph)

        x = torch.randn(5, 3)
        torch.testing.assert_allclose(new_instance(x), torch.relu(x))
    def test_custom_import(self):
        # Graphs calling free functions from this module must execute correctly.
        graph = torch.fx.Graph()
        a = graph.placeholder('x')
        b = graph.placeholder('y')
        c = graph.call_function(a_non_torch_leaf, (a, b))
        d = graph.call_function(torch.sin, (c,))
        graph.output(d)
        gm = GraphModule(torch.nn.Module(), graph)
        x, y = torch.rand(1), torch.rand(1)
        self.assertEqual(torch.sin(x + y), gm(x, y))

    def test_args_kwargs(self):
        # *args/**kwargs forwarding should trace and round-trip.
        class T(torch.nn.Module):
            def forward(self, *args, **kwargs):
                x = args[0] + kwargs['foo']
                return x

        t = T()
        self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})

    def test_args_kwargs_no_self(self):
        # `self` smuggled inside *args is unsupported and must raise.
        class T(torch.nn.Module):
            def forward(*args, **kwargs):  # noqa: B902
                self = args[0]
                return torch.relu(args[1])

        t = T()
        with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
            self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
    def test_fx_shifts(self):
        # Bit-shift operators should survive tracing.
        class MyModule(torch.nn.Module):
            def forward(self, x):
                return x << 3, x >> 3

        input = torch.LongTensor(10).random_(0, 1024)
        m = MyModule()
        self.checkGraphModule(m, (input,))

    def test_fx_and_or(self):
        # Bitwise and/or operators should survive tracing.
        class MyModule(torch.nn.Module):
            def forward(self, x):
                return x & x, x | x

        input = torch.LongTensor(10).random_(0, 1024)
        m = MyModule()
        self.checkGraphModule(m, (input,))

    def test_dict(self):
        # Dict inputs and dict outputs both trace.
        class MyDictMod(torch.nn.Module):
            def forward(self, d):
                return d['3'].relu(), {'4' : d['3'].neg()}

        input_dict = {'3': torch.rand(3, 4)}
        m = MyDictMod()
        self.checkGraphModule(m, (input_dict,))

    def test_matmul_tracing(self):
        # `@` with a constant on either side traces to the right matmul.
        const = torch.randn(3)

        def matmul_f(x):
            return x @ const

        mod = symbolic_trace(matmul_f)
        inp = torch.randn(3)
        self.assertEqual(mod(inp), matmul_f(inp))

        def rmatmul_f(x):
            return const @ x

        mod = symbolic_trace(rmatmul_f)
        inp = torch.randn(3)
        self.assertEqual(mod(inp), rmatmul_f(inp))
    def test_disallow_override(self):
        # Custom delegate to disallow in-place tensor operations
        class NoMutableCallTracer(Tracer):
            def create_node(self, kind : str, target : Union[str, Callable],
                            args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
                            type_expr : Optional[Any] = None) -> Node:
                # Torch convention: trailing underscore marks in-place ops.
                name = target if isinstance(target, str) else torch.typename(target)
                if name[-1] == '_':
                    raise RuntimeError('In-place operations are not supported')
                return super().create_node(kind, target, args, kwargs, name)

        # Test method
        class MyInplaceMod(torch.nn.Module):
            def forward(self, x):
                x.add_(3.0)
                return x

        m = MyInplaceMod()
        with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
            NoMutableCallTracer().trace(m)

        # Test free function
        class MyInplaceMod2(torch.nn.Module):
            def forward(self, x):
                torch.log_(x)
                return x

        m2 = MyInplaceMod2()
        with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
            NoMutableCallTracer().trace(m2)

        # Test symbolic node as an arg
        class MyInplaceMod3(torch.nn.Module):
            def forward(self, x):
                y = torch.ones(3, 4)
                y.add_(x)
                return x

        m3 = MyInplaceMod3()
        with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
            NoMutableCallTracer().trace(m3)
    def test_leaf_module(self):
        # Custom delegate to make it so that there are no leaf modules, everything
        # should get traced through
        class NoLeafModulesTracer(Tracer):
            def is_leaf_module(self, m, qualname):
                return False

        class MyReluMod(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.relu = torch.nn.ReLU()

            def forward(self, x):
                return self.relu(x)

        mrm = MyReluMod()
        sym = NoLeafModulesTracer().trace(mrm)
        # With no leaf modules, no call_module nodes may appear in the graph.
        for node in sym.nodes:
            self.assertNotEqual(node.op, 'call_module')
        sym.lint()
    def test_wrap(self):
        # wrap('name') lifts the function into traced code without rebinding it.
        self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))

        def to_trace(y):
            return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)

        m = symbolic_trace(to_trace)
        self.assertIn('a_lifted_leaf', m.code)
        self.assertEqual(27, m(2))
        self.assertIs(a_lifted_leaf, real_a_lifed_leaf)

    def test_wrap_fn_directly(self):
        # wrap(fn) (function object rather than name) behaves identically.
        self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))

        def to_trace(y):
            return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)

        m = symbolic_trace(to_trace)
        self.assertIn('a_lifted_leaf2', m.code)
        self.assertEqual(27, m(2))
        self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)

    def test_wrapped_via_decorator(self):
        # @wrap used as a decorator must not leave patch markers behind.
        self.assertEqual(wrapped_via_decorator(0), 1)

        def to_trace(y):
            return wrapped_via_decorator(y)

        m = symbolic_trace(to_trace)
        self.assertIn('wrapped_via_decorator', m.code)
        self.assertEqual(m(0), 1)
        self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
        self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))

    def test_wrapped_via_decorator_and_transformed(self):
        # The wrapped call must also survive a Transformer pass.
        self.assertEqual(wrapped_via_decorator(0), 1)

        def to_trace(y):
            return wrapped_via_decorator(y)

        m = symbolic_trace(to_trace)
        self.assertIn('wrapped_via_decorator', m.code)
        self.assertEqual(m(0), 1)
        self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
        self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))

        transformed = torch.fx.Transformer(m).transform()
        self.assertIn('wrapped_via_decorator', transformed.code)
        self.assertEqual(transformed(0), 1)
        self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
        self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
    def test_wrap_with_submodule(self):
        # A wrapped free function may receive a submodule as an argument.
        class M(torch.nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)

            def forward(self, x: torch.Tensor):
                return wrapped_with_submodule(x, self.batchnorm1d)

        m = symbolic_trace(M())

        self.assertIn("wrapped_with_submodule", m.code)

        input = torch.rand(3, 2)
        ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
        self.assertEqual(ref_batchnorm1d(input), m(input))

    def test_wrapped_retrace(self):
        # Re-tracing an already-traced module must keep the wrapped call intact.
        def to_trace(y):
            return wrapped_via_decorator(y)

        m = symbolic_trace(to_trace)
        self.assertIn('wrapped_via_decorator', m.code)
        self.assertEqual(m(0), 1)

        retraced = symbolic_trace(m)
        self.assertIn('wrapped_via_decorator', retraced.code)
        self.assertEqual(retraced(0), 1)
    def test_graph_edit_with_proxy(self):
        # Proxies over copied graph outputs can be used to append new nodes.
        class M(torch.nn.Module):
            def forward(self, a, b):
                return a + b

        m = M()
        g = symbolic_trace(m).graph
        new_g = torch.fx.Graph()
        val_map : Dict[Node, Node] = {}
        output_val = new_g.graph_copy(g, val_map)
        t = Proxy(output_val)
        # test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
        new_g.output((t + t).node)
        gm = GraphModule(m, new_g)
        gm.graph.lint()
        self.assertEqual(gm(3, 4), 14)

    def test_graph_unique_names(self):
        # graph_copy + extra proxy nodes must not produce duplicate node names.
        class M(torch.nn.Module):
            def forward(self, a, b):
                return a + b

        m = M()
        g = symbolic_trace(m).graph
        new_g = torch.fx.Graph()
        val_map : Dict[Node, Node] = {}
        output_val = new_g.graph_copy(g, val_map)
        t = Proxy(output_val)
        # test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
        new_g.output((t + t).node)
        gm = GraphModule(m, new_g)
        # NOTE(review): `Set` is not imported from typing in this file; local
        # annotations are unevaluated at runtime, so this works by accident.
        seen_names : Set[str] = set()
        for node in gm.graph.nodes:
            assert node.name not in seen_names
            seen_names.add(node.name)
    def test_stack_traces(self):
        # record_stack_traces should attach a trace to every non-output node,
        # and node_copy must preserve it.
        class M(torch.nn.Module):
            def forward(self, a, b):
                return a + b

        tracer = torch.fx.Tracer()
        tracer.record_stack_traces = True

        graph = tracer.trace(M())
        # saving the original list because we will insert new nodes as a part of a test
        orig_graph_nodes = list(graph.nodes)
        for node in orig_graph_nodes:
            if node.op == 'output':
                continue
            self.assertTrue(node.stack_trace is not None)
            assert 'test_fx.py' in node.stack_trace

            # verify that copying the node does not lose the stack trace
            new_node = graph.node_copy(node)
            self.assertTrue(new_node.stack_trace is not None)
            assert 'test_fx.py' in new_node.stack_trace

    def test_graph_unique_names_manual(self):
        # Manually-chosen colliding names must be deduplicated by graph_copy.
        graph : torch.fx.Graph = torch.fx.Graph()
        a : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
        c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
        d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
        graph.output(d)
        graph2 = torch.fx.Graph()
        val_map : Dict[Node, Node] = {}
        graph2.graph_copy(graph, val_map)
        seen_names : Set[str] = set()
        for node in graph2.nodes:
            assert node.name not in seen_names
            seen_names.add(node.name)
    def test_unpack(self):
        # Tuple unpacking of a traced argument should round-trip.
        class M(torch.nn.Module):
            def forward(self, a, b):
                c, d = a
                return c + d + b

        a = (torch.rand(1), torch.rand(1))
        b = torch.rand(1)
        m = M()
        self.checkGraphModule(m, (a, b))
    def test_native_callable(self):
        if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
            raise unittest.SkipTest("non-portable load_library call used in test")
        # This test exercises the case where we use FX to translate from Python
        # code to some native callable object
        #
        # For the purposes of testing, we use ElementwiseInterpreter defined
        # in test_custom_class.cpp.
        #
        # We test that we can
        # 1) Construct a native callable from FX IR
        # 2) Construct a drop-in replacement module that delegates to the
        #    native callable rather than the original code
        # 3) Run both the original code and native callable wrapper with
        #    equivalent results
        # 4) TorchScript compile the native callable wrapper and confirm
        #    equivalent results with the reference
        # 5) TorchScript serialize and deserialize the native callable
        #    and confirm equivalent results with the reference

        # We use this simple Module as a reference computation
        class MySimpleMod(torch.nn.Module):
            def forward(self, x):
                return 3.0 * x + x

        msm = MySimpleMod()

        # This is what a lowering pass might look like: a function that takes
        # a valid nn.Module, symbolically traces it, lowers the Module to some
        # representation, and wraps that representation up into another
        # nn.Module instance that handles dispatch to the compiled/lowered code.
        def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
            # ===== Stage 1: Symbolic trace the module =====
            mod = symbolic_trace(orig_mod)

            # ===== Stage 2: Lower GraphModule representation to the C++
            #       interpreter's instruction format ======
            instructions = []
            constant_idx = 0
            constants = {}
            fn_input_names = []

            target_to_name = {
                operator.add : "add",
                operator.mul : "mul"
            }

            output_node : Optional[Node] = None
            # For each instruction, create a triple
            # (instruction_name : str, inputs : List[str], output : str)
            # to feed into the C++ interpreter
            for n in mod.graph.nodes:
                target, args, out_name = n.target, n.args, n.name
                assert len(n.kwargs) == 0, "kwargs currently not supported"

                if n.op == 'placeholder':
                    # Placeholders specify function argument names. Save these
                    # for later when we generate the wrapper GraphModule
                    fn_input_names.append(target)
                elif n.op == 'call_function':
                    assert target in target_to_name, "Unsupported call target " + target
                    arg_names = []
                    for arg in args:
                        if not isinstance(arg, Node):
                            # Pull out constants. These constants will later be
                            # fed to the interpreter C++ object via add_constant()
                            arg_name = f'constant_{constant_idx}'
                            constants[arg_name] = torch.tensor(
                                [arg] if isinstance(arg, numbers.Number) else arg)
                            arg_names.append(arg_name)
                            constant_idx += 1
                        else:
                            arg_names.append(arg.name)
                    instructions.append((target_to_name[target], arg_names, out_name))
                elif n.op == 'output':
                    if output_node is not None:
                        raise RuntimeError('Multiple output nodes!')
                    output_node = n
                else:
                    raise RuntimeError('Unsupported opcode ' + n.op)

            interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
            # Load constants
            for k, v in constants.items():
                interpreter.add_constant(k, v)
            # Specify names for positional input arguments
            interpreter.set_input_names(fn_input_names)
            # Load instructions
            interpreter.set_instructions(instructions)
            # Specify name for single output
            assert isinstance(output_node.args[0], torch.fx.Node)
            interpreter.set_output_name(output_node.args[0].name)

            # ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
            class WrapperModule(torch.nn.Module):
                def __init__(self, interpreter):
                    super().__init__()
                    self.interpreter = interpreter

            wrapper = WrapperModule(interpreter)

            # Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
            # 3) Returns the speficied return value

            # FIXME: The following code could be greatly simplified by symbolic_trace'ing
            # the wrapper with a Tracer that considers the Wrapper instance a root
            # module, however, I can't get `__call__` exposed on TorchBind classes
            # without it messing up Python `hasattr` for some reason. More digging
            # into CPython's implementation of hasattr is probably in order...

            graph = torch.fx.Graph()
            # Add placeholders for fn inputs
            placeholder_nodes = []
            for name in fn_input_names:
                placeholder_nodes.append(graph.create_node('placeholder', name))

            # Get the interpreter object
            interpreter_node = graph.create_node('get_attr', 'interpreter')

            # Add a node to call the interpreter instance
            output_node = graph.create_node(
                op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))

            # Register output
            graph.output(output_node)

            graph.lint()

            # Return final GraphModule!!!
            return GraphModule(wrapper, graph)

        # Lower GraphModule to C++ interpreter
        lowered = lower_to_elementwise_interpreter(msm)

        # Compare correctness with original module
        x = torch.rand(3, 4)
        ref_out = msm(x)
        test_out = lowered(x)
        torch.testing.assert_close(test_out, ref_out)

        # Test TorchScript compilation
        scripted_lowered = torch.jit.script(lowered)
        script_out = scripted_lowered(x)
        torch.testing.assert_close(script_out, ref_out)

        # Test TorchScript ser/de
        import_copy = self.getExportImportCopy(scripted_lowered)
        imported_out = import_copy(x)
        torch.testing.assert_close(imported_out, ref_out)
    def test_reserved_getattr(self):
        """Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
        class M(torch.nn.Module):
            def forward(self, a):
                return a.foo.bar.baz

        m = M()
        m_g = symbolic_trace(m)
        m_g.graph.lint()
        for node in m_g.graph.nodes:
            self.assertTrue(node.name != "getattr")

    def test_node_tagging(self):
        # A Tracer subclass can decorate every created node with extra metadata.
        class TaggingTracer(Tracer):
            def create_node(self, kind : str, target : Union[str, Callable],
                            args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
                            type_expr : Optional[Any] = None) -> Node:
                n = super().create_node(kind, target, args, kwargs, name)
                n.tag = 'foo'
                return n

        class M(torch.nn.Module):
            def forward(self, a, b):
                return a + b

        m = M()
        g = TaggingTracer().trace(m)
        g.lint()
        for n in g.nodes:
            self.assertTrue(hasattr(n, 'tag'))
            self.assertEqual(n.tag, 'foo')
    def test_tensor_attribute(self):
        # Plain tensor attributes (including ones reached through a submodule)
        # must be liftable into the traced graph.
        class TensorAttribute(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.tensor = torch.rand(3, 4)

            def forward(self, x):
                return torch.nn.functional.linear(x, self.tensor)

        ta = TensorAttribute()
        traced = symbolic_trace(ta)
        traced(torch.rand(4, 4))

        class WrapperForQualname(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.ta = TensorAttribute()

            def forward(self, x):
                return torch.nn.functional.linear(x, self.ta.tensor)

        wfq = WrapperForQualname()
        traced2 = symbolic_trace(wfq)
        traced2.graph.lint()
        traced2(torch.rand(4, 4))
def test_tensor_attribute_coalseced(self):
def count_attrs(fx_module):
targets = set()
for node in traced.graph.nodes:
if node.op == 'get_attr':
targets.add(node.target)
return len(targets)
val = torch.tensor(5)
def f(x):
return x + val + val
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 1)
val2 = torch.tensor(5)
def f(x):
val = torch.tensor(5)
return x + val + val2
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 2)
    def test_symbolic_trace_sequential(self):
        # nn.Sequential containers trace into an equivalent flat graph.
        class Simple(torch.nn.Module):
            def forward(self, x):
                return torch.neg(x)

        seq = torch.nn.Sequential(
            Simple(),
            Simple(),
            Simple()
        )
        traced = symbolic_trace(seq)
        traced.graph.lint()
        x = torch.rand(3, 4)
        self.assertEqual(traced(x), seq(x))

    def test_tensor_constant(self):
        # Tensors constructed inline in forward() are lifted as constants.
        class ConstTensor(torch.nn.Module):
            def forward(self, x):
                return torch.nn.functional.linear(x, torch.zeros(3, 4))

        ct = ConstTensor()
        traced = symbolic_trace(ct)
        traced.graph.lint()
        traced(torch.rand(4, 4))
    def test_pickle_graphmodule(self):
        # A traced GraphModule must survive a pickle round-trip unchanged.
        class Nested(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.st = torch.nn.Linear(4, 4)

            def forward(self, x):
                return self.st(x)

        n = Nested()
        traced = symbolic_trace(n)
        traced.graph.lint()
        pickled = pickle.dumps(traced)
        loaded = pickle.loads(pickled)
        loaded.graph.lint()
        x = torch.rand(3, 4)
        self.assertEqual(loaded(x), traced(x))

    def test_pickle_custom_import(self):
        # Pickling must also work when the graph calls a module-level function.
        graph = torch.fx.Graph()
        a = graph.placeholder('x')
        b = graph.placeholder('y')
        c = graph.call_function(a_non_torch_leaf, (a, b))
        d = graph.call_function(torch.sin, (c,))
        graph.output(d)
        gm = GraphModule(torch.nn.Module(), graph)
        pickled = pickle.dumps(gm)
        loaded = pickle.loads(pickled)
        loaded.graph.lint()
        x, y = torch.rand(1), torch.rand(1)
        self.assertEqual(loaded(x, y), gm(x, y))
    def test_all_input_nodes(self):
        # Node.all_input_nodes reports exactly the upstream Node dependencies.
        graph : torch.fx.Graph = torch.fx.Graph()
        a : torch.fx.Node = graph.placeholder('x')
        b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
        c : torch.fx.Node = graph.get_attr('y_attr')
        d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
        e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
        graph.output(e)
        graph.lint()

        self.assertEqual(b.all_input_nodes, [a])
        self.assertEqual(c.all_input_nodes, [])
        self.assertEqual(d.all_input_nodes, [b, c])
        self.assertEqual(e.all_input_nodes, [d])
    def test_deepcopy_graphmodule_with_transform(self):
        # deepcopy of a transformed GraphModule yields a distinct class object
        # (GraphModule classes are generated per-instance) with equal behavior.
        st = SimpleTest()
        traced = symbolic_trace(st)
        traced.graph.lint()

        def transform(traced):
            new_graph = torch.fx.Graph()
            val_map : Dict[Node, Node] = {}
            output_value = new_graph.graph_copy(traced.graph, val_map)
            relu_out = new_graph.create_node(
                op='call_method', target='neg', args=(output_value,), kwargs={})
            new_graph.output(relu_out)
            return GraphModule(traced, new_graph)

        transformed = transform(traced)
        transformed.graph.lint()
        copied = copy.deepcopy(transformed)
        self.assertNotEqual(id(type(transformed)), id(type(copied)))
        x = torch.randn(3, 4)
        self.assertEqual(copied(x), transformed(x))
    def test_deepcopy_with_submods_params(self):
        # deepcopy must handle nested submodules that each own parameters.
        class Bar(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))

            def forward(self, x):
                return torch.relu(x) + self.param

        class Baz(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.bar = Bar()

            def forward(self, x):
                return self.bar(x) - self.param

        baz = Baz()
        traced = symbolic_trace(baz)
        traced.graph.lint()
        copied = copy.deepcopy(traced)
        copied.graph.lint()

    def test_deepcopy_graph_with_tracer_cls(self):
        # The graph's recorded tracer class must survive deepcopy.
        class TestTracer(Tracer):
            def is_leaf_module(self, module, name):
                return True

        g = Graph(tracer_cls=TestTracer)
        x = g.placeholder("x")
        g.output(x)

        h = copy.deepcopy(g)
        self.assertIsNotNone(h._tracer_cls)
        self.assertTrue(g._tracer_cls == h._tracer_cls)
    def test_unpack_list_better_error(self):
        # Unpacking a Proxy with *x must raise a targeted TraceError.
        class SomeArgs(torch.nn.Module):
            def forward(self, a, b):
                return torch.rand(3, 4)

        class UnpacksList(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.sa = SomeArgs()

            def forward(self, x : list):
                return self.sa(*x)

        ul = UnpacksList()
        with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
            symbolic_trace(ul)

    def test_unpack_dict_better_error(self):
        # Same for **x dictionary unpacking of a Proxy.
        class SomeKwargs(torch.nn.Module):
            def forward(self, x=3, y=4):
                return torch.rand(3, 4)

        class UnpacksDict(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.sk = SomeKwargs()

            def forward(self, x : dict):
                return self.sk(**x)

        ud = UnpacksDict()
        with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
            symbolic_trace(ud)
def test_pretty_print_targets(self):
    # Test that Graph pretty-print prints friendly name for targets
    # in `operator` and `builtins`
    class SomeMod(torch.nn.Module):
        def forward(self, x):
            # x.foo / x.bar produce builtins.getattr nodes; `+` produces operator.add
            return torch.add(x.foo + x.bar, 3.0)

    traced = symbolic_trace(SomeMod())
    graph_str = str(traced.graph)
    self.assertIn('builtins.getattr', graph_str)
    self.assertIn('operator.add', graph_str)
    self.assertIn('torch.add', graph_str)
def test_pretty_print_node(self):
    """Node.format_node must render every opcode kind in a recognizable way."""
    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.param: torch.nn.Parameter = torch.nn.Parameter(
                torch.rand(3, 4))
            self.linear = torch.nn.Linear(4, 5)

        def forward(self, x: torch.Tensor, y: int = 2):
            # exercises placeholder, getitem, get_attr, add, call_module, call_method
            return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)

    traced = symbolic_trace(M())
    all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
    # FileCheck verifies both the node names and the opcodes, in program order
    FileCheck().check("x").check("placeholder") \
        .check("y").check("placeholder") \
        .check("getitem").check("call_function") \
        .check("param").check("get_attr") \
        .check("add").check("call_function") \
        .check("linear").check("call_module") \
        .check("clamp").check("call_method") \
        .run(all_formatted)
def test_script_tensor_constant(self):
    # TorchScript seems to ignore attributes that start with `__`.
    # We used to call anonymous Tensor values `__tensor_constant*`, but
    # they were getting ignored by script. Now they're called
    # `_tensor_constant*`
    class IHaveATensorConstant(torch.nn.Module):
        def forward(self, x):
            return x + torch.rand(3, 4)

    traced = torch.fx.symbolic_trace(IHaveATensorConstant())
    # scripting succeeds only if the constant attribute survived with a visible name
    torch.jit.script(traced)
def test_autowrap_functions(self):
    """Tracer(autowrap_functions=...) must trace through listed free functions as leaf calls."""
    class AutowrapFnTest(torch.nn.Module):
        def forward(self, x):
            return fx_int(x.shape[0] / 2)

    class AutowrapFnTest2(torch.nn.Module):
        def forward(self, x):
            return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)

    # Check function(s) are wrapped
    # `int` would normally throw a TypeError as argument can't be `Proxy`
    tracer = Tracer(autowrap_functions=(fx_int,))
    graph = tracer.trace(AutowrapFnTest())
    traced = GraphModule(tracer.root, graph, 'test')
    tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))
    tracer_2.trace(AutowrapFnTest2())

    # Test scriptability
    traced_scripted = torch.jit.script(traced)
    self.assertEqual(traced_scripted(torch.rand(4)), 2)
def test_torch_fx_len(self):
    """``len`` on a Proxy is patched during tracing but left untouched afterwards."""
    class FXLenTest(torch.nn.Module):
        def forward(self, x):
            return len(x)

    traced = symbolic_trace(FXLenTest())
    self.assertEqual(traced(torch.rand(3, 4)), 3)

    # Test scriptability
    scripted = torch.jit.script(FXLenTest())
    self.assertEqual(scripted(torch.rand(3)), 3)
    traced_scripted = torch.jit.script(traced)
    self.assertEqual(traced_scripted(torch.rand(3)), 3)

    # Test non-proxy len
    class FXLenTest2(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.l = [3, 4, 5]

        def forward(self, x):
            # len() of a concrete (non-Proxy) list must still work during tracing
            return x + len(self.l)

    traced2 = symbolic_trace(FXLenTest2())
    inp = torch.rand(3, 4)
    self.assertEqual(traced2(inp), inp + 3.0)
    # the builtin must be restored once tracing is done
    self.assertIs(len, builtins.len)
def test_torch_fx_getattr(self):
    """``getattr`` with a default on a Proxy must fall back to the default during tracing."""
    class FXGetattrTest(torch.nn.Module):
        def forward(self, x):
            return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3]))

    traced = symbolic_trace(FXGetattrTest())
    self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3]))
def test_sqrt(self):
    """math.sqrt (patched during tracing) must trace correctly and be restored afterwards."""
    class Sqrt1(torch.nn.Module):
        def forward(self, x):
            return sqrt(x.size(0))

    class Sqrt2(torch.nn.Module):
        def forward(self, x):
            return math.sqrt(x.size(0))

    class Sqrt3(torch.nn.Module):
        def forward(self, x):
            # mixes a constant-folded sqrt with a traced one
            return x + math.sqrt(2) + sqrt(2)

    self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
    self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
    self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
    # after tracing, both aliases must point back at the original function
    self.assertIs(sqrt, _sqrt)
    self.assertIs(math.sqrt, _sqrt)
def test_torch_custom_ops(self):
    """torch.ops.* (custom-op namespace) calls must trace and run like the eager module."""
    class M(torch.nn.Module):
        def forward(self, a):
            b = torch.ops.aten.sigmoid(a)
            c = torch.ops.aten.cat([a, b])
            return torch.ops.aten.cat((c, c))

    m = M()
    input = torch.randn(3)
    ref_out = m(input)
    gm = symbolic_trace(m)
    gm.graph.lint()
    out = gm(input)
    self.assertEqual(out, ref_out)
def test_pickle_torch_custom_ops(self):
    """A GraphModule containing torch.ops.* targets must round-trip through pickle."""
    class M(torch.nn.Module):
        def forward(self, a):
            b = torch.ops.aten.sigmoid(a)
            c = torch.ops.aten.cat([a, b])
            return torch.ops.aten.cat((c, c))

    m = M()
    input = torch.randn(3)
    ref_out = m(input)
    gm = symbolic_trace(m)
    gm.graph.lint()
    pickled = pickle.dumps(gm)
    loaded = pickle.loads(pickled)
    self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
    """str() of a GraphModule must include the module header and the ops it calls."""
    st = SimpleTest()
    traced = symbolic_trace(st)
    traced.graph.lint()
    printed = str(traced)
    assert 'SimpleTest()' in printed
    assert 'torch.relu' in printed
def test_pretty_print_graph(self):
    """The Graph's tabular printout must include args/kwargs/#users columns."""
    class KwargPrintTest(torch.nn.Module):
        def forward(self, x):
            return torch.squeeze(x + 3.0, dim=2)

    st = KwargPrintTest()
    traced = symbolic_trace(st)
    traced.graph.lint()
    stringed = str(traced.graph)
    for s in ['args', 'kwargs', '#users']:
        assert s in stringed
def test_custom_proxy_type(self):
    """A plain user class whose methods only touch traceable values can be traced through."""
    class TensorPair:
        def __init__(self, left, right):
            self.left, self.right = left, right

        def add(self, other):
            l = self.left + other.left
            r = self.right + other.right
            return TensorPair(l, r)

        def mul(self, other):
            l = self.left * other.left
            r = self.right * other.right
            return TensorPair(l, r)

    def use_tensor_pair(x : TensorPair, y : TensorPair):
        s = x.add(y)
        return s.mul(x)

    x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
    y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))

    ref_out = use_tensor_pair(x, y)

    traced = symbolic_trace(use_tensor_pair)

    traced_out = traced(x, y)
    self.assertEqual(traced_out.left, ref_out.left)
    self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_type_literal(self):
    """ProxyableClassMeta lets literal constructions of the class appear inside a trace."""
    class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
        def __init__(self, left, right):
            self.left, self.right = left, right

        def add(self, other):
            l = self.left + other.left
            r = self.right + other.right
            return TensorPair(l, r)

        def mul(self, other):
            l = self.left * other.left
            r = self.right * other.right
            return TensorPair(l, r)

    def use_tensor_pair_literal(x : TensorPair):
        # constructing TensorPair from literals inside the traced fn
        s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
        return s.mul(x)

    x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))

    ref_out = use_tensor_pair_literal(x)

    traced = symbolic_trace(use_tensor_pair_literal)

    traced_out = traced(x)
    self.assertEqual(traced_out.left, ref_out.left)
    self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_dynamic_value(self):
    """A ProxyableClassMeta class may be constructed from a dynamic (Proxy) value mid-trace."""
    class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
        def __init__(self, left, right):
            self.left, self.right = left, right

        def add(self, other):
            l = self.left + other.left
            r = self.right + other.right
            return TensorPair(l, r)

        def mul(self, other):
            l = self.left * other.left
            r = self.right * other.right
            return TensorPair(l, r)

    def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
        # y is a Proxy at trace time, so this exercises dynamic construction
        s = x.add(TensorPair(y, y))
        return s.mul(x)

    x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
    y = torch.randn(5, 3)

    ref_out = use_tensor_pair_ctor(x, y)

    traced = symbolic_trace(use_tensor_pair_ctor)

    traced_out = traced(x, y)
    self.assertEqual(traced_out.left, ref_out.left)
    self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_input_dependent_control_flow(self):
    """Control flow in a ProxyableClassMeta __init__ is resolved at trace time.

    NOTE(review): at trace time ``inp.sum() == 0`` compares a Proxy, so the
    branch taken is whatever that comparison evaluates to for a Proxy —
    presumably always the ``else`` branch; confirm against ProxyableClassMeta
    semantics.
    """
    class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
        def __init__(self, inp):
            if inp.sum() == 0:
                self.is_zero = True
                self.tensor = torch.tensor([])
            else:
                self.is_zero = False
                self.tensor = inp

        def add(self, other):
            if self.is_zero:
                return ZeroTensor(other.tensor)
            elif other.is_zero:
                return self

    def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
        return ZeroTensor(x + y)

    x, y = torch.randn(5, 3), torch.randn(5, 3)

    ref_out = use_zero_tensor(x, y)

    traced = symbolic_trace(use_zero_tensor)

    traced_out = traced(x, y)

    self.assertEqual(traced_out.is_zero, ref_out.is_zero)
    self.assertEqual(traced_out.tensor, ref_out.tensor)
def test_graph_fns(self):
    """Exercise the Graph convenience constructors end-to-end against eager execution."""
    graph = Graph()
    inp = graph.placeholder('a')
    lin_out = graph.call_module('linear', (inp,))
    bias_attr = graph.get_attr('bias')
    summed = graph.call_method('add', (lin_out, bias_attr))
    result = graph.call_function(torch.sin, (summed,))
    graph.output(result)

    root = torch.nn.Module()
    root.linear = torch.nn.Linear(3, 4)
    root.bias = torch.rand(4)
    gm = GraphModule(root, graph)
    gm.graph.lint()

    t = torch.rand(3)
    expected = torch.sin(root.linear(t) + root.bias)
    self.assertEqual(gm(t), expected)
def test_remove_uses(self):
    """erase_node after replace_all_uses_with must drop the node from its input's users."""
    graph = Graph()
    inp = graph.placeholder('x')
    relu_node = graph.call_function(torch.relu, (inp,))
    neg_node = graph.call_function(torch.neg, (relu_node,))
    graph.output(neg_node)

    neg_node.replace_all_uses_with(relu_node)
    graph.erase_node(neg_node)

    self.assertTrue(neg_node not in relu_node.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
    """A traced EmbeddingBag (with None-typed annotations) must survive pickling."""
    eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
    traced = symbolic_trace(eb)
    pickled = pickle.dumps(traced)
    loaded = pickle.loads(pickled)
    loaded.graph.lint()
    input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
    offsets = torch.LongTensor([0, 4])
    self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
    """A forward returning a tuple must trace and evaluate identically."""
    class M(torch.nn.Module):
        def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
            return (x, x + x)

    original = M()
    traced = symbolic_trace(original)
    self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
def test_construct_root_dict(self):
    """GraphModule can be built from a dict root mapping dotted paths to modules/tensors."""
    graph : torch.fx.Graph = torch.fx.Graph()
    a : torch.fx.Node = graph.create_node('placeholder', 'x')
    b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
    c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
    d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
    graph.output(d)

    linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
    add_param : torch.Tensor = torch.rand(3, 4)
    # dict keys with dots must be materialized as nested attribute paths
    gm : torch.fx.GraphModule = torch.fx.GraphModule(
        {'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
    gm.graph.lint()

    assert 'self.foo.bar.baz' in gm.code

    x : torch.Tensor = torch.rand(3, 3)
    out : torch.Tensor = gm(x)
    ref_out : torch.Tensor = linear_mod(x) + add_param
    self.assertEqual(out, ref_out)
def test_symbolic_trace_assert(self):
    """torch._assert must trace, fire at runtime on the traced module, and script."""
    class AssertsTensorShape(torch.nn.Module):
        def forward(self, x):
            torch._assert(x.shape[1] > 4, "assert_foobar")
            return x

    m = AssertsTensorShape()
    # verify traceability
    traced = symbolic_trace(m)
    # verify assertion on traced model works correctly at runtime
    traced(torch.rand(4, 5))
    with self.assertRaisesRegex(AssertionError, "assert_foobar"):
        traced(torch.rand(4, 3))
    # verify the symbolically traced module is scriptable
    # NOTE(review): this scripts `m` (the eager module), not `traced` —
    # the comment above suggests `traced` was intended; confirm.
    ms = torch.jit.script(m)
    with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
        ms(torch.rand(4, 3))
def test_fx_create_arg(self):
    """__fx_create_arg__ lets a custom class control how it is lowered into graph args."""
    class CustomArgObject:
        def __init__(self, x, y):
            self.x = x
            self.y = y

        def __fx_create_arg__(self, tracer: torch.fx.Tracer):
            # emit a call_function node that reconstructs the object at runtime
            return tracer.create_node(
                "call_function",
                CustomArgObject,
                args=(
                    tracer.create_arg(self.x),
                    tracer.create_arg(self.y),
                ),
                kwargs={},
            )

    class HasCustomArgObjectWhenLeaf(torch.nn.Module):
        def forward(self, o: CustomArgObject):
            # Not normally traceable; good reason to make
            # this module a leaf.
            for x in o.x:
                o.y += x
            return o.y

    class Root(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.inner = HasCustomArgObjectWhenLeaf()

        def forward(self, x, y):
            o = CustomArgObject(x, y)
            return self.inner(o)

    class CreateArgTracer(torch.fx.Tracer):
        def is_leaf_module(self, m, module_qualified_name):
            return type(m) is HasCustomArgObjectWhenLeaf

    m = Root()
    graph = CreateArgTracer().trace(m)
    gm = torch.fx.GraphModule(m, graph)
    assert "CustomArgObject(" in gm.code
def test_trace_fn_constant(self):
    """A tensor closed over by a traced function becomes a constant in the GraphModule."""
    some_constant = torch.rand(3, 4)

    def add_const(x):
        return some_constant + x

    traced = symbolic_trace(add_const)

    input = torch.rand(3, 4)
    self.assertEqual(traced(input), add_const(input))
def test_copy_no_remap(self):
    """node_copy without an arg-remapping fn leaves args pointing at the old graph; lint must catch it."""
    source_graph = symbolic_trace(SimpleTest()).graph
    target_graph = torch.fx.Graph()
    for node in source_graph.nodes:
        target_graph.node_copy(node)
    with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
        target_graph.lint()
def test_wrong_topo(self):
    """lint must reject a graph whose node order violates def-before-use."""
    graph : torch.fx.Graph = torch.fx.Graph()
    a : torch.fx.Node = graph.create_node('placeholder', 'x')
    b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
    c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
    d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
    graph.output(d)
    nodes = list(graph.nodes)
    # move `c` after `d`, so `d` now uses a value defined later
    nodes[3].append(nodes[2])
    with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
        graph.lint()
def test_wrong_target_type(self):
    """A string target is only legal for call_method/call_module; call_function must reject it."""
    graph = torch.fx.Graph()
    with self.assertRaises(ValueError):
        torch.fx.Node(graph=graph, name='foo', op='call_function',
                      target='foo', args=(), kwargs={})
def test_example_shape_prop(self):
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
# Make sure we're testing all opcodes
opcodes = set()
output_shape : Optional[torch.Shape] = None
output_stride : Optional[Tuple[int]] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].meta['tensor_meta'].shape
output_stride = node.args[0].meta['tensor_meta'].stride
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
# Test shape propogation and make sure results match actual
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
def test_shape_prop_layout(self):
    """ShapeProp must record memory_format (contiguous vs channels_last) in tensor_meta."""
    class ConvTest(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.conv_mod = torch.nn.Conv2d(5, 5, 3)

        def forward(self, x):
            return self.conv_mod(x)

    # contiguous layout
    test_mod = ConvTest()
    traced = symbolic_trace(test_mod)
    x = torch.randn(5, 5, 224, 224)
    shape_prop.ShapeProp(traced).propagate(x)

    assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
               for node in traced.graph.nodes))

    x_channels_last = x.contiguous(memory_format=torch.channels_last)
    traced.to(memory_format=torch.channels_last)
    shape_prop.ShapeProp(traced).propagate(x_channels_last)
    for node in traced.graph.nodes:
        # NB: the implementation of conv may not preserve the memory format,
        # unfortunately. The best we can do is just check that the placeholder
        # node is channels-last
        if node.op in {'placeholder'}:
            self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
def test_shape_prop_aggregate(self):
    """ShapeProp must handle leaf modules returning aggregates (tuples mixing ints/tensors)."""
    class ReturnTwo(torch.nn.Module):
        def forward(self, x):
            return (3, torch.sum(x))

    class UnderTest(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.rt = ReturnTwo()

        def forward(self, x):
            return self.rt(x)

    ut = UnderTest()

    class RTTracer(torch.fx.Tracer):
        def is_leaf_module(self, m, module_qualified_name):
            return type(m) is ReturnTwo

    graph = RTTracer().trace(ut)
    mod = torch.fx.GraphModule(ut, graph)

    shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))

    for node in mod.graph.nodes:
        if node.op == 'call_module':
            assert 'tensor_meta' in node.meta
            tensor_meta = node.meta['tensor_meta']
            # the aggregate's structure is preserved: int stays, tensor gets meta
            assert tensor_meta[0] == 3
            assert tensor_meta[1].shape == torch.Size([])
def test_shape_prop_layout_3d(self):
    """3-D variant of the layout test: channels_last_3d must be recorded for placeholders."""
    class ConvTest3d(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.conv_mod = torch.nn.Conv3d(5, 5, 3)

        def forward(self, x):
            return self.conv_mod(x)

    test_mod_3d = ConvTest3d()
    traced_3d = symbolic_trace(test_mod_3d)
    x_3d = torch.randn(5, 5, 224, 224, 15)
    shape_prop.ShapeProp(traced_3d).propagate(x_3d)
    assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
               for node in traced_3d.graph.nodes))

    x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
    traced_3d.to(memory_format=torch.channels_last_3d)
    shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
    for node in traced_3d.graph.nodes:
        # NB: the implementation of conv may not preserve the memory format,
        # unfortunately. The best we can do is just check that the placeholder
        # node is channels-last
        if node.op in {'placeholder'}:
            self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
def test_interpreter(self):
    """Interpreter.run on a traced module must match both the GraphModule and the eager module."""
    class MyModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.param = torch.nn.Parameter(torch.rand(3, 4))
            self.linear = torch.nn.Linear(4, 5)

        def forward(self, x):
            return self.linear(x + self.param).clamp(min=0.0, max=1.0)

    m = MyModule()
    gm = torch.fx.symbolic_trace(m)

    interpreter = Interpreter(gm)
    input = torch.randn(3, 4)
    self.assertEqual(interpreter.run(input), gm(input))
    self.assertEqual(interpreter.run(input), m(input))
def test_interpreter_run_node_override(self):
    """Overriding Interpreter.run_node lets a subclass observe/stash every node's value."""
    class MyModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.param = torch.nn.Parameter(torch.rand(3, 4))
            self.linear = torch.nn.Linear(4, 5)

        def forward(self, x):
            return self.linear(x + self.param).clamp(min=0.0, max=1.0)

    m = MyModule()
    gm = torch.fx.symbolic_trace(m)

    class RunNodeInterpreter(Interpreter):
        def __init__(self, module):
            super().__init__(module)

        def run_node(self, n : Node) -> Any:
            result = super().run_node(n)
            # stash each node's runtime value on the node itself
            n.cached_value = result
            return result

    input = torch.randn(3, 4)
    RunNodeInterpreter(gm).run(input)
    for node in gm.graph.nodes:
        assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(n)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(n)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
def test_interpreter_partial_eval(self):
    """Interpreter.run(initial_env=...) must use pre-seeded values instead of executing those nodes."""
    class MyModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.param = torch.nn.Parameter(torch.rand(3, 4))
            self.linear = torch.nn.Linear(4, 5)

        def forward(self, x):
            return self.linear(x + self.param).clamp(min=0.0, max=1.0)

    gm = torch.fx.symbolic_trace(MyModule())
    interp = Interpreter(gm)
    # pre-seed the linear node's output so the add/linear are skipped
    env = {}
    for node in gm.graph.nodes:
        if node.op == 'call_module' and node.target == 'linear':
            env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
            break
    assert len(env) == 1
    x = torch.randn(3, 4)
    result = interp.run(x, initial_env=env)
    self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
def test_interpreter_star_args(self):
    """Interpreter must route extra positional args into a traced ``*args`` parameter."""
    def with_star_args(x, *args):
        return x + args[0]

    gm = torch.fx.symbolic_trace(with_star_args)
    interp = Interpreter(gm)
    result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
    self.assertEqual(result, torch.ones(3, 4) * 2.0)
@skipIfNoTorchVision
def test_interpreter_noop_resnet18(self):
    """A no-op Transformer pass over resnet18 must preserve its outputs exactly."""
    rn18 = torchvision_models.resnet18()
    transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
    inp = torch.randn(5, 3, 224, 224)
    self.assertEqual(transformed(inp), rn18(inp))
@skipIfNoTorchVision
def test_interpreter_gc_values(self):
    """Interpreter must free intermediate values as it goes; only `output` remains in env."""
    rn18 = torchvision_models.resnet18()
    interp = Interpreter(symbolic_trace(rn18))
    inp = torch.rand(5, 3, 224, 224)
    out = interp.run(inp)
    env_key_names = set(n.name for n in interp.env.keys())
    self.assertEqual(env_key_names, set(['output']))
def test_interpreter_default_args(self):
    """Interpreter must fill in a placeholder's default value when the arg is omitted."""
    class Model(torch.nn.Module):
        def forward(self, x, y=3.14159):
            return x + y

    model = Model()
    gm = torch.fx.symbolic_trace(model)

    interp = Interpreter(gm)
    x = torch.randn(5, 3)
    out = interp.run(x)
    torch.testing.assert_allclose(out, x + 3.14159)
def test_interpreter_not_enough_args(self):
    """Interpreter must raise a clear error when a required placeholder has no value."""
    class Model(torch.nn.Module):
        def forward(self, x, y):
            return x + y

    model = Model()
    gm = torch.fx.symbolic_trace(model)

    interp = Interpreter(gm)
    x = torch.randn(5, 3)
    with self.assertRaisesRegex(RuntimeError,
                                'Expected positional argument for parameter y, but one was not passed in'):
        out = interp.run(x)
def test_transformer_noop(self):
    """A vanilla Transformer pass must regenerate an equivalent module."""
    class MyModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.param = torch.nn.Parameter(torch.rand(3, 4))
            self.linear = torch.nn.Linear(4, 5)

        def forward(self, x):
            return self.linear(x + self.param).clamp(min=0.0, max=1.0)

    m = MyModule()
    gm = torch.fx.symbolic_trace(m)

    new_gm = Transformer(gm).transform()

    input = torch.randn(3, 4)
    self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(n)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(n)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
def test_transformer_multi_outputs(self):
    """Transformer must handle a forward that returns multiple values."""
    class MyModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.param = torch.nn.Parameter(torch.rand(3, 4))
            self.linear = torch.nn.Linear(4, 5)

        def forward(self, x):
            x = x + self.param
            out = self.linear(x)
            return x, out

    m = MyModule()
    gm = torch.fx.symbolic_trace(m)

    new_gm = Transformer(gm).transform()

    input = torch.randn(3, 4)
    self.assertEqual(new_gm(input), gm(input))
def test_fn_type_annotations(self):
    """Forward signatures with NamedTuple/container annotations must survive trace + script."""
    class Foo(torch.nn.Module):
        def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
            return {'a': p.x + p.y + z + i}

    foo_scripted = torch.jit.script(Foo())
    foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)

    fxed = symbolic_trace(Foo())
    fxed_scripted = torch.jit.script(fxed)
    fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
def forward(a : List[torch.Tensor]):
return a[0]
torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
    """A decorator-wrapped forward must be traced through, recording the wrapper's ops."""
    def wrap_with_relu(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            return torch.relu(fn(*args, **kwargs))
        return wrapper

    class Foo(torch.nn.Module):
        @wrap_with_relu
        def forward(self, x, w):
            return torch.matmul(x, w)

    f = Foo()
    traced = symbolic_trace(f)
    x, w = torch.rand(3, 4), torch.rand(4, 4)
    # the relu added by the wrapper must appear in the traced graph
    self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
    """A graph with no nodes must still codegen a callable module (returning None)."""
    gm = torch.fx.GraphModule(torch.nn.Module(), torch.fx.Graph())
    self.assertEqual(gm(), None)
def test_sequential(self):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
gm = torch.fx.symbolic_trace(m)
gm_copy = copy.deepcopy(gm)
def test_ctx_mgr(self):
    """A forward decorated with a context-manager decorator must still trace."""
    @contextlib.contextmanager
    def do_nothing():
        yield

    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()

        @do_nothing()
        def forward(self, x):
            return torch.relu(x)

    m = M()
    self.checkGraphModule(m, (torch.rand(3, 4),))
def test_typename_print(self):
    """type_expr annotations on a node must appear in the graph's printed form."""
    graph = torch.fx.Graph()
    inp = graph.create_node('placeholder', 'x')
    relu_node = graph.create_node('call_function', target=torch.relu,
                                  args=(inp,), type_expr=List[float])
    graph.output(relu_node)
    self.assertTrue('typing.List[float]' in str(graph))
def test_layout(self):
    """torch.layout keyword arguments (torch.strided) must trace as constants."""
    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, x):
            return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0)

    traced = symbolic_trace(M())
    x = torch.rand(5, 9, 3, 4)
    self.assertEqual(traced(x), torch.zeros_like(x))
def test_ellipsis(self):
    """Ellipsis (`...`) inside slicing must be traceable and reproduced in codegen."""
    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, x, y):
            return x + y[:, 1:10, ...]

    traced = symbolic_trace(M())
    x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
    self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
def test_inf_nan(self):
    """inf/-inf/nan float constants must round-trip through tracing and codegen."""
    class FooMod(torch.nn.Module):
        def forward(self, x):
            return x + float('inf'), x + float('-inf'), x + float('nan')

    fm = FooMod()
    self.checkGraphModule(fm, (torch.rand(3, 4),))
def test_inf_nan_kwds(self):
    """Codegen must emit valid Python for inf/nan constants even with clashing node names."""
    graph = torch.fx.Graph()
    inp = graph.create_node('placeholder', 'x')
    plus_inf = graph.create_node('call_function', operator.add, (inp, float('inf')), {}, name='inf')
    plus_nan = graph.create_node('call_function', operator.add, (inp, float('nan')), {}, name='nan')
    graph.output((plus_inf, plus_nan))

    gm = torch.fx.GraphModule(torch.nn.Module(), graph)
    t = torch.rand(3, 4)
    self.assertEqual(gm(t), (t + float('inf'), t + float('nan')))
def test_deepcopy_recursion_depth(self):
    """deepcopy of a graph deeper than the recursion limit must not blow the stack."""
    depth = sys.getrecursionlimit() + 20

    graph = torch.fx.Graph()
    node = graph.placeholder('x')
    for _ in range(depth):
        node = graph.call_function(torch.relu, (node,))
    graph.output(node)

    copied_graph = copy.deepcopy(graph)

    # Map each original node to its copy, then check that user sets correspond.
    val_map = dict(zip(graph.nodes, copied_graph.nodes))

    for orig_node, new_node in zip(graph.nodes, copied_graph.nodes):
        mapped_users = set(val_map[u] for u in orig_node.users.keys())
        self.assertEqual(mapped_users, set(new_node.users.keys()))
@skipIfNoTorchVision
def test_replace_uses(self):
    """Replace every functional relu in resnet18 with neg via graph surgery.

    Fix: the sanitized kwargs copy (with ``inplace`` popped) was built but
    never used — ``call_function`` was given the original ``node.kwargs``,
    which still contained ``inplace`` (an argument ``torch.neg`` does not
    accept). The cleaned copy is now passed instead.
    """
    rn18 = torchvision_models.resnet18()

    class LowerReluTracer(torch.fx.Tracer):
        def is_leaf_module(self, m : torch.nn.Module, qualname : str):
            # descend into ReLU modules so relus appear as call_function nodes
            if isinstance(m, torch.nn.ReLU):
                return False
            return super().is_leaf_module(m, qualname)

    rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))

    to_erase = []
    for node in rn18_traced.graph.nodes:
        if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
            kwargs = node.kwargs.copy()
            # Neg doesn't have in-place
            kwargs.pop('inplace')
            with rn18_traced.graph.inserting_before(node):
                new_node = rn18_traced.graph.call_function(
                    the_function=torch.neg, args=node.args, kwargs=kwargs)
            node.replace_all_uses_with(replace_with=new_node)
            to_erase.append(node)

    for node in to_erase:
        rn18_traced.graph.erase_node(node)
def test_replace_input(self):
    """Node.replace_input_with must rewire a single argument of one node."""
    graph = torch.fx.Graph()
    ph_x = graph.create_node('placeholder', 'x')
    ph_y = graph.create_node('placeholder', 'y')
    relu_node = graph.create_node('call_function', target=torch.relu, args=(ph_x,))
    graph.output(relu_node)

    relu_node.replace_input_with(ph_x, ph_y)

    gm = torch.fx.GraphModule(torch.nn.Module(), graph)

    in_x = torch.randn(33, 44)
    in_y = torch.randn(11, 22)
    self.assertEqual(gm(in_x, in_y), torch.relu(in_y))
def test_insertion_point(self):
    """inserting_before must place newly created nodes ahead of the insertion point."""
    graph = torch.fx.Graph()
    ph = graph.create_node('placeholder', 'x')
    relu_node = graph.create_node('call_function', target=torch.relu, args=(ph,))
    graph.output(relu_node)

    with graph.inserting_before(relu_node):
        neg_node = graph.call_function(the_function=torch.neg, args=(ph,))
    _, *rest = relu_node.args
    relu_node.args = (neg_node, *rest)

    gm = torch.fx.GraphModule(torch.nn.Module(), graph)

    t = torch.randn(33, 44)
    self.assertEqual(gm(t), torch.relu(torch.neg(t)))
def test_update_args_api(self):
    """Node.update_arg must swap a positional argument and change runtime behavior."""
    graph : torch.fx.Graph = torch.fx.Graph()
    x : torch.fx.Node = graph.create_node('placeholder', 'x')
    y : torch.fx.Node = graph.create_node('placeholder', 'y')
    b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
    output : torch.fx.Node = graph.output(b)

    orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
    inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
    self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))

    # swap relu's input from x to y
    b.update_arg(0, y)
    new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
    self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_update_kwargs_api(self):
    """Node.update_kwarg must swap a keyword argument and change runtime behavior."""
    graph : torch.fx.Graph = torch.fx.Graph()
    x : torch.fx.Node = graph.create_node('placeholder', 'x')
    y : torch.fx.Node = graph.create_node('placeholder', 'y')
    b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
    output : torch.fx.Node = graph.output(b)

    orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
    inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
    self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))

    # swap relu's `input` kwarg from x to y
    b.update_kwarg('input', y)
    new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
    self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_move_before(self):
    """Node.prepend must hoist an out-of-order node so it precedes its user."""
    graph = torch.fx.Graph()
    ph = graph.create_node('placeholder', 'x')
    relu_node = graph.create_node('call_function', target=torch.relu, args=(ph,))
    graph.output(relu_node)

    # created at the end of the graph, then hoisted before `relu_node`
    neg_node = graph.call_function(the_function=torch.neg, args=(ph,))
    _, *rest = relu_node.args
    relu_node.args = (neg_node, *rest)
    relu_node.prepend(neg_node)

    gm = torch.fx.GraphModule(torch.nn.Module(), graph)

    t = torch.randn(33, 44)
    self.assertEqual(gm(t), torch.relu(torch.neg(t)))
def test_prepend_self(self):
    """Prepending a node to itself, then re-appending it, must leave the graph size unchanged."""
    graph = torch.fx.Graph()
    ph = graph.create_node('placeholder', 'x')
    relu_node = graph.create_node('call_function', target=torch.relu, args=(ph,))
    graph.output(relu_node)

    relu_node.prepend(relu_node)
    ph.append(relu_node)
    self.assertEqual(len(graph.nodes), 3)
def test_erase_node_error(self):
    """erase_node must refuse to delete a node that still has users."""
    st = SimpleTest()
    traced = symbolic_trace(st)

    for node in traced.graph.nodes:
        # Test deleting with uses both in another Node and at the output
        if node.target in [operator.add, torch.relu]:
            with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
                traced.graph.erase_node(node)
def test_copy_it(self):
    """immutable_dict / immutable_list must round-trip through deepcopy unchanged."""
    frozen_map = immutable_dict([(3, 4), (5, 6)])
    frozen_seq = immutable_list([(3, 4), (5, 6)])

    self.assertEqual(frozen_map, deepcopy(frozen_map))
    self.assertEqual(frozen_seq, deepcopy(frozen_seq))
def test_get_torch_func_signature(self):
    """get_signature_for_torch_op must not crash on any callable in the torch namespace."""
    for key in dir(torch):
        obj = getattr(torch, key)
        if callable(obj):
            # smoke test only: result is unused, absence of exceptions is the assertion
            schemas = get_signature_for_torch_op(obj)
def test_find_uses(self):
    """Node.users must track every consumer of a value."""
    graph = torch.fx.Graph()
    x = torch.fx.Proxy(graph.placeholder('x'))

    relu_out = torch.relu(x)
    add_out = x + x
    neg_out = torch.neg(x)
    graph.output((relu_out + add_out + neg_out).node)
    graph.lint()

    users = x.node.users
    self.assertEqual(len(users), 3)
    prefixes = set(['relu', 'add', 'neg'])
    for user in users:
        assert any(user.name.startswith(p) for p in prefixes)
def test_inline_graph(self):
    """graph_copy with a val_map can splice one graph's output into another's input."""
    class InlineInto(torch.nn.Module):
        def forward(self, x):
            return torch.relu(x)

    class ToInline(torch.nn.Module):
        def forward(self, x):
            return torch.neg(x)

    inline_into = symbolic_trace(InlineInto())
    to_inline = symbolic_trace(ToInline())

    combined_graph = torch.fx.Graph()
    output_node = combined_graph.graph_copy(inline_into.graph, {})

    input_node = list(to_inline.graph.nodes)[0]
    assert input_node and input_node.op == 'placeholder'

    # feed the first graph's output into the second graph's placeholder
    val_map = {input_node : output_node}
    output = combined_graph.graph_copy(to_inline.graph, val_map)
    combined_graph.output(output)

    combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)

    input = torch.rand(3, 4)
    self.assertEqual(combined_module(input), input.relu().neg())
def test_multi_insert_point(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
relu = torch.relu(x)
with graph.inserting_before(relu.node):
y = torch.neg(x)
z = torch.tanh(y)
graph.output((relu.node, z.node))
graph.lint()
expected_ops = ['x', 'neg', 'tanh', 'relu']
for node, expected in zip(graph.nodes, expected_ops):
assert expected in node.name
def test_reassign_args_kwargs_uses(self):
    """Assigning Node.args must keep the users bookkeeping of all involved nodes in sync."""
    graph = torch.fx.Graph()
    x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
    z = x + y
    zed = z + z + z
    graph.output(zed.node)
    graph.lint()

    # zed = z + z + z -> zed = z + z + x
    zed.node.args = (zed.node.args[0], x.node)
    self.assertEqual(list(x.node.users.keys()), [z.node, zed.node])

    # z = x + y -> z = y + y
    z.node.args = (y.node, y.node)
    self.assertEqual(list(x.node.users.keys()), [zed.node])
def test_trace_function(self):
    """A free function (not an nn.Module) must be traceable."""
    def foo(x, y):
        return torch.relu(x) + y

    x, y = torch.randn(3, 4), torch.randn(3, 4)
    self.checkGraphModule(foo, (x, y))
def test_trace_dict_int_keys(self):
    """A dict argument with concrete (int) keys is a legal leaf-module input."""
    class ModWithDictArg(torch.nn.Module):
        def forward(self, d : Dict[int, torch.Tensor]):
            return d[42]

    class CallsModWithDict(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.m = ModWithDictArg()

        def forward(self, x):
            return self.m({42: x})

    class MyTracer(torch.fx.Tracer):
        def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
            return isinstance(m, ModWithDictArg)

    traced_graph = MyTracer().trace(CallsModWithDict())
def test_trace_dict_proxy_keys(self):
    """A dict keyed by a Proxy (traced value) must be rejected by the tracer."""
    class ModWithDictArg(torch.nn.Module):
        def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
            return d[42]

    class CallsModWithDict(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.m = ModWithDictArg()

        def forward(self, x):
            # `x` is a Proxy during tracing, so using it as a dict key is invalid.
            return self.m({x: x})

    class MyTracer(torch.fx.Tracer):
        def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
            return isinstance(m, ModWithDictArg)

    with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
        traced_graph = MyTracer().trace(CallsModWithDict())
def test_module_deepcopy_edit_nodes(self):
    """Editing nodes of a deepcopied GraphModule must not affect the original."""
    class Foo(torch.nn.Module):
        def forward(self, x):
            return torch.relu(x)

    traced1 = symbolic_trace(Foo())
    copied = copy.deepcopy(traced1)

    # Rewrite relu -> neg only in the copy.
    for node in copied.graph.nodes:
        if node.target == torch.relu:
            node.target = torch.neg

    copied.recompile()
    traced1.recompile()

    x = torch.randn(15, 15)
    torch.testing.assert_allclose(traced1(x), torch.relu(x))
    torch.testing.assert_allclose(copied(x), torch.neg(x))
def test_direct_param_use(self):
    """Direct access to a submodule's Parameter traces to get_attr nodes, not baked constants."""
    class TransposeTest(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.b = torch.nn.Parameter(torch.rand(4, 3))

        def forward(self, x):
            return self.b

    class Foo(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.a = TransposeTest()

        def forward(self, x):
            return self.a.b, self.a.b.t(), self.a.b.view(12)

    traced = torch.fx.symbolic_trace(Foo())
    # No `_tensor_constant*` attributes should have been created.
    assert(all('constant' not in node.target for node in traced.graph.nodes))
def test_single_default_arg(self):
    """Tracing honors a single defaulted forward() parameter."""
    class M(torch.nn.Module):
        def forward(self, y=1):
            return y

    mod = M()
    # Exercise both the default and an explicit value.
    for call_args in ((), (3,)):
        self.checkGraphModule(mod, call_args)
def test_multiple_default_args(self):
    """Tracing honors multiple defaulted forward() parameters."""
    class M(torch.nn.Module):
        def forward(self, y=1, z=2):
            return y + z

    mod = M()
    # No args, one arg, and both args supplied.
    for call_args in ((), (3,), (3, 4)):
        self.checkGraphModule(mod, call_args)
def test_regular_and_default_args(self):
    """Tracing honors a mix of required and defaulted forward() parameters."""
    class M(torch.nn.Module):
        def forward(self, x, y=1):
            return x + y

    mod = M()
    for call_args in ((2,), (2, 3)):
        self.checkGraphModule(mod, call_args)
def test_string_literal_return(self):
    """A constant string return value survives tracing unchanged."""
    class M(torch.nn.Module):
        def forward(self):
            return "foo"

    self.checkGraphModule(M(), ())
def test_namedtuple_return_qualname(self):
    """Returning a NamedTuple from forward() round-trips through tracing."""
    class NamedTupReturn(torch.nn.Module):
        def forward(self, x):
            return MyNamedTup(x, x)

    traced = symbolic_trace(NamedTupReturn())
    input = torch.rand(3, 4)
    self.assertEqual(traced(input), MyNamedTup(input, input))
def test_update_args_kwargs_yells_at_you(self):
    """Node.__update_args_kwargs is name-mangled private API and must not be reachable."""
    symtraced = symbolic_trace(SimpleTest())
    node = next(iter(symtraced.graph.nodes))
    with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
        node.__update_args_kwargs((), {})
def test_torchbind_class_attribute_in_fx(self):
    """A torchbind custom-class attribute can be traced and its methods called."""
    if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
        self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")

    class FooBar1234(torch.nn.Module):
        def __init__(self):
            super(FooBar1234, self).__init__()
            self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])

        def forward(self):
            return self.f.top()

    m = FooBar1234()
    self.checkGraphModule(m, ())
def test_torchbind_class_attribute_in_fx_tensor_arg(self):
    """A torchbind custom-class method taking a Tensor traces to a call_method node."""
    if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
        self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")

    class FooBar2341(torch.nn.Module):
        def __init__(self):
            super(FooBar2341, self).__init__()
            self.f = torch.classes._TorchScriptTesting._ReLUClass()

        def forward(self, x):
            return self.f.run(x)

    m = FooBar2341()

    traced = symbolic_trace(m)
    input = torch.randn(3, 4)
    self.assertEqual(traced(input), m(input))

    self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_script_method_trace(self):
    """A scripted submodule is treated as opaque and traced to a call_method node."""
    class Scripted(torch.nn.Module):
        def forward(self, x):
            return torch.relu(x)

    class Holder(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.s = torch.jit.script(Scripted())

        def forward(self, x):
            return self.s(x)

    h = Holder()
    traced = symbolic_trace(h)
    input = torch.randn(3, 4)
    self.assertEqual(traced(input), h(input))

    self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_namedtuple_return_trace(self):
    """Returning the module-level Pair NamedTuple from forward() round-trips."""
    class NamedTupReturn(torch.nn.Module):
        def forward(self, x):
            return Pair(x, x)

    traced = symbolic_trace(NamedTupReturn())
    input = torch.rand(3, 4)
    self.assertEqual(traced(input), Pair(input, input))
def test_named_tuple_inlined(self):
    """A NamedTuple may be inlined directly into a node's args/kwargs and still execute."""
    class NamedTupMod(torch.nn.Module):
        def forward(self, inp):
            return wrapped_named_tup(Pair(inp, 1.2), p2=Pair(3.4, inp))

    m = NamedTupMod()
    input = torch.rand(3, 4)
    ref = m(input)
    traced = symbolic_trace(m)

    res = traced(input)
    self.assertEqual(ref, res)

    # Check Pair NamedTuple works when inlined into the function call.
    ph = call_func = None
    for node in traced.graph.nodes:
        if node.op == "placeholder":
            ph = node
        elif node.op == "call_function" and node.target == wrapped_named_tup:
            # Replace the args/kwargs with Pair instances that embed the
            # placeholder Node directly (rather than separate construction nodes).
            node.update_arg(0, Pair(ph, 1.2))
            node.update_kwarg("p2", Pair(3.4, ph))
            call_func = node
            break

    self.assertTrue(call_func is not None)
    self.assertTrue(isinstance(call_func.args[0], Pair))
    self.assertTrue(isinstance(call_func.kwargs["p2"], Pair))
    self.assertEqual(_format_arg(call_func.args[0]), "Pair(x=%inp, y=1.2)")
    self.assertEqual(_format_arg(call_func.kwargs["p2"]), "Pair(x=3.4, y=%inp)")

    # Dead-code elimination must not remove the placeholder still used inside the Pairs.
    traced.graph.eliminate_dead_code()
    traced.recompile()
    res = traced(input)
    self.assertEqual(ref, res)
def test_return_type_exists(self):
    """Return annotations survive tracing (as mangled names) and scripting (as real types)."""
    class ReturnTypeModule(torch.nn.Module):
        def other(self, x: List[str]) -> List[str]:
            return x

        def forward(self, x: List[str]) -> List[str]:
            return self.other(x)

    traced = symbolic_trace(ReturnTypeModule())
    # FX emits typing names with a `typing_` prefix in generated code.
    self.assertIn("-> typing_List[str]", traced._code)
    scripted = torch.jit.script(traced)
    self.assertIn("-> List[str]", scripted.code)
def getitem_inner(self):
    """Shared body for the __getitem__ tracing tests (run directly or in a subprocess)."""
    class GetItemBase(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.register_buffer('pe', torch.randn(8, 8))

    class GetItem1(GetItemBase):
        def forward(self, x):
            return self.pe[:, :x.size(0)]

    class GetItem2(GetItemBase):
        def forward(self, x):
            return self.pe[x.size(0)]

    class GetItem3(GetItemBase):
        def forward(self, x):
            return self.pe[4]  # fx creates `self._tensor_constant0` here

    self.checkGraphModule(GetItem1(), [torch.zeros(4)])
    self.checkGraphModule(GetItem2(), [torch.zeros(4)])
    self.checkGraphModule(GetItem3(), [torch.zeros(4)])
@unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
                     "Will be checked in test_getitem_subproc")
def test_getitem(self):
    """Run the getitem tests in-process, only when the env flag enables the patch."""
    self.getitem_inner()
def test_getitem_subproc(self):
    """Run the getitem tests in a subprocess.

    Workaround for https://github.com/pytorch/pytorch/issues/50710: the
    Tensor.__getitem__ patch cannot be safely applied in this process.
    """
    worker = Process(target=run_getitem_target)
    worker.start()
    worker.join()
    self.assertEqual(worker.exitcode, 0)
def test_user_friendly_call_provenance_with_function(self):
    """Scripting a traced function reports the call site ('fn.forward') in its error."""
    def fn(x):
        return wrapper_fn(x)

    traced = torch.fx.symbolic_trace(fn)

    with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
                                "being compiled since it was called"
                                " from 'fn.forward'"):
        scripted = torch.jit.script(traced)
def test_user_friendly_call_provenance_with_module(self):
    """Scripting a traced module reports the call site ('M.forward') in its error."""
    class M(torch.nn.Module):
        def forward(self, x):
            return wrapper_fn(x)

    traced = torch.fx.symbolic_trace(M())

    with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
                                "being compiled since it was called"
                                " from 'M.forward'"):
        scripted = torch.jit.script(traced)
def test_snake_case(self):
    """ModuleDict keys of any casing produce snake_case node names but original targets."""
    class M(torch.nn.Module):
        def __init__(self):
            super(M, self).__init__()
            self.activations = torch.nn.ModuleDict([
                ["snake_case", torch.nn.ReLU()],
                ["PascalCase", torch.nn.LeakyReLU()],
                ["ALL_CAPS", torch.nn.PReLU()]
            ])

        def forward(self, x):
            a = self.activations["snake_case"](x)
            b = self.activations["PascalCase"](x)
            c = self.activations["ALL_CAPS"](x)
            return a, b, c

    traced = symbolic_trace(M())

    # (expected node name, expected node target) in graph order.
    check = [
        ("activations_snake_case", "activations.snake_case"),
        ("activations_pascal_case", "activations.PascalCase"),
        ("activations_all_caps", "activations.ALL_CAPS")
    ]

    i = 0
    for node in traced.graph.nodes:
        if node.op == "placeholder" or node.op == "output":
            continue
        name = check[i][0]
        target = check[i][1]
        self.assertEqual(name, node.name)
        self.assertEqual(target, node.target)
        i += 1
    self.assertEqual(i, 3)
def test_no_mutation(self):
    """immutable_list rejects item assignment."""
    from torch.fx.immutable_collections import immutable_list
    frozen = immutable_list([3, 4])
    with self.assertRaisesRegex(NotImplementedError, "new_args"):
        frozen[0] = 4
def test_partial_trace(self):
    """Tracing with concrete_args specializes the module on the given value
    and inserts a torch._assert guarding that value at runtime.

    Also checks that a higher-order function argument can be concretized.
    """
    class Foo(torch.nn.Module):
        def forward(self, x, y):
            if y:
                return 2 * x
            else:
                return x

    mod = Foo()
    mod_true = symbolic_trace(mod, concrete_args={'y': True})
    mod_false = symbolic_trace(mod, concrete_args={'y': False})
    self.assertEqual(mod_true(3, True), 6)
    # The specialized graph must carry a torch._assert pinning `y`.
    # (Generator instead of list comp; stray debug print of mod_true.code removed.)
    assert any(i.target == torch._assert for i in mod_true.graph.nodes)
    with self.assertRaises(AssertionError):
        mod_true(3, False)
    self.assertEqual(mod_false(3, False), 3)
    with self.assertRaises(AssertionError):
        mod_false(3, True)

    def f_higher(a, f):
        return f(a)

    nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
    self.assertEqual(nf(3, lambda x: x * 2), 6)
def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
    """Errors raised inside FX-generated forward code get the explanatory banner."""
    class M(torch.nn.Module):
        def __init__(self):
            super(M, self).__init__()
            self.W = torch.nn.Parameter(torch.randn(5))

        def forward(self, x):
            return torch.dot(self.W, x)

    traced = torch.fx.symbolic_trace(M())

    # Insert an extra relu on the output value via graph surgery.
    out = [n for n in traced.graph.nodes if n.op == "output"][-1]
    with traced.graph.inserting_before(out):
        relu_out = traced.graph.call_method(method_name='relu',
                                            args=(out.args[0],))
    out.args = (relu_out,)

    traced.recompile()

    with self.capture_stderr() as captured:
        with self.assertRaises(TypeError):
            # Passing an int where a Tensor is expected fails inside generated code.
            traced(5)

    self.assertRegex(captured[0],
                     r"Call using an FX-traced Module, line .* of the "
                     r"traced Module's generated forward function:")
def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
    """Errors raised inside a submodule (not generated code) get no FX banner."""
    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(3, 4)

        def forward(self, x):
            return self.linear(x)

    traced = torch.fx.symbolic_trace(M())

    # Do not change this to `capture_stderr` or another context
    # manager without ensuring that the output is as expected
    try:
        # Shape mismatch: Linear(3, 4) cannot consume a (5, 5) input.
        traced(torch.rand(5, 5))
    except RuntimeError:
        captured = traceback.format_exc()

    self.assertNotRegex(captured,
                        r"Call using an FX-traced Module, line .* of the "
                        r"traced Module's generated forward function:")
def test_graph_module_replicate_for_dp(self):
    """_replicate_for_data_parallel yields a module computing the same result."""
    class Foo(torch.nn.Module):
        def forward(self, x):
            return torch.relu(x)

    gm = torch.fx.symbolic_trace(Foo())

    sample = torch.randn(5, 3)
    expected = gm(sample)

    replica = gm._replicate_for_data_parallel()
    torch.testing.assert_allclose(replica(sample), expected)
def test_ast_rewriter_rewrites_assert(self):
    """RewritingTracer can trace through a plain `assert` statement."""
    class M(torch.nn.Module):
        def forward(self, x: torch.Tensor, y: int, z: int):
            assert y == z
            return torch.add(x, x)

    ast_rewriter = RewritingTracer()
    graph = ast_rewriter.trace(M())
    traced = GraphModule(ast_rewriter.root, graph, "gm")
    traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
    """RewritingTracer can trace through an `assert` with a message."""
    class M(torch.nn.Module):
        def forward(self, x: torch.Tensor, y: int, z: int):
            assert y == z, "msg"
            return torch.add(x, x)

    ast_rewriter = RewritingTracer()
    graph = ast_rewriter.trace(M())
    traced = GraphModule(ast_rewriter.root, graph, "gm")
    traced.graph.lint()
def test_throw_out_variant(self):
    """With check_mutable_operations on, tracing an out= op raises."""
    def foo(x):
        y = torch.rand_like(x)
        torch.sigmoid(x, out=y)
        return y

    class MyTracer(torch.fx.Tracer):
        # Opt in to the mutable-operation check (feature-flagged off by default).
        check_mutable_operations = True

    tracer = MyTracer()
    with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'):
        traced_graph = tracer.trace(foo)
def test_ast_rewriter_reassigns_submodules(self):
    """RewritingTracer handles a module with submodules that forward() never calls."""
    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.bn = torch.nn.BatchNorm2d(100)

        def forward(self, x: torch.Tensor):
            return torch.add(x, x)

    ast_rewriter = RewritingTracer()
    graph = ast_rewriter.trace(M())
    traced = GraphModule(ast_rewriter.root, graph, "gm")
    traced.graph.lint()
def test_ast_rewriter_wrap(self):
    """A torch.fx.wrap'd function stays a call_function leaf under RewritingTracer."""
    self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))

    def to_trace(y):
        return (
            a_lifted_leaf((4, y), 3)
            + a_lifted_leaf((3, 4), 5)
            + a_lifted_leaf((y, y), y)
        )

    ast_rewriter = RewritingTracer()
    graph = ast_rewriter.trace(to_trace)
    traced = GraphModule(ast_rewriter.root, graph, "gm")

    self.assertIn("a_lifted_leaf", traced.code)
    self.assertEqual(27, traced(2))
    # Wrapping must not replace the module-level function object.
    self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_ast_rewriter_wrap_fn_directly(self):
    """A function wrapped by passing the fn object directly also stays a leaf."""
    self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))

    def to_trace(y):
        return (
            a_lifted_leaf2((4, y), 3)
            + a_lifted_leaf2((3, 4), 5)
            + a_lifted_leaf2((y, y), y)
        )

    ast_rewriter = RewritingTracer()
    graph = ast_rewriter.trace(to_trace)
    traced = GraphModule(ast_rewriter.root, graph, "gm")

    self.assertIn("a_lifted_leaf2", traced.code)
    self.assertEqual(27, traced(2))
    # Wrapping must not replace the module-level function object.
    self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_profiler_ranges_side_effect(self):
    """Dead-code elimination must keep side-effectful profiler range ops."""
    g = torch.fx.Graph()
    handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',))
    g.call_function(torch.ops.profiler._record_function_exit, (handle,))
    g.output(None)

    # Plain dict used as an insertion-ordered set via setdefault.
    found_targets = {}
    for node in g.nodes:
        if node.op == 'call_function':
            found_targets.setdefault(node.target)
    self.assertEqual(
        list(found_targets.keys()),
        [torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
    )

    # Neither node feeds the output, but both must survive DCE.
    g.eliminate_dead_code()
    found_targets = {}
    for node in g.nodes:
        if node.op == 'call_function':
            found_targets.setdefault(node.target)
    self.assertEqual(
        list(found_targets.keys()),
        [torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
    )
def test_ast_rewriter_wrapped_via_decorator(self):
    """A decorator-wrapped function stays a leaf and leaves no patching residue."""
    class F(torch.nn.Module):
        def forward(self, x):
            return wrapped_via_decorator(x)

    ast_rewriter = RewritingTracer()
    graph = ast_rewriter.trace(F())
    traced = GraphModule(ast_rewriter.root, graph, "gm")

    self.assertIn("wrapped_via_decorator", traced.code)
    self.assertEqual(traced(0), 1)
    self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
    self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrapped_via_decorator_and_transformed(self):
    """Transformer preserves a decorator-wrapped leaf function through a rewrite."""
    self.assertEqual(wrapped_via_decorator(0), 1)

    def to_trace(y):
        return wrapped_via_decorator(y)

    ast_rewriter = RewritingTracer()
    graph = ast_rewriter.trace(to_trace)
    traced = GraphModule(ast_rewriter.root, graph, "gm")

    self.assertIn("wrapped_via_decorator", traced.code)
    self.assertEqual(traced(0), 1)
    self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
    self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))

    # Round-trip through Transformer must preserve the leaf call.
    transformed = torch.fx.Transformer(traced).transform()
    self.assertIn("wrapped_via_decorator", transformed.code)
    self.assertEqual(transformed(0), 1)
    self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
    self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrap_with_submodule(self):
    """A wrapped function receiving a submodule as argument traces correctly."""
    class M(torch.nn.Module):
        def __init__(self):
            super(M, self).__init__()
            self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)

        def forward(self, x: torch.Tensor):
            return wrapped_with_submodule(x, self.batchnorm1d)

    ast_rewriter = RewritingTracer()
    graph = ast_rewriter.trace(M())
    traced = GraphModule(ast_rewriter.root, graph, "gm")

    self.assertIn("wrapped_with_submodule", traced.code)

    input = torch.rand(3, 2)
    # affine=False => no learned parameters, so a fresh reference module matches.
    ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
    self.assertEqual(ref_batchnorm1d(input), traced(input))
def test_submodule_manipulation_API(self):
    """End-to-end exercise of GraphModule submodule/parameter/buffer manipulation:
    add_submodule, delete_submodule, get_submodule/parameter/buffer, get_attr
    warnings, and delete_all_unused_submodules garbage collection."""
    class C(torch.nn.Module):
        def __init__(self):
            super(C, self).__init__()
            self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
            self.param = torch.nn.Parameter(torch.rand(2, 3))

        def forward(self, x):
            return self.conv(torch.cat([self.param, x]))

    class B(torch.nn.Module):
        def __init__(self):
            super(B, self).__init__()
            self.linear = torch.nn.Linear(100, 200)
            self.register_buffer("buf", torch.randn(2, 3))
            self.net_c = C()

        def forward(self, x):
            return self.linear(torch.cat([self.buf, self.net_c(x)]))

    class A(torch.nn.Module):
        def __init__(self):
            super(A, self).__init__()
            self.net_b = B()
            self.param = torch.nn.Parameter(torch.rand(2, 3))

        def forward(self, x):
            return self.net_b(x) + self.param

    a = symbolic_trace(A())

    # Swap the conv node for a newly-added dropout submodule.
    a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))

    conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
    with a.graph.inserting_before(conv):
        with warnings.catch_warnings(record=True) as w:
            dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
                                          args=conv.args)
            # No warning: the dropout submodule exists in the owning module.
            self.assertEqual(len(w), 0)

    conv.replace_all_uses_with(dropout)
    a.graph.erase_node(conv)
    a.recompile()

    def module_exists(gm: GraphModule, path: str) -> bool:
        return any(path == name for name, _ in gm.named_modules())

    def parameter_exists(gm: GraphModule, path: str) -> bool:
        return (any(path == name for name, _ in gm.named_parameters())
                and any(path == name for name in gm.state_dict().keys()))

    def buffer_exists(gm: GraphModule, path: str) -> bool:
        return (any(path == name for name, _ in gm.named_buffers())
                and any(path == name for name in gm.state_dict().keys()))

    # Test that we added the "dropout" submodule
    self.assertTrue(module_exists(a, "net_b.net_c.dropout"))

    # Test `get_submodule` with an added submodule
    self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))

    # Test that the "conv" submodule is still there
    self.assertTrue(module_exists(a, "net_b.net_c.conv"))

    # Test `get_submodule` with an original module
    self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))

    # Test that the "conv" node is NOT still there
    conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
    self.assertEqual(conv, [])

    a.delete_submodule("net_b.net_c.conv")

    # Test that the "conv" submodule is now gone
    self.assertFalse(module_exists(a, "net_b.net_c.conv"))

    # Test `get_submodule` with a deleted submodule
    with self.assertRaisesRegex(AttributeError, "has no attribute "
                                "`conv`"):
        self.assertIsNone(a.get_submodule("net_b.net_c.conv"))

    # Test `get_attr` warnings
    cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]

    with a.graph.inserting_before(cat):

        with warnings.catch_warnings(record=True) as w:
            param = a.graph.get_attr(qualified_name="net_b.net_c.param")
            self.assertEqual(len(w), 0)

        with self.assertWarnsRegex(UserWarning, "Attempted to "
                                   "insert a get_attr Node with no "
                                   "underlying reference in the "
                                   "owning GraphModule"):
            bad_param = a.graph.get_attr(qualified_name="net_b.param")
            a.graph.erase_node(bad_param)

    cat.args = (*cat.args, param)

    a.recompile()

    a.graph.lint()

    # Test `get_parameter`
    a.get_parameter("net_b.net_c.param")
    with self.assertRaisesRegex(AttributeError, "is not an "
                                "nn.Parameter"):
        a.get_parameter("net_b.buf")
    with self.assertRaisesRegex(AttributeError, "has no attribute "
                                "`param`"):
        a.get_parameter("net_b.param")

    # Test `get_buffer`
    a.get_buffer("net_b.buf")
    with self.assertRaisesRegex(AttributeError, "is not a "
                                "buffer"):
        a.get_buffer("net_b.net_c.param")
    with self.assertRaisesRegex(AttributeError, "has no attribute "
                                "`buf`"):
        a.get_buffer("net_b.net_c.buf")

    # Test non-nested attributes
    a.get_submodule("")
    a.get_parameter("param")

    # Insert some unused submodules
    a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
    a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
    a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
    a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))

    # Garbage collection
    a.delete_all_unused_submodules()

    # Test that all the unused submodules are gone
    self.assertFalse(module_exists(a, "net_b.embedding"))
    self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
    self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
    self.assertFalse(module_exists(a, "batch_norm_2d"))

    # Test that we didn't delete any unused Parameters or buffers
    self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
    self.assertTrue(buffer_exists(a, "net_b.buf"))

    a.graph.lint()
def test_delete_unused_submodules_leaf(self):
    """delete_all_unused_submodules must keep a leaf submodule that the graph calls."""
    class SubModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(10, 10)
            self.relu = torch.nn.ReLU()

        def forward(self, x):
            x = self.linear(x)
            x = self.relu(x)
            return x

    class Model(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.submod = SubModule()

        def forward(self, x):
            x = self.submod(x)
            return x

    model = Model()

    class MyCustomTracer(torch.fx.Tracer):
        def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
            # Treat `submod` as opaque: its internals never appear in the graph.
            return module_qualified_name == "submod"

    inputs = torch.randn(1, 10)
    traced_graph = MyCustomTracer().trace(model)
    gm2 = torch.fx.GraphModule(model, traced_graph)
    gm2.delete_all_unused_submodules()
    torch.testing.assert_allclose(gm2(inputs), model(inputs))
def test_tracing_graphmodules_as_leaf_submodules(self):
    """Trace modules whose submodules are themselves GraphModules, varying
    which submodules are treated as leaves, and check __call__ semantics."""
    class A(torch.nn.Module):
        def forward(self, t):
            return t + t

    class B(torch.nn.Module):
        def __init__(self):
            super(type(self), self).__init__()
            self.calling = False
            self.called = False

        def forward(self, t):
            if self.calling:
                return t - t
            else:
                return t + t

        def __call__(self, *args):
            self.called = True
            self.calling = True
            return super(type(self), self).__call__(*args)
            # NOTE(review): the line below is unreachable (after return) —
            # presumably intentional so `calling` stays True once called; confirm.
            self.calling = False

    class M(torch.nn.Module):
        def __init__(self, a, b):
            super().__init__()
            self.a = a
            self.b = b

        def forward(self, t):
            x = self.a(t)
            y = self.b(t)
            return x + y

    class LeafTracer(Tracer):
        def is_leaf_module(self, module, name):
            return True

    class LeafTracerNotB(Tracer):
        def is_leaf_module(self, module, name):
            return False if "b" in name else True

    # Recompile calls added "for fun", since they
    # chain __call__ wrappers.

    #
    # Test: B as a regular, non-leaf module
    #
    a = symbolic_trace(A())
    a.recompile()
    m = M(a, B())
    graph = LeafTracerNotB().trace(m)
    gm = GraphModule(m, graph)
    gm.recompile()

    # Test graphmodule/submodule a is not inlined.
    self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
    match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
    self.assertTrue(len(match) == 1)

    # Test submodule b is not treated as leaf.
    self.assertFalse(hasattr(gm, "b"))

    # Test assert custom __call__ on submodule b was honored.
    match = [
        n
        for n in gm.graph.nodes
        if n.op == "call_function" and n.target == operator.sub
    ]
    self.assertTrue(len(match) == 1)

    #
    # Test: B as a regular, leaf module
    # symbolic_trace should only patch torch.nn.Module.__call__,
    # which means B.__call__ should still execute
    #
    a = symbolic_trace(A())
    a.recompile()
    b = B()
    m = M(a, b)
    graph = LeafTracer().trace(m)
    gm = GraphModule(m, graph)
    gm.recompile()

    # Test graphmodule/submodule a is not inlined.
    self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
    match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
    self.assertTrue(len(match) == 1)

    # Test submodule b is leaf:
    self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
    match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
    self.assertTrue(len(match) == 1)

    # Test b.__call__ was run
    self.assertTrue(b.called)
    self.assertTrue(gm.get_submodule("b").called)

    #
    # Test: B as GraphModule leaf
    # __call__ not honored since symbolic_trace directly invokes forward()
    #
    a = symbolic_trace(A())
    a.recompile()
    b = symbolic_trace(B())
    b.recompile()
    m = M(a, b)
    graph = LeafTracer().trace(m)
    gm = GraphModule(m, graph)
    gm.recompile()

    self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
    match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
    self.assertTrue(len(match) == 1)

    self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
    match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
    self.assertTrue(len(match) == 1)
def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
    """Shared body: GraphModule init (from dict or root module) must carry over
    buffers and parameters referenced by the graph."""
    class MyModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.register_buffer("my_buff", torch.rand(3, 4))
            self.register_parameter(
                "my_param", torch.nn.Parameter(torch.rand(3, 4))
            )

        def forward(self, x):
            return x + self.my_buff + self.my_param

    mod = MyModule()
    mod_traced = symbolic_trace(mod)

    # Create new GraphModule based on original, either w/ dict or root module.
    orig_buff = mod_traced.get_buffer("my_buff")
    orig_param = mod_traced.get_parameter("my_param")
    mod_traced_new = GraphModule(
        {"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
        mod_traced.graph,
    )

    # Check that both my_buff and my_param are found and the same.
    try:
        new_buff = mod_traced_new.get_buffer("my_buff")
    except Exception:
        self.fail("Did not find my_buff")
    self.assertEqual(orig_buff, new_buff)

    try:
        new_param = mod_traced_new.get_parameter("my_param")
    except Exception:
        self.fail("Did not find my_param")
    self.assertEqual(orig_param, new_param)

    x = torch.rand(3, 4)
    orig_out = mod_traced(x)
    submodules_out = mod_traced_new(x)

    self.assertEqual(orig_out, submodules_out)
def test_graph_module_init_buffer_param_copied_dict_init(self):
    """GraphModule constructed from a plain dict copies buffers/params."""
    self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
def test_graph_module_init_buffer_param_copied_mod_init(self):
    """GraphModule constructed from a root module copies buffers/params."""
    self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
def test_annotations_with_no_forward_references(self):
    """Non-string (resolved) annotations on forward() trace correctly."""
    class A:
        def __call__(self, x: torch.Tensor):
            return torch.add(x, x)

    class M(torch.nn.Module):
        def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
            return a(x)

    self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_forward_references(self):
    """String ('forward reference') annotations on forward() trace correctly."""
    class A:
        def __call__(self, x: torch.Tensor):
            return torch.add(x, x)

    class M(torch.nn.Module):
        def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
            return a(x)

    self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
    """typing container annotations with resolved inner types trace correctly."""
    class A:
        def __call__(self, x: torch.Tensor):
            return torch.add(x, x)

    class M(torch.nn.Module):
        def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
            return a(x[0])

    self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
    """typing container annotations with string inner types trace correctly."""
    class A:
        def __call__(self, x: torch.Tensor):
            return torch.add(x, x)

    class M(torch.nn.Module):
        def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
            return a(x)[0]

    self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
@unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
                 "`annotations` is not defined in Python <3.7")
def test_annotation_with_future(self):
    """Importing a module that uses `from __future__ import annotations` must not break FX."""
    try:
        import fx.test_future    # noqa: F401
    finally:
        # Clean up so later tests are not affected by the cached module.
        del sys.modules["__future__"]
def test_annotations_empty_tuple(self):
    """Empty-tuple annotations survive tracing (mangled) and scripting (verbatim)."""
    class Foo(torch.nn.Module):
        def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
            return "foo"

    traced = torch.fx.symbolic_trace(Foo())

    x = ()
    y = ("bar", ())

    traced(x, y)

    # FX generated code uses `typing_`-prefixed names for typing constructs.
    FileCheck().check("_Tuple[()]") \
               .check("typing_Tuple[str,typing_Tuple[()]]") \
               .run(traced.code)

    scripted = torch.jit.script(traced)

    scripted(x, y)

    FileCheck().check("Tuple[()]") \
               .check("Tuple[str, Tuple[()]]") \
               .run(scripted.code)
@unittest.skipIf(IS_WINDOWS, "Python Windows bug? https://bugs.python.org/issue45108")
def test_assert(self):
    """With trace_asserts enabled, `assert` statements are captured and enforced."""
    def f(x):
        assert x > 1
        return x + 1

    try:
        # Flag is global state on TracerBase; restore it even if tracing fails.
        torch.fx.proxy.TracerBase.trace_asserts = True
        traced = symbolic_trace(f)
    finally:
        torch.fx.proxy.TracerBase.trace_asserts = False

    self.assertEqual(f(2), traced(2))

    with self.assertRaises(AssertionError):
        traced(0)
def test_pytree(self):
    """Trace functions over pytree-structured concrete_args (lists, dicts,
    namedtuples, custom registered types) and verify flatten/unflatten,
    retracing, and pickling round-trips."""
    def f_sum(x):
        return sum(x)

    def f_sum_dict(x):
        out = 0
        for k, v in x.items():
            out += v
        return out

    def f_dict_list_map(x):
        new_dict = {}
        for k, v in x.items():
            new_dict[k] = [i + 1 for i in v]
        return new_dict

    def f_dict_add(x):
        return x['a'] + sum(x['z'])

    def f_namedtuple_add(x):
        return x.x + x.y

    # Register Foo as a pytree node so it can appear in concrete_args.
    pytree._register_pytree_node(
        Foo,
        lambda x: ([x.a, x.b], None),
        lambda x, _: Foo(x[0], x[1]),
    )
    fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])

    def f_custom(x):
        return x.a + x.b

    def f_custom_dict(x):
        return f_sum_dict(x.a) + x.b

    def f_return_custom(x):
        return Foo(x.b, x.a)

    tests = [
        (f_sum, [PH, PH, PH]),
        (f_sum, []),
        (f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
        (f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
        (f_dict_list_map, {5: (PH, PH, PH)}),
        (f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
        (f_dict_add, {'a': PH, 'z': []}),
        (f_custom, Foo(PH, PH)),
        (f_custom, Foo(PH, 3)),
        (f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
        # (f_return_custom, Foo(PH, PH)),  # Don't currently support output pytrees
        (f_namedtuple_add, Point(PH, PH)),
    ]

    def verify_pytree(f, inp):
        # Replace each PH placeholder with a fresh random tensor.
        val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
        num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
        orig_out = f(val)
        nf = symbolic_trace(f, concrete_args={'x': inp})
        self.assertEqual(nf(val), orig_out)

        # Strip the pytree codegen and check the flat-args path agrees.
        bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
        bare_fx.graph.set_codegen(CodeGen())
        bare_fx.recompile()
        self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(val))), orig_out)

        assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
        assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)

        # Retracing without concrete_args collapses back to one placeholder.
        nf = symbolic_trace(nf)
        self.assertEqual(nf(val), orig_out)
        assert "tree_flatten_spec" not in nf.code
        assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)

        # Retracing with concrete_args restores the flattened placeholders.
        nf = symbolic_trace(nf, concrete_args={'x': inp})
        self.assertEqual(nf(val), orig_out)
        assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
        assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)

        # Pickle round-trip.
        pickled = pickle.dumps(nf)
        nf = pickle.loads(pickled)
        self.assertEqual(nf(val), orig_out)

    for f, inp in tests:
        verify_pytree(f, inp)
def test_pytree_concrete(self):
    """Mixed pytree concrete_args (bool + nested dict of placeholders) trace and retrace."""
    def f(b, a):
        if b:
            return a['a']
        else:
            return a['z']

    inp = {'a': {'a': PH, 'z': PH}, 'b': True}
    nf = symbolic_trace(f, concrete_args=inp)
    val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
    self.assertEqual(nf(**val), f(**val))

    nf = symbolic_trace(nf)
    self.assertEqual(nf(**val), f(**val))
def test_custom_codegen(self):
    """A custom CodeGen can change the calling convention (single list arg)
    and still support recompile, process_inputs/outputs, and scripting."""
    class ListCodeGen(CodeGen):
        def gen_fn_def(self, free_vars, maybe_return_annotation):
            # Accept one list argument and unpack it into the free variables.
            lst_unpack = f"""
def forward(self, args_list: List[torch.Tensor]){maybe_return_annotation}:
    {', '.join(free_vars)} = args_list"""
            return lst_unpack

        def additional_globals(self):
            return [('List', typing.List)]

        def process_inputs(self, *inputs):
            assert(len(inputs) == 1)
            return inputs[0]

    def f(a, b):
        return a + b

    nf = symbolic_trace(f)
    vals = [torch.randn(3), torch.randn(3)]
    self.assertEqual(nf(*vals), f(*vals))

    nf.graph.set_codegen(ListCodeGen())
    nf.recompile()

    # A deep copy with default CodeGen keeps the flat calling convention.
    bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
    bare_fx.graph.set_codegen(CodeGen())
    bare_fx.recompile()

    self.assertEqual(nf(vals), f(*vals))
    self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(vals))), f(*vals))

    ts_f = torch.jit.script(nf)
    self.assertEqual(nf(vals), ts_f(vals))
def test_imul_code_print(self):
    """In-place operators are emitted as augmented assignment in generated code."""
    graph = torch.fx.Graph()
    lhs = graph.placeholder("a")
    rhs = graph.placeholder("b")
    graph.call_function(operator.imul, (lhs, rhs), {})
    graph.output(lhs)

    module = torch.fx.GraphModule({}, graph)
    module.recompile()

    # int has no __imul__, so `a *= b` evaluates 2 * 3.
    self.assertEqual(module(2, 3), 6)
    self.assertIn("a *= b", module.code)
def run_getitem_target():
    """Subprocess entry point: run TestFX.getitem_inner with Tensor.__getitem__
    registered as a wrapped (traceable) method, restoring the patch list after."""
    from torch.fx._symbolic_trace import _wrapped_methods_to_patch
    _wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
    try:
        TestFX().getitem_inner()
    finally:
        # Undo the global patch so other code in this process is unaffected.
        _wrapped_methods_to_patch.pop()
class TestOperatorSignatures(JitTestCase):
    """Checks that FX can retrieve and bind schemas for builtin torch operators."""

    def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
        # Enable it in testing but not by default
        self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
        torch.fx.proxy.TracerBase.check_mutable_operations = True

    def tearDown(self):
        # Restore the global flag so other test classes see the default.
        torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag

    @onlyCPU
    @ops(op_db, allowed_dtypes=(torch.float,))
    def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
        """Every OpInfo sample must bind to at least one schema of its operator."""
        if not isinstance(op.op, types.BuiltinFunctionType):
            raise unittest.SkipTest("This path doesn't work on Python functions")
        sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
        schemas = get_signature_for_torch_op(op.op)
        if not schemas:
            raise RuntimeError('No Schemas Returned')
        for sample_input in sample_inputs_itr:
            # Iterate through overloads until we hit a match. If we exit this
            # loop via `else`, we haven't found a match
            for schema in schemas:
                try:
                    bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
                    bound_args.apply_defaults()
                    op(*bound_args.args, **bound_args.kwargs)
                    break
                except TypeError as e:
                    pass
            else:
                raise RuntimeError(f'Did not match any schemas for op {op.name}!')
class TestFXAPIBackwardCompatibility(JitTestCase):
    """Guards torch.fx's backward-compatibility (BC) surface: stable function
    signatures, stable public class members, and a BC classification on every
    public symbol."""

    def setUp(self):
        self.maxDiff = None

        # Checking for mutable operations while tracing is feature flagged
        # Enable it in testing but not by default
        self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
        torch.fx.proxy.TracerBase.check_mutable_operations = True

    def tearDown(self):
        # Restore the global tracer flag toggled in setUp.
        torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag

    def _fn_to_stable_annotation_str(self, obj):
        """
        Unfortunately we have to serialize function signatures manually since
        serialization for `inspect.Signature` objects is not stable across
        python versions
        """
        fn_name = torch.typename(obj)
        signature = inspect.signature(obj)
        sig_str = f'{fn_name}{signature}'

        arg_strs = []
        for k, v in signature.parameters.items():
            # Render the annotation (if any) into a version-stable string.
            maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\
                if v.annotation is not inspect.Signature.empty else ''

            def default_val_str(val):
                # Render a default value deterministically (no addresses or FS paths).
                if isinstance(val, (tuple, list)):
                    str_pieces = ['(' if isinstance(val, tuple) else '[']
                    str_pieces.append(', '.join(default_val_str(v) for v in val))
                    if isinstance(val, tuple) and len(str_pieces) == 2:
                        # Single-element tuples need a trailing comma.
                        str_pieces.append(',')
                    str_pieces.append(')' if isinstance(val, tuple) else ']')
                    return ''.join(str_pieces)

                # Need to fix up some default value strings.
                # First case: modules. Default module `repr` contains the FS path of the module.
                # Don't leak that
                if isinstance(val, types.ModuleType):
                    return f'<module {val.__name__}>'

                # Second case: callables. Callables (such as lambdas) encode their address in
                # their string repr. Don't do that
                if callable(val):
                    return f'<function {val.__name__}>'

                return str(val)

            if v.default is not inspect.Signature.empty:
                # NOTE(review): this rebinds the name `default_val_str` from the
                # helper function to its string result. Harmless only because
                # the helper is re-defined at the top of each loop iteration.
                default_val_str = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'"
                maybe_default = f' = {default_val_str}'
            else:
                maybe_default = ''

            maybe_stars = ''
            if v.kind == inspect.Parameter.VAR_POSITIONAL:
                maybe_stars = '*'
            elif v.kind == inspect.Parameter.VAR_KEYWORD:
                maybe_stars = '**'
            arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}')

        return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\
            if signature.return_annotation is not inspect.Signature.empty else ''

        return f'{fn_name}({", ".join(arg_strs)}){return_annot}'

    def _annotation_type_to_stable_str(self, t, sig_str):
        """Convert annotation `t` into a string stable across Python versions.
        `sig_str` is only used for error reporting."""
        if t is inspect.Signature.empty:
            return ''

        # Forward ref
        if isinstance(t, str):
            return f"'{t}'"
        if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef):
            return t.__forward_arg__
        if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef):
            return t.__forward_arg__

        # Simple types map directly to a fixed, fully-qualified spelling.
        trivial_mappings = {
            str : 'str',
            int : 'int',
            float: 'float',
            bool: 'bool',
            torch.dtype: 'torch.dtype',
            torch.Tensor: 'torch.Tensor',
            torch.device: 'torch.device',
            torch.memory_format: 'torch.memory_format',
            slice: 'slice',
            torch.nn.Module: 'torch.nn.modules.module.Module',
            torch.fx.Graph : 'torch.fx.graph.Graph',
            torch.fx.Node : 'torch.fx.node.Node',
            torch.fx.Proxy : 'torch.fx.proxy.Proxy',
            torch.fx.node.Target : 'torch.fx.node.Target',
            torch.fx.node.Argument : 'torch.fx.node.Argument',
            torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode',
            torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule',
            torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match',
            Ellipsis : '...',
            typing.Any: 'Any',
            type(None): 'NoneType',
            None: 'None',
            typing.Iterator: 'Iterator',
        }
        mapping = trivial_mappings.get(t, None)
        if mapping:
            return mapping

        # Handle types with contained types
        contained = getattr(t, '__args__', None) or []

        # Callables contain a bare List for arguments
        contained = t if isinstance(t, list) else contained

        # Python 3.8 puts type vars into __args__ for unbound types such as Dict
        if all(isinstance(ct, typing.TypeVar) for ct in contained):
            contained = []

        contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained]
        contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else ''

        origin = getattr(t, '__origin__', None)
        if origin is None:
            # Unbound types don't have `__origin__` in some Python versions, so fix that up here.
            origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin

        if origin in {tuple, typing.Tuple}:
            return f'Tuple{contained_type_str}'
        if origin in {typing.Union}:
            # Annoying hack to detect Optional
            if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)):
                not_none_param = contained[0] if contained[0] is not type(None) else contained[1]
                return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]'
            return f'Union{contained_type_str}'
        if origin in {dict, typing.Dict}:
            return f'Dict{contained_type_str}'
        if origin in {list, typing.List}:
            return f'List{contained_type_str}'
        if origin in {type, typing.Type}:
            return f'Type{contained_type_str}'
        if isinstance(t, typing.Callable):
            if len(contained) > 0 and contained[0] is not Ellipsis:
                return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]'
            else:
                return f'Callable{contained_type_str}'

        raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}.'
                           f'Please add support for this type and confirm with the '
                           f'FX team that your signature change is valid.')

    def test_function_back_compat(self):
        """
        Test backward compatibility for function signatures with
        @compatibility(is_backward_compatible=True). Currently this checks for
        exact signature matches, which may lead to false positives. If this
        becomes too annoying, we can refine this check to actually parse out
        the saved schema strings and check if the change is truly backward-
        incompatible.
        """
        signature_strs = []

        for obj in _BACK_COMPAT_OBJECTS:
            if not isinstance(obj, type):
                signature_strs.append(self._fn_to_stable_annotation_str(obj))

        signature_strs.sort()

        try:
            self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures')
        except AssertionError as e:
            msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \
                  f"as backwards-compatible has experienced a signature change. See the " \
                  f"above exception context for more information. If this change was " \
                  f"unintended, please revert it. If it was intended, check with the FX " \
                  f"team to ensure that the proper deprecation protocols have been followed " \
                  f"and subsequently --accept the change."
            raise AssertionError(msg)

    def test_class_member_back_compat(self):
        """
        Test backward compatibility for members of classes with
        @compatibility(is_backward_compatible=True). Currently this checks for
        exact matches on the publicly visible members of the class.
        """
        class_method_strs = []

        for obj in _BACK_COMPAT_OBJECTS:
            if isinstance(obj, type):
                public_members = [name for name in obj.__dict__ if not name.startswith('_')]
                class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}')

        class_method_strs.sort()

        try:
            self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members')
        except AssertionError as e:
            msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \
                  f"as backwards-compatible has experienced change in its public members. See the " \
                  f"above exception context for more information. If this change was " \
                  f"unintended, please revert it. If it was intended, check with the FX " \
                  f"team to ensure that the proper deprecation protocols have been followed " \
                  f"and subsequently --accept the change."
            raise AssertionError(msg)

    def test_public_api_surface(self):
        """Every public symbol under torch.fx (excluding torch.fx.experimental)
        must carry a BC designation via @compatibility."""
        non_back_compat_objects = {}

        def check_symbols_have_bc_designation(m, prefix):
            if not m.__name__.startswith('torch.fx'):
                return
            if m.__name__.startswith('torch.fx.experimental'):
                return
            for k, v in m.__dict__.items():
                # Skip self-references to avoid infinite recursion.
                if v is m:
                    continue
                if k.startswith('_'):
                    continue
                if isinstance(v, types.ModuleType):
                    check_symbols_have_bc_designation(v, prefix + [k])
                elif isinstance(v, type) or isinstance(v, types.FunctionType):
                    if v not in _MARKED_WITH_COMATIBLITY:
                        # Used as a set; value is irrelevant.
                        non_back_compat_objects.setdefault(v)

        check_symbols_have_bc_designation(torch.fx, ['torch', 'fx'])
        check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes'])

        non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()]
        # Only want objects in torch.fx
        non_back_compat_strs = [
            s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')]
        # Only want objects in public namespaces
        non_back_compat_strs = [
            s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))]
        non_back_compat_strs.sort()

        if len(non_back_compat_strs) != 0:
            raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a "
                                 f"backwards-compatibility classification! Please decorate these "
                                 f"API(s) with `@torch.fx._compatibility.compatibility` to specify "
                                 f"BC guarantees.")
class TestFunctionalTracing(JitTestCase):
    """Auto-generates one test per torch.nn.functional entry point; each test
    either traces the functional successfully or asserts the known failure
    mode recorded in the expectation tables below."""

    def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
        # Enable it in testing but not by default
        self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
        torch.fx.proxy.TracerBase.check_mutable_operations = True

    def tearDown(self):
        # Restore the global tracer flag toggled in setUp.
        torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag

    # Supporting functions that should never get their own generated test.
    IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
                    "has_torch_function_variadic", "handle_torch_function",
                    "boolean_dispatch")
    # Originals saved in setUpClass and restored in tearDownClass.
    TO_PATCH = {"has_torch_function": None,
                "has_torch_function_unary": None,
                "has_torch_function_variadic": None}

    # (expected exception type, message regex) pairs naming known failure modes.
    BUILT_IN_FUNC = (AssertionError, "")
    PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
    PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
    LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
    ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
    CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
    INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
    MUTABLE = (RuntimeError, r"Tried to trace mutable operation")

    # Maps functional name -> expected failure mode when symbolically traced.
    UNTRACEABLE_FUNCTIONALS = {
        "adaptive_avg_pool1d": BUILT_IN_FUNC,
        "avg_pool1d": BUILT_IN_FUNC,
        "avg_pool2d": BUILT_IN_FUNC,
        "avg_pool3d": BUILT_IN_FUNC,
        "bilinear": BUILT_IN_FUNC,
        "celu_": BUILT_IN_FUNC,
        "channel_shuffle": BUILT_IN_FUNC,
        "native_channel_shuffle": BUILT_IN_FUNC,
        "conv1d": BUILT_IN_FUNC,
        "conv2d": BUILT_IN_FUNC,
        "conv3d": BUILT_IN_FUNC,
        "conv_tbc": BUILT_IN_FUNC,
        "conv_transpose1d": BUILT_IN_FUNC,
        "conv_transpose2d": BUILT_IN_FUNC,
        "conv_transpose3d": BUILT_IN_FUNC,
        "cosine_similarity": BUILT_IN_FUNC,
        "elu_": BUILT_IN_FUNC,
        "gelu": BUILT_IN_FUNC,
        "hardshrink": BUILT_IN_FUNC,
        "hardtanh_": BUILT_IN_FUNC,
        "leaky_relu_": BUILT_IN_FUNC,
        "linear": BUILT_IN_FUNC,
        "logsigmoid": BUILT_IN_FUNC,
        "one_hot": BUILT_IN_FUNC,
        "pairwise_distance": BUILT_IN_FUNC,
        "pdist": BUILT_IN_FUNC,
        "pixel_shuffle": BUILT_IN_FUNC,
        "pixel_unshuffle": BUILT_IN_FUNC,
        "prelu": BUILT_IN_FUNC,
        "relu_": BUILT_IN_FUNC,
        "rrelu_": BUILT_IN_FUNC,
        "selu_": BUILT_IN_FUNC,
        "softplus": BUILT_IN_FUNC,
        "softshrink": BUILT_IN_FUNC,
        "threshold_": BUILT_IN_FUNC,

        "adaptive_avg_pool2d": LEN_ERROR,
        "adaptive_avg_pool3d": LEN_ERROR,
        "adaptive_max_pool2d_with_indices": LEN_ERROR,
        "adaptive_max_pool3d_with_indices": LEN_ERROR,
        "instance_norm": CONTROL_FLOW,
        "pad": LEN_ERROR,

        "adaptive_max_pool1d": PROXY_ITERABLE,
        "adaptive_max_pool2d": PROXY_ITERABLE,
        "adaptive_max_pool3d": PROXY_ITERABLE,
        "fractional_max_pool2d": PROXY_ITERABLE,
        "fractional_max_pool3d": PROXY_ITERABLE,
        "max_pool1d": PROXY_ITERABLE,
        "max_pool2d": PROXY_ITERABLE,
        "max_pool3d": PROXY_ITERABLE,

        "group_norm": PROXY_ITERATED,
        "lp_pool2d": PROXY_ITERATED,
        "max_unpool1d": PROXY_ITERATED,
        "max_unpool2d": PROXY_ITERATED,
        "max_unpool3d": PROXY_ITERATED,

        "adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
        "fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
        "fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
        "layer_norm": ARG_TYPE_MISMATCH,
        "lp_pool1d": ARG_TYPE_MISMATCH,

        "affine_grid": CONTROL_FLOW,
        "alpha_dropout": CONTROL_FLOW,
        "batch_norm": CONTROL_FLOW,
        "binary_cross_entropy": CONTROL_FLOW,
        "binary_cross_entropy_with_logits": CONTROL_FLOW,
        "celu": CONTROL_FLOW,
        "cosine_embedding_loss": CONTROL_FLOW,
        "cross_entropy": CONTROL_FLOW,
        "ctc_loss": CONTROL_FLOW,
        "dropout": CONTROL_FLOW,
        "dropout2d": CONTROL_FLOW,
        "dropout3d": CONTROL_FLOW,
        "elu": CONTROL_FLOW,
        "embedding": CONTROL_FLOW,
        "embedding_bag": CONTROL_FLOW,
        "feature_alpha_dropout": CONTROL_FLOW,
        "fold": CONTROL_FLOW,
        "gaussian_nll_loss": CONTROL_FLOW,
        "glu": CONTROL_FLOW,
        "grid_sample": CONTROL_FLOW,
        "gumbel_softmax": CONTROL_FLOW,
        "hardsigmoid": CONTROL_FLOW,
        "hardswish": CONTROL_FLOW,
        "hardtanh": CONTROL_FLOW,
        "hinge_embedding_loss": CONTROL_FLOW,
        "huber_loss": CONTROL_FLOW,
        "interpolate": CONTROL_FLOW,
        "kl_div": CONTROL_FLOW,
        "l1_loss": CONTROL_FLOW,
        "leaky_relu": CONTROL_FLOW,
        "local_response_norm": CONTROL_FLOW,
        "margin_ranking_loss": CONTROL_FLOW,
        "max_pool1d_with_indices": CONTROL_FLOW,
        "max_pool2d_with_indices": CONTROL_FLOW,
        "max_pool3d_with_indices": CONTROL_FLOW,
        "mse_loss": CONTROL_FLOW,
        "multi_head_attention_forward": CONTROL_FLOW,
        "multi_margin_loss": CONTROL_FLOW,
        "multilabel_margin_loss": CONTROL_FLOW,
        "multilabel_soft_margin_loss": CONTROL_FLOW,
        "nll_loss": CONTROL_FLOW,
        "poisson_nll_loss": CONTROL_FLOW,
        "relu": CONTROL_FLOW,
        "relu6": CONTROL_FLOW,
        "rrelu": CONTROL_FLOW,
        "selu": CONTROL_FLOW,
        "silu": CONTROL_FLOW,
        "mish": CONTROL_FLOW,
        "smooth_l1_loss": CONTROL_FLOW,
        "soft_margin_loss": CONTROL_FLOW,
        "threshold": CONTROL_FLOW,
        "triplet_margin_loss": CONTROL_FLOW,
        "triplet_margin_with_distance_loss": CONTROL_FLOW,
        "unfold": CONTROL_FLOW,
        "upsample": CONTROL_FLOW,

        "upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
        "upsample_nearest": INTERPOLATE_ARGS_CONFLICT,

        "normalize" : MUTABLE,
    }

    # List of nn.functionals with Tensor inputs but not with type annotation
    FUNCTIONALS_WITHOUT_ANNOTATION = (
        "adaptive_max_pool1d",
        "adaptive_max_pool2d",
        "adaptive_max_pool3d",
        "fractional_max_pool2d",
        "fractional_max_pool3d",
        "max_pool1d",
        "max_pool2d",
        "max_pool3d",
        "gaussian_nll_loss",
        "upsample",
        "upsample_bilinear",
        "upsample_nearest",
    )

    # Inconsistent behavior between Python 3.8 and other Python versions:
    # - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
    # - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
    #                 internal exception above
    # Use the following map to override the expected exception for Python 3.8
    UNTRACEABLE_FUNCTIONALS_PY38 = {
        "adaptive_max_pool1d": PROXY_ITERATED,
        "adaptive_max_pool2d": PROXY_ITERATED,
        "adaptive_max_pool3d": PROXY_ITERATED,
        "fractional_max_pool2d": PROXY_ITERATED,
        "fractional_max_pool3d": PROXY_ITERATED,
        "max_pool1d": PROXY_ITERATED,
        "max_pool2d": PROXY_ITERATED,
        "max_pool3d": PROXY_ITERATED,

        "group_norm": LEN_ERROR
    }

    @classmethod
    def _get_functional(cls):
        """Collect (name, fn) pairs from torch.nn.functional worth testing:
        public, lowercase, callable, and (when annotated) taking a Tensor."""
        functional_list = []
        for f in dir(torch.nn.functional):
            if not f.islower():
                continue
            # Ignore internal functions
            if f.startswith('_'):
                continue
            # Ignore supporting functions
            if f in cls.IGNORE_FUNCS:
                continue
            fn = getattr(torch.nn.functional, f)
            # Ignore non-callable object like modules
            if not isinstance(fn, Callable):
                continue
            if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
                try:
                    sig = inspect.signature(fn)
                    has_tensor_arg = False
                    for arg, param in sig.parameters.items():
                        if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
                            has_tensor_arg = True
                    if not has_tensor_arg:
                        continue
                # No signature or Object is not supported
                except ValueError:
                    pass
            functional_list.append((f, fn))
        return functional_list

    @classmethod
    def generate_test_func(cls, func_name, fn):
        """Build one test method for `fn`, asserting the expected failure mode
        (version-dependent) or a clean trace."""
        def functional_test(self):
            if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
                    sys.version_info >= (3, 8) and sys.version_info < (3, 10):
                exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
                with self.assertRaisesRegex(exc, err):
                    symbolic_trace(fn)
            elif func_name in self.UNTRACEABLE_FUNCTIONALS:
                exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
                with self.assertRaisesRegex(exc, err):
                    symbolic_trace(fn)
            else:
                symbolic_trace(fn)
        return functional_test

    @classmethod
    def generate_tests(cls):
        # Attach one generated test method per collected functional.
        functional_list = cls._get_functional()
        for func_name, fn in functional_list:
            test_name = "test_nn_functional_" + func_name
            functional_test = cls.generate_test_func(func_name, fn)
            setattr(cls, test_name, functional_test)

    @classmethod
    def setUpClass(cls):
        # Patch the has_torch_function* helpers to always return False so the
        # functionals take their plain-Tensor code paths; originals are stashed
        # in TO_PATCH and restored in tearDownClass.
        def no(*args, **kwargs):
            return False

        for name in cls.TO_PATCH.keys():
            cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
            setattr(torch.nn.functional, name, no)

    @classmethod
    def tearDownClass(cls):
        for name in cls.TO_PATCH.keys():
            setattr(torch.nn.functional, name, cls.TO_PATCH[name])
# Materialize the per-functional test methods onto TestFunctionalTracing.
TestFunctionalTracing.generate_tests()

# Create device-specific variants (e.g. CPU) of the operator-signature tests.
instantiate_device_type_tests(TestOperatorSignatures, globals())
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
    """Generates symbolic-tracing + TorchScript smoke tests for every
    torchvision model constructor (classification, segmentation, detection,
    video), checking traced/scripted outputs against eager outputs."""

    def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
        # Enable it in testing but not by default
        self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
        torch.fx.proxy.TracerBase.check_mutable_operations = True

    def tearDown(self):
        # Restore the global tracer flag toggled in setUp.
        torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag

    # (expected exception type, message regex) pairs for known failures.
    PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
    INCONSISTENT_TYPE = (
        RuntimeError,
        r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
    )

    UNTRACEABLE_MODELS = {
        "fasterrcnn_resnet50_fpn": PROXY_ITERATED,
        "fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
        "fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
        "maskrcnn_resnet50_fpn": PROXY_ITERATED,
        "keypointrcnn_resnet50_fpn": PROXY_ITERATED,
        "retinanet_resnet50_fpn": PROXY_ITERATED,
    }
    UNSCRIPTABLE_MODELS = {
        "googlenet": INCONSISTENT_TYPE,
        "inception_v3": INCONSISTENT_TYPE,
    }

    # Per-model adapters that normalize model output to a comparable value.
    output_transform = {
        "fcn_resnet50": lambda x: x["out"],
        "fcn_resnet101": lambda x: x["out"],
        "deeplabv3_resnet50": lambda x: x["out"],
        "deeplabv3_resnet101": lambda x: x["out"],
        "deeplabv3_mobilenet_v3_large": lambda x: x["out"],
        "lraspp_mobilenet_v3_large": lambda x: x["out"],
        "fasterrcnn_resnet50_fpn": lambda x: x[1],
        "fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
        "fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
        "maskrcnn_resnet50_fpn": lambda x: x[1],
        "keypointrcnn_resnet50_fpn": lambda x: x[1],
        "retinanet_resnet50_fpn": lambda x: x[1],
    }

    @classmethod
    def generate_test_fn(cls, name, model_fn, x, kwargs):
        """Build one test: trace `model_fn(**kwargs)` on input `x`, compare
        eager vs traced vs scripted outputs (or assert the known failure)."""
        def run_test(self):
            model = model_fn(**kwargs)
            model = model.eval()
            if name in self.UNTRACEABLE_MODELS:
                # NOTE(review): `err` receives the exception type and `exc` the
                # message regex — the names look swapped, but the call below is
                # still assertRaisesRegex(<type>, <regex>), i.e. correct.
                err, exc = self.UNTRACEABLE_MODELS[name]
                with self.assertRaisesRegex(err, exc):
                    graph = symbolic_trace(model)
            else:
                out_transform = self.output_transform.get(name, lambda x: x)
                graph : torch.fx.GraphModule = symbolic_trace(model)
                a = out_transform(model(x))
                b = out_transform(graph(x))
                self.assertEqual(a, b)

                if name in self.UNSCRIPTABLE_MODELS:
                    # See NOTE(review) above about the err/exc naming.
                    err, exc = self.UNSCRIPTABLE_MODELS[name]
                    with self.assertRaisesRegex(err, exc):
                        script = torch.jit.script(graph)
                else:
                    script = torch.jit.script(graph)
                    c = out_transform(script(x))
                    self.assertEqual(a, c)
        return run_test

    @classmethod
    def generate_classification_tests(cls):
        for k, v in torchvision_models.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_' + k
                # inception_v3 requires a larger minimum input size.
                x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
                kwargs = dict(num_classes=50)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)

    @classmethod
    def generate_segmentation_tests(cls):
        for k, v in torchvision_models.segmentation.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_segmentation_' + k
                x = torch.rand(1, 3, 32, 32)
                kwargs = dict(num_classes=10, pretrained_backbone=False)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)

    @classmethod
    def generate_detection_tests(cls):
        for k, v in torchvision_models.detection.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_detection_' + k
                # Detection models take a list of image tensors.
                x = [torch.rand(3, 300, 300)]
                kwargs = dict(num_classes=10, pretrained_backbone=False)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)

    @classmethod
    def generate_video_tests(cls):
        for k, v in torchvision_models.video.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_video_' + k
                x = torch.rand(1, 3, 4, 112, 112)
                kwargs = dict(num_classes=50)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)

    @classmethod
    def generate_tests(cls):
        cls.generate_classification_tests()
        cls.generate_detection_tests()
        cls.generate_segmentation_tests()
        cls.generate_video_tests()
# Torchvision model tests are only generated when torchvision is installed.
if HAS_TORCHVISION:
    TestVisionTracing.generate_tests()

if __name__ == '__main__':
    run_tests()
|
asyncio_client_generator.py | from ib_tws_server.codegen.generator_utils import GeneratorUtils
from ib_tws_server.api_definition import *
from ib_tws_server.codegen.generator_utils import *
import inspect
def forward_method_parameters_dict_style(params: List[inspect.Parameter]) -> str:
    """Render parameters as comma-joined keyword-forwarding pairs: 'a = a,b = b'."""
    return ",".join(f"{p.name} = {p.name}" for p in params)
def request_state_member_name(d: ApiDefinition):
    """Name of the AsyncioClient member mapping request ids to RequestState.

    `d` is unused; kept for signature parity with the other name helpers.
    """
    # Plain literal — the original used an f-string with no placeholders (F541).
    return "_req_state"
def subscription_member_name(d: ApiDefinition):
    """Name of the AsyncioClient member mapping request ids to subscriptions.

    `d` is unused; kept for signature parity with the other name helpers.
    """
    # Plain literal — the original used an f-string with no placeholders (F541).
    return "_subscriptions"
def response_instance(d: ApiDefinition, m: Callable):
    """Expression that builds the response value for callback method `m`.

    Wrapper callbacks render as a constructor call forwarding every data-class
    member; bare callbacks simply reference their single member's name.
    """
    callback_type, is_wrapper = GeneratorUtils.callback_type(d, m)
    if not is_wrapper:
        return GeneratorUtils.data_class_members(d, [m], False)[0].name
    forwarded = forward_method_parameters_dict_style(GeneratorUtils.data_class_members(d, [m], False))
    return f"{callback_type}({forwarded})"
def streaming_instance(d: ApiDefinition, m: Callable):
    # Expression that builds the value pushed to a streaming subscription for
    # callback method `m` (wrapper constructor call, or the bare member name).
    callback_type,is_wrapper = GeneratorUtils.callback_type(d, m)
    if is_wrapper:
        return f"{callback_type}({forward_method_parameters_dict_style(GeneratorUtils.data_class_members(d, [m], True))})"
    else:
        # NOTE(review): this branch passes streaming=False while the wrapper
        # branch above passes True (response_instance uses False in both) —
        # confirm whether the asymmetry is intentional or a copy-paste slip.
        return GeneratorUtils.data_class_members(d, [m], False)[0].name
def request_id(d: ApiDefinition, m: Callable):
    """Expression used to key request state: the req-id parameter name when the
    API uses request ids, otherwise the quoted request-method name."""
    if d.uses_req_id:
        return GeneratorUtils.req_id_param_name(m)
    return f"'{d.request_method.__name__}'"
def current_request_state(d: ApiDefinition, m: Callable):
    """Expression indexing the client's request-state dict for this request."""
    member = request_state_member_name(d)
    key = request_id(d, m)
    return f"self.{member}[{key}]"
def bind_method(d: ApiDefinition, m: Callable, param_values: List[str]) -> str:
    """Render a `functools.partial(...)` call binding `m` on `self._client`.

    Note: overwrites param_values[0] (the `self` slot) in place, as the
    callers rely on when reusing the list.
    """
    param_values[0] = f"self._client.{m.__name__}"
    joined = ','.join(param_values)
    return f"functools.partial({joined})"
class AsyncioClientGenerator:
    """Generates the AsyncioClient module: an asyncio front-end over EClient
    that turns TWS request/callback pairs into awaitable calls and streaming
    subscription generators."""
    @staticmethod
    def generate(filename):
        """Write the generated client module to `filename`."""
        def init_callback(d: ApiDefinition, m: Callable, cb: str):
            # Store the completion callback in the request state — but only for
            # requests that actually produce a callback or done notification.
            if d.callback_methods is not None or d.done_method is not None:
                return f"{current_request_state(d,m)}.{cb} = {cb}"
            return ""
        def init_request_id(d: ApiDefinition, u: Callable):
            # Allocate a fresh request id when the underlying API call needs one.
            if d.uses_req_id:
                return f"{GeneratorUtils.req_id_param_name(d.request_method)} = self.next_request_id()"
            else:
                return ""
        def init_subscription(d: ApiDefinition):
            # Streaming requests must be cancellable; the generated cancel hook
            # is handed to the SubscriptionGenerator.
            if d.cancel_method is None:
                raise RuntimeError(f"Request does not support cancellation {d.request_method.__name__}")
            current_subscription = f"self.{subscription_member_name(d)}[{request_id(d, d.request_method)}]"
            return f"{current_subscription}= SubscriptionGenerator(self.__{d.cancel_method.__name__}, {GeneratorUtils.req_id_param_name(d.request_method)})"
        def async_request_method(d: ApiDefinition, is_subscription: bool):
            # Emit one async method. Three shapes: subscription (returns a
            # SubscriptionGenerator), request/response (awaits a future set by
            # the wrapper callback), and fire-and-forget (returns None).
            method_name = GeneratorUtils.request_method_name(d, is_subscription)
            original_sig = GeneratorUtils.signature(d.request_method)
            signature = GeneratorUtils.request_signature(d, is_subscription)
            # Forward every original parameter, replacing the subscription flag
            # with the literal matching the requested mode.
            param_values = [ p.name if p.name != d.subscription_flag_name else f"{d.subscription_flag_value if is_subscription else not d.subscription_flag_value}" for p in original_sig.parameters.values() ]
            if is_subscription:
                return f"""
    async def {method_name}{signature}:
        {GeneratorUtils.doc_string(d.request_method)}
        {init_request_id(d, d.request_method)}
        ret: SubscriptionGenerator = None
        with self._lock:
            ret = {init_subscription(d)}
        self._writer.queue.put({bind_method(d, d.request_method, param_values)})
        return ret"""
            if d.callback_methods is not None or d.done_method is not None:
                return f"""
    async def {method_name}{signature}:
        {GeneratorUtils.doc_string(d.request_method)}
        loop = asyncio.get_running_loop()
        future = loop.create_future()
        def cb(res: {GeneratorUtils.request_return_type(d, is_subscription)}):
            loop.call_soon_threadsafe(future.set_result, res)
        {init_request_id(d, d.request_method)}
        with self._lock:
            {init_callback(d, d.request_method, 'cb')}
        self._writer.queue.put({bind_method(d, d.request_method, param_values)})
        res = (await future)
        if isinstance(res, IbError):
            raise res
        return res"""
            else:
                return f"""
    async def {method_name}{signature}:
        {GeneratorUtils.doc_string(d.request_method)}
        {init_request_id(d, d.request_method)}
        self._writer.queue.put({bind_method(d, d.request_method, param_values)})
        return None"""
        def cancel_method(d: ApiDefinition):
            # Emit the (name-mangled, private) cancel hook used by subscriptions.
            return f"""
    def __{GeneratorUtils.method_declaration(d.cancel_method)}:
        {GeneratorUtils.doc_string(d.cancel_method)}
        self.cancel_request({request_id(d,d.cancel_method)})
        self._writer.queue.put({bind_method(d, d.cancel_method, list(GeneratorUtils.signature(d.cancel_method).parameters))})"""
        with open(filename, "w") as f:
            # Static preamble: imports plus the hand-written part of the class.
            f.write(f"""
import asyncio
import functools
from collections import defaultdict
from ibapi.client import EClient
from ib_tws_server.ib_error import *
from ib_tws_server.asyncio.ib_writer import IBWriter
from ib_tws_server.asyncio.request_state import *
from ib_tws_server.asyncio.subscription_generator import SubscriptionGenerator
from ib_tws_server.gen.client_responses import *
from ib_tws_server.gen.asyncio_wrapper import *
from ib_tws_server.ib_imports import *
from threading import Lock, Thread
from typing import Callable, Dict, List, Tuple

class AsyncioClient():
    _lock: Lock
    _req_state: Dict[str, RequestState]
    _subscriptions: Dict[int, SubscriptionGenerator]
    _wrapper: AsyncioWrapper
    _client: EClient

    def __init__(self):
        self._lock = Lock()
        self._current_request_id = 0
        self._req_state = defaultdict(RequestState)
        self._subscriptions = defaultdict(SubscriptionGenerator)
        self._wrapper = AsyncioWrapper(self._lock, self._req_state, self._subscriptions)
        self._client = EClient(self._wrapper)
        self._writer = IBWriter(self._client)
        self._wrapper._writer = self._writer

    def run(self):
        self._writer.start()
        self._client.run()

    def next_request_id(self):
        with self._lock:
            self._current_request_id += 1
            return self._current_request_id

    def disconnect(self, clean=False):
        self._wrapper._expecting_disconnect = clean
        return self._client.disconnect()

    def cancel_request(self, id: RequestId):
        response_cb = None
        with self._lock:
            if id in self._req_state:
                response_cb = self._req_state[id].cb
                del self._req_state[id]
            if id in self._subscriptions:
                del self._subscriptions[id]
        if response_cb is not None:
            response_cb(None)

    def start(self, host: str, port: int, client_id: int):
        self._client.connect(host, port, client_id)
        thread = Thread(target = self.run)
        thread.start()
        setattr(thread, "_thread", thread)

    def active_request_count(self):
        with self._lock:
            return len(self._req_state)

    def active_subscription_count(self):
        with self._lock:
            return len(self._subscriptions)
"""
            )
            # Generated part: one method (or pair of methods, when the API can
            # be called both one-shot and streaming) per request definition.
            for d in REQUEST_DEFINITIONS:
                if d.request_method is not None:
                    if d.subscription_flag_name is not None:
                        f.write(async_request_method(d, False))
                        f.write(async_request_method(d, True))
                    else:
                        f.write(async_request_method(d, d.is_subscription))
                    if d.cancel_method is not None and (d.is_subscription or d.subscription_flag_name is not None):
                        f.write(cancel_method(d))
class AsyncioWrapperGenerator:
    """Generates the AsyncioWrapper module: an EWrapper subclass that routes
    TWS callbacks into the request-state / subscription tables shared with
    AsyncioClient."""
    @staticmethod
    def generate(filename):
        """Write the generated wrapper module to `filename`."""
        def update_response(d: ApiDefinition, m:Callable):
            # List-shaped responses accumulate each callback payload; scalar
            # responses just overwrite the stored value.
            if GeneratorUtils.response_is_list(d):
                return f"""
        if {request_id(d, m)} in self._req_state:
            req_state = {current_request_state(d, m)}
            if req_state.response is None:
                req_state.response = []
            req_state.response.append({response_instance(d, m)})"""
            else:
                return f"""
        if {request_id(d, m)} in self._req_state:
            req_state = {current_request_state(d, m)}
            if req_state is not None:
                req_state.response = {response_instance(d, m)}"""
        def call_response_cb(d: ApiDefinition, m: Callable):
            if d.callback_methods is not None:
                return f"self.call_response_cb({request_id(d,m)})"
            else:
                return ""
        def call_response_cb_if_done(d: ApiDefinition, m: Callable):
            # Fire the completion callback when the request is finished: either
            # the callback carries an explicit `done` flag, or the response is
            # scalar (one callback == done). List responses wait for done_method.
            if d.has_done_flag:
                return f"""
        if (done):
            {call_response_cb(d, m)}"""
            elif not GeneratorUtils.response_is_list(d):
                return f"""
        {call_response_cb(d,m)}"""
            else:
                return ""
        def callback_method(d: ApiDefinition, m: Callable):
            # Emit one EWrapper override. Three shapes: dual-mode (route to the
            # subscription when one is active, else treat as a response),
            # response-only, and streaming-only.
            if d.subscription_flag_name is not None:
                return f"""
    def {GeneratorUtils.method_declaration(m)}:
        {GeneratorUtils.doc_string(m)}
        is_subscription: bool = False
        with self._lock:
            is_subscription = {request_id(d, m)} in self._subscriptions
            {update_response(d, m)}
        if is_subscription:
            self.call_streaming_cb({request_id(d,m)}, {streaming_instance(d,m)})
            return
        {call_response_cb_if_done(d, m)}"""
            elif not d.is_subscription:
                return f"""
    def {GeneratorUtils.method_declaration(m)}:
        {GeneratorUtils.doc_string(m)}
        with self._lock:
            {update_response(d, m)}
        {call_response_cb_if_done(d, m)}"""
            else:
                return f"""
    def {GeneratorUtils.method_declaration(m)}:
        {GeneratorUtils.doc_string(m)}
        self.call_streaming_cb({request_id(d,m)}, {streaming_instance(d,m)})"""
        def done_method(d: ApiDefinition):
            # Emit the override that marks a list-shaped response as complete.
            return f"""
    def {GeneratorUtils.method_declaration(d.done_method)}:
        {GeneratorUtils.doc_string(d.done_method)}
        {call_response_cb(d,d.done_method)}"""
        with open(filename, "w") as f:
            # Static preamble: imports plus the hand-written wrapper plumbing.
            f.write(f"""
from ibapi.wrapper import EWrapper
from ib_tws_server.ib_error import *
from ib_tws_server.asyncio.ib_writer import IBWriter
from ib_tws_server.asyncio.request_state import *
from ib_tws_server.asyncio.subscription_generator import SubscriptionGenerator
from ib_tws_server.gen.client_responses import *
from ib_tws_server.ib_imports import *
from threading import Lock
from typing import Dict, List

class AsyncioWrapper(EWrapper):
    _lock: Lock
    _req_state: Dict[str, RequestState]
    _subscriptions: Dict[int, SubscriptionGenerator]
    _expecting_disconnect: bool
    _writer: IBWriter

    def __init__(self, lock: Lock, req_state: Dict[str, RequestState], subscriptions: Dict[int, SubscriptionGenerator]):
        self._lock = lock
        self._req_state = req_state
        self._subscriptions = subscriptions
        EWrapper.__init__(self)
        self._expecting_disconnect = False

    def connectionClosed(self):
        if self._expecting_disconnect:
            # Wake up writer
            self._writer.queue.put(lambda *a, **k: None)
        else:
            raise RuntimeError("Unexpected disconnect")

    def call_response_cb(self, id: RequestId, res=None):
        cb = None
        with self._lock:
            if not id in self._req_state:
                return
            s = self._req_state[id]
            cb = s.cb
            if res is None:
                res = s.response
            del self._req_state[id]
        if cb is not None:
            cb(res)

    def error(self, reqId: int, errorCode: int, errorString: str):
        cb = None
        if reqId is not None:
            with self._lock:
                if reqId in self._req_state:
                    s = self._req_state[reqId]
                    cb = s.cb
                    del self._req_state[reqId]
            if cb is not None:
                cb(IbError(errorString, errorCode))
        else:
            super().error(reqId, errorCode, errorString)

    def call_streaming_cb(self, id: RequestId, res: any):
        cb = None
        loop = None
        with self._lock:
            if id in self._subscriptions:
                s = self._subscriptions[id]
                cb = s.add_to_queue
                loop = s._loop
        if loop is not None:
            loop.call_soon_threadsafe(cb, res)
""")
            # Generated part: one override per callback method, plus the done
            # override for definitions that have one.
            for d in REQUEST_DEFINITIONS:
                if d.request_method is not None:
                    if d.callback_methods is not None:
                        for m in d.callback_methods:
                            f.write(callback_method(d, m))
                    if d.done_method is not None:
                        f.write(done_method(d))
|
tcp.py | #!/usr/bin/env python
import socket
import threading
import sbs
# Address of the SBS-1/BaseStation message feed (e.g. a dump1090 instance).
TCP_IP = '10.0.0.198'
TCP_PORT = 30003
BUFFER_SIZE = 1024
# NOTE(review): the connection is opened at import time; a failure here
# aborts loading of the whole module.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((TCP_IP, TCP_PORT))
def listen(arg1, stop_event):
    """Read SBS-1 (BaseStation) messages from the global socket until stop_event is set.

    arg1 is unused; stop_event is a threading.Event used for shutdown.
    """
    while (not stop_event.is_set()):
        data = s.recv(BUFFER_SIZE)
        # NOTE(review): an empty `data` means the peer closed the connection;
        # this loop would then spin on b'' — consider breaking on empty reads.
        mesg = str(data, encoding='utf-8').split('\r\n')
        for l in mesg:
            if l != '':
                # print("received data:", l)
                cols = l.split(',')
                if cols[0] == 'MSG':
                    pass  # TODO: message handling not implemented yet
    s.close()
# Receiver position (lat, lon) — presumably for range/distance calculations; confirm.
station = [60.32509, 5.02074]
dust = {}
if __name__ == "__main__":
    thread_stop = threading.Event()
    t = threading.Thread(target=listen, args=(2, thread_stop))
    t.daemon=True
    t.start()
    try:
        while True:
            # TODO:: Please write your application code
            # NOTE(review): eval() on raw stdin executes arbitrary code —
            # replace with explicit command parsing before production use.
            eval(input())
    except KeyboardInterrupt:
        pass
    finally:
        # Signal the listener thread to stop before exiting.
        thread_stop.set()
simulator.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: simulator.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import tensorflow as tf
import multiprocessing as mp
import time
import threading
import weakref
from abc import abstractmethod, ABCMeta
from collections import defaultdict, namedtuple
import numpy as np
import six
from six.moves import queue
from ..models._common import disable_layer_logging
from ..callbacks import Callback
from ..tfutils.varmanip import SessionUpdate
from ..predict import OfflinePredictor
from ..utils import logger
#from ..utils.timer import *
from ..utils.serialize import loads, dumps
from ..utils.concurrency import LoopThread, ensure_proc_terminate
__all__ = ['SimulatorProcess', 'SimulatorMaster',
'SimulatorProcessStateExchange', 'SimulatorProcessSharedWeight',
'TransitionExperience', 'WeightSync']
try:
import zmq
except ImportError:
logger.warn_dependency('Simulator', 'zmq')
__all__ = []
class TransitionExperience(object):
    """A single transition (state, action, reward) plus arbitrary extras.

    Any keyword argument is attached verbatim as an attribute, so callers
    can store e.g. ``isOver=True`` or value estimates alongside the core
    fields.
    """
    def __init__(self, state, action, reward, **kwargs):
        """kwargs: whatever other attribute you want to save"""
        self.state = state
        self.action = action
        self.reward = reward
        # dict.items() works identically on py2 and py3 here; the original
        # used six.iteritems, pulling in a dependency for no benefit.
        for k, v in kwargs.items():
            setattr(self, k, v)
class SimulatorProcessBase(mp.Process):
    """Base process for simulator workers; derives a stable name/identity from idx."""
    __metaclass__ = ABCMeta  # py2-style declaration; inert under py3

    def __init__(self, idx):
        super(SimulatorProcessBase, self).__init__()
        self.idx = int(idx)
        worker_name = u'simulator-{}'.format(self.idx)
        self.name = worker_name
        self.identity = worker_name.encode('utf-8')

    @abstractmethod
    def _build_player(self):
        """Construct and return the player/environment for this process."""
class SimulatorProcessStateExchange(SimulatorProcessBase):
    """
    A process that simulates a player and talks to the master:
    it pushes states out and receives the next action back.
    """
    __metaclass__ = ABCMeta

    def __init__(self, idx, pipe_c2s, pipe_s2c):
        """
        :param idx: idx of this process
        :param pipe_c2s: zmq address of the client-to-server pipe
        :param pipe_s2c: zmq address of the server-to-client pipe
        """
        super(SimulatorProcessStateExchange, self).__init__(idx)
        self.c2s = pipe_c2s
        self.s2c = pipe_s2c

    def run(self):
        player = self._build_player()
        ctx = zmq.Context()
        push_sock = ctx.socket(zmq.PUSH)
        push_sock.setsockopt(zmq.IDENTITY, self.identity)
        push_sock.set_hwm(2)
        push_sock.connect(self.c2s)
        dealer_sock = ctx.socket(zmq.DEALER)
        dealer_sock.setsockopt(zmq.IDENTITY, self.identity)
        #dealer_sock.set_hwm(5)
        dealer_sock.connect(self.s2c)

        reward, isOver = 0, False
        state = player.current_state()
        while True:
            # Ship (identity, state, reward, isOver), then block for an action.
            push_sock.send(dumps((self.identity, state, reward, isOver)), copy=False)
            action = loads(dealer_sock.recv(copy=False).bytes)
            reward, isOver = player.action(action)
            state = player.current_state()
# compatibility
# Historical alias: older code imports SimulatorProcess directly.
SimulatorProcess = SimulatorProcessStateExchange
class SimulatorMaster(threading.Thread):
    """ A base thread to communicate with all StateExchangeSimulatorProcess.
    It should produce action for each simulator, as well as
    defining callbacks when a transition or an episode is finished.
    """
    __metaclass__ = ABCMeta

    class ClientState(object):
        # Per-simulator bookkeeping, keyed by zmq identity in self.clients.
        def __init__(self):
            self.memory = []    # list of Experience

    def __init__(self, pipe_c2s, pipe_s2c):
        """
        :param pipe_c2s: zmq address bound with PULL to receive client states
        :param pipe_s2c: zmq address bound with ROUTER to send actions back
        """
        super(SimulatorMaster, self).__init__()
        self.daemon = True
        self.name = 'SimulatorMaster'
        self.context = zmq.Context()
        self.c2s_socket = self.context.socket(zmq.PULL)
        self.c2s_socket.bind(pipe_c2s)
        self.c2s_socket.set_hwm(10)
        self.s2c_socket = self.context.socket(zmq.ROUTER)
        self.s2c_socket.bind(pipe_s2c)
        self.s2c_socket.set_hwm(10)
        # queueing messages to client
        self.send_queue = queue.Queue(maxsize=100)
        def f():
            # Drain the send queue onto the ROUTER socket (runs in send_thread).
            msg = self.send_queue.get()
            self.s2c_socket.send_multipart(msg, copy=False)
        self.send_thread = LoopThread(f)
        self.send_thread.daemon = True
        self.send_thread.start()
        # make sure socket get closed at the end
        def clean_context(soks, context):
            for s in soks:
                s.close()
            context.term()
        import atexit
        atexit.register(clean_context, [self.c2s_socket, self.s2c_socket], self.context)

    def run(self):
        # Main receive loop: dispatch each (state, reward, isOver) message
        # to the subclass callbacks.
        self.clients = defaultdict(self.ClientState)
        while True:
            msg = loads(self.c2s_socket.recv(copy=False).bytes)
            ident, state, reward, isOver = msg
            # TODO check history and warn about dead client
            client = self.clients[ident]
            # check if reward&isOver is valid
            # in the first message, only state is valid
            if len(client.memory) > 0:
                client.memory[-1].reward = reward
                if isOver:
                    self._on_episode_over(ident)
                else:
                    self._on_datapoint(ident)
            # feed state and return action
            self._on_state(state, ident)

    @abstractmethod
    def _on_state(self, state, ident):
        """response to state sent by ident. Preferably an async call"""

    @abstractmethod
    def _on_episode_over(self, client):
        """ callback when the client just finished an episode.
        You may want to clear the client's memory in this callback.
        """

    def _on_datapoint(self, client):
        """ callback when the client just finished a transition
        """

    def __del__(self):
        # Best-effort teardown of the zmq context on garbage collection.
        self.context.destroy(linger=0)
class SimulatorProcessDF(SimulatorProcessBase):
    """ A simulator which contains a forward model itself, allowing
    it to produce data points directly """

    def __init__(self, idx, pipe_c2s):
        super(SimulatorProcessDF, self).__init__(idx)
        self.pipe_c2s = pipe_c2s

    def run(self):
        self.player = self._build_player()
        self.ctx = zmq.Context()
        sock = self.ctx.socket(zmq.PUSH)
        sock.setsockopt(zmq.IDENTITY, self.identity)
        sock.set_hwm(5)
        sock.connect(self.pipe_c2s)
        self.c2s_socket = sock
        self._prepare()
        # Stream datapoints to the master indefinitely.
        for datapoint in self.get_data():
            sock.send(dumps(datapoint), copy=False)

    @abstractmethod
    def _prepare(self):
        """Hook: set up the model before producing data."""

    @abstractmethod
    def get_data(self):
        """Hook: yield datapoints to be sent to the master."""
class SimulatorProcessSharedWeight(SimulatorProcessDF):
    """ A simulator process with an extra thread waiting for event,
    and take shared weight from shm.
    Start me under some CUDA_VISIBLE_DEVICES set!
    """
    def __init__(self, idx, pipe_c2s, condvar, shared_dic, pred_config):
        """
        :param condvar: condition variable notified when new weights are published
        :param shared_dic: shared dict; key 'params' holds the latest weights
        :param pred_config: config used to build the local OfflinePredictor
        """
        super(SimulatorProcessSharedWeight, self).__init__(idx, pipe_c2s)
        self.condvar = condvar
        self.shared_dic = shared_dic
        self.pred_config = pred_config

    def _prepare(self):
        disable_layer_logging()
        self.predictor = OfflinePredictor(self.pred_config)
        with self.predictor.graph.as_default():
            vars_to_update = self._params_to_update()
            self.sess_updater = SessionUpdate(
                self.predictor.session, vars_to_update)
        # TODO setup callback for explore?
        self.predictor.graph.finalize()
        self.weight_lock = threading.Lock()
        # start a thread to wait for notification
        def func():
            self.condvar.acquire()
            while True:
                self.condvar.wait()
                self._trigger_evt()
        self.evt_th = threading.Thread(target=func)
        self.evt_th.daemon = True
        self.evt_th.start()

    def _trigger_evt(self):
        # Copy the freshly published weights into the local session.
        with self.weight_lock:
            self.sess_updater.update(self.shared_dic['params'])
            logger.info("Updated.")

    def _params_to_update(self):
        # can be overwritten to update more params
        return tf.trainable_variables()
class WeightSync(Callback):
    """ Sync weight from main process to shared_dic and notify"""
    def __init__(self, condvar, shared_dic):
        # condvar/shared_dic are the same objects handed to the simulator
        # processes; this callback is the producer side.
        self.condvar = condvar
        self.shared_dic = shared_dic

    def _setup_graph(self):
        self.vars = self._params_to_update()

    def _params_to_update(self):
        # can be overwritten to update more params
        return tf.trainable_variables()

    def _before_train(self):
        self._sync()

    def _trigger_epoch(self):
        self._sync()

    def _sync(self):
        # Evaluate current weights, publish them, and wake all waiting workers.
        logger.info("Updating weights ...")
        dic = {v.name: v.eval() for v in self.vars}
        self.shared_dic['params'] = dic
        self.condvar.acquire()
        self.condvar.notify_all()
        self.condvar.release()
if __name__ == '__main__':
    # Ad-hoc smoke test.
    # NOTE(review): `SimulatorActioner` is not defined anywhere in this module
    # (possibly an older name for SimulatorMaster); running this demo as-is
    # would raise NameError.
    import random
    from tensorpack.RL import NaiveRLEnvironment
    class NaiveSimulator(SimulatorProcess):
        def _build_player(self):
            return NaiveRLEnvironment()
    class NaiveActioner(SimulatorActioner):
        def _get_action(self, state):
            time.sleep(1)
            return random.randint(1, 12)
        def _on_episode_over(self, client):
            #print("Over: ", client.memory)
            client.memory = []
            client.state = 0

    name = 'ipc://whatever'
    procs = [NaiveSimulator(k, name) for k in range(10)]
    [k.start() for k in procs]
    th = NaiveActioner(name)
    ensure_proc_terminate(procs)
    th.start()
    import time
    time.sleep(100)
|
feature_shutdown.py | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
"""Test defid shutdown."""
from test_framework.test_framework import DefiTestFramework
from test_framework.util import assert_equal, get_rpc_proxy, wait_until
from threading import Thread
def test_long_call(node):
    """Issue a long-blocking RPC; on a fresh chain the returned height is 0."""
    assert_equal(node.waitfornewblock()['height'], 0)
class ShutdownTest(DefiTestFramework):
    """Verify defid shuts down cleanly while an RPC call is still in flight."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1

    def run_test(self):
        # Dedicated RPC proxy with a long timeout so waitfornewblock can block.
        node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
        # Force connection establishment by executing a dummy command.
        node.getblockcount()
        Thread(target=test_long_call, args=(node,)).start()
        # Wait until the server is executing the above `waitfornewblock`.
        wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
        # Wait 1 second after requesting shutdown but not before the `stop` call
        # finishes. This is to ensure event loop waits for current connections
        # to close.
        self.stop_node(0, wait=1000)
if __name__ == '__main__':
    # Standard test-framework entry point.
    ShutdownTest().main()
|
proxy_manager.py | from bs4 import BeautifulSoup
import time
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import as_completed
import requests
import threading
import queue
import atexit
class ProxyManager:
    """Maintains a small stack of verified HTTPS proxies, refilled in the background.

    A daemon thread running `proxy_stack_manager` listens on `psm_queue`;
    whenever a consumer pops a proxy it posts a refill hint, and the manager
    tops the stack back up from sslproxies.org.
    """

    def __init__(self):
        self.proxy_stack = []           # verified "ip:port" strings, consumed FIFO
        self.psm_queue = queue.Queue()  # refill hints / "stop" sentinel
        t = threading.Thread(target=self.proxy_stack_manager, daemon=True)
        t.start()

    def _stop_psm(self):
        """Ask the background manager thread to exit."""
        self.psm_queue.put("stop")

    def request_new_proxy(self):
        """Pop a proxy, blocking until one is available; hint a refill."""
        while not self.proxy_stack:
            # BUG FIX: this was a busy-wait `pass` loop that pegged a CPU core;
            # a short sleep keeps the wait semantics without the spin.
            time.sleep(0.05)
        proxy = self.proxy_stack.pop(0)  # get a new proxy off the stack
        self.psm_queue.put(None)  # let the psm know that the stack may need to be refilled
        return proxy

    def proxy_stack_manager(self):
        """Background loop: refill the stack whenever it runs low."""
        while True:
            sig = self.psm_queue.get()
            if sig == "stop":
                return
            if len(self.proxy_stack) < 3:
                #print("[debug:psm] filling proxy stack")
                self.fill_proxy_stack()

    def fill_proxy_stack(self):
        """Scrape a proxy list and keep only proxies that answer within 2s."""
        working_proxies = []
        tasks = []
        with ThreadPoolExecutor(max_workers=64) as executor:
            for ip in self.get_proxy_list():
                tasks.append(executor.submit(self.test_proxy, ip))
            for task in as_completed(tasks):
                ip, result = task.result()
                if result:
                    working_proxies.append(ip)
        self.proxy_stack = self.proxy_stack + working_proxies

    def test_proxy(self, ip):
        """Return (ip, ok); ok is True when the proxy relays a simple GET."""
        try:
            r = requests.get("https://api.ipify.org",
                             proxies={"http": ip, "https": ip}, timeout=2)
            return ip, True
        except KeyboardInterrupt:
            quit()  # NOTE(review): raises SystemExit in this worker thread only
        except Exception as e:
            return ip, False

    def get_proxy_list(self, require_https=True, verbose=False):
        """Scrape sslproxies.org and return "ip:port" strings.

        BUG FIX: the original signature lacked `self`, so calling it as
        `self.get_proxy_list()` bound the instance to `require_https`
        (truthy, so the HTTPS filter still applied only by accident).
        """
        r = requests.get("https://www.sslproxies.org/")
        soup = BeautifulSoup(r.text, "html.parser")
        proxies = []
        for row in soup.find(id="proxylisttable").find_all("tr"):
            row_data = list(map(lambda td: td.get_text(), row.find_all("td")))
            if not row_data:
                continue
            ip, port, code, country, anon, google, https, last_checked = row_data
            if https == "yes" and require_https:
                proxies.append(ip + ":" + port)
        return proxies
|
sc.py | #!/usr/bin/env python3
import tempfile
import plumbum
from plumbum.cmd import losetup, mkfs, mount, umount
import os
import parted
from multiprocessing import Process, Value
import signal
import time
# Nothing here is thread safe, but will be process safe!
run = Value('b', True)
class BlockDevice(object):
    """A sparse backing file exposed through a loop device."""

    def __init__(self, size_mib):
        # Back the device with a sparse file of the requested size, then
        # attach it to the first free loop device.
        self.dir = tempfile.mkdtemp('_storage_churn')
        self.fn = self.dir + '/block_device'
        with open(self.fn, 'ab') as backing:
            backing.truncate(size_mib * 1024 * 1024)
        self.device = str.strip(losetup('-f', '--show', self.fn))

    def destroy(self):
        # Detach the loop device, then remove the backing file and its dir.
        losetup('-d', self.device)
        os.remove(self.fn)
        os.rmdir(self.dir)

    def path(self):
        return self.device

    def __str__(self):
        return self.device
class Partition(object):
    """A single ext4-typed MBR partition spanning (almost) a whole block device."""

    def __init__(self, block_device_obj):
        dev = parted.getDevice(block_device_obj.path())
        disk = parted.freshDisk(dev, 'msdos')
        geom = parted.Geometry(device=dev, start=1,
                               length=dev.getLength() - 1)
        fs = parted.FileSystem(type='ext4', geometry=geom)
        part = parted.Partition(disk=disk, type=parted.PARTITION_NORMAL,
                                fs=fs, geometry=geom)
        disk.addPartition(partition=part,
                          constraint=dev.optimalAlignedConstraint)
        disk.commit()
        self.partition_path = part.path

    def destroy(self):
        # Nothing to undo here; the loop device teardown wipes everything.
        pass

    def path(self):
        return self.partition_path

    def __str__(self):
        return self.partition_path
class Filesystem(object):
    """An ext4 filesystem created on a device/partition and mounted at a temp dir."""

    def __init__(self, device_or_partition_path):
        # BUG FIX: the original stored the device path in `self.path`, which
        # shadowed the `path()` method below and made it uncallable on
        # instances. Keep the device path under a distinct attribute name.
        self.device_path = device_or_partition_path
        # Make the FS and mount it!
        mkfs('-t', 'ext4', self.device_path)
        # Make a temporary directory
        self.dir = tempfile.mkdtemp('_storage_churn_mount')
        mount(self.device_path, self.dir)

    def destroy(self):
        # Unmount; retry briefly since the kernel may still hold the mount busy.
        # Best-effort: after 10 failed attempts the mount is left in place.
        for _ in range(0, 10):
            try:
                umount(self.dir)
                os.rmdir(self.dir)
                break
            except plumbum.commands.processes.ProcessExecutionError:
                time.sleep(0.1)

    def path(self):
        """Return the mount point directory."""
        return self.dir

    def __str__(self):
        return self.dir
class ConstructionSequence(object):
    """LIFO stack of constructed resources, torn down in reverse order."""

    def __init__(self):
        self.stack = []

    def build_up(self, f):
        """Record a resource (anything exposing .destroy()) for later teardown."""
        self.stack.append(f)

    def tear_down(self):
        """Destroy every recorded resource, newest first."""
        while self.stack:
            self.stack.pop().destroy()
def init_worker():
    """Make worker processes ignore SIGINT; the parent coordinates shutdown."""
    signal.signal(signal.SIGINT, signal.SIG_IGN)
def simple_worker():
    # Repeatedly build a loop-device -> partition -> filesystem chain, then
    # tear it all down, until the shared `run` flag is cleared by SIGINT.
    init_worker()
    cs = ConstructionSequence()
    while run.value:
        b = BlockDevice(100)
        cs.build_up(b)
        # print(b)
        part = Partition(b)
        cs.build_up(part)
        # print(p)
        fs = Filesystem(part)
        cs.build_up(fs)
        # print(fs)
        cs.tear_down()
def handler(signum, frame):
    """Parent SIGINT handler: flip the shared flag so workers finish their cycle."""
    # No `global` needed: we mutate the Value's attribute, not rebind the name.
    print('Waiting for a clean shutdown...')
    run.value = False
if __name__ == '__main__':
    # Spawn 10 churn workers; SIGINT flips `run` and we wait for them to drain.
    pl = []
    signal.signal(signal.SIGINT, handler)
    for i in range(0, 10):
        p = Process(target=simple_worker)
        p.start()
        pl.append(p)
    for p in pl:
        p.join()
|
youtube-dl-server.py | import json
import subprocess
from queue import Queue
import time
from bottle import run, Bottle, request, static_file, response, redirect, template, get
from threading import Thread
from bottle_websocket import GeventWebSocketServer
from bottle_websocket import websocket
from socket import error
class WSAddr:
    """Holder for the most recent websocket connection (stored on the class)."""
    def __init__(self):
        # Instance attribute kept for compatibility; handlers actually set
        # WSAddr.wsClassVal at class level.
        self.wsClassVal = ''
# Bottle app instance and default port (may be overridden by Auth.json).
app = Bottle()
port = 8080
@get('/')
def dl_queue_list():
    # Login page. NOTE(review): this function name is reused for the
    # '/youtube-dl' route below; the later def shadows this one at module
    # level, though both routes stay registered via the decorator.
    return template("./static/template/login.tpl", msg="")
@get('/login', method='POST')
def dl_queue_login():
    # Validate the posted id/password against Auth.json and set a signed cookie.
    with open('Auth.json') as data_file:
        data = json.load(data_file)  # Auth info, when docker run making file
    req_id = request.forms.get("id")
    req_pw = request.forms.get("myPw")
    if (req_id == data["MY_ID"] and req_pw == data["MY_PW"]):
        # NOTE(review): hard-coded cookie-signing secret; move to config.
        response.set_cookie("account", req_id, secret="34y823423b23b4234#$@$@#be")
        redirect("/youtube-dl")
    else:
        return template("./static/template/login.tpl", msg="id or password is not correct")
@get('/youtube-dl')
def dl_queue_list():
    # Main download page; requires the signed login cookie set by /login.
    # NOTE(review): reuses the name of the '/' handler above (shadows it).
    with open('Auth.json') as data_file:
        data = json.load(data_file)
    userNm = request.get_cookie("account", secret="34y823423b23b4234#$@$@#be")
    print("CHK : ", userNm)
    if (userNm == data["MY_ID"]):
        return template("./static/template/index.tpl", userNm=userNm)
    else:
        print("no cookie or fail login")
        redirect("/")
@get('/websocket', apply=[websocket])
def echo(ws):
    # Websocket loop: remember the latest socket on the WSAddr class so the
    # download worker can push progress messages back to the browser.
    while True:
        WSAddr.wsClassVal = ws
        msg = WSAddr.wsClassVal.receive()
        if msg is not None:
            a = '[MSG], Started downloading : '
            a = a + msg
            WSAddr.wsClassVal.send(a)
        else:
            # receive() returned None: client disconnected.
            break
@get('/youtube-dl/static/:filename#.*#')
def server_static(filename):
    # Serve bundled static assets (css/js/images) for the web UI.
    return static_file(filename, root='./static')
@get('/youtube-dl/q', method='GET')
def q_size():
    # Report the currently queued (not yet downloaded) items.
    return {"success": True, "size": json.dumps(list(dl_q.queue))}
@get('/youtube-dl/q', method='POST')
def q_put():
    """Enqueue a download requested from the web UI.

    Restarts the worker thread if it has died (e.g. after an exception).
    """
    url = request.json.get("url")
    resolution = request.json.get("resolution")
    if "" != url:
        box = (url, WSAddr.wsClassVal, resolution, "web")
        dl_q.put(box)
        # BUG FIX: Thread.isAlive() was removed in Python 3.9; is_alive()
        # is the supported spelling on all Python 3 versions.
        # NOTE(review): restart() sets dl_thread on the new instance while
        # this check reads the class attribute — confirm intended.
        if not Thr.dl_thread.is_alive():
            thr = Thr()
            thr.restart()
        return {"success": True, "msg": '[MSG], We received your download. Please wait.'}
    else:
        return {"success": False, "msg": "[MSG], download queue somethings wrong."}
@get('/youtube-dl/rest', method='POST')
def q_put_rest():
    # REST endpoint: authenticate with id/pw in the JSON body, then enqueue.
    url = request.json.get("url")
    resolution = request.json.get("resolution")
    with open('Auth.json') as data_file:
        data = json.load(data_file)  # Auth info, when docker run making file
    req_id = request.json.get("id")
    req_pw = request.json.get("pw")
    if (req_id != data["MY_ID"] or req_pw != data["MY_PW"]):
        return {"success": False, "msg": "Invalid password or account."}
    else:
        # Empty string instead of a websocket: API callers get no push updates.
        box = (url, "", resolution, "api")
        dl_q.put(box)
        return {"success": True, "msg": 'download has started', "Remaining downloading count": json.dumps(dl_q.qsize()) }
def dl_worker():
    """Worker loop: pull queued items and dispatch by origin (web vs api)."""
    while not done:
        item = dl_q.get()
        # item[3] tags the origin; web items carry a websocket for progress.
        process = download if item[3] == "web" else download_rest
        process(item)
        dl_q.task_done()
def download(url):
    """Run youtube-dl for a web-originated item and report over the websocket.

    `url` is the queued tuple: (video_url, websocket, resolution, "web").
    """
    # url[1].send("[MSG], [Started] downloading " + url[0] + " resolution below " + url[2])
    result=""
    if (url[2] == "best"):
        result = subprocess.run(["youtube-dl", "-o", "./downfolder/.incomplete/%(title)s.%(ext)s", "-f", "bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]", "--exec", "touch {} && mv {} ./downfolder/", "--merge-output-format", "mp4", url[0]])
    elif (url[2] == "audio-m4a"):
        result = subprocess.run(["youtube-dl", "-o", "./downfolder/.incomplete/%(title)s.%(ext)s", "-f", "bestaudio[ext=m4a]", "--exec", "touch {} && mv {} ./downfolder/", url[0]])
    elif (url[2] == "audio-mp3"):
        result = subprocess.run(["youtube-dl", "-o", "./downfolder/.incomplete/%(title)s.%(ext)s", "-f", "bestaudio[ext=m4a]", "-x", "--audio-format", "mp3", "--exec", "touch {} && mv {} ./downfolder/", url[0]])
    else:
        # e.g. "720p" -> strip the trailing 'p' to get the height cap.
        resolution = url[2][:-1]
        result = subprocess.run(["youtube-dl", "-o", "./downfolder/.incomplete/%(title)s.%(ext)s", "-f", "bestvideo[height<="+resolution+"]+bestaudio[ext=m4a]", "--exec", "touch {} && mv {} ./downfolder/", url[0]])
    try:
        if(result.returncode==0):
            url[1].send("[MSG], [Finished] " + url[0] + " resolution below " + url[2]+", Remain download Count "+ json.dumps(dl_q.qsize()))
            url[1].send("[QUEUE], Remaining download Count : " + json.dumps(dl_q.qsize()))
            url[1].send("[COMPLETE]," + url[2] + "," + url[0])
        else:
            url[1].send("[MSG], [Finished] downloading failed " + url[0])
            url[1].send("[COMPLETE]," + "url access failure" + "," + url[0])
    except error:
        # Websocket may be gone by the time the download finishes.
        print("Be Thread Safe")
def download_rest(url):
    """Run youtube-dl for an API-originated item (no websocket reporting).

    `url` is the queued tuple: (video_url, "", resolution, "api").
    """
    result=""
    if (url[2] == "best"):
        result = subprocess.run(["youtube-dl", "-o", "./downfolder/.incomplete/%(title)s.%(ext)s", "-f", "bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]", "--exec", "touch {} && mv {} ./downfolder/", "--merge-output-format", "mp4", url[0]])
    elif (url[2] == "audio"):
        result = subprocess.run(["youtube-dl", "-o", "./downfolder/.incomplete/%(title)s.%(ext)s", "-f", "bestaudio[ext=m4a]", "--exec", "touch {} && mv {} ./downfolder/", url[0]])
    else:
        # e.g. "720p" -> strip the trailing 'p' to get the height cap.
        resolution = url[2][:-1]
        result = subprocess.run(["youtube-dl", "-o", "./downfolder/.incomplete/%(title)s.%(ext)s", "-f", "bestvideo[height<="+resolution+"]+bestaudio[ext=m4a]", "--exec", "touch {} && mv {} ./downfolder/", url[0]])
class Thr:
    """Wrapper holding the current download-worker thread."""

    def __init__(self):
        # Placeholder until restart() installs a real Thread.
        self.dl_thread = ''

    def restart(self):
        """Spawn and start a fresh worker thread running dl_worker."""
        self.dl_thread = Thread(target=dl_worker)
        self.dl_thread.start()
# Module start-up: create the queue, launch the worker thread, read the port
# from Auth.json, then run the (blocking) websocket-capable server.
dl_q = Queue()
done = False
Thr.dl_thread = Thread(target=dl_worker)
Thr.dl_thread.start()
with open('Auth.json') as env_file:
    data = json.load(env_file)  # Auth info, when docker run making file
if (data['APP_PORT'] !=''):
    port = data['APP_PORT']
run(host='0.0.0.0', port=port, server=GeventWebSocketServer)
# After the server exits: signal the worker and wait for it to drain.
done = True
Thr.dl_thread.join()
|
__init__.py | import sys
import io
import time
import json
import threading
import traceback
import collections
import bisect
try:
import Queue as queue
except ImportError:
import queue
# Patch urllib3 for sending unicode filename
from . import hack
from . import exception
__version_info__ = (1, 0)
__version__ = '.'.join(map(str, __version_info__))
def flavor(msg):
    """
    Return flavor of message or event.
    A message's flavor may be one of these:
    - ``chat``
    - ``callback_query``
    - ``inline_query``
    - ``chosen_inline_result``
    - ``shipping_query``
    - ``pre_checkout_query``
    - ``passport_data``
    - ``poll_data``
    An event's flavor is determined by the single top-level key.
    """
    poll_keys = ('poll_id', 'question', 'option_ids')
    if 'message_id' in msg and 'passport_data' not in msg:
        return 'chat'
    if 'old_chat_member' in msg:
        return 'chat'
    if 'message_id' in msg and 'passport_data' in msg:
        return 'all_passport_data'
    if 'id' in msg and 'chat_instance' in msg:
        return 'callback_query'
    if any(k in msg for k in poll_keys):
        return 'poll_data'
    if 'id' in msg and 'query' in msg:
        return 'inline_query'
    if 'result_id' in msg:
        return 'chosen_inline_result'
    if 'id' in msg and 'shipping_address' in msg:
        return 'shipping_query'
    if 'id' in msg and 'total_amount' in msg:
        return 'pre_checkout_query'
    # Events carry exactly one top-level key, which names the flavor.
    top_keys = list(msg.keys())
    if len(top_keys) == 1:
        return top_keys[0]
    raise exception.BadFlavor(msg)
# Flavor groupings used when routing to chat vs. inline-related handlers.
chat_flavors = ['chat']
inline_flavors = ['inline_query', 'chosen_inline_result']
def _find_first_key(d, keys):
for k in keys:
if k in d:
return k
raise KeyError('No suggested keys %s in %s' % (str(keys), str(d)))
# Keys that identify a chat message's content type, in lookup priority order
# (used with _find_first_key). The original list contained duplicate 'voice'
# and 'game' entries, redundant for a first-match lookup; duplicates removed.
all_content_types = [
    'text', 'audio', 'document', 'game', 'photo', 'sticker', 'video', 'voice', 'video_note',
    'contact', 'location', 'venue', 'new_chat_member', 'left_chat_member', 'new_chat_title',
    'new_chat_photo', 'delete_chat_photo', 'group_chat_created', 'supergroup_chat_created',
    'channel_chat_created', 'migrate_to_chat_id', 'migrate_from_chat_id', 'pinned_message',
    'new_chat_members', 'invoice', 'successful_payment', 'animation', 'passport_data',
    'poll_data', 'chat_member'
]
def glance(msg, flavor='chat', long=False):
    """
    Extract "headline" info about a message.

    ``long`` selects between a short and a long tuple (flavors marked [+...]):

    - ``chat``: (content_type, chat type, chat id [+ date, message_id])
    - ``callback_query``: (id, from id, data)
    - ``inline_query``: (id, from id, query [+ offset])
    - ``chosen_inline_result``: (result_id, from id, query)
    - ``shipping_query``: (id, from id, invoice_payload)
    - ``pre_checkout_query``: (id, from id, invoice_payload [+ currency, total_amount])
    - ``all_passport_data``: (chat id, passport_data)
    - ``poll_data``: anonymous polls -> (poll_id, option_ids, user id);
      otherwise -> (id, other_data, None)

    Raises ``exception.BadFlavor`` for an unknown ``flavor``.
    """
    def _chat():
        content_type = _find_first_key(msg, all_content_types)
        short = (content_type, msg['chat']['type'], msg['chat']['id'])
        if long:
            return short + (msg['date'], msg['message_id'])
        return short

    def _callback_query():
        return msg['id'], msg['from']['id'], msg['data']

    def _inline_query():
        short = (msg['id'], msg['from']['id'], msg['query'])
        if long:
            return short + (msg['offset'],)
        return short

    def _all_passport_data():
        return msg['chat']['id'], msg['passport_data']

    def _poll_data():
        if 'poll_id' in msg:
            return msg['poll_id'], msg['option_ids'], msg['user']['id']
        other_data = msg.copy()
        other_data.pop('id')
        return msg['id'], other_data, None

    def _chosen_inline_result():
        return msg['result_id'], msg['from']['id'], msg['query']

    def _shipping_query():
        return msg['id'], msg['from']['id'], msg['invoice_payload']

    def _pre_checkout_query():
        short = (msg['id'], msg['from']['id'], msg['invoice_payload'])
        if long:
            return short + (msg['currency'], msg['total_amount'])
        return short

    extractors = {
        'chat': _chat,
        'callback_query': _callback_query,
        'inline_query': _inline_query,
        'all_passport_data': _all_passport_data,
        'chosen_inline_result': _chosen_inline_result,
        'shipping_query': _shipping_query,
        'poll_data': _poll_data,
        'pre_checkout_query': _pre_checkout_query,
    }
    try:
        fn = extractors[flavor]
    except KeyError:
        raise exception.BadFlavor(flavor)
    return fn()
def flance(msg, long=False):
    """
    Combine :meth:`telepot.flavor` and :meth:`telepot.glance`: return the
    2-tuple (flavor, headline_info), where *headline_info* is whatever
    :meth:`telepot.glance` extracts for that flavor and ``long`` setting.
    """
    detected = flavor(msg)
    return detected, glance(msg, flavor=detected, long=long)
def peel(event):
    """
    Remove an event's top-level skin (where its flavor is determined) and
    return the core content.
    """
    contents = list(event.values())
    return contents[0]
def fleece(event):
    """
    Combine :meth:`telepot.flavor` and :meth:`telepot.peel`: return the
    2-tuple (flavor, content) of an event.
    """
    f = flavor(event)
    content = peel(event)
    return f, content
def is_event(msg):
    """
    Tell whether the message looks like an event, i.e. whether its flavor
    starts with an underscore.
    """
    f = flavor(msg)
    return f.startswith('_')
def origin_identifier(msg):
    """
    Extract the message identifier of a callback query's origin; the result
    is always a tuple. ``msg`` is expected to be ``callback_query``.
    """
    if 'message' in msg:
        origin = msg['message']
        return origin['chat']['id'], origin['message_id']
    if 'inline_message_id' in msg:
        return (msg['inline_message_id'],)
    raise ValueError()
def message_identifier(msg):
    """
    Extract an identifier usable with message-editing methods such as
    :meth:`telepot.Bot.editMessageText`; the result is always a tuple.
    ``msg`` is expected to be ``chat`` or ``chosen_inline_result``.
    """
    if 'chat' in msg and 'message_id' in msg:
        return msg['chat']['id'], msg['message_id']
    if 'inline_message_id' in msg:
        return (msg['inline_message_id'],)
    raise ValueError()
def _dismantle_message_identifier(f):
if isinstance(f, tuple):
if len(f) == 2:
return {'chat_id': f[0], 'message_id': f[1]}
elif len(f) == 1:
return {'inline_message_id': f[0]}
else:
raise ValueError()
else:
return {'inline_message_id': f}
def _split_input_media_array(media_array):
    """Split an InputMedia array into (legal_media, files_to_attach).

    Each entry whose 'media' field is a file object (rather than a file_id
    or URL string) is rewritten to an 'attach://<name>' reference, and the
    (name, file) pair is collected for multipart upload.
    """
    def ensure_dict(input_media):
        # Accept namedtuples (via _asdict) or plain dicts.
        if isinstance(input_media, tuple) and hasattr(input_media, '_asdict'):
            return input_media._asdict()
        elif isinstance(input_media, dict):
            return input_media
        else:
            raise ValueError()
    def given_attach_name(input_media):
        # An explicit (attach-name, file) tuple supplies its own name.
        if isinstance(input_media['media'], tuple):
            return input_media['media'][0]
        else:
            return None
    def attach_name_generator(used_names):
        # Yield 'media1', 'media2', ... skipping names already taken.
        x = 0
        while 1:
            x += 1
            name = 'media' + str(x)
            if name in used_names:
                continue
            yield name
    def split_media(input_media, name_generator):
        file_spec = input_media['media']
        # file_id, URL
        if _isstring(file_spec):
            return (input_media, None)
        # file-object
        # (attach-name, file-object)
        # (attach-name, (filename, file-object))
        if isinstance(file_spec, tuple):
            name, f = file_spec
        else:
            name, f = next(name_generator), file_spec
        m = input_media.copy()
        m['media'] = 'attach://' + name
        return (m, (name, f))
    ms = [ensure_dict(m) for m in media_array]
    used_names = [given_attach_name(m) for m in ms if given_attach_name(m) is not None]
    name_generator = attach_name_generator(used_names)
    splitted = [split_media(m, name_generator) for m in ms]
    legal_media, attachments = map(list, zip(*splitted))
    files_to_attach = dict([a for a in attachments if a is not None])
    return (legal_media, files_to_attach)
_string_type = str
_file_type = io.IOBase
def _isstring(s):
return isinstance(s, _string_type)
def _isfile(f):
return isinstance(f, _file_type)
from . import helper
def flavor_router(routing_table):
    # Build a Router keyed on message flavor and expose its route method.
    router = helper.Router(flavor, routing_table)
    return router.route
class _BotBase(object):
def __init__(self, token):
self._token = token
self._file_chunk_size = 65536
def _strip(params, more=[]):
return {key: value for key, value in params.items() if key not in ['self'] + more}
def _rectify(params):
# Update markdown to use markdownV2(enhanced version of previous markdown) without need of
# specifying it in parse_mode
# if params.get('parse_mode', None):
# if params['parse_mode'].lower() == 'markdown':
# params.update({'parse_mode': "MarkdownV2"})
def make_jsonable(value):
if isinstance(value, list):
return [make_jsonable(v) for v in value]
elif isinstance(value, dict):
return {k: make_jsonable(v) for k, v in value.items() if v is not None}
elif isinstance(value, tuple) and hasattr(value, '_asdict'):
return {k: make_jsonable(v) for k, v in value._asdict().items() if v is not None}
else:
return value
def flatten(value):
v = make_jsonable(value)
if isinstance(v, (dict, list)):
return json.dumps(v, separators=(',', ':'))
else:
return v
# remove None, then json-serialize if needed
return {k: flatten(v) for k, v in params.items() if v is not None}
from . import api
class Bot(_BotBase):
class Scheduler(threading.Thread):
    """Background thread that delivers scheduled pieces of data.

    Events (timestamp + data) are kept in a list ordered by timestamp via
    the ``bisect`` module.  The ``run`` loop polls every 0.1 s, pops
    expired events and passes their data to the handler registered with
    :meth:`on_event`.
    """
    # A class that is sorted by timestamp. Use `bisect` module to ensure order in event queue.
    Event = collections.namedtuple('Event', ['timestamp', 'data'])
    # Comparisons look at the timestamp only, so bisect keeps the queue in
    # chronological order regardless of the attached data.
    Event.__eq__ = lambda self, other: self.timestamp == other.timestamp
    Event.__ne__ = lambda self, other: self.timestamp != other.timestamp
    Event.__gt__ = lambda self, other: self.timestamp > other.timestamp
    Event.__ge__ = lambda self, other: self.timestamp >= other.timestamp
    Event.__lt__ = lambda self, other: self.timestamp < other.timestamp
    Event.__le__ = lambda self, other: self.timestamp <= other.timestamp

    def __init__(self):
        super(Bot.Scheduler, self).__init__()
        self._eventq = []  # sorted list of Event, earliest first
        self._lock = threading.RLock()  # reentrant lock to allow locked method calling locked method
        self._event_handler = None  # callable set via on_event()

    def _locked(fn):
        # Decorator: run the wrapped method while holding self._lock.
        def k(self, *args, **kwargs):
            with self._lock:
                return fn(self, *args, **kwargs)
        return k

    @_locked
    def _insert_event(self, data, when):
        # Insert keeping the queue sorted by timestamp; returns the Event
        # so callers can cancel it later.
        ev = self.Event(when, data)
        bisect.insort(self._eventq, ev)
        return ev

    @_locked
    def _remove_event(self, event):
        # Find event according to its timestamp.
        # Index returned should be one behind.
        i = bisect.bisect(self._eventq, event)
        # Having two events with identical timestamp is unlikely but possible.
        # I am going to move forward and compare timestamp AND object address
        # to make sure the correct object is found.
        while i > 0:
            i -= 1
            e = self._eventq[i]
            if e.timestamp != event.timestamp:
                raise exception.EventNotFound(event)
            elif id(e) == id(event):
                self._eventq.pop(i)
                return
        raise exception.EventNotFound(event)

    @_locked
    def _pop_expired_event(self):
        # Return the earliest event if it is already due, else None.
        if not self._eventq:
            return None
        if self._eventq[0].timestamp <= time.time():
            return self._eventq.pop(0)
        else:
            return None

    def event_at(self, when, data):
        """
        Schedule some data to emit at an absolute timestamp.
        :type when: int or float
        :type data: dictionary
        :return: an internal Event object
        """
        return self._insert_event(data, when)

    def event_later(self, delay, data):
        """
        Schedule some data to emit after a number of seconds.
        :type delay: int or float
        :type data: dictionary
        :return: an internal Event object
        """
        return self._insert_event(data, time.time() + delay)

    def event_now(self, data):
        """
        Emit some data as soon as possible.
        :type data: dictionary
        :return: an internal Event object
        """
        return self._insert_event(data, time.time())

    def cancel(self, event):
        """
        Cancel an event.
        :type event: an internal Event object
        """
        self._remove_event(event)

    def run(self):
        # Poll for due events every 0.1 s, forever.
        while 1:
            e = self._pop_expired_event()
            while e:
                if callable(e.data):
                    d = e.data()  # call the data-producing function
                    if d is not None:
                        # Only emit when the function actually produced data.
                        self._event_handler(d)
                else:
                    self._event_handler(e.data)
                e = self._pop_expired_event()
            time.sleep(0.1)

    def run_as_thread(self):
        # Daemon thread: does not block interpreter exit.
        self.daemon = True
        self.start()

    def on_event(self, fn):
        # Register the callable that receives each expired event's data.
        self._event_handler = fn
def __init__(self, token):
    """Create a bot bound to *token*, with a scheduler thread and a router
    that maps each update flavor to the corresponding ``on_*`` method."""
    super(Bot, self).__init__(token)
    self._scheduler = self.Scheduler()
    self._router = helper.Router(flavor, {'chat': lambda msg: self.on_chat_message(msg),
                                          'callback_query': lambda msg: self.on_callback_query(msg),
                                          'inline_query': lambda msg: self.on_inline_query(msg),
                                          'chosen_inline_result': lambda msg: self.on_chosen_inline_result(msg),
                                          'all_passport_data': lambda msg: self.on_passport_data(msg),
                                          'poll_data': lambda msg: self.on_poll_data(msg)})
    # use lambda to delay evaluation of self.on_ZZZ to runtime because
    # I don't want to require defining all methods right here.
@property
def scheduler(self):
    """The bot's internal :class:`Scheduler` thread."""
    return self._scheduler
@property
def router(self):
    """The flavor-based message router used by :meth:`handle`."""
    return self._router
def handle(self, msg):
    """Dispatch *msg* through the router to the matching ``on_*`` handler."""
    self._router.route(msg)
def _api_request(self, method, params=None, files=None, **kwargs):
    """Perform a raw Bot API call named *method* with optional params/files."""
    request = (self._token, method, params, files)
    return api.request(request, **kwargs)
def _api_request_with_file(self, method, params, file_key, file_value, extra_files=None, **kwargs):
    """Perform a Bot API call that carries a file under *file_key*.

    :param file_value: a ``file_id``/URL string (sent inline as a parameter)
        or a file-like object (sent as a multipart upload)
    :param extra_files: optional ``[name, value]`` pair for a secondary file
        (e.g. a thumbnail)

    Fixes: the string branch previously did
    ``params[extra_files[0]] = kwargs[extra_files[1]]`` (indexing *kwargs*
    by the file value itself → KeyError) and forwarded ``kwargs`` without
    ``**``, which made the dict land in the positional ``files`` slot of
    :meth:`_api_request`.
    """
    if _isstring(file_value):
        # file_value is a file_id or URL: pass it inline as an ordinary parameter.
        if extra_files:
            params[extra_files[0]] = extra_files[1]
        params[file_key] = file_value
        return self._api_request(method, _rectify(params), **kwargs)
    else:
        # file_value is file-like: upload it (plus any extra file) as multipart data.
        files = {file_key: file_value}
        if extra_files:
            files.update({extra_files[0]: extra_files[1]})
        return self._api_request(method, _rectify(params), files, **kwargs)
def getMe(self):
    """Return basic information about the bot itself.
    See: https://core.telegram.org/bots/api#getme
    """
    return self._api_request('getMe')
def sendMessage(self, chat_id, text,
                parse_mode=None,
                entities=None,
                allow_sending_without_reply=None,
                disable_web_page_preview=None,
                disable_notification=None,
                reply_to_message_id=None,
                reply_markup=None):
    """Send a text message to *chat_id*.
    See: https://core.telegram.org/bots/api#sendmessage
    """
    return self._api_request('sendMessage', _rectify(_strip(locals())))
def forwardMessage(self, chat_id, from_chat_id, message_id,
                   disable_notification=None):
    """Forward a message from one chat to another.
    See: https://core.telegram.org/bots/api#forwardmessage
    """
    return self._api_request('forwardMessage', _rectify(_strip(locals())))
def sendPhoto(self, chat_id, photo,
              entities=None,
              allow_sending_without_reply=None,
              caption=None,
              parse_mode=None,
              disable_notification=None,
              reply_to_message_id=None,
              reply_markup=None):
    """Send a photo.
    See: https://core.telegram.org/bots/api#sendphoto

    :param photo: one of
        - a ``file_id`` string for a photo already on Telegram servers
        - an HTTP URL string
        - a file-like object from ``open(path, 'rb')``
        - a ``(filename, file-like object)`` tuple for non-ASCII filenames
    """
    return self._api_request_with_file(
        'sendPhoto', _rectify(_strip(locals(), more=['photo'])), 'photo', photo)
def sendAudio(self, chat_id, audio,
              caption=None,
              parse_mode=None,
              entities=None,
              allow_sending_without_reply=None,
              duration=None,
              performer=None,
              title=None,
              disable_notification=None,
              reply_to_message_id=None,
              reply_markup=None):
    """Send an audio file.
    See: https://core.telegram.org/bots/api#sendaudio

    :param audio: same accepted forms as ``photo`` in :meth:`telepot.Bot.sendPhoto`
    """
    return self._api_request_with_file(
        'sendAudio', _rectify(_strip(locals(), more=['audio'])), 'audio', audio)
def sendDocument(self, chat_id, document,
                 thumb=None,
                 caption=None,
                 caption_entities=None,
                 disable_content_type_detection=None,
                 parse_mode=None,
                 entities=None,
                 disable_notification=None,
                 reply_to_message_id=None,
                 allow_sending_without_reply=None,
                 reply_markup=None):
    """Send a general file.
    See: https://core.telegram.org/bots/api#senddocument

    :param document: same accepted forms as ``photo`` in :meth:`telepot.Bot.sendPhoto`
    """
    return self._api_request_with_file(
        'sendDocument', _rectify(_strip(locals(), more=['document'])), 'document', document)
def sendVideo(self, chat_id, video,
              duration=None,
              width=None,
              height=None,
              caption=None,
              parse_mode=None,
              entities=None,
              allow_sending_without_reply=None,
              supports_streaming=None,
              disable_notification=None,
              reply_to_message_id=None,
              reply_markup=None):
    """Send a video.
    See: https://core.telegram.org/bots/api#sendvideo

    :param video: same accepted forms as ``photo`` in :meth:`telepot.Bot.sendPhoto`
    """
    return self._api_request_with_file(
        'sendVideo', _rectify(_strip(locals(), more=['video'])), 'video', video)
def sendAnimation(self, chat_id, animation,
                  duration=None,
                  width=None,
                  height=None,
                  thumb=None,
                  caption=None,
                  parse_mode=None,
                  entities=None,
                  allow_sending_without_reply=None,
                  disable_notification=None,
                  reply_to_message_id=None,
                  reply_markup=None):
    """Send an animation (GIF or soundless H.264/MPEG-4 AVC video).
    See: https://core.telegram.org/bots/api#sendanimation

    :param animation: same accepted forms as ``photo`` in :meth:`telepot.Bot.sendPhoto`
    """
    if thumb:
        # Thumbnail travels as a second attached file.
        req = _strip(locals(), more=['animation', 'thumb'])
        return self._api_request_with_file('sendAnimation', _rectify(req), 'animation', animation,
                                           extra_files=['thumb', thumb])
    req = _strip(locals(), more=['animation'])
    return self._api_request_with_file('sendAnimation', _rectify(req), 'animation', animation)
def sendVoice(self, chat_id, voice,
              caption=None,
              parse_mode=None,
              entities=None,
              allow_sending_without_reply=None,
              duration=None,
              disable_notification=None,
              reply_to_message_id=None,
              reply_markup=None):
    """Send a voice message (OGG/OPUS audio).
    See: https://core.telegram.org/bots/api#sendvoice

    :param voice: same accepted forms as ``photo`` in :meth:`telepot.Bot.sendPhoto`
    """
    return self._api_request_with_file(
        'sendVoice', _rectify(_strip(locals(), more=['voice'])), 'voice', voice)
def sendVideoNote(self, chat_id, video_note,
                  duration=None,
                  length=None,
                  allow_sending_without_reply=None,
                  disable_notification=None,
                  reply_to_message_id=None,
                  reply_markup=None):
    """Send a round video note.
    See: https://core.telegram.org/bots/api#sendvideonote

    :param video_note: same accepted forms as ``photo`` in :meth:`telepot.Bot.sendPhoto`
    :param length:
        Although marked as optional, this method does not seem to work without
        it being specified. Supply any integer you want. It seems to have no effect
        on the video note's display size.
    """
    return self._api_request_with_file(
        'sendVideoNote', _rectify(_strip(locals(), more=['video_note'])), 'video_note', video_note)
def sendMediaGroup(self, chat_id, media,
                   disable_notification=None,
                   reply_to_message_id=None,
                   allow_sending_without_reply=None):
    """Send a group of photos/videos as an album.
    See: https://core.telegram.org/bots/api#sendmediagroup

    :type media: array of `InputMedia <https://core.telegram.org/bots/api#inputmedia>`_ objects
    :param media:
        Each InputMedia object's ``media`` field may be a ``file_id`` string,
        an HTTP URL string, a file-like object, a ``(form-data name, file)``
        tuple, or a ``(form-data name, (filename, file))`` tuple. Uploads
        without explicit form-data names get unique names assigned by telepot
        that will not collide with user-supplied ones.
    """
    req = _strip(locals(), more=['media'])
    # Split server-referenced media from files that need uploading.
    legal_media, files_to_attach = _split_input_media_array(media)
    req['media'] = legal_media
    return self._api_request('sendMediaGroup', _rectify(req), files_to_attach)
def editMessageMedia(self, msg_identifier, media, inline_message_id=None, reply_markup=None):
    """Edit the media content of a message.
    See: https://core.telegram.org/bots/api#editmessagemedia

    :param msg_identifier: Same as in :meth:`.Bot.editMessageText`
    :param media: Same as in :meth:`.Bot.sendMedia`
    :param inline_message_id: self explanatory`
    """
    req = _strip(locals(), more=['msg_identifier', 'media'])
    legal_media, files_to_attach = _split_input_media_array(media)
    req.update(_dismantle_message_identifier(msg_identifier))
    req['media'] = legal_media
    return self._api_request('editMessageMedia', _rectify(req), files_to_attach)
def copyMessage(self, chat_id, from_chat_id, message_id,
                caption=None,
                caption_entities=None,
                reply_to_message_id=None,
                allow_sending_without_reply=None,
                disable_notification=None,
                parse_mode=None,
                reply_markup=None):
    """Copy a message without a link back to the original.
    See: https://core.telegram.org/bots/api#copymessage
    """
    return self._api_request('copyMessage', _rectify(_strip(locals())))
def sendLocation(self, chat_id, latitude, longitude,
                 live_period=None,
                 horizontal_accuracy=None,
                 heading=None,
                 proximity_alert_radius=None,
                 allow_sending_without_reply=None,
                 disable_notification=None,
                 reply_to_message_id=None,
                 reply_markup=None):
    """Send a point on the map.
    See: https://core.telegram.org/bots/api#sendlocation
    """
    return self._api_request('sendLocation', _rectify(_strip(locals())))
def editMessageLiveLocation(self, msg_identifier, latitude, longitude,
                            horizontal_accuracy=None,
                            heading=None,
                            proximity_alert_radius=None,
                            reply_markup=None):
    """Update a live location message.
    See: https://core.telegram.org/bots/api#editmessagelivelocation

    :param msg_identifier: Same as in :meth:`.Bot.editMessageText`
    """
    req = _strip(locals(), more=['msg_identifier'])
    req.update(_dismantle_message_identifier(msg_identifier))
    return self._api_request('editMessageLiveLocation', _rectify(req))
def stopMessageLiveLocation(self, msg_identifier,
                            reply_markup=None):
    """Stop updating a live location message.
    See: https://core.telegram.org/bots/api#stopmessagelivelocation

    :param msg_identifier: Same as in :meth:`.Bot.editMessageText`
    """
    req = _strip(locals(), more=['msg_identifier'])
    req.update(_dismantle_message_identifier(msg_identifier))
    return self._api_request('stopMessageLiveLocation', _rectify(req))
def sendVenue(self, chat_id, latitude, longitude, title, address,
              foursquare_id=None,
              foursquare_type=None,
              google_place_id=None,
              google_place_type=None,
              disable_notification=None,
              reply_to_message_id=None,
              allow_sending_without_reply=None,
              reply_markup=None):
    """Send information about a venue.
    See: https://core.telegram.org/bots/api#sendvenue
    """
    return self._api_request('sendVenue', _rectify(_strip(locals())))
def sendContact(self, chat_id, phone_number, first_name,
                last_name=None,
                vcard=None,
                disable_notification=None,
                reply_to_message_id=None,
                allow_sending_without_reply=None,
                reply_markup=None):
    """Send a phone contact.
    See: https://core.telegram.org/bots/api#sendcontact
    """
    return self._api_request('sendContact', _rectify(_strip(locals())))
def sendGame(self, chat_id, game_short_name,
             disable_notification=None,
             reply_to_message_id=None,
             allow_sending_without_reply=None,
             reply_markup=None):
    """Send a game.
    See: https://core.telegram.org/bots/api#sendgame
    """
    return self._api_request('sendGame', _rectify(_strip(locals())))
def sendInvoice(self, chat_id, title, description, payload,
                provider_token, start_parameter, currency, prices,
                provider_data=None,
                photo_url=None,
                photo_size=None,
                photo_width=None,
                photo_height=None,
                need_name=None,
                need_phone_number=None,
                need_email=None,
                need_shipping_address=None,
                is_flexible=None,
                disable_notification=None,
                reply_to_message_id=None,
                allow_sending_without_reply=None,
                reply_markup=None):
    """Send an invoice for payment.
    See: https://core.telegram.org/bots/api#sendinvoice
    """
    return self._api_request('sendInvoice', _rectify(_strip(locals())))
def sendChatAction(self, chat_id, action):
    """Broadcast a chat action (e.g. 'typing') to *chat_id*.
    See: https://core.telegram.org/bots/api#sendchataction
    """
    return self._api_request('sendChatAction', _rectify(_strip(locals())))
def getUserProfilePhotos(self, user_id,
                         offset=None,
                         limit=None):
    """Fetch a user's profile pictures.
    See: https://core.telegram.org/bots/api#getuserprofilephotos
    """
    return self._api_request('getUserProfilePhotos', _rectify(_strip(locals())))
def getFile(self, file_id):
    """Look up basic info (including the download path) for a file.
    See: https://core.telegram.org/bots/api#getfile
    """
    return self._api_request('getFile', _rectify(_strip(locals())))
def createChatInviteLink(self, chat_id, expire_date=None,
                         member_limit=None):
    """Create an additional invite link for a chat.
    See: https://core.telegram.org/bots/api#createChatInviteLink
    """
    return self._api_request('createChatInviteLink', _rectify(_strip(locals())))
def editChatInviteLink(self, chat_id, invite_link,
                       expire_date=None, member_limit=None):
    """Edit a non-primary invite link created by the bot.
    See: https://core.telegram.org/bots/api#editChatInviteLink
    """
    return self._api_request('editChatInviteLink', _rectify(_strip(locals())))
def revokeChatInviteLink(self, chat_id, invite_link):
    """Revoke an invite link created by the bot.
    See: https://core.telegram.org/bots/api#revokeChatInviteLink
    """
    return self._api_request('revokeChatInviteLink', _rectify(_strip(locals())))
def kickChatMember(self, chat_id, user_id,
                   until_date=None, revoke_messages=None):
    """Ban a user from a group, supergroup or channel.
    See: https://core.telegram.org/bots/api#kickchatmember
    """
    return self._api_request('kickChatMember', _rectify(_strip(locals())))
def unbanChatMember(self, chat_id, user_id, only_if_banned=None):
    """Lift a ban from a previously kicked user.
    See: https://core.telegram.org/bots/api#unbanchatmember
    """
    return self._api_request('unbanChatMember', _rectify(_strip(locals())))
def restrictChatMember(self, chat_id, user_id, permissions,
                       until_date=None):
    """Restrict a user in a supergroup to the given permissions.
    See: https://core.telegram.org/bots/api#restrictchatmember
    """
    return self._api_request('restrictChatMember', _rectify(_strip(locals())))
def promoteChatMember(self, chat_id, user_id,
                      is_anonymous=None,
                      can_manage_chat=None,
                      can_manage_voice_chats=None,
                      can_change_info=None,
                      can_post_messages=None,
                      can_edit_messages=None,
                      can_delete_messages=None,
                      can_invite_users=None,
                      can_restrict_members=None,
                      can_pin_messages=None,
                      can_promote_members=None):
    """Promote or demote a user's administrator rights in a chat.
    See: https://core.telegram.org/bots/api#promotechatmember
    """
    return self._api_request('promoteChatMember', _rectify(_strip(locals())))
def exportChatInviteLink(self, chat_id):
    """Generate a new primary invite link for a chat.
    See: https://core.telegram.org/bots/api#exportchatinvitelink
    """
    return self._api_request('exportChatInviteLink', _rectify(_strip(locals())))
def getMyCommands(self):
    """Fetch the bot's currently configured command list.
    See: https://core.telegram.org/bots/api#getmycommands
    """
    return self._api_request('getMyCommands', _rectify(_strip(locals())))
def setMyCommands(self, commands):
    """Replace the bot's command list.
    See: https://core.telegram.org/bots/api#setmycommands
    """
    return self._api_request('setMyCommands', _rectify(_strip(locals())))
def setChatAdministratorCustomTitle(self, chat_id, user_id, custom_title):
    """Set a custom title for an administrator promoted by the bot.
    See: https://core.telegram.org/bots/api#setchatadministratorcustomtitle
    """
    return self._api_request('setChatAdministratorCustomTitle', _rectify(_strip(locals())))
def setChatPermissions(self, chat_id, permissions=None):
    """Set default member permissions for a group or supergroup.
    See: https://core.telegram.org/bots/api#setchatpermissions

    :param permissions: a `ChatPermissions <https://core.telegram.org/bots/api#chatpermissions>`_
        dict. Added (default ``None`` for backward compatibility): the API
        requires this field, but the previous signature offered no way to
        supply it.
    """
    p = _strip(locals())
    return self._api_request('setChatPermissions', _rectify(p))
def setChatPhoto(self, chat_id, photo):
    """Set a new chat profile photo.
    See: https://core.telegram.org/bots/api#setchatphoto
    """
    return self._api_request_with_file(
        'setChatPhoto', _rectify(_strip(locals(), more=['photo'])), 'photo', photo)
def deleteChatPhoto(self, chat_id):
    """Remove the chat's profile photo.
    See: https://core.telegram.org/bots/api#deletechatphoto
    """
    return self._api_request('deleteChatPhoto', _rectify(_strip(locals())))
def setChatTitle(self, chat_id, title):
    """Change the chat's title.
    See: https://core.telegram.org/bots/api#setchattitle
    """
    return self._api_request('setChatTitle', _rectify(_strip(locals())))
def setChatDescription(self, chat_id,
                       description=None):
    """Change the chat's description.
    See: https://core.telegram.org/bots/api#setchatdescription
    """
    return self._api_request('setChatDescription', _rectify(_strip(locals())))
def pinChatMessage(self, chat_id, message_id,
                   disable_notification=None):
    """Pin a message in the chat.
    See: https://core.telegram.org/bots/api#pinchatmessage
    """
    return self._api_request('pinChatMessage', _rectify(_strip(locals())))
def unpinChatMessage(self, chat_id, message_id=None):
    """Unpin a message (the most recent one when *message_id* is omitted).
    See: https://core.telegram.org/bots/api#unpinchatmessage
    """
    return self._api_request('unpinChatMessage', _rectify(_strip(locals())))
def unpinAllChatMessages(self, chat_id):
    """Clear the entire list of pinned messages in a chat.
    See: https://core.telegram.org/bots/api#unpinallchatmessages
    """
    return self._api_request('unpinAllChatMessages', _rectify(_strip(locals())))
def leaveChat(self, chat_id):
    """Make the bot leave a group, supergroup or channel.
    See: https://core.telegram.org/bots/api#leavechat
    """
    return self._api_request('leaveChat', _rectify(_strip(locals())))
def getChat(self, chat_id):
    """Fetch up-to-date information about a chat.
    See: https://core.telegram.org/bots/api#getchat
    """
    return self._api_request('getChat', _rectify(_strip(locals())))
def getChatAdministrators(self, chat_id):
    """List a chat's administrators.
    See: https://core.telegram.org/bots/api#getchatadministrators
    """
    return self._api_request('getChatAdministrators', _rectify(_strip(locals())))
def getChatMembersCount(self, chat_id):
    """Return the number of members in a chat.
    See: https://core.telegram.org/bots/api#getchatmemberscount
    """
    return self._api_request('getChatMembersCount', _rectify(_strip(locals())))
def getChatMember(self, chat_id, user_id):
    """Fetch information about one chat member.
    See: https://core.telegram.org/bots/api#getchatmember
    """
    return self._api_request('getChatMember', _rectify(_strip(locals())))
def setChatStickerSet(self, chat_id, sticker_set_name):
    """Assign a sticker set to a supergroup.
    See: https://core.telegram.org/bots/api#setchatstickerset
    """
    return self._api_request('setChatStickerSet', _rectify(_strip(locals())))
def deleteChatStickerSet(self, chat_id):
    """Remove the supergroup's assigned sticker set.
    See: https://core.telegram.org/bots/api#deletechatstickerset
    """
    return self._api_request('deleteChatStickerSet', _rectify(_strip(locals())))
def answerCallbackQuery(self, callback_query_id,
                        text=None,
                        show_alert=None,
                        url=None,
                        cache_time=None):
    """Answer a callback query from an inline keyboard.
    See: https://core.telegram.org/bots/api#answercallbackquery
    """
    return self._api_request('answerCallbackQuery', _rectify(_strip(locals())))
def answerShippingQuery(self, shipping_query_id, ok,
                        shipping_options=None,
                        error_message=None):
    """Reply to a shipping query during checkout.
    See: https://core.telegram.org/bots/api#answershippingquery
    """
    return self._api_request('answerShippingQuery', _rectify(_strip(locals())))
def answerPreCheckoutQuery(self, pre_checkout_query_id, ok,
                           error_message=None):
    """Confirm or reject a pre-checkout query.
    See: https://core.telegram.org/bots/api#answerprecheckoutquery
    """
    return self._api_request('answerPreCheckoutQuery', _rectify(_strip(locals())))
def editMessageText(self, msg_identifier, text,
                    parse_mode=None,
                    entities=None,
                    disable_web_page_preview=None,
                    reply_markup=None):
    """Edit the text of an existing message.
    See: https://core.telegram.org/bots/api#editmessagetext

    :param msg_identifier:
        a 2-tuple (``chat_id``, ``message_id``),
        a 1-tuple (``inline_message_id``),
        or simply ``inline_message_id``.
        You may extract this value easily with :meth:`telepot.message_identifier`
    """
    req = _strip(locals(), more=['msg_identifier'])
    req.update(_dismantle_message_identifier(msg_identifier))
    return self._api_request('editMessageText', _rectify(req))
def editMessageCaption(self, msg_identifier,
                       caption=None,
                       entities=None,
                       parse_mode=None,
                       reply_markup=None):
    """Edit the caption of an existing message.
    See: https://core.telegram.org/bots/api#editmessagecaption

    :param msg_identifier: Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText`
    """
    req = _strip(locals(), more=['msg_identifier'])
    req.update(_dismantle_message_identifier(msg_identifier))
    return self._api_request('editMessageCaption', _rectify(req))
def editMessageReplyMarkup(self, msg_identifier,
                           reply_markup=None):
    """Edit only the reply markup of an existing message.
    See: https://core.telegram.org/bots/api#editmessagereplymarkup

    :param msg_identifier: Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText`
    """
    req = _strip(locals(), more=['msg_identifier'])
    req.update(_dismantle_message_identifier(msg_identifier))
    return self._api_request('editMessageReplyMarkup', _rectify(req))
def deleteMessage(self, msg_identifier):
    """Delete a message.
    See: https://core.telegram.org/bots/api#deletemessage

    :param msg_identifier:
        Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText`,
        except this method does not work on inline messages.
    """
    req = _strip(locals(), more=['msg_identifier'])
    req.update(_dismantle_message_identifier(msg_identifier))
    return self._api_request('deleteMessage', _rectify(req))
def sendPoll(self, chat_id, question, options, is_anonymous=None, type=None, allows_multiple_answers=None,
             entities=None,
             allow_sending_without_reply=None,
             correct_option_id=None, explanation=None, explanation_parse_mode=None, open_period=None,
             close_date=None, is_closed=None,
             disable_notification=None,
             reply_to_message_id=None,
             reply_markup=None):
    """Send a native poll.
    See: https://core.telegram.org/bots/api#sendpoll
    """
    return self._api_request('sendPoll', _rectify(_strip(locals())))
def stopPoll(self, msg_identifier, reply_markup=None):
    """Stop a running poll.
    See: https://core.telegram.org/bots/api#stoppoll

    :param msg_identifier: Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText`
    """
    req = _strip(locals(), more=['msg_identifier'])
    req.update(_dismantle_message_identifier(msg_identifier))
    return self._api_request('stopPoll', _rectify(req))
def sendDice(self, chat_id, text,
             emoji=None,
             disable_notification=None,
             reply_to_message_id=None,
             allow_sending_without_reply=None,
             reply_markup=None):
    """Send an animated emoji with a random value.

    Choose between different animations (``dice``, ``darts``, ``basketball``)
    via the ``emoji`` parameter.
    See: https://core.telegram.org/bots/api#senddice
    """
    return self._api_request('sendDice', _rectify(_strip(locals())))
def sendSticker(self, chat_id, sticker,
                disable_notification=None,
                reply_to_message_id=None,
                allow_sending_without_reply=None,
                reply_markup=None):
    """Send a sticker.
    See: https://core.telegram.org/bots/api#sendsticker

    :param sticker: same accepted forms as ``photo`` in :meth:`telepot.Bot.sendPhoto`
    """
    return self._api_request_with_file(
        'sendSticker', _rectify(_strip(locals(), more=['sticker'])), 'sticker', sticker)
def getStickerSet(self, name):
    """Fetch a sticker set by name.
    See: https://core.telegram.org/bots/api#getstickerset
    """
    return self._api_request('getStickerSet', _rectify(_strip(locals())))
def uploadStickerFile(self, user_id, png_sticker):
    """Upload a PNG sticker file for later use in sticker sets.
    See: https://core.telegram.org/bots/api#uploadstickerfile
    """
    return self._api_request_with_file(
        'uploadStickerFile', _rectify(_strip(locals(), more=['png_sticker'])), 'png_sticker', png_sticker)
def createNewStickerSet(self, user_id, name, title, emojis, png_sticker=None, tgs_sticker=None,
                        contains_masks=None,
                        mask_position=None):
    """Create a new sticker set owned by *user_id*.
    See: https://core.telegram.org/bots/api#createnewstickerset

    Exactly one of ``png_sticker`` or ``tgs_sticker`` must be supplied.
    :raises ValueError: when neither sticker argument is given.

    Bug fix: this method previously posted to ``'addStickerToSet'`` in both
    branches (copy-paste error), so it could never actually create a set.
    """
    if png_sticker:
        p = _strip(locals(), more=['png_sticker'])
        return self._api_request_with_file('createNewStickerSet', _rectify(p), 'png_sticker', png_sticker)
    elif tgs_sticker:
        p = _strip(locals(), more=['tgs_sticker'])
        return self._api_request_with_file('createNewStickerSet', _rectify(p), 'tgs_sticker', tgs_sticker)
    else:
        raise ValueError('You must use exactly one of the fields png_sticker or tgs_sticker')
def addStickerToSet(self, user_id, name, emojis, png_sticker=None, tgs_sticker=None,
                    mask_position=None):
    """Add a sticker to an existing set created by the bot.
    See: https://core.telegram.org/bots/api#addstickertoset

    Exactly one of ``png_sticker`` or ``tgs_sticker`` must be supplied.
    :raises ValueError: when neither sticker argument is given.
    """
    if png_sticker:
        req = _strip(locals(), more=['png_sticker'])
        return self._api_request_with_file('addStickerToSet', _rectify(req), 'png_sticker', png_sticker)
    if tgs_sticker:
        req = _strip(locals(), more=['tgs_sticker'])
        return self._api_request_with_file('addStickerToSet', _rectify(req), 'tgs_sticker', tgs_sticker)
    raise ValueError('You must use exactly one of the fields png_sticker or tgs_sticker')
def setStickerSetThumb(self, name, user_id, thumb=None):
    """Set (or clear, when *thumb* is omitted) a sticker set's thumbnail.
    See: https://core.telegram.org/bots/api#setstickersetthumb
    """
    if thumb:
        req = _strip(locals(), more=['thumb'])
        return self._api_request_with_file('setStickerSetThumb', _rectify(req), 'thumb', thumb)
    return self._api_request('setStickerSetThumb', _rectify(_strip(locals())))
def setStickerPositionInSet(self, sticker, position):
    """Move a sticker to a new position within its set.
    See: https://core.telegram.org/bots/api#setstickerpositioninset
    """
    return self._api_request('setStickerPositionInSet', _rectify(_strip(locals())))
def deleteStickerFromSet(self, sticker):
    """Remove a sticker from its set.
    See: https://core.telegram.org/bots/api#deletestickerfromset
    """
    return self._api_request('deleteStickerFromSet', _rectify(_strip(locals())))
def answerInlineQuery(self, inline_query_id, results,
                      cache_time=None,
                      is_personal=None,
                      next_offset=None,
                      switch_pm_text=None,
                      switch_pm_parameter=None):
    """Send results for an inline query.
    See: https://core.telegram.org/bots/api#answerinlinequery
    """
    return self._api_request('answerInlineQuery', _rectify(_strip(locals())))
def getUpdates(self,
               offset=None,
               limit=None,
               timeout=None,
               allowed_updates=None):
    """Long-poll the server for new updates.
    See: https://core.telegram.org/bots/api#getupdates
    """
    return self._api_request('getUpdates', _rectify(_strip(locals())))
def setWebhook(self,
               url=None,
               ip_address=None,
               certificate=None,
               max_connections=None,
               allowed_updates=None,
               drop_pending_updates=None):
    """Configure (or clear) the webhook that receives updates.
    See: https://core.telegram.org/bots/api#setwebhook
    """
    req = _strip(locals(), more=['certificate'])
    if certificate:
        # Upload a self-signed certificate as a file attachment.
        return self._api_request('setWebhook', _rectify(req), {'certificate': certificate})
    return self._api_request('setWebhook', _rectify(req))
def deleteWebhook(self, drop_pending_updates=None):
    """Remove the webhook and fall back to getUpdates polling.
    See: https://core.telegram.org/bots/api#deletewebhook
    """
    return self._api_request('deleteWebhook', _rectify(_strip(locals())))
def getWebhookInfo(self):
    """Fetch the current webhook status.
    See: https://core.telegram.org/bots/api#getwebhookinfo
    """
    return self._api_request('getWebhookInfo')
def setPassportDataErrors(self, chat_id, errors):
    """Report errors in Telegram Passport data supplied by a user.
    See: https://core.telegram.org/bots/api#setpassportdataerrors
    """
    return self._api_request('setPassportDataErrors', _rectify(_strip(locals())))
def setGameScore(self, user_id, score, game_message_identifier,
                 force=None,
                 disable_edit_message=None):
    """Set a user's score in a game message.
    See: https://core.telegram.org/bots/api#setgamescore

    :param game_message_identifier: Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText`
    """
    req = _strip(locals(), more=['game_message_identifier'])
    req.update(_dismantle_message_identifier(game_message_identifier))
    return self._api_request('setGameScore', _rectify(req))
def getGameHighScores(self, user_id, game_message_identifier):
    """Fetch the high-score table for a game message.
    See: https://core.telegram.org/bots/api#getgamehighscores

    :param game_message_identifier: Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText`
    """
    req = _strip(locals(), more=['game_message_identifier'])
    req.update(_dismantle_message_identifier(game_message_identifier))
    return self._api_request('getGameHighScores', _rectify(req))
def download_file(self, file_id, dest):
    """
    Download a file to local disk.
    :param file_id: ``file_id`` from telegram
    :param dest: a path or a ``file`` object
    :return file bytes if destination path exists otherwise does not return anything
    """
    # Resolve the server-side file path first.
    f = self.getFile(file_id)
    try:
        # Use dest directly if it is already file-like; open it as a path
        # otherwise. NOTE(review): when dest is falsy, d stays None and the
        # streaming branch below would fail on d.write — presumably
        # api.download returns raw bytes in that case; TODO confirm.
        d = dest if _isfile(dest) else open(dest, 'wb') if dest else None
        r = api.download((self._token, f['file_path']), dest, preload_content=False)
        if type(r) == bytes:
            # Whole payload already in memory; hand it back to the caller.
            return r
        else:
            # Stream the response into the destination in fixed-size chunks.
            while 1:
                data = r.read(self._file_chunk_size)
                if not data:
                    break
                d.write(data)
    finally:
        # Close the file only if we opened it ourselves; always release the
        # HTTP connection if one was made. `'d' in locals()` guards against
        # the open() call itself having raised.
        if dest:
            if not _isfile(dest) and 'd' in locals():
                d.close()
            if 'r' in locals():
                r.release_conn()
def message_loop(self, callback=None, relax=0.1,
timeout=20, allowed_updates=None,
source=None, ordered=True, maxhold=3,
run_forever=False):
"""
:deprecated: will be removed in future. Use :class:`.MessageLoop` instead.
Spawn a thread to constantly ``getUpdates`` or pull updates from a queue.
Apply ``callback`` to every message received. Also starts the scheduler thread
for internal events.
:param callback:
a function that takes one argument (the message), or a routing table.
If ``None``, the bot's ``handle`` method is used.
A *routing table* is a dictionary of ``{flavor: function}``, mapping messages to appropriate
handler functions according to their flavors. It allows you to define functions specifically
to handle one flavor of messages. It usually looks like this: ``{'chat': fn1,
'callback_query': fn2, 'inline_query': fn3, ...}``. Each handler function should take
one argument (the message).
:param source:
Source of updates.
If ``None``, ``getUpdates`` is used to obtain new messages from Telegram servers.
If it is a synchronized queue (``queue.Queue`` in Python 3), new messages are pulled from the queue.
A web application implementing a webhook can dump updates into the queue,
while the bot pulls from it. This is how telepot can be integrated with webhooks.
Acceptable contents in queue:
- ``str``, ``bytes`` (Python 3, decoded using UTF-8)
representing a JSON-serialized `Update <https://core.telegram.org/bots/api#update>`_ object.
- a ``dict`` representing an Update object.
When ``source`` is ``None``, these parameters are meaningful:
:type relax: float
:param relax: seconds between each ``getUpdates``
:type timeout: int
:param timeout:
``timeout`` parameter supplied to :meth:`telepot.Bot.getUpdates`,
controlling how long to poll.
:type allowed_updates: array of string
:param allowed_updates:
``allowed_updates`` parameter supplied to :meth:`telepot.Bot.getUpdates`,
controlling which types of updates to receive.
When ``source`` is a queue, these parameters are meaningful:
:type ordered: bool
:param ordered:
If ``True``, ensure in-order delivery of messages to ``callback``
(i.e. updates with a smaller ``update_id`` always come before those with
a larger ``update_id``).
If ``False``, no re-ordering is done. ``callback`` is applied to messages
as soon as they are pulled from queue.
:type maxhold: float
:param maxhold:
Applied only when ``ordered`` is ``True``. The maximum number of seconds
an update is held waiting for a not-yet-arrived smaller ``update_id``.
When this number of seconds is up, the update is delivered to ``callback``
even if some smaller ``update_id``\s have not yet arrived. If those smaller
``update_id``\s arrive at some later time, they are discarded.
Finally, there is this parameter, meaningful always:
:type run_forever: bool or str
:param run_forever:
If ``True`` or any non-empty string, append an infinite loop at the end of
this method, so it never returns. Useful as the very last line in a program.
A non-empty string will also be printed, useful as an indication that the
program is listening.
"""
if callback is None:
callback = self.handle
elif isinstance(callback, dict):
callback = flavor_router(callback)
collect_queue = queue.Queue()
def collector():
while 1:
try:
item = collect_queue.get(block=True)
callback(item)
except:
# Localize error so thread can keep going.
traceback.print_exc()
def relay_to_collector(update):
key = _find_first_key(update, ['message',
'edited_message',
'channel_post',
'edited_channel_post',
'callback_query',
'passport_data',
'inline_query',
'chosen_inline_result',
'shipping_query',
'pre_checkout_query',
'poll',
'poll_answer',
'my_chat_member',
'chat_member'])
collect_queue.put(update[key])
return update['update_id']
def get_from_telegram_server():
    """Long-poll ``getUpdates`` forever, relaying each update to the
    collector and advancing the server-side offset."""
    offset = None  # running offset: max(update_id) handled so far + 1
    allowed_upd = allowed_updates
    while 1:
        try:
            result = self.getUpdates(offset=offset,
                                     timeout=timeout,
                                     allowed_updates=allowed_upd)

            # Once passed, this parameter is no longer needed.
            allowed_upd = None

            if len(result) > 0:
                # No sort. Trust server to give messages in correct order.
                # Update offset to max(update_id) + 1
                offset = max([relay_to_collector(update) for update in result]) + 1

        except exception.BadHTTPResponse as e:
            traceback.print_exc()

            # Servers probably down. Wait longer.
            if e.status == 502:
                time.sleep(30)
        except:
            # NOTE(review): bare except keeps the poll thread alive on any
            # failure, but also catches SystemExit/KeyboardInterrupt —
            # confirm that is intended before narrowing.
            traceback.print_exc()
        finally:
            time.sleep(relax)
def dictify3(data):
    """Coerce *data* into a dict.

    ``bytes`` are treated as UTF-8-encoded JSON; ``str`` is parsed as
    JSON; a dict (or dict subclass, e.g. ``OrderedDict``) is returned
    unchanged.

    :raises ValueError: if *data* is of any other type.
    """
    # isinstance (rather than ``type(x) is``) also accepts subclasses,
    # and the ValueError now says what was actually received.
    if isinstance(data, bytes):
        return json.loads(data.decode('utf-8'))
    elif isinstance(data, str):
        return json.loads(data)
    elif isinstance(data, dict):
        return data
    else:
        raise ValueError('Expected bytes, str, or dict, got %s' % type(data).__name__)
def get_from_queue_unordered(qu):
    """Drain *qu* forever, decoding each item and relaying it to the
    collector with no re-ordering."""
    to_dict = dictify3
    while 1:
        try:
            raw = qu.get(block=True)
            relay_to_collector(to_dict(raw))
        except:
            traceback.print_exc()
def get_from_queue(qu):
    """Drain *qu* forever, delivering updates to the collector in
    ascending ``update_id`` order, holding early arrivals for at most
    ``maxhold`` seconds (see the ``ordered``/``maxhold`` parameters)."""
    dictify = dictify3

    # Here is the re-ordering mechanism, ensuring in-order delivery of updates.
    max_id = None                 # max update_id passed to callback
    buffer = collections.deque()  # keep those updates which skip some update_id
    qwait = None                  # how long to wait for updates,
                                  # because buffer's content has to be returned in time.

    while 1:
        try:
            data = qu.get(block=True, timeout=qwait)
            update = dictify(data)

            if max_id is None:
                # First message received, handle regardless.
                max_id = relay_to_collector(update)

            elif update['update_id'] == max_id + 1:
                # No update_id skipped, handle naturally.
                max_id = relay_to_collector(update)

                # clear contiguous updates in buffer
                if len(buffer) > 0:
                    buffer.popleft()  # first element belongs to update just received, useless now.

                    while 1:
                        try:
                            if type(buffer[0]) is dict:
                                max_id = relay_to_collector(
                                    buffer.popleft())  # updates that arrived earlier, handle them.
                            else:
                                break  # gap, no more contiguous updates
                        except IndexError:
                            break  # buffer empty

            elif update['update_id'] > max_id + 1:
                # Update arrives pre-maturely, insert to buffer.
                nbuf = len(buffer)
                if update['update_id'] <= max_id + nbuf:
                    # buffer long enough, put update at position
                    buffer[update['update_id'] - max_id - 1] = update
                else:
                    # buffer too short, lengthen it
                    expire = time.time() + maxhold
                    for a in range(nbuf, update['update_id'] - max_id - 1):
                        buffer.append(expire)  # put expiry time in gaps
                    buffer.append(update)

            else:
                pass  # discard

        except queue.Empty:
            # debug message
            # print('Timeout')

            # some buffer contents have to be handled
            # flush buffer until a non-expired time is encountered
            while 1:
                try:
                    if type(buffer[0]) is dict:
                        max_id = relay_to_collector(buffer.popleft())
                    else:
                        expire = buffer[0]
                        if expire <= time.time():
                            # Expired gap: give up waiting for the missing
                            # update_id and move past it.
                            max_id += 1
                            buffer.popleft()
                        else:
                            break  # non-expired
                except IndexError:
                    break  # buffer empty

        except:
            traceback.print_exc()

        finally:
            try:
                # don't wait longer than next expiry time
                qwait = buffer[0] - time.time()
                if qwait < 0:
                    qwait = 0
            except IndexError:
                # buffer empty, can wait forever
                qwait = None

            # debug message
            # print ('Buffer:', str(buffer), ', To Wait:', qwait, ', Max ID:', max_id)
# Start the consumer thread that applies ``callback`` to collected items.
collector_thread = threading.Thread(target=collector)
collector_thread.daemon = True
collector_thread.start()

# Pick the producer: long-poll Telegram directly, or drain a user queue
# (ordered or unordered).
if source is None:
    message_thread = threading.Thread(target=get_from_telegram_server)
elif isinstance(source, queue.Queue):
    if ordered:
        message_thread = threading.Thread(target=get_from_queue, args=(source,))
    else:
        message_thread = threading.Thread(target=get_from_queue_unordered, args=(source,))
else:
    raise ValueError('Invalid source')

message_thread.daemon = True  # need this for main thread to be killable by Ctrl-C
message_thread.start()

# Scheduled events are fed into the same collect_queue as updates.
self._scheduler.on_event(collect_queue.put)
self._scheduler.run_as_thread()

if run_forever:
    # Optionally print the string form as a "listening" banner.
    if _isstring(run_forever):
        print(run_forever)
    while 1:
        time.sleep(10)
import inspect
class SpeakerBot(Bot):
    """A Bot equipped with a Microphone so messages can be broadcast to
    any number of Listeners."""

    def __init__(self, token):
        super(SpeakerBot, self).__init__(token)
        self._mic = helper.Microphone()

    @property
    def mic(self):
        """The shared Microphone instance."""
        return self._mic

    def create_listener(self):
        """Register a fresh queue with the microphone and return a
        Listener wrapping it."""
        feed = queue.Queue()
        self._mic.add(feed)
        return helper.Listener(self._mic, feed)
class DelegatorBot(SpeakerBot):
    """A SpeakerBot that spawns per-seed delegate threads to handle messages."""

    def __init__(self, token, delegation_patterns):
        """
        :param delegation_patterns: a list of (seeder, delegator) tuples.
        """
        super(DelegatorBot, self).__init__(token)
        # Each record is (seeder, delegator, registry); registry maps
        # seed -> the delegate thread currently serving that seed.
        self._delegate_records = [p + ({},) for p in delegation_patterns]

    def _startable(self, delegate):
        # True if *delegate* already looks like a thread: it has bound
        # start() and is_alive() methods.
        return ((hasattr(delegate, 'start') and inspect.ismethod(delegate.start)) and
                (hasattr(delegate, 'is_alive') and inspect.ismethod(delegate.is_alive)))

    def _tuple_is_valid(self, t):
        # A valid delegate tuple is (callable, args-list/tuple, kwargs-dict).
        return len(t) == 3 and callable(t[0]) and type(t[1]) in [list, tuple] and type(t[2]) is dict

    def _ensure_startable(self, delegate):
        """Coerce *delegate* into an object with a ``start()`` method.

        :raises RuntimeError: if *delegate* is none of thread-like,
            callable, or a valid (func, args, kwargs) tuple.
        """
        if self._startable(delegate):
            return delegate
        elif callable(delegate):
            return threading.Thread(target=delegate)
        elif type(delegate) is tuple and self._tuple_is_valid(delegate):
            func, args, kwargs = delegate
            return threading.Thread(target=func, args=args, kwargs=kwargs)
        else:
            raise RuntimeError(
                'Delegate does not have the required methods, is not callable, and is not a valid tuple.')

    def handle(self, msg):
        # ``collections.Hashable`` was removed in Python 3.10; import the
        # ABC from its canonical home.
        from collections.abc import Hashable

        self._mic.send(msg)
        # Renamed locals (was ``dict``/``id``) to stop shadowing builtins.
        for calculate_seed, make_delegate, registry in self._delegate_records:
            seed = calculate_seed(msg)
            if seed is None:
                continue
            elif isinstance(seed, Hashable):
                # One live delegate per hashable seed; respawn if dead.
                if seed not in registry or not registry[seed].is_alive():
                    delegate = self._ensure_startable(make_delegate((self, msg, seed)))
                    registry[seed] = delegate
                    delegate.start()
            else:
                # Unhashable seed: always spawn a fresh, untracked delegate.
                delegate = self._ensure_startable(make_delegate((self, msg, seed)))
                delegate.start()
|
process.py | """JupyterLab Server process handler"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import atexit
import logging
import os
import re
import signal
import subprocess
import sys
import threading
import time
import weakref
from shutil import which as _which
from tornado import gen
# pty is POSIX-only; fall back to a falsy sentinel on platforms without it
# (WatchHelper checks this to pick pipe- vs pty-based I/O).
try:
    import pty
except ImportError:
    pty = False

if sys.platform == "win32":
    list2cmdline = subprocess.list2cmdline
else:
    def list2cmdline(cmd_list):
        """Quote and join a command list for display (POSIX)."""
        import pipes
        return " ".join(map(pipes.quote, cmd_list))


logging.basicConfig(format="%(message)s", level=logging.INFO)
def which(command, env=None):
    """Get the full path to a command.

    Parameters
    ----------
    command: str
        The command name or path.
    env: dict, optional
        The environment variables, defaults to `os.environ`.

    Returns
    -------
    str
        The absolute path of the resolved executable.

    Raises
    ------
    ValueError
        If the command cannot be found or is not executable.
    """
    env = env or os.environ
    path = env.get("PATH") or os.defpath
    command_with_path = _which(command, path=path)

    # Allow nodejs as an alias to node (some distros only ship `nodejs`).
    if command == "node" and not command_with_path:
        command = "nodejs"
        command_with_path = _which("nodejs", path=path)

    if not command_with_path:
        if command in ["nodejs", "node", "npm"]:
            msg = "Please install Node.js and npm before continuing installation. You may be able to install Node.js from your package manager, from conda, or directly from the Node.js website (https://nodejs.org)."
            raise ValueError(msg)
        # f-string replaces the precedence-reliant `"a" + "b %s" % c` form;
        # the rendered message is unchanged.
        raise ValueError(f"The command was not found or was not executable: {command}.")
    return os.path.abspath(command_with_path)
class Process:
    """A wrapper for a child process."""

    # Registry of live Process instances so _cleanup can terminate them at
    # interpreter exit; weak refs let garbage-collected wrappers drop out.
    _procs = weakref.WeakSet()
    _pool = None

    def __init__(self, cmd, logger=None, cwd=None, kill_event=None, env=None, quiet=False):
        """Start a subprocess that can be run asynchronously.

        Parameters
        ----------
        cmd: list
            The command to run.
        logger: :class:`~logger.Logger`, optional
            The logger instance.
        cwd: string, optional
            The cwd of the process.
        env: dict, optional
            The environment for the process.
        kill_event: :class:`~threading.Event`, optional
            An event used to kill the process operation.
        quiet: bool, optional
            Whether to suppress output.

        Raises
        ------
        ValueError
            If *cmd* is not a list/tuple, or *kill_event* is already set.
        """
        if not isinstance(cmd, (list, tuple)):
            raise ValueError("Command must be given as a list")

        if kill_event and kill_event.is_set():
            raise ValueError("Process aborted")

        self.logger = logger = logger or logging.getLogger("jupyterlab")
        self._last_line = ""
        if not quiet:
            self.logger.info(f"> {list2cmdline(cmd)}")
        self.cmd = cmd

        kwargs = {}
        if quiet:
            kwargs["stdout"] = subprocess.DEVNULL

        self.proc = self._create_process(cwd=cwd, env=env, **kwargs)
        self._kill_event = kill_event or threading.Event()

        Process._procs.add(self)

    def terminate(self):
        """Terminate the process and return the exit code."""
        proc = self.proc

        # Kill the process.
        if proc.poll() is None:
            os.kill(proc.pid, signal.SIGTERM)

        # Wait for the process to close.
        try:
            proc.wait(timeout=2.0)
        except subprocess.TimeoutExpired:
            # Graceful SIGTERM timed out; escalate to a hard kill.
            if os.name == "nt":
                sig = signal.SIGBREAK
            else:
                sig = signal.SIGKILL

            if proc.poll() is None:
                os.kill(proc.pid, sig)

        finally:
            # NOTE(review): WeakSet.remove raises KeyError if terminate()
            # is called twice on the same instance — confirm callers only
            # terminate once.
            Process._procs.remove(self)

        return proc.wait()

    def wait(self):
        """Wait for the process to finish.

        Returns
        -------
        The process exit code.

        Raises
        ------
        ValueError
            If the kill event fires before the process finishes.
        """
        proc = self.proc
        kill_event = self._kill_event
        # Poll once a second so the kill event can interrupt the wait.
        while proc.poll() is None:
            if kill_event.is_set():
                self.terminate()
                raise ValueError("Process was aborted")

            time.sleep(1.0)
        return self.terminate()

    @gen.coroutine
    def wait_async(self):
        """Asynchronously wait for the process to finish."""
        proc = self.proc
        kill_event = self._kill_event
        while proc.poll() is None:
            if kill_event.is_set():
                self.terminate()
                raise ValueError("Process was aborted")

            # Yield to the event loop between polls.
            yield gen.sleep(1.0)

        raise gen.Return(self.terminate())

    def _create_process(self, **kwargs):
        """Create the process."""
        cmd = self.cmd
        kwargs.setdefault("stderr", subprocess.STDOUT)

        # NOTE(review): resolves the executable via which() and mutates the
        # caller-supplied cmd list in place.
        cmd[0] = which(cmd[0], kwargs.get("env"))

        if os.name == "nt":
            kwargs["shell"] = True

        proc = subprocess.Popen(cmd, **kwargs)
        return proc

    @classmethod
    def _cleanup(cls):
        """Clean up the started subprocesses at exit."""
        # Copy to a list: terminate() removes entries from _procs while
        # we iterate.
        for proc in list(cls._procs):
            proc.terminate()
class WatchHelper(Process):
    """A process helper for a watch process."""

    def __init__(self, cmd, startup_regex, logger=None, cwd=None, kill_event=None, env=None):
        """Initialize the process helper.

        Parameters
        ----------
        cmd: list
            The command to run.
        startup_regex: string
            The regex to wait for at startup.
        logger: :class:`~logger.Logger`, optional
            The logger instance.
        cwd: string, optional
            The cwd of the process.
        env: dict, optional
            The environment for the process.
        kill_event: callable, optional
            A function to call to check if we should abort.

        Raises
        ------
        RuntimeError
            If the process exits before the startup regex is matched
            (non-pty platforms only).
        """
        super().__init__(cmd, logger=logger, cwd=cwd, kill_event=kill_event, env=env)

        if not pty:
            # No pty: block on piped stdout until the watcher announces
            # readiness (or exits, which is an error).
            self._stdout = self.proc.stdout

            while 1:
                line = self._stdout.readline().decode("utf-8")
                if not line:
                    raise RuntimeError("Process ended improperly")

                print(line.rstrip())

                if re.match(startup_regex, line):
                    break

        self._read_thread = threading.Thread(target=self._read_incoming)
        # Use the `daemon` attribute: Thread.setDaemon() is deprecated
        # since Python 3.10.
        self._read_thread.daemon = True
        self._read_thread.start()

    def terminate(self):
        """Terminate the watch process and return the exit code."""
        proc = self.proc

        if proc.poll() is None:
            if os.name != "nt":
                # Kill the process group if we started a new session.
                os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
            else:
                os.kill(proc.pid, signal.SIGTERM)

        # Wait for the process to close.
        try:
            proc.wait()
        finally:
            Process._procs.remove(self)

        return proc.returncode

    def _read_incoming(self):
        """Run in a thread to read stdout and print"""
        fileno = self._stdout.fileno()
        while 1:
            try:
                buf = os.read(fileno, 1024)
            except OSError as e:
                self.logger.debug("Read incoming error %s", e)
                return

            if not buf:
                return

            print(buf.decode("utf-8"), end="")

    def _create_process(self, **kwargs):
        """Create the watcher helper process."""
        kwargs["bufsize"] = 0

        if pty:
            # Run under a pseudo-terminal so the child line-buffers and
            # behaves as if attached to a console; read from the master end.
            master, slave = pty.openpty()
            kwargs["stderr"] = kwargs["stdout"] = slave
            kwargs["start_new_session"] = True
            self._stdout = os.fdopen(master, "rb")
        else:
            kwargs["stdout"] = subprocess.PIPE

            if os.name == "nt":
                startupinfo = subprocess.STARTUPINFO()
                startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
                kwargs["startupinfo"] = startupinfo
                kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
                kwargs["shell"] = True

        return super()._create_process(**kwargs)
# Register the cleanup handler.
atexit.register(Process._cleanup)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.