# ---- bot.py ----
import os
import sys
import threading
import discord
import pyglet
from dotenv import load_dotenv
from firework_parsing import firework_parsing
def extract_message(message_body, prefix):
    """Return the text after *prefix* (matched case-insensitively), with
    surrounding whitespace stripped, or None when *message_body* does not
    start with *prefix*."""
    if message_body.lower().startswith(prefix.lower()):
        # Slice off the prefix, then trim; idiomatic method call instead of
        # the unbound str.strip(...) form.
        return message_body[len(prefix):].strip()
    return None
def main():
    """Entry point: start the pyglet render thread, then run the Discord bot.

    Requires DISCORD_TOKEN in the environment (loaded from .env). The stray
    table-delimiter residue after the __main__ guard has been removed.
    """
    load_dotenv()
    TOKEN = os.getenv('DISCORD_TOKEN')
    client = discord.Client()

    # Imported here so window creation happens at startup time, not module
    # import time; pyglet's event loop runs on its own thread while the
    # Discord client blocks below.
    from window import window
    draw_thread = threading.Thread(target=lambda: window.run(None))
    draw_thread.start()

    @client.event
    async def on_ready():
        print(f'{client.user} has connected to Discord!')
        for guild in client.guilds:
            print('The following guilds are using the server: ' + str(guild))

    @client.event
    async def on_message(message):
        # Never react to our own messages.
        if message.author == client.user:
            return
        # We're only interested in messages in the #fireworks channels (allow for
        # #fireworks-testing
        if 'fireworks' not in message.channel.name:
            return
        command = message.content
        if client.user in message.mentions:
            no_exclaimations = message.content.replace('!', '')
            command = extract_message(no_exclaimations, client.user.mention)
        firework = firework_parsing.parse_firework(command)
        if firework is None:
            print(f'Unrecognised command: {command}', file=sys.stderr)
            await message.add_reaction('❌')
        else:
            # Hand the firework to the pyglet thread; GL work must not run
            # on the Discord event loop.
            pyglet.clock.schedule_once(firework, 0)
            await message.add_reaction('🎇')

    client.run(TOKEN)


if __name__ == "__main__":
    main()
# ---- DataLoader.py ----
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 2 20:11:53 2021
@author: gbatz97
"""
import numpy as np
import multiprocessing
import queue
from itertools import cycle
#This file implements a dataloader which does prefetching while the GPU is running.
#It intends to accelerate dataloading and reduce the probability that dataloading becomes a bottleneck.
#Apart from prefetching which is implemented here, we can also look at the NVIDIA DALI library for faster dataloading.
def default_collate(batch):
    """Collate a list of samples into a batch.

    Arrays are stacked along a new leading axis, scalars become a 1-D array,
    and list/tuple samples are collated field-wise (recursively).

    Raises:
        TypeError: for element types none of the branches handle — the
            original silently returned None here, which surfaced later as a
            confusing downstream failure.
    """
    if isinstance(batch[0], np.ndarray):
        return np.stack(batch)
    if isinstance(batch[0], (int, float)):
        return np.array(batch)
    if isinstance(batch[0], (list, tuple)):
        return tuple(default_collate(var) for var in zip(*batch))
    raise TypeError(
        'default_collate: unsupported element type {!r}'.format(type(batch[0])))
class NaiveDataLoader:
    """Minimal sequential loader yielding collated batches from a dataset."""

    def __init__(self, dataset, batch_size=64, collate_fn=default_collate):
        self.index = 0
        self.dataset = dataset
        self.batch_size = batch_size
        self.collate_fn = collate_fn

    def __iter__(self):
        # Restart from the beginning each time iteration begins.
        self.index = 0
        return self

    def __next__(self):
        remaining = len(self.dataset) - self.index
        if remaining <= 0:
            raise StopIteration
        count = min(remaining, self.batch_size)
        return self.collate_fn([self.get() for _ in range(count)])

    def get(self):
        """Fetch the sample at the cursor and advance it."""
        sample = self.dataset[self.index]
        self.index += 1
        return sample
def worker_fn(dataset, index_queue, output_queue):
    """Worker loop: read indices from *index_queue* and push
    (index, dataset[index]) pairs onto *output_queue*.

    A None index is the shutdown sentinel.
    """
    while True:
        try:
            idx = index_queue.get(timeout=0)
        except queue.Empty:
            continue  # nothing queued yet; poll again
        if idx is None:  # shutdown sentinel
            return
        output_queue.put((idx, dataset[idx]))
class DataLoader(NaiveDataLoader):
    """Multi-process data loader that prefetches samples in the background.

    Indices are handed out round-robin to worker processes; results arriving
    out of order are parked in ``self.cache`` so ``get`` can return samples
    in dataset order. (The trailing table-delimiter residue after
    ``w.terminate()`` that broke the file's syntax has been removed.)
    """

    def __init__(
        self,
        dataset,
        batch_size=64,
        num_workers=1,
        prefetch_batches=2,  # kept for interface compatibility; presumably
                             # meant to drive the prefetch window — TODO confirm
        collate_fn=default_collate,
    ):
        super().__init__(dataset, batch_size, collate_fn)
        self.num_workers = num_workers
        self.prefetch_batches = prefetch_batches
        self.output_queue = multiprocessing.Queue()
        self.index_queues = []
        self.workers = []
        self.worker_cycle = cycle(range(num_workers))
        self.cache = {}
        self.prefetch_index = 0

        for _ in range(num_workers):
            index_queue = multiprocessing.Queue()
            worker = multiprocessing.Process(
                target=worker_fn, args=(self.dataset, index_queue, self.output_queue)
            )
            worker.daemon = True
            worker.start()
            self.workers.append(worker)
            self.index_queues.append(index_queue)

        self.prefetch()

    def prefetch(self):
        """Top the pipeline up to 2 * num_workers * batch_size indices ahead
        of the consumer, stopping at the end of the dataset."""
        while (
            self.prefetch_index < len(self.dataset)
            and self.prefetch_index
            < self.index + 2 * self.num_workers * self.batch_size
        ):
            self.index_queues[next(self.worker_cycle)].put(self.prefetch_index)
            self.prefetch_index += 1

    def __iter__(self):
        self.index = 0
        self.cache = {}
        self.prefetch_index = 0
        self.prefetch()
        return self

    def get(self):
        """Return the sample at the current cursor, in order."""
        self.prefetch()
        if self.index in self.cache:
            item = self.cache.pop(self.index)
        else:
            while True:
                try:
                    (index, data) = self.output_queue.get(timeout=0)
                except queue.Empty:  # output queue empty, keep trying
                    continue
                if index == self.index:  # found our item, ready to return
                    item = data
                    break
                else:  # item isn't the one we want, cache for later
                    self.cache[index] = data
        self.index += 1
        return item

    def __del__(self):
        try:
            # Ask each worker to exit via the None sentinel, then close queues.
            for i, w in enumerate(self.workers):
                self.index_queues[i].put(None)
                w.join(timeout=5.0)
            for q in self.index_queues:
                q.cancel_join_thread()
                q.close()
            self.output_queue.cancel_join_thread()
            self.output_queue.close()
        finally:
            for w in self.workers:
                if w.is_alive():
                    w.terminate()
# ---- reduction.py ----
#
# Module to allow connection and socket objects to be transferred
# between processes
#
# multiprocessing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
import os
import sys
import socket
import threading
from pickle import Pickler
from .. import current_process
from .._ext import _billiard, win32
from ..util import register_after_fork, debug, sub_debug
is_win32 = sys.platform == 'win32'
is_pypy = hasattr(sys, 'pypy_version_info')
is_py3k = sys.version_info[0] == 3

# Connection pickling needs either win32, PyPy, Python 3, or a C extension
# exposing recvfd(); refuse to import anywhere else.
if not (is_win32 or is_pypy or is_py3k or hasattr(_billiard, 'recvfd')):
    raise ImportError('pickling of connections not supported')

close = win32.CloseHandle if is_win32 else os.close

__all__ = []

# globals set later
_listener = None
_lock = None
_cache = set()
#
# ForkingPickler
#
class ForkingPickler(Pickler):  # noqa
    """Pickler with a class-local dispatch table for handle-aware reducers.

    NOTE(review): relies on the Python-2-era ``Pickler.dispatch`` attribute;
    the C ``Pickler`` in modern CPython does not expose it — confirm which
    Pickler this runs against.
    """

    dispatch = Pickler.dispatch.copy()

    @classmethod
    def register(cls, type, reduce):
        """Install *reduce* as the pickling routine for *type*."""
        def dispatcher(self, obj):
            rv = reduce(obj)
            self.save_reduce(obj=obj, *rv)
        cls.dispatch[type] = dispatcher
def _reduce_method(m): # noqa
if m.__self__ is None:
return getattr, (m.__self__.__class__, m.__func__.__name__)
else:
return getattr, (m.__self__, m.__func__.__name__)
ForkingPickler.register(type(ForkingPickler.save), _reduce_method)


def _reduce_method_descriptor(m):
    """Reduce an unbound method descriptor (e.g. list.append) for pickling."""
    return getattr, (m.__objclass__, m.__name__)


ForkingPickler.register(type(list.append), _reduce_method_descriptor)
ForkingPickler.register(type(int.__add__), _reduce_method_descriptor)
try:
    from functools import partial
except ImportError:
    pass
else:
    def _reduce_partial(p):
        """Reduce functools.partial to its (func, args, keywords) triple."""
        return _rebuild_partial, (p.func, p.args, p.keywords or {})

    def _rebuild_partial(func, args, keywords):
        return partial(func, *args, **keywords)

    ForkingPickler.register(partial, _reduce_partial)
def dump(obj, file, protocol=None):
    """Pickle *obj* to *file* using ForkingPickler."""
    pickler = ForkingPickler(file, protocol)
    pickler.dump(obj)
#
# Platform specific definitions
#
if sys.platform == 'win32':
    # XXX Should this subprocess import be here?
    import _subprocess  # noqa

    def send_handle(conn, handle, destination_pid):
        """Duplicate *handle* into *destination_pid*'s process, send it on *conn*."""
        from ..forking import duplicate
        process_handle = win32.OpenProcess(
            win32.PROCESS_ALL_ACCESS, False, destination_pid
        )
        try:
            conn.send(duplicate(handle, process_handle))
        finally:
            close(process_handle)

    def recv_handle(conn):
        return conn.recv()

else:
    def send_handle(conn, handle, destination_pid):  # noqa
        """Send fd *handle* over the Unix socket behind *conn*."""
        _billiard.sendfd(conn.fileno(), handle)

    def recv_handle(conn):  # noqa
        return _billiard.recvfd(conn.fileno())
#
# Support for a per-process server thread which caches pickled handles
#
def _reset(obj):
    """Close every cached handle and re-initialise the module globals.

    Registered to run after fork so children start with fresh state.
    """
    global _lock, _listener, _cache
    for h in _cache:
        close(h)
    _cache.clear()
    _lock = threading.Lock()
    _listener = None


_reset(None)
register_after_fork(_reset, _reset)
def _get_listener():
    """Lazily start the handle-serving listener thread (double-checked:
    the flag is re-tested under the lock before creating anything)."""
    global _listener
    if _listener is None:
        with _lock:
            if _listener is None:
                from ..connection import Listener
                debug('starting listener and thread for sending handles')
                _listener = Listener(authkey=current_process().authkey)
                server_thread = threading.Thread(target=_serve)
                server_thread.daemon = True
                server_thread.start()
    return _listener
def _serve():
    """Serve cached handles to requesting processes until interpreter exit.

    Runs on a daemon thread started by _get_listener().
    """
    from ..util import is_exiting, sub_warning
    while 1:
        try:
            conn = _listener.accept()
            handle_wanted, destination_pid = conn.recv()
            _cache.remove(handle_wanted)
            send_handle(conn, handle_wanted, destination_pid)
            close(handle_wanted)
            conn.close()
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
            # can still unwind this thread instead of being swallowed.
            if not is_exiting():
                sub_warning('thread for sharing handles raised exception',
                            exc_info=True)
#
# Functions to be used for pickling/unpickling objects with handles
#
def reduce_handle(handle):
    """Reduce an OS handle to a (listener_address, handle, inherited) triple."""
    from ..forking import Popen, duplicate
    if Popen.thread_is_spawning():
        # Child will inherit the duplicated handle directly.
        return (None, Popen.duplicate_for_child(handle), True)
    duplicated = duplicate(handle)
    _cache.add(duplicated)
    sub_debug('reducing handle %d', handle)
    return (_get_listener().address, duplicated, False)
def rebuild_handle(pickled_data):
    """Reconstruct in this process a handle reduced by reduce_handle()."""
    from ..connection import Client
    address, handle, inherited = pickled_data
    if inherited:
        return handle
    sub_debug('rebuilding handle %d', handle)
    client = Client(address, authkey=current_process().authkey)
    client.send((handle, os.getpid()))
    received = recv_handle(client)
    client.close()
    return received
#
# Register `_billiard.Connection` with `ForkingPickler`
#
def reduce_connection(conn):
    """Reduce a _billiard.Connection for transfer to another process."""
    reduced = reduce_handle(conn.fileno())
    return rebuild_connection, (reduced, conn.readable, conn.writable)


def rebuild_connection(reduced_handle, readable, writable):
    """Inverse of reduce_connection()."""
    return _billiard.Connection(
        rebuild_handle(reduced_handle), readable=readable, writable=writable
    )
# Register `socket.socket` with `ForkingPickler`
#
def fromfd(fd, family, type_, proto=0):
    """socket.fromfd wrapper that normalises the result to socket.socket."""
    sock = socket.fromfd(fd, family, type_, proto)
    if sock.__class__ is not socket.socket:
        # NOTE(review): the _sock keyword is a Python-2-ism; this branch
        # presumably never fires on Python 3 — confirm.
        sock = socket.socket(_sock=sock)
    return sock
def reduce_socket(s):
    """Reduce a socket to (reduced_handle, family, type, proto)."""
    return rebuild_socket, (reduce_handle(s.fileno()), s.family, s.type, s.proto)


def rebuild_socket(reduced_handle, family, type_, proto):
    """Inverse of reduce_socket(): rebuild the fd, wrap it, close the raw fd."""
    fd = rebuild_handle(reduced_handle)
    sock = fromfd(fd, family, type_, proto)
    close(fd)
    return sock


ForkingPickler.register(socket.socket, reduce_socket)
#
# Register `_billiard.PipeConnection` with `ForkingPickler`
#
if sys.platform == 'win32':
    def reduce_pipe_connection(conn):
        """Reduce a _billiard.PipeConnection for cross-process transfer."""
        reduced = reduce_handle(conn.fileno())
        return rebuild_pipe_connection, (reduced, conn.readable, conn.writable)

    def rebuild_pipe_connection(reduced_handle, readable, writable):
        """Inverse of reduce_pipe_connection()."""
        return _billiard.PipeConnection(
            rebuild_handle(reduced_handle), readable=readable, writable=writable
        )
# ---- training.py ----
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import json
import logging
import os
import subprocess
import threading
import time
import sagemaker_containers.beta.framework as framework
import tensorflow as tf
from sagemaker_tensorflow_container import s3_utils
logger = logging.getLogger(__name__)
SAGEMAKER_PARAMETER_SERVER_ENABLED = 'sagemaker_parameter_server_enabled'
MODEL_DIR = '/opt/ml/model'
def _is_host_master(hosts, current_host):
return current_host == hosts[0]
def _build_tf_config(hosts, current_host, ps_task=False):
"""Builds a dictionary containing cluster information based on number of hosts and number of
parameter servers.
Args:
hosts (list[str]): List of host names in the cluster
current_host (str): Current host name
ps_task (bool): Set to True if this config is built for a parameter server process
(default: False)
Returns:
dict[str: dict]: A dictionary describing the cluster setup for distributed training.
For more information regarding TF_CONFIG:
https://cloud.google.com/ml-engine/docs/tensorflow/distributed-training-details
"""
# Assign the first host as the master. Rest of the hosts if any will be worker hosts.
# The first ps_num hosts will also have a parameter task assign to them.
masters = hosts[:1]
workers = hosts[1:]
ps = hosts if len(hosts) > 1 else None
def host_addresses(hosts, port=2222):
return ['{}:{}'.format(host, port) for host in hosts]
tf_config = {
'cluster': {
'master': host_addresses(masters)
},
'environment': 'cloud'
}
if ps:
tf_config['cluster']['ps'] = host_addresses(ps, port='2223')
if workers:
tf_config['cluster']['worker'] = host_addresses(workers)
if ps_task:
if ps is None:
raise ValueError(
'Cannot have a ps task if there are no parameter servers in the cluster')
task_type = 'ps'
task_index = ps.index(current_host)
elif _is_host_master(hosts, current_host):
task_type = 'master'
task_index = 0
else:
task_type = 'worker'
task_index = workers.index(current_host)
tf_config['task'] = {'index': task_index, 'type': task_type}
return tf_config
def _run_ps(env, cluster):
    """Start a TensorFlow parameter-server for *cluster* on a daemon-less
    background thread (server.join() blocks forever)."""
    logger.info('Running distributed training job with parameter servers')
    cluster_spec = tf.train.ClusterSpec(cluster)
    task_index = env.hosts.index(env.current_host)
    # Force parameter server to run on cpu. Running multiple TensorFlow processes on the same
    # GPU is not safe:
    # https://stackoverflow.com/questions/46145100/is-it-unsafe-to-run-multiple-tensorflow-processes-on-the-same-gpu
    no_gpu_config = tf.ConfigProto(device_count={'GPU': 0})
    server = tf.train.Server(
        cluster_spec, job_name='ps', task_index=task_index, config=no_gpu_config
    )
    threading.Thread(target=server.join).start()
def _run_worker(env, tf_config):
    """Run the user training script with TF_CONFIG injected into its env."""
    env_vars = env.to_env_vars()
    env_vars['TF_CONFIG'] = json.dumps(tf_config)
    framework.entry_point.run(env.module_dir, env.user_entry_point,
                              env.to_cmd_args(), env_vars)
def _wait_until_master_is_down(master):
    """Poll the master's port 2222 with curl every 10s until it stops answering.

    Used by parameter servers so they exit once training on the master ends.
    """
    while True:
        try:
            # DEVNULL instead of PIPE: check_call never reads the pipes, so a
            # chatty curl could fill the pipe buffer and deadlock the child.
            subprocess.check_call(
                ['curl', '{}:2222'.format(master)],
                stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            logger.info('master {} is still up, waiting for it to exit'.format(master))
            time.sleep(10)
        except subprocess.CalledProcessError:
            logger.info('master {} is down, stopping parameter server'.format(master))
            return
def train(env):
    """Get training job environment from env and run the training job.

    Args:
        env (sagemaker_containers.beta.framework.env.TrainingEnv): Instance of TrainingEnv class
    """
    ps_enabled = env.additional_framework_parameters.get(
        SAGEMAKER_PARAMETER_SERVER_ENABLED, False)
    if len(env.hosts) > 1 and ps_enabled:
        tf_config = _build_tf_config(hosts=env.hosts, current_host=env.current_host)
        logger.info('Running distributed training job with parameter servers')
        logger.info('Launching parameter server process')
        _run_ps(env, tf_config['cluster'])
        logger.info('Launching worker process')
        _run_worker(env, tf_config)
        # Parameter-server threads never exit by themselves; non-master hosts
        # wait for the master to go down before letting the process end.
        if not _is_host_master(env.hosts, env.current_host):
            _wait_until_master_is_down(env.hosts[0])
    else:
        if env.additional_framework_parameters.get('sagemaker_mpi_enabled'):
            runner_type = framework.runner.MPIRunnerType
        else:
            runner_type = framework.runner.ProcessRunnerType
        framework.entry_point.run(env.module_dir, env.user_entry_point,
                                  env.to_cmd_args(), env.to_env_vars(),
                                  runner=runner_type)
def _log_model_missing_warning(model_dir):
    """Warn when *model_dir* lacks a TF-Serving-compatible SavedModel layout.

    Fixes: deprecated ``logger.warn`` -> ``logger.warning``; the concatenated
    warning strings were missing a separating space ("containers.The ...").
    """
    pb_file_exists = False
    file_exists = False
    for dirpath, dirnames, filenames in os.walk(model_dir):
        if filenames:
            file_exists = True
        for f in filenames:
            if 'saved_model.pb' in f or 'saved_model.pbtxt' in f:
                pb_file_exists = True
                path, direct_parent_dir = os.path.split(dirpath)
                # TF Serving expects the SavedModel under a numeric version dir.
                if not direct_parent_dir.isdigit():
                    logger.warning(
                        'Your model will NOT be servable with SageMaker TensorFlow Serving containers. '
                        'The SavedModel bundle is under directory "{}", not a numeric name.'
                        .format(direct_parent_dir))
    if not file_exists:
        logger.warning(
            'No model artifact is saved under path {}.'
            ' Your training job will not save any model files to S3.\n'
            'For details of how to construct your training script see:\n'
            'https://github.com/aws/sagemaker-python-sdk/tree/master/src/sagemaker/tensorflow#adapting-your-local-tensorflow-script'  # noqa
            .format(model_dir))
    elif not pb_file_exists:
        logger.warning(
            'Your model will NOT be servable with SageMaker TensorFlow Serving container. '
            'The model artifact was not saved in the TensorFlow SavedModel directory structure:\n'
            'https://www.tensorflow.org/guide/saved_model#structure_of_a_savedmodel_directory')
def main():
    """Training entry point."""
    hyperparameters = framework.env.read_hyperparameters()
    env = framework.training_env(hyperparameters=hyperparameters)
    s3_utils.configure(env.hyperparameters.get('model_dir'),
                       os.environ.get('SAGEMAKER_REGION'))
    logger.setLevel(env.log_level)
    train(env)
    _log_model_missing_warning(MODEL_DIR)
# ---- watch.py ----
import logging
import threading
import grpc
import six
from six.moves import queue
import etcd3.etcdrpc as etcdrpc
import etcd3.events as events
import etcd3.exceptions as exceptions
import etcd3.utils as utils
_log = logging.getLogger(__name__)
class Watch(object):
    """Handle for a single watch registered with an etcd cluster."""

    def __init__(self, watch_id, iterator=None, etcd_client=None):
        self.watch_id = watch_id
        self.etcd_client = etcd_client
        self.iterator = iterator

    def cancel(self):
        """Cancel this watch on the owning client."""
        self.etcd_client.cancel_watch(self.watch_id)

    def iterator(self):
        # NOTE(review): on instances this method is shadowed by the
        # `self.iterator` attribute assigned in __init__, so it is
        # unreachable — looks like a latent naming clash; confirm before
        # relying on it.
        if self.iterator is not None:
            return self.iterator
        raise ValueError('Undefined iterator')
class Watcher(object):
    """Drives the bidirectional gRPC Watch stream and dispatches callbacks.

    Indentation was lost in extraction; structure reconstructed from the
    statement sequence. One background thread (started lazily) owns the
    stream; `add_callback` hands it create requests through a bounded queue
    and waits on a condition variable for etcd's confirmation.
    """

    def __init__(self, watchstub, timeout=None, call_credentials=None,
                 metadata=None):
        self.timeout = timeout
        self._watch_stub = watchstub
        self._credentials = call_credentials
        self._metadata = metadata

        self._lock = threading.Lock()
        self._request_queue = queue.Queue(maxsize=10)
        self._callbacks = {}
        self._callback_thread = None
        self._new_watch_cond = threading.Condition(lock=self._lock)
        self._new_watch = None  # the single in-flight _NewWatch, if any

    def _create_watch_request(self, key, range_end=None, start_revision=None,
                              progress_notify=False, filters=None,
                              prev_kv=False):
        """Build a WatchRequest wrapping a WatchCreateRequest for *key*."""
        create_watch = etcdrpc.WatchCreateRequest()
        create_watch.key = utils.to_bytes(key)
        if range_end is not None:
            create_watch.range_end = utils.to_bytes(range_end)
        if start_revision is not None:
            create_watch.start_revision = start_revision
        if progress_notify:
            create_watch.progress_notify = progress_notify
        if filters is not None:
            create_watch.filters = filters
        if prev_kv:
            create_watch.prev_kv = prev_kv
        return etcdrpc.WatchRequest(create_request=create_watch)

    def add_callback(self, key, callback, range_end=None, start_revision=None,
                     progress_notify=False, filters=None, prev_kv=False):
        """Register *callback* for events on *key*; return the watch id.

        Raises WatchTimedOut when etcd does not confirm the watch within
        ``self.timeout``, or the error etcd reported on failure.
        """
        rq = self._create_watch_request(key, range_end=range_end,
                                        start_revision=start_revision,
                                        progress_notify=progress_notify,
                                        filters=filters, prev_kv=prev_kv)

        with self._lock:
            # Start the callback thread if it is not yet running.
            if not self._callback_thread:
                thread_name = 'etcd3_watch_%x' % (id(self),)
                self._callback_thread = threading.Thread(name=thread_name,
                                                         target=self._run)
                self._callback_thread.daemon = True
                self._callback_thread.start()

            # Only one create watch request can be pending at a time, so if
            # there one already, then wait for it to complete first.
            while self._new_watch:
                self._new_watch_cond.wait()

            # Submit a create watch request.
            new_watch = _NewWatch(callback)
            self._request_queue.put(rq)
            self._new_watch = new_watch

            try:
                # Wait for the request to be completed, or timeout.
                self._new_watch_cond.wait(timeout=self.timeout)

                # If the request not completed yet, then raise a timeout
                # exception.
                if new_watch.id is None and new_watch.err is None:
                    raise exceptions.WatchTimedOut()

                # Raise an exception if the watch request failed.
                if new_watch.err:
                    raise new_watch.err
            finally:
                # Wake up threads stuck on add_callback call if any.
                self._new_watch = None
                self._new_watch_cond.notify_all()

        return new_watch.id

    def cancel(self, watch_id):
        """Cancel the watch with *watch_id*; no-op if already cancelled."""
        with self._lock:
            callback = self._callbacks.pop(watch_id, None)
            if not callback:
                return
            self._cancel_no_lock(watch_id)

    def _run(self):
        """Background thread: open the stream, dispatch responses, and on a
        gRPC error fail the pending watch, flush callbacks, and restart."""
        while True:
            response_iter = self._watch_stub.Watch(
                _new_request_iter(self._request_queue),
                credentials=self._credentials,
                metadata=self._metadata)
            try:
                for rs in response_iter:
                    self._handle_response(rs)
            except grpc.RpcError as err:
                with self._lock:
                    if self._new_watch:
                        self._new_watch.err = err
                        self._new_watch_cond.notify_all()

                    callbacks = self._callbacks
                    self._callbacks = {}

                    # Rotate request queue. This way we can terminate one gRPC
                    # stream and initiate another one whilst avoiding a race
                    # between them over requests in the queue.
                    self._request_queue.put(None)
                    self._request_queue = queue.Queue(maxsize=10)

                for callback in six.itervalues(callbacks):
                    _safe_callback(callback, err)

    def _handle_response(self, rs):
        """Process one WatchResponse from the stream."""
        with self._lock:
            if rs.created:
                # If the new watch request has already expired then cancel the
                # created watch right away.
                if not self._new_watch:
                    self._cancel_no_lock(rs.watch_id)
                    return

                if rs.compact_revision != 0:
                    self._new_watch.err = exceptions.RevisionCompactedError(
                        rs.compact_revision)
                    return

                self._callbacks[rs.watch_id] = self._new_watch.callback
                self._new_watch.id = rs.watch_id
                self._new_watch_cond.notify_all()

            callback = self._callbacks.get(rs.watch_id)

        # Ignore leftovers from canceled watches.
        if not callback:
            return

        # The watcher can be safely reused, but adding a new event
        # to indicate that the revision is already compacted
        # requires api change which would break all users of this
        # module. So, raising an exception if a watcher is still
        # alive.
        if rs.compact_revision != 0:
            err = exceptions.RevisionCompactedError(rs.compact_revision)
            _safe_callback(callback, err)
            self.cancel(rs.watch_id)
            return

        for event in rs.events:
            _safe_callback(callback, events.new_event(event))

    def _cancel_no_lock(self, watch_id):
        """Queue a cancel request; caller must already hold self._lock."""
        cancel_watch = etcdrpc.WatchCancelRequest()
        cancel_watch.watch_id = watch_id
        rq = etcdrpc.WatchRequest(cancel_request=cancel_watch)
        self._request_queue.put(rq)
class _NewWatch(object):
def __init__(self, callback):
self.callback = callback
self.id = None
self.err = None
def _new_request_iter(_request_queue):
while True:
rq = _request_queue.get()
if rq is None:
return
yield rq
def _safe_callback(callback, event_or_err):
try:
callback(event_or_err)
except Exception:
_log.exception('Watch callback failed')
# (end of watch.py)
# ---- refletancia.py ----
#!/usr/bin/env python
import rospy
from SensorsListener import SensorsListener
#import motores
class Refletancia():
    """Interprets the four reflectance channels as white/black flags.

    Channel order is [maisEsq, esq, dir, maisDir]; a reading above 4 counts
    as white. Pattern methods are named left-to-right with b = branco
    (white) and p = preto (black).

    Fixes the original ``p_b_b_b``, which called ``maisEsqBranco()`` without
    ``self.`` and raised NameError.
    """

    def __init__(self, sl):
        # sl: SensorsListener-like object exposing getRefle(channel)
        self.sl = sl

    def maisEsqBranco(self):
        return self.sl.getRefle(0) > 4

    def esqBranco(self):
        return self.sl.getRefle(1) > 4

    def dirBranco(self):
        return self.sl.getRefle(2) > 4

    def maisDirBranco(self):
        return self.sl.getRefle(3) > 4

    def _estado(self):
        """Current (maisEsq, esq, dir, maisDir) white flags; samples all
        four channels."""
        return (self.maisEsqBranco(), self.esqBranco(),
                self.dirBranco(), self.maisDirBranco())

    def b_b_b_b(self): return self._estado() == (True, True, True, True)
    def p_p_p_p(self): return self._estado() == (False, False, False, False)
    def p_b_b_b(self): return self._estado() == (False, True, True, True)
    def p_p_b_b(self): return self._estado() == (False, False, True, True)
    def p_p_p_b(self): return self._estado() == (False, False, False, True)
    def b_p_p_p(self): return self._estado() == (True, False, False, False)
    def b_b_p_p(self): return self._estado() == (True, True, False, False)
    def b_b_b_p(self): return self._estado() == (True, True, True, False)
    def p_b_p_b(self): return self._estado() == (False, True, False, True)
    def p_b_b_p(self): return self._estado() == (False, True, True, False)
    def b_p_b_p(self): return self._estado() == (True, False, True, False)
    def b_p_p_b(self): return self._estado() == (True, False, False, True)
    def p_b_p_p(self): return self._estado() == (False, True, False, False)
    def p_p_b_p(self): return self._estado() == (False, False, True, False)
    def b_p_b_b(self): return self._estado() == (True, False, True, True)
    def b_b_p_b(self): return self._estado() == (True, True, False, True)
'''
Utilizar com a classe feita de testeEstrategia
if __name__ == "__main__":
try:
sl = SensorsListener()
threading.Thread(target=showValue).start()
sl.register()
'''
# (end of refletancia.py)
# ---- variable_scope_test.py ----
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for variable store."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import threading
import numpy
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.util import compat
from tensorflow.python.util import tf_inspect
def run_inside_wrap_function_in_eager_mode(graph_function):
    """Decorator to execute the same graph code in eager and graph modes.

    In graph mode, we just execute the graph_function passed as argument. In eager
    mode, we wrap the function using wrap_function and then execute the wrapped
    result.

    Args:
        graph_function: python function containing graph code to be wrapped

    Returns:
        decorated function
    """
    import functools  # local import: the file's import block stays untouched

    # functools.wraps preserves the test's __name__/__doc__, which the
    # original decorator discarded (every decorated test reported itself
    # as 'wrap_and_execute').
    @functools.wraps(graph_function)
    def wrap_and_execute(self):
        if context.executing_eagerly():
            wrapped = wrap_function.wrap_function(graph_function, [self])
            # use the wrapped graph function
            wrapped()
        else:
            # use the original function
            graph_function(self)
    return wrap_and_execute
class VariableScopeTest(test.TestCase):
def tearDown(self):
    gc.collect()
    # Only uncollectable garbage (reference cycles involving objects that
    # define __del__) lands in gc.garbage; tests must not leak any.
    self.assertEqual(0, len(gc.garbage))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testGetVar(self):
    # Getting the same name twice returns the same variable object.
    store = variable_scope._get_default_variable_store()
    first = store.get_variable("v", [1])
    second = store.get_variable("v", [1])
    self.assertEqual(first, second)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testResource(self):
    store = variable_scope._get_default_variable_store()
    var = store.get_variable("v", [1], use_resource=True)
    self.assertTrue(isinstance(var, resource_variable_ops.ResourceVariable))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNameExists(self):
    store = variable_scope._get_default_variable_store()
    # No check by default, so we can both create and get existing names.
    created = store.get_variable("v", [1])
    fetched = store.get_variable("v", [1])
    self.assertEqual(created, fetched)
    # When reuse is False, we fail when variables are already there.
    store.get_variable("w", [1], reuse=False)  # That's ok.
    with self.assertRaises(ValueError):
        store.get_variable("v", [1], reuse=False)  # That fails.
    # When reuse is True, we fail when variables are new.
    store.get_variable("v", [1], reuse=True)  # That's ok.
    with self.assertRaises(ValueError):
        store.get_variable("u", [1], reuse=True)  # That fails.
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testNamelessStore(self):
    store = variable_scope._get_default_variable_store()
    store.get_variable("v1", [2])
    store.get_variable("v2", [2])
    expected_names = ["%s:0" % name for name in ["v1", "v2"]]
    self.assertEqual(
        set(expected_names), {v.name for v in store._vars.values()})
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Expected tf.group() expected Tensor arguments not 'None' with
# type '<type 'NoneType'>'
@test_util.run_in_graph_and_eager_modes
def testVarScopeInitializer(self):
    init = init_ops.constant_initializer(0.3)
    with variable_scope.variable_scope("tower0") as tower:
        # Scope-level initializer applies to variables created inside it.
        with variable_scope.variable_scope("foo", initializer=init):
            v = variable_scope.get_variable("v", [])
            self.evaluate(variables_lib.variables_initializer([v]))
            self.assertAllClose(self.evaluate(v.value()), 0.3)
        # Re-entering an existing scope object behaves the same way.
        with variable_scope.variable_scope(tower, initializer=init):
            w = variable_scope.get_variable("w", [])
            self.evaluate(variables_lib.variables_initializer([w]))
            self.assertAllClose(self.evaluate(w.value()), 0.3)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeConstraint(self):
    constraint = lambda x: 0. * x
    with variable_scope.variable_scope("tower1") as tower:
        # Scope-level constraint propagates to variables created inside it.
        with variable_scope.variable_scope("foo", constraint=constraint):
            v = variable_scope.get_variable("v", [])
            self.assertEqual(v.constraint, constraint)
        with variable_scope.variable_scope(tower, constraint=constraint):
            w = variable_scope.get_variable("w", [])
            self.assertEqual(w.constraint, constraint)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# TypeError: Fetch argument <tf.Variable 'string:0' shape=() dtype=string>
# has invalid type <class '...ResourceVariable'>, must be a string or Tensor.
# (Can not convert a ResourceVariable into a Tensor or Operation.)
def testStringDefaultInitializer(self):
    with self.cached_session():
        # String variables default-initialize to the empty byte string.
        v = variable_scope.get_variable("string", shape=[], dtype=dtypes.string)
        variables_lib.global_variables_initializer().run()
        self.assertAllEqual(compat.as_bytes(self.evaluate(v)), b"")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeDType(self):
    with variable_scope.variable_scope("tower2") as tower:
        # Scope-level dtype applies to variables created inside it.
        with variable_scope.variable_scope("foo", dtype=dtypes.float16):
            v = variable_scope.get_variable("v", [])
            self.assertEqual(v.dtype.base_dtype, dtypes.float16)
        with variable_scope.variable_scope(tower, dtype=dtypes.float16):
            w = variable_scope.get_variable("w", [])
            self.assertEqual(w.dtype.base_dtype, dtypes.float16)
def testGetVariableInGraphNestedUnderEagerContext(self):
    with context.eager_mode():

        @function.defun
        def f():
            # Inside a defun under eager, get_variable yields resource vars.
            v = variable_scope.get_variable("should_be_resource", [])
            self.assertEqual(type(v), resource_variable_ops.ResourceVariable)

        f()
def testEagerVariableStore(self):
with context.eager_mode():
store = variable_scope.EagerVariableStore()
with store.as_default():
v = variable_scope.get_variable("v", shape=(), trainable=True)
w = variable_scope.get_variable("w", shape=(), trainable=False)
self.assertTrue(v in store.variables())
self.assertTrue(w in store.variables())
self.assertTrue(v in store.trainable_variables())
self.assertFalse(w in store.trainable_variables())
self.assertFalse(v in store.non_trainable_variables())
self.assertTrue(w in store.non_trainable_variables())
# Test copying.
new_store = store.copy()
with new_store.as_default():
new_v = variable_scope.get_variable("v")
new_w = variable_scope.get_variable("w")
self.assertEqual(new_v.numpy(), v.numpy())
self.assertEqual(new_w.numpy(), w.numpy())
self.assertTrue(new_v in new_store.variables())
self.assertTrue(new_w in new_store.variables())
self.assertTrue(new_v in new_store.trainable_variables())
self.assertFalse(new_w in new_store.trainable_variables())
self.assertFalse(new_v in new_store.non_trainable_variables())
self.assertTrue(new_w in new_store.non_trainable_variables())
# Check that variables are separate instances.
for v in store.variables():
v.assign(-1)
for v in new_store.variables():
v.assign(1)
for v in store.variables():
self.assertEqual(v.numpy(), -1)
for v in new_store.variables():
self.assertEqual(v.numpy(), 1)
  def testEagerVariableStoreWithEagerDefun(self):
    """Dense layers created inside a defun can share variables via _reuse.

    A second layer constructed with the same name and `_reuse=True` must pick
    up exactly the variables of the first.
    """
    with context.eager_mode():

      @function.defun
      def f():
        x = constant_op.constant([[2.0]])
        d1 = core_layers.Dense(
            1, name="my_dense", kernel_initializer=init_ops.ones_initializer())
        _ = d1(x)  # create variables
        self.assertEqual(len(d1.variables), 2)
        v1, v2 = d1.variables
        d2 = core_layers.Dense(
            1,
            name="my_dense",
            kernel_initializer=init_ops.ones_initializer(),
            _reuse=True)
        _ = d2(x)
        # d2 must reuse (not recreate) d1's kernel and bias.
        self.assertEqual(len(d2.variables), 2)
        v3, v4 = d2.variables
        self.assertAllEqual([v1, v2], [v3, v4])

      f()
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # obtaining different results in the eager case compared to the graph one
  @test_util.run_in_graph_and_eager_modes
  def testEagerVariablesStoreAddsToCollections(self):
    """Variables created under an EagerVariableStore land in the expected
    graph collections (global / trainable / user-specified)."""
    store = variable_scope.EagerVariableStore()
    with store.as_default():
      trainable = variable_scope.get_variable("v1", [], trainable=True)
      not_trainable = variable_scope.get_variable("v2", [], trainable=False)
      # Explicit collections list overrides the default GLOBAL_VARIABLES.
      concat = variable_scope.get_variable(
          "v3", [], collections=[ops.GraphKeys.CONCATENATED_VARIABLES])
      self.assertEqual(
          ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES),
          [trainable, not_trainable])
      self.assertEqual(
          ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES),
          [trainable, concat])
      self.assertEqual(
          ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES), [concat])
  def testEagerVariablesOutsideStoreNotAddedToCollections(self):
    """Eager variables made with no store as default stay out of collections."""
    with context.eager_mode():
      variable_scope.get_variable("v1", [], trainable=True)
      variable_scope.get_variable("v2", [], trainable=False)
      # Both collections must remain empty.
      self.assertFalse(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
      self.assertFalse(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # TypeError: Expected tf.group() expected Tensor arguments not 'None' with
  # type '<type 'NoneType'>'.
  @test_util.run_in_graph_and_eager_modes
  def testInitFromNonTensorValue(self):
    """Plain Python scalars and numpy arrays are valid initializers."""
    v = variable_scope.get_variable("v4", initializer=4, dtype=dtypes.int32)
    self.evaluate(variables_lib.variables_initializer([v]))
    self.assertAllClose(self.evaluate(v.value()), 4)
    w = variable_scope.get_variable(
        "w4", initializer=numpy.array([1, 2, 3]), dtype=dtypes.int64)
    self.evaluate(variables_lib.variables_initializer([w]))
    self.assertAllClose(self.evaluate(w.value()), [1, 2, 3])
    # A quirk to be revisited?  The error type differs between eager and
    # graph mode for an unsupported initializer value.
    error = ValueError if context.executing_eagerly() else TypeError
    with self.assertRaises(error):
      variable_scope.get_variable("x4", initializer={})
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # InvalidArgumentError=: You must feed a value for placeholder tensor
  # 'ReadVariableOp/resource' with dtype resource
  @test_util.run_in_graph_and_eager_modes
  def testInitFromNonInitializer(self):
    """Default initialization equals an explicit zeros_initializer per dtype."""
    # Test various dtypes with zeros initializer as following:
    types = [
        dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.uint16, dtypes.int32,
        dtypes.int64, dtypes.bool
    ]
    # Use different variable_name to distinguish various dtypes
    for (i, dtype) in enumerate(types):
      # x: implicit default initializer; y: explicit zeros. Both must match.
      x = variable_scope.get_variable(
          name="xx%d" % i, shape=(3, 4), dtype=dtype)
      y = variable_scope.get_variable(
          name="yy%d" % i,
          shape=(3, 4),
          dtype=dtype,
          initializer=init_ops.zeros_initializer(dtype=dtype))
      self.evaluate(variables_lib.global_variables_initializer())
      self.assertAllEqual(self.evaluate(x.value()), self.evaluate(y.value()))
  # TODO(alive): support variable partitioning/caching in eager mode.
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # InvalidArgumentError: /job:moo/replica:0/task:0/device:CPU:0 unknown device.
  def testVarScopeCachingDevice(self):
    """caching_device is inherited by child scopes and overridable per scope."""
    with self.cached_session():
      caching_device = "/job:moo"
      with variable_scope.variable_scope("tower"):
        with variable_scope.variable_scope(
            "caching", caching_device=caching_device):
          v = variable_scope.get_variable("v", [])
          self.assertTrue(v.value().device.startswith(caching_device))
          # Child scope inherits the parent's caching device.
          with variable_scope.variable_scope("child"):
            v2 = variable_scope.get_variable("v", [])
            self.assertTrue(v2.value().device.startswith(caching_device))
          # Empty string disables caching for this scope.
          with variable_scope.variable_scope("not_cached", caching_device=""):
            v2_not_cached = variable_scope.get_variable("v", [])
            self.assertFalse(
                v2_not_cached.value().device.startswith(caching_device))
          # A callable caching_device returning the op's own device also
          # effectively disables caching.
          with variable_scope.variable_scope(
              "not_cached_identity_device",
              caching_device=lambda op: op.device):
            v2_identity_device = variable_scope.get_variable("v", [])
            self.assertFalse(
                v2_identity_device.value().device.startswith(caching_device))
          # The caching device can also be set on the scope object itself.
          with variable_scope.variable_scope("we_will_do_it_live") as vs_live:
            vs_live.set_caching_device("/job:live")
            v_live = variable_scope.get_variable("v", [])
            self.assertTrue(v_live.value().device.startswith("/job:live"))
        # Outside the "caching" scope no caching device applies.
        v_tower = variable_scope.get_variable("v", [])
        self.assertFalse(v_tower.value().device.startswith(caching_device))
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # AttributeError: Tensor.name is meaningless when eager execution is enabled.
  @test_util.run_in_graph_and_eager_modes
  def testVarScopeRegularizer(self):
    """Scope regularizers add one loss per variable; no_regularizer opts out.

    Losses accumulate in GraphKeys.REGULARIZATION_LOSSES in creation order,
    and reusing a variable must not add a new loss.
    """
    init = init_ops.constant_initializer(0.3)
    def regularizer1(v):
      return math_ops.reduce_mean(v) + 0.1
    def regularizer2(v):
      return math_ops.reduce_mean(v) + 0.2
    with variable_scope.variable_scope(
        "tower3", regularizer=regularizer1) as tower:
      with variable_scope.variable_scope("foo", initializer=init):
        v = variable_scope.get_variable("v", [])
        self.evaluate(variables_lib.variables_initializer([v]))
        losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
        self.assertEqual(1, len(losses))
        # regularizer1(0.3) = mean(0.3) + 0.1 = 0.4.
        self.assertAllClose(self.evaluate(losses[0]), 0.4)
      with variable_scope.variable_scope(tower, initializer=init) as vs:
        u = variable_scope.get_variable("u", [])
        vs.set_regularizer(regularizer2)
        w = variable_scope.get_variable("w", [])
        # Next 3 variable not regularized to test disabling regularization.
        x = variable_scope.get_variable(
            "x", [], regularizer=variable_scope.no_regularizer)
        with variable_scope.variable_scope(
            "baz", regularizer=variable_scope.no_regularizer):
          y = variable_scope.get_variable("y", [])
        vs.set_regularizer(variable_scope.no_regularizer)
        z = variable_scope.get_variable("z", [])
        # Check results.
        losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
        self.assertEqual(3, len(losses))
        self.evaluate(variables_lib.variables_initializer([u, w, x, y, z]))
        self.assertAllClose(self.evaluate(losses[0]), 0.4)
        self.assertAllClose(self.evaluate(losses[1]), 0.4)
        self.assertAllClose(self.evaluate(losses[2]), 0.5)
      with variable_scope.variable_scope("foo", reuse=True):
        # reuse=True is for now only supported when eager execution is disabled.
        if not context.executing_eagerly():
          v = variable_scope.get_variable("v",
                                          [])  # "v" is already there, reused
          losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
          self.assertEqual(3, len(losses))  # No new loss added.
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # ValueError: Tensor-typed variable initializers must either be wrapped in an
  # init_scope or callable...
  @test_util.run_in_graph_and_eager_modes
  def testInitializeFromValue(self):
    """Tensor initializers fix the value, shape and dtype; mismatches raise."""
    init = constant_op.constant(0.1)
    w = variable_scope.get_variable("v", initializer=init)
    self.evaluate(variables_lib.variables_initializer([w]))
    self.assertAllClose(self.evaluate(w.value()), 0.1)
    with self.assertRaisesRegexp(ValueError, "shape"):
      # We disallow explicit shape specification when initializer is constant.
      variable_scope.get_variable("u", [1], initializer=init)
    with variable_scope.variable_scope("foo", initializer=init):
      # Constant initializer can be passed through scopes if needed.
      v = variable_scope.get_variable("v")
      self.evaluate(variables_lib.variables_initializer([v]))
      self.assertAllClose(self.evaluate(v.value()), 0.1)
    # Check that non-float32 initializer creates a non-float32 variable.
    init = constant_op.constant(1, dtype=dtypes.int32)
    t = variable_scope.get_variable("t", initializer=init)
    self.assertEqual(t.dtype.base_dtype, dtypes.int32)
    # Raise error if `initializer` dtype and `dtype` are not identical.
    with self.assertRaisesRegexp(ValueError, "don't match"):
      variable_scope.get_variable("s", initializer=init, dtype=dtypes.float64)
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # TypeError: Fetch argument <tf.Variable 'v0:0' shape=(1,) dtype=float32> has
  # invalid type <class '...ops.resource_variable_ops.ResourceVariable'>, must
  # be a string or Tensor. (Can not convert a ResourceVariable into a Tensor or
  # Operation.)
  def testControlDeps(self):
    """A control dependency on another variable must not force its init."""
    with self.cached_session() as sess:
      v0 = variable_scope.get_variable(
          "v0", [1], initializer=init_ops.constant_initializer(0))
      with ops.control_dependencies([v0.value()]):
        v1 = variable_scope.get_variable(
            "v1", [1], initializer=init_ops.constant_initializer(1))
        add = v1 + v0
      # v0 should be uninitialized.
      with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
        sess.run(v0)
      # We should be able to initialize and run v1 without initializing
      # v0, even if the variable was created with a control dep on v0.
      self.evaluate(v1.initializer)
      self.assertEqual(1, self.evaluate(v1))
      # v0 should still be uninitialized.
      with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
        sess.run(v0)
      with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
        sess.run(add)
      # If we initialize v0 we should be able to run 'add'.
      self.evaluate(v0.initializer)
      sess.run(add)
# TODO(mihaimaruseac): Not converted to use wrap_function because of
# AssertionError: True is not false (last assertFalse)
def testEnableResourceVariables(self):
old = variable_scope._DEFAULT_USE_RESOURCE
try:
variable_scope.enable_resource_variables()
self.assertTrue(isinstance(variables_lib.VariableV1(1.0),
resource_variable_ops.ResourceVariable))
variable_scope.disable_resource_variables()
self.assertFalse(isinstance(variables_lib.VariableV1(1.0),
resource_variable_ops.ResourceVariable))
finally:
variable_scope._DEFAULT_USE_RESOURCE = old
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # TypeError: Fetch argument None has invalid type <type 'NoneType'>
  def testControlFlow(self):
    """Variables created inside cond branches initialize independently of v0."""
    with self.cached_session() as sess:
      v0 = variable_scope.get_variable(
          "v0", [], initializer=init_ops.constant_initializer(0))
      var_dict = {}
      # Call get_variable in each of the cond clauses.
      def var_in_then_clause():
        v1 = variable_scope.get_variable(
            "v1", [1], initializer=init_ops.constant_initializer(1))
        var_dict["v1"] = v1
        return v1 + v0
      def var_in_else_clause():
        v2 = variable_scope.get_variable(
            "v2", [1], initializer=init_ops.constant_initializer(2))
        var_dict["v2"] = v2
        return v2 + v0
      add = control_flow_ops.cond(
          math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
      # Both branches are traced, so both variables exist.
      v1 = var_dict["v1"]
      v2 = var_dict["v2"]
      # We should be able to initialize and run v1 and v2 without initializing
      # v0, even if the variable was created with a control dep on v0.
      self.evaluate(v1.initializer)
      self.assertEqual([1], self.evaluate(v1))
      self.evaluate(v2.initializer)
      self.assertEqual([2], self.evaluate(v2))
      # v0 should still be uninitialized.
      with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
        sess.run(v0)
      # We should not be able to run 'add' yet.
      with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
        sess.run(add)
      # If we initialize v0 we should be able to run 'add'.
      self.evaluate(v0.initializer)
      sess.run(add)
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # TypeError: Expected tf.group() expected Tensor arguments not 'None' with
  # type '<type 'NoneType'>'.
  @test_util.run_in_graph_and_eager_modes
  def testGetVariableScope(self):
    """get_variable_scope() exposes the current scope for in-place mutation."""
    # Test the get_variable_scope() function and setting properties of result.
    init = init_ops.constant_initializer(0.3)
    with variable_scope.variable_scope("bar"):
      new_init1 = variable_scope.get_variable_scope().initializer
      self.assertEqual(new_init1, None)
      # Check that we can set initializer like this.
      variable_scope.get_variable_scope().set_initializer(init)
      v = variable_scope.get_variable("v", [])
      self.evaluate(variables_lib.variables_initializer([v]))
      self.assertAllClose(self.evaluate(v.value()), 0.3)
      if not context.executing_eagerly():
        # Check that we can set reuse.
        variable_scope.get_variable_scope().reuse_variables()
        with self.assertRaises(ValueError):  # Fail, w does not exist yet.
          variable_scope.get_variable("w", [1])
    # Check that the set initializer goes away.
    new_init = variable_scope.get_variable_scope().initializer
    self.assertEqual(new_init, None)
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScope(self):
    """variable_scope names nest; re-entry by object keeps the original name
    for variables while the surrounding name scope still nests."""
    with variable_scope.variable_scope("tower4") as tower:
      self.assertEqual(tower.name, "tower4")
      with ops.name_scope("scope") as sc:
        self.assertEqual(sc, "tower4/scope/")
    with variable_scope.variable_scope("tower5"):
      with variable_scope.variable_scope("bar") as bar:
        self.assertEqual(bar.name, "tower5/bar")
        with ops.name_scope("scope") as sc:
          self.assertEqual(sc, "tower5/bar/scope/")
    with variable_scope.variable_scope("tower6"):
      # Re-entering the captured scope: variable-scope name stays "tower4",
      # but the name scope reflects the current nesting.
      with variable_scope.variable_scope(tower, reuse=True) as tower_shared:
        self.assertEqual(tower_shared.name, "tower4")
        with ops.name_scope("scope") as sc:
          self.assertEqual(sc, "tower6/tower4/scope/")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScopeNameScope(self):
    """Re-entering a variable scope uniquifies its name scope (_1, _2, ...);
    re-entering the root scope leaves the name scope untouched."""
    with ops.name_scope("testVarScopeNameScope1"):
      with variable_scope.variable_scope("tower") as tower:
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "testVarScopeNameScope1/tower/scope2/")
      if not context.executing_eagerly():
        with variable_scope.variable_scope(
            tower):  # Re-entering acts like another "tower".
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "testVarScopeNameScope1/tower_1/scope2/")
        with variable_scope.variable_scope(
            "tower"):  # Re-entering by string acts the same.
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "testVarScopeNameScope1/tower_2/scope2/")
    with ops.name_scope("testVarScopeNameScope2"):
      with variable_scope.variable_scope("tower"):
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "testVarScopeNameScope2/tower/scope2/")
      if not context.executing_eagerly():
        with variable_scope.variable_scope(tower):
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "testVarScopeNameScope2/tower_1/scope2/")
    root_var_scope = variable_scope.get_variable_scope()
    with ops.name_scope("testVarScopeNameScope3"):
      # Re-entering the root variable scope adds no scope component.
      with variable_scope.variable_scope(root_var_scope):
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "testVarScopeNameScope3/scope2/")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScopeOriginalNameScope(self):
    """original_name_scope is fixed at creation time and survives re-entry."""
    with self.cached_session():
      with ops.name_scope("scope1"):
        with variable_scope.variable_scope("tower") as tower:
          self.assertEqual(tower.original_name_scope, "scope1/tower/")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "scope1/tower/scope2/")
      with ops.name_scope("scope2"):
        with variable_scope.variable_scope(tower) as tower1:
          # Re-entering preserves original name scope.
          self.assertEqual(tower1.original_name_scope, "scope1/tower/")
          with ops.name_scope("foo") as sc2:
            self.assertEqual(sc2, "scope2/tower/foo/")
          # Test re-entering original name scope.
          with ops.name_scope(tower.original_name_scope):
            with ops.name_scope("bar") as sc3:
              self.assertEqual(sc3, "scope1/tower/bar/")
      with ops.name_scope("scope2"):
        with variable_scope.variable_scope(tower):
          with ops.name_scope(tower.original_name_scope):
            # "bar" was used above, so this one is uniquified to bar_1.
            with ops.name_scope("bar") as sc3:
              self.assertEqual(sc3, "scope1/tower/bar_1/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeObjectReuse(self):
with self.cached_session():
vs = None
with variable_scope.variable_scope("jump", reuse=True) as scope:
vs = scope
with variable_scope.variable_scope(vs) as jump:
self.assertTrue(jump.reuse)
with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertTrue(jump_no_reuse.reuse) # Inherited, cannot be undone.
with variable_scope.variable_scope("jump", reuse=False) as scope:
vs = scope
with variable_scope.variable_scope(vs) as jump:
self.assertFalse(jump.reuse)
with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertFalse(jump_no_reuse.reuse)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeGetOrCreateReuse(self):
with self.cached_session():
def test_value(value):
x = constant_op.constant(value)
with variable_scope.variable_scope(
"testVarScopeGetOrCreateReuse_bar",
reuse=variable_scope.AUTO_REUSE):
_ = state_ops.assign(variable_scope.get_variable("var", []), x)
with variable_scope.variable_scope(
"testVarScopeGetOrCreateReuse_bar",
reuse=variable_scope.AUTO_REUSE):
_ = variable_scope.get_variable("var", [])
self.assertEqual(value, self.evaluate(x))
test_value(42.) # Variable is created.
test_value(13.) # Variable is reused hereafter.
test_value(17.)
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScope(self):
    """variable_scope(name, default_name): explicit names must be unique for
    variables, while None falls back to a uniquified default_name."""
    with self.cached_session():
      with ops.name_scope("testVarOpScope1"):
        with variable_scope.variable_scope("tower", "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "tower/w:0")
          with ops.name_scope("testVarOpScope2") as sc2:
            self.assertEqual(sc2, "testVarOpScope1/tower/testVarOpScope2/")
        with variable_scope.variable_scope("tower", "default", []):
          # Same explicit name without reuse: creating "w" again must fail.
          with self.assertRaises(ValueError):
            variable_scope.get_variable("w", [])
          with ops.name_scope("testVarOpScope2") as sc2:
            self.assertEqual(sc2, "testVarOpScope1/tower_1/testVarOpScope2/")
      with ops.name_scope("testVarOpScope2"):
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "default/w:0")
          with ops.name_scope("testVarOpScope2") as sc2:
            self.assertEqual(sc2, "testVarOpScope2/default/testVarOpScope2/")
        with variable_scope.variable_scope(None, "default", []):
          # Second use of the default name is uniquified to default_1.
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "default_1/w:0")
          with ops.name_scope("testVarOpScope2") as sc2:
            self.assertEqual(sc2, "testVarOpScope2/default_1/testVarOpScope2/")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeUniqueNamesInterleavedSubstringScopes(self):
    """Default-name uniquification is per exact name, not per prefix:
    "defaultScope" does not consume a suffix from "defaultScope1"."""
    with self.cached_session():
      with variable_scope.variable_scope(None, "defaultScope1"):
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "defaultScope1/layer/w:0")
      with variable_scope.variable_scope(None, "defaultScope1"):
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "defaultScope1_1/layer/w:0")
      # A different (substring) default name gets its own counter.
      with variable_scope.variable_scope(None, "defaultScope"):
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "defaultScope/layer/w:0")
      with variable_scope.variable_scope(None, "defaultScope1"):
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "defaultScope1_2/layer/w:0")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeUniqueNamesWithJump(self):
    """Default-name numbering continues after a jump through the parent scope."""
    with self.cached_session():
      with variable_scope.variable_scope("default") as default:
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "default/layer/w:0")
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "default/layer_1/w:0")
        with variable_scope.variable_scope(default):
          pass
        # No matter the jump in the middle, unique numbering continues.
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "default/layer_2/w:0")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeReuse(self):
    """With reuse=True variable names are shared while the enclosing name
    scope is still uniquified (outer -> outer_1)."""
    with self.cached_session():
      with variable_scope.variable_scope("outer") as outer:
        with variable_scope.variable_scope("tower", "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/tower/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/tower/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/default/scope2/")
      with variable_scope.variable_scope(outer, reuse=True) as outer:
        with variable_scope.variable_scope("tower", "default", []):
          # Same variable as before; only the op name scope differs.
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/tower/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/tower/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testVarScopeGetVar(self):
with self.cached_session():
with variable_scope.variable_scope("root"):
with variable_scope.variable_scope("towerA") as tower_a:
va = variable_scope.get_variable("v", [1])
self.assertEqual(va.name, "root/towerA/v:0")
with variable_scope.variable_scope(tower_a, reuse=True):
va2 = variable_scope.get_variable("v", [1])
self.assertEqual(va2, va)
with variable_scope.variable_scope("towerB"):
vb = variable_scope.get_variable("v", [1])
self.assertEqual(vb.name, "root/towerB/v:0")
with self.assertRaises(ValueError):
with variable_scope.variable_scope("towerA"):
va2 = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope("towerA", reuse=True):
va2 = variable_scope.get_variable("v", [1])
self.assertEqual(va2, va)
with variable_scope.variable_scope("foo"):
with variable_scope.variable_scope("bar"):
v = variable_scope.get_variable("v", [1])
self.assertEqual(v.name, "root/foo/bar/v:0")
with variable_scope.variable_scope(tower_a, reuse=True):
va3 = variable_scope.get_variable("v", [1])
self.assertEqual(va, va3)
with self.assertRaises(ValueError):
with variable_scope.variable_scope(tower_a, reuse=True):
with variable_scope.variable_scope("baz"):
variable_scope.get_variable("v", [1])
with self.assertRaises(ValueError) as exc:
with variable_scope.variable_scope(tower_a, reuse=True):
variable_scope.get_variable("v", [2]) # Different shape.
self.assertEqual("shape" in str(exc.exception), True)
with self.assertRaises(ValueError) as exc:
with variable_scope.variable_scope(tower_a, reuse=True):
variable_scope.get_variable("v", [1], dtype=dtypes.int32)
self.assertEqual("dtype" in str(exc.exception), True)
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScopeOuterScope(self):
    """Re-entering an outer scope keeps variable names while the op name
    scope is uniquified on each entry (outer_1, outer_2, ...)."""
    with self.cached_session():
      with variable_scope.variable_scope("outer") as outer:
        pass
      with variable_scope.variable_scope(outer):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/scope2/")
        with variable_scope.variable_scope("default"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")
      with variable_scope.variable_scope(outer, reuse=True):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_2/scope2/")
        with variable_scope.variable_scope("default", reuse=True):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_2/default/scope2/")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScopeNestedOuterScope(self):
    """Re-entering a scope while inside it nests name scopes (outer/outer)."""
    with self.cached_session():
      with variable_scope.variable_scope("outer") as outer:
        with variable_scope.variable_scope(outer):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/outer/scope2/")
        with variable_scope.variable_scope("default"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/default/scope2/")
        with variable_scope.variable_scope(outer, reuse=True):
          # Second nested self-entry uniquifies the inner name scope.
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/outer_1/scope2/")
        with variable_scope.variable_scope("default", reuse=True):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/default_1/scope2/")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeReuseParam(self):
    """Reuse can come from the inner scope's reuse param or from calling
    reuse_variables() on the outer scope object."""
    with self.cached_session():
      with variable_scope.variable_scope("outer") as outer:
        with variable_scope.variable_scope("tower", "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/tower/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/tower/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/default/scope2/")
      with variable_scope.variable_scope(outer) as outer:
        with variable_scope.variable_scope("tower", "default", reuse=True):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/tower/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/tower/scope2/")
        # From here on, everything under `outer` reuses.
        outer.reuse_variables()
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeReuseError(self):
    """reuse=True with only a default_name raises: nothing exists to reuse."""
    with self.cached_session():
      with self.assertRaises(ValueError):
        with variable_scope.variable_scope(None, "default", reuse=True):
          # Never reached; the scope entry itself must raise.
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/tower/w:0")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeOuterScope(self):
    """Passing a scope object together with a default_name re-enters the
    scope; the default_name is ignored for the variable-scope name."""
    with self.cached_session():
      with variable_scope.variable_scope("outer") as outer:
        pass
      with variable_scope.variable_scope(outer, "default", []):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")
      with variable_scope.variable_scope(outer, "default", reuse=True):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_2/scope2/")
        outer.reuse_variables()
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_2/default/scope2/")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeNestedOuterScope(self):
    """Scope-object re-entry with a default_name, both nested inside the
    scope itself and from outside it."""
    with self.cached_session():
      with variable_scope.variable_scope("outer") as outer:
        with variable_scope.variable_scope(outer, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/outer/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/default/scope2/")
      with variable_scope.variable_scope(outer, "default", reuse=True):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testBasicWhenAuxiliaryNameScopeIsFalse(self):
    """auxiliary_name_scope=False: variables get the scope prefix but ops do
    not, and no name scope is consumed (original_name_scope stays empty)."""
    with self.cached_session():
      with variable_scope.variable_scope(
          "scope", auxiliary_name_scope=False) as scope:
        self.assertEqual(scope.original_name_scope, "")
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "scope/w:0")
        # Ops are NOT placed under "scope/".
        self.assertEqual(constant_op.constant([], name="c").name, "c:0")
      with variable_scope.variable_scope(scope, auxiliary_name_scope=False):
        self.assertEqual(scope.original_name_scope, "")
        self.assertEqual(
            variable_scope.get_variable("w1", []).name, "scope/w1:0")
        self.assertEqual(constant_op.constant([], name="c1").name, "c1:0")
      # Recheck: new name scope is NOT created before
      with ops.name_scope("scope"):
        self.assertEqual(constant_op.constant([], name="c").name, "scope/c:0")
      with variable_scope.variable_scope("outer"):
        with variable_scope.variable_scope(
            "inner", auxiliary_name_scope=False) as inner:
          self.assertEqual(inner.original_name_scope, "outer/")
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/inner/w:0")
          # Ops stay at the enclosing "outer/" level.
          self.assertEqual(
              constant_op.constant([], name="c").name, "outer/c:0")
        with variable_scope.variable_scope(
            inner, auxiliary_name_scope=False) as inner1:
          self.assertEqual(inner1.original_name_scope, "outer/")
          self.assertEqual(
              variable_scope.get_variable("w1", []).name, "outer/inner/w1:0")
          self.assertEqual(
              constant_op.constant([], name="c1").name, "outer/c1:0")
        # Recheck: new name scope is NOT created before
        with ops.name_scope("inner"):
          self.assertEqual(
              constant_op.constant([], name="c").name, "outer/inner/c:0")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testCreatedByDefaultNameWhenAuxiliaryNameScopeIsFalse(self):
    """default_name plus auxiliary_name_scope=False behaves like an explicit
    name: variables are prefixed, ops are not, no name scope is consumed."""
    with self.cached_session():
      with variable_scope.variable_scope(
          None, default_name="default", auxiliary_name_scope=False) as scope:
        self.assertEqual(scope.original_name_scope, "")
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "default/w:0")
        self.assertEqual(constant_op.constant([], name="c").name, "c:0")
      # Recheck: new name scope is NOT created before
      with ops.name_scope("default"):
        self.assertEqual(
            constant_op.constant([], name="c").name, "default/c:0")
      with variable_scope.variable_scope("outer"):
        with variable_scope.variable_scope(
            None, default_name="default",
            auxiliary_name_scope=False) as inner:
          self.assertEqual(inner.original_name_scope, "outer/")
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          self.assertEqual(
              constant_op.constant([], name="c").name, "outer/c:0")
        # Recheck: new name scope is NOT created before
        with ops.name_scope("default"):
          self.assertEqual(
              constant_op.constant([], name="c").name, "outer/default/c:0")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testReenterRootScopeWhenAuxiliaryNameScopeIsFalse(self):
    """Re-entering the root scope with auxiliary_name_scope=False."""
    with self.cached_session():
      root_scope = variable_scope.get_variable_scope()
      with variable_scope.variable_scope(
          root_scope, auxiliary_name_scope=False) as scope:
        self.assertEqual(scope.original_name_scope, "")
        self.assertEqual(variable_scope.get_variable("w", []).name, "w:0")
        self.assertEqual(constant_op.constant([], name="c").name, "c:0")
      with variable_scope.variable_scope("outer"):
        with variable_scope.variable_scope(
            root_scope, auxiliary_name_scope=False) as inner:
          # Variables go to the root, but ops still land in "outer".
          self.assertEqual(inner.original_name_scope, "")
          self.assertEqual(variable_scope.get_variable("w1", []).name, "w1:0")
          self.assertEqual(
              constant_op.constant([], name="c1").name, "outer/c1:0")
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testAuxiliaryNameScopeIsInvalid(self):
    """Non-boolean auxiliary_name_scope values raise TypeError."""
    with self.cached_session():
      # Invalid with a default_name.
      with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
        with variable_scope.variable_scope(
            None, default_name="scope", auxiliary_name_scope="invalid"):
          pass
      # Invalid with an explicit name.
      with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
        with variable_scope.variable_scope(
            "scope", auxiliary_name_scope="invalid"):
          pass
      with variable_scope.variable_scope("scope") as scope:
        pass
      # Invalid when re-entering an existing VariableScope object.
      with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
        with variable_scope.variable_scope(
            scope, auxiliary_name_scope="invalid"):
          pass
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testReuseScopeWithoutNameScopeCollision(self):
    """Reusing a scope without its name scope avoids op-name collisions."""
    # Github issue: #13429
    with self.cached_session():
      with variable_scope.variable_scope("outer"):
        with variable_scope.variable_scope("inner") as inner:
          pass
      with variable_scope.variable_scope(
          inner, auxiliary_name_scope=False) as scope:
        with ops.name_scope(scope.original_name_scope):
          # Manually re-entering the original name scope restores the
          # "outer/inner" prefix for both variables and ops.
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/inner/w:0")
          self.assertEqual(
              constant_op.constant([], name="c").name, "outer/inner/c:0")
        with ops.name_scope("inner"):
          # "inner" was never consumed at the top level, so no "_1" suffix.
          self.assertEqual(
              constant_op.constant([], name="c").name, "inner/c:0")
      with variable_scope.variable_scope("another"):
        with variable_scope.variable_scope(
            inner, auxiliary_name_scope=False) as scope1:
          with ops.name_scope(scope1.original_name_scope):
            self.assertEqual(
                variable_scope.get_variable("w1", []).name,
                "outer/inner/w1:0")
            self.assertEqual(
                constant_op.constant([], name="c1").name, "outer/inner/c1:0")
        with ops.name_scope("inner"):
          self.assertEqual(
              constant_op.constant([], name="c").name, "another/inner/c:0")
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # obtaining different results in the eager case compared to the graph one
  # (different assertions failing after wrapping, in both execution modes)
  @test_util.run_in_graph_and_eager_modes
  def testGetLocalVar(self):
    """get_local_variable naming, collection membership, and reuse."""
    # Check that local variable respects naming.
    with variable_scope.variable_scope("outer") as outer:
      with variable_scope.variable_scope(outer, "default", []):
        local_var = variable_scope.get_local_variable(
            "w", [], collections=["foo"])
        self.assertEqual(local_var.name, "outer/w:0")
    if not context.executing_eagerly():
      # Since variable is local, it should be in the local variable collection
      # but not the trainable collection.
      self.assertIn(local_var,
                    ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES))
      self.assertIn(local_var, ops.get_collection("foo"))
      self.assertNotIn(local_var,
                       ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
    # Check that local variable respects `reuse`.
    with variable_scope.variable_scope(outer, "default", reuse=True):
      self.assertEqual(
          variable_scope.get_local_variable("w", []).name, "outer/w:0")
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testSignatureGetVarVsGetLocalVar(self):
"""get_{local,}variable() must take the same list of args."""
arg_names = tf_inspect.getargspec(variable_scope.get_variable)[0]
local_arg_names = tf_inspect.getargspec(
variable_scope.get_local_variable)[0]
self.assertEqual(arg_names, local_arg_names)
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testGetVarWithDevice(self):
    """Device functions observe variable-creating ops and their dtypes."""
    g = ops.Graph()
    varname_type = []
    def device_func(op):
      # Record (name, dtype) for every variable-creating op seen while
      # placing ops on a device.
      if op.type in ["Variable", "VariableV2", "VarHandleOp"]:
        varname_type.append((op.name, op.get_attr("dtype")))
      return "/device:GPU:0"
    with g.as_default():
      with ops.device(device_func):
        _ = variable_scope.get_variable("x", (100, 200))
        _ = variable_scope.get_variable(
            "y", dtype=dtypes.int64, initializer=numpy.arange(73))
    self.assertEqual(varname_type[0], ("x", dtypes.float32))
    self.assertEqual(varname_type[1], ("y", dtypes.int64))
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # obtaining different results in the eager case compared to the graph one
  def testGetCollection(self):
    """scope.get_collection filters graph collections by scope name."""
    with self.cached_session():
      _ = variable_scope.get_variable("testGetCollection_a", [])
      _ = variable_scope.get_variable(
          "testGetCollection_b", [], trainable=False)
      # Note the trailing underscore: "foo_" must not match "foo" below.
      with variable_scope.variable_scope("testGetCollection_foo_") as scope1:
        _ = variable_scope.get_variable("testGetCollection_a", [])
        _ = variable_scope.get_variable(
            "testGetCollection_b", [], trainable=False)
        self.assertEqual([
            v.name
            for v in scope1.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
        ], ["testGetCollection_foo_/testGetCollection_a:0"])
        self.assertEqual([
            v.name
            for v in scope1.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
        ], [
            "testGetCollection_foo_/testGetCollection_a:0",
            "testGetCollection_foo_/testGetCollection_b:0"
        ])
      with variable_scope.variable_scope("testGetCollection_foo") as scope2:
        _ = variable_scope.get_variable("testGetCollection_a", [])
        _ = variable_scope.get_variable(
            "testGetCollection_b", [], trainable=False)
        self.assertEqual([
            v.name
            for v in scope2.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
        ], ["testGetCollection_foo/testGetCollection_a:0"])
        self.assertEqual([
            v.name
            for v in scope2.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
        ], [
            "testGetCollection_foo/testGetCollection_a:0",
            "testGetCollection_foo/testGetCollection_b:0"
        ])
      # The root scope sees everything created above.
      scope = variable_scope.get_variable_scope()
      self.assertEqual([
          v.name for v in scope.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      ], [
          "testGetCollection_a:0", "testGetCollection_b:0",
          "testGetCollection_foo_/testGetCollection_a:0",
          "testGetCollection_foo_/testGetCollection_b:0",
          "testGetCollection_foo/testGetCollection_a:0",
          "testGetCollection_foo/testGetCollection_b:0"
      ])
      self.assertEqual([
          v.name
          for v in scope.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      ], [
          "testGetCollection_a:0",
          "testGetCollection_foo_/testGetCollection_a:0",
          "testGetCollection_foo/testGetCollection_a:0"
      ])
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # obtaining different results in the eager case compared to the graph one
  def testGetTrainableVariablesWithGetVariable(self):
    """Trainability of get_variable results under synchronization modes."""
    with self.cached_session():
      _ = variable_scope.get_variable("testGetTrainableVariables_a", [])
      with variable_scope.variable_scope(
          "testGetTrainableVariables_foo") as scope:
        _ = variable_scope.get_variable("testGetTrainableVariables_b", [])
        _ = variable_scope.get_variable(
            "testGetTrainableVariables_c", [], trainable=False)
        # sync `ON_READ` sets trainable=False
        _ = variable_scope.get_variable(
            "testGetTrainableVariables_d", [],
            synchronization=variable_scope.VariableSynchronization.ON_READ)
        self.assertEqual(
            [v.name for v in scope.trainable_variables()],
            ["testGetTrainableVariables_foo/testGetTrainableVariables_b:0"])
        # All other sync values sets trainable=True
        _ = variable_scope.get_variable(
            "testGetTrainableVariables_e", [],
            synchronization=variable_scope.VariableSynchronization.ON_WRITE)
        self.assertEqual([v.name for v in scope.trainable_variables()], [
            "testGetTrainableVariables_foo/testGetTrainableVariables_b:0",
            "testGetTrainableVariables_foo/testGetTrainableVariables_e:0"
        ])
      # Explicit trainable=True combined with ON_READ is contradictory.
      with self.assertRaisesRegexp(
          ValueError, "Synchronization value can be set to "
          "VariableSynchronization.ON_READ only for non-trainable variables. "
          "You have specified trainable=True and "
          "synchronization=VariableSynchronization.ON_READ."):
        _ = variable_scope.get_variable(
            "testGetTrainableVariables_e", [],
            synchronization=variable_scope.VariableSynchronization.ON_READ,
            trainable=True)
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # obtaining different results in the eager case compared to the graph one
  def testGetTrainableVariablesWithVariable(self):
    """Same as the get_variable test above, but via variable_scope.variable."""
    with self.cached_session():
      _ = variable_scope.variable(1.0, name="testGetTrainableVariables_a")
      with variable_scope.variable_scope(
          "testGetTrainableVariables_foo") as scope:
        _ = variable_scope.variable(1.0, name="testGetTrainableVariables_b")
        _ = variable_scope.variable(
            1.0, name="testGetTrainableVariables_c", trainable=False)
        # sync `ON_READ` sets trainable=False
        _ = variable_scope.variable(
            1.0,
            name="testGetTrainableVariables_d",
            synchronization=variable_scope.VariableSynchronization.ON_READ)
        self.assertEqual(
            [v.name for v in scope.trainable_variables()],
            ["testGetTrainableVariables_foo/testGetTrainableVariables_b:0"])
        # All other sync values sets trainable=True
        _ = variable_scope.variable(
            1.0,
            name="testGetTrainableVariables_e",
            synchronization=variable_scope.VariableSynchronization.ON_WRITE)
        self.assertEqual([v.name for v in scope.trainable_variables()], [
            "testGetTrainableVariables_foo/testGetTrainableVariables_b:0",
            "testGetTrainableVariables_foo/testGetTrainableVariables_e:0"
        ])
      # Explicit trainable=True combined with ON_READ is contradictory.
      with self.assertRaisesRegexp(
          ValueError, "Synchronization value can be set to "
          "VariableSynchronization.ON_READ only for non-trainable variables. "
          "You have specified trainable=True and "
          "synchronization=VariableSynchronization.ON_READ."):
        _ = variable_scope.variable(
            1.0,
            name="testGetTrainableVariables_e",
            synchronization=variable_scope.VariableSynchronization.ON_READ,
            trainable=True)
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # obtaining different results in the eager case compared to the graph one
  def testGetGlobalVariables(self):
    """scope.global_variables() only returns variables inside that scope."""
    with self.cached_session():
      _ = variable_scope.get_variable("testGetGlobalVariables_a", [])
      with variable_scope.variable_scope("testGetGlobalVariables_foo") as scope:
        _ = variable_scope.get_variable("testGetGlobalVariables_b", [])
        # The root-level "_a" variable must not appear here.
        self.assertEqual(
            [v.name for v in scope.global_variables()],
            ["testGetGlobalVariables_foo/"
             "testGetGlobalVariables_b:0"])
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # obtaining different results in the eager case compared to the graph one
  def testGetLocalVariables(self):
    """scope.local_variables() returns only LOCAL_VARIABLES of the scope."""
    with self.cached_session():
      _ = variable_scope.get_variable(
          "a", [], collections=[ops.GraphKeys.LOCAL_VARIABLES])
      with variable_scope.variable_scope("foo") as scope:
        _ = variable_scope.get_variable(
            "b", [], collections=[ops.GraphKeys.LOCAL_VARIABLES])
        # "c" is a regular (global) variable and must be excluded.
        _ = variable_scope.get_variable("c", [])
        self.assertEqual([v.name for v in scope.local_variables()], ["foo/b:0"])
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testGetVariableWithRefDtype(self):
    """get_variable accepts a dtype taken from an existing variable."""
    v = variable_scope.get_variable("v", shape=[3, 4], dtype=dtypes.float32)
    # Ensure it is possible to do get_variable with a _ref dtype passed in.
    _ = variable_scope.get_variable("w", shape=[5, 6], dtype=v.dtype)
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testTwoGraphs(self):
    """"_" is rejected as a scope name, even across nested default graphs."""
    def f():
      g1 = ops.Graph()
      g2 = ops.Graph()
      with g1.as_default():
        with g2.as_default():
          with variable_scope.variable_scope("_"):
            pass
    self.assertRaisesRegexp(ValueError, "'_' is not a valid scope name", f)
def axis0_into1_partitioner(shape=None, **unused_kwargs):
  """Degenerate partitioner: a single partition along every axis."""
  return [1 for _ in shape]
def axis0_into2_partitioner(shape=None, **unused_kwargs):
  """Partitioner splitting axis 0 into two parts; other axes stay whole."""
  num_parts_per_axis = [1 for _ in shape]
  num_parts_per_axis[0] = 2
  return num_parts_per_axis
def axis0_into3_partitioner(shape=None, **unused_kwargs):
  """Partitioner splitting axis 0 into three parts; other axes stay whole."""
  num_parts_per_axis = [1 for _ in shape]
  num_parts_per_axis[0] = 3
  return num_parts_per_axis
class VariableScopeWithPartitioningTest(test.TestCase):
  """Tests for variable_scope interacting with variable partitioners."""
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # obtaining different results in the eager case compared to the graph one
  def testResultNameMatchesRequested(self):
    """A partitioned variable keeps the requested name; parts get suffixes."""
    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into2_partitioner):
      v = variable_scope.get_variable("name0", shape=(3, 1, 1))
      self.assertEqual(v.name, "scope0/name0")
      v_concat = v.as_tensor()
      self.assertEqual(v_concat.name, "scope0/name0:0")
      variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      # Exactly two parts for a two-way partition.
      self.assertIn("scope0/name0/part_0:0", [x.name for x in variables])
      self.assertIn("scope0/name0/part_1:0", [x.name for x in variables])
      self.assertNotIn("scope0/name0/part_2:0", [x.name for x in variables])
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testBreaksIfPartitioningChanges(self):
    """Reuse with a different partitioning must fail."""
    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into2_partitioner):
      variable_scope.get_variable("name0", shape=(3, 1, 1))
    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into3_partitioner, reuse=True):
      with self.assertRaisesRegexp(
          ValueError,
          "Trying to reuse partitioned variable .* but specified partitions "
          ".* and found partitions .*"):
        variable_scope.get_variable("name0", shape=(3, 1, 1))
    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into1_partitioner, reuse=True):
      with self.assertRaisesRegexp(
          ValueError,
          "Trying to reuse partitioned variable .* but specified partitions "
          ".* and found partitions .*"):
        variable_scope.get_variable("name0", shape=(3, 1, 1))
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testReturnsExistingConcatenatedValueIfReuse(self):
    """Reusing a partitioned variable returns the same object."""
    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into2_partitioner):
      v_concat = variable_scope.get_variable("name0", shape=(3, 1, 1))
      variable_scope.get_variable_scope().reuse_variables()
      v_concat_2 = variable_scope.get_variable("name0", shape=(3, 1, 1))
      self.assertEqual(v_concat, v_concat_2)
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testAllowsReuseWithoutPartitioner(self):
    """Reuse works even when the reusing scope has no partitioner."""
    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into2_partitioner):
      v = variable_scope.get_variable("name0", shape=(3, 1, 1))
    with variable_scope.variable_scope("scope0", reuse=True):
      v_reused = variable_scope.get_variable("name0")
    self.assertEqual(v, v_reused)
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testPropagatePartitionerOnReopening(self):
    """Reopening a scope keeps its partitioner."""
    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into2_partitioner) as vs:
      self.assertEqual(axis0_into2_partitioner, vs.partitioner)
      with variable_scope.variable_scope(vs) as vs1:
        self.assertEqual(axis0_into2_partitioner, vs1.partitioner)
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # obtaining different results in the eager case compared to the graph one
  def testScalarIgnoresPartitioner(self):
    """Scalar variables are never partitioned."""
    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into2_partitioner):
      v = variable_scope.get_variable("name0", shape=())
      self.assertEqual(v.name, "scope0/name0:0")
      variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      self.assertIn("scope0/name0:0", [x.name for x in variables])
  def _testPartitionConcatenatesAlongCorrectAxis(self, use_resource):
    """Helper: partitions along axis 0 and 1 produce correctly-shaped parts."""
    def _part_axis_0(**unused_kwargs):
      return (2, 1, 1)
    def _part_axis_1(**unused_kwargs):
      return (1, 2, 1)
    with variable_scope.variable_scope("root", use_resource=use_resource):
      v0 = variable_scope.get_variable(
          "n0", shape=(2, 2, 2), partitioner=_part_axis_0)
      v1 = variable_scope.get_variable(
          "n1", shape=(2, 2, 2), partitioner=_part_axis_1)
    # The concatenated view always has the full requested shape.
    self.assertEqual(v0.get_shape(), (2, 2, 2))
    self.assertEqual(v1.get_shape(), (2, 2, 2))
    n0_0 = list(v0)[0]
    n0_1 = list(v0)[1]
    self.assertEqual(n0_0.get_shape(), (1, 2, 2))
    self.assertEqual(n0_1.get_shape(), (1, 2, 2))
    n1_0 = list(v1)[0]
    n1_1 = list(v1)[1]
    self.assertEqual(n1_0.get_shape(), (2, 1, 2))
    self.assertEqual(n1_1.get_shape(), (2, 1, 2))
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testPartitionConcatenatesAlongCorrectAxis(self):
    self._testPartitionConcatenatesAlongCorrectAxis(use_resource=False)
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testPartitionConcatenatesAlongCorrectAxisResource(self):
    self._testPartitionConcatenatesAlongCorrectAxis(use_resource=True)
class VariableScopeWithCustomGetterTest(test.TestCase):
  """Tests for custom getters and variable creators in variable_scope."""
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testNonCallableGetterFails(self):
    """A non-callable custom_getter raises ValueError."""
    with self.assertRaisesRegexp(ValueError,
                                 r"custom_getter .* not callable:"):
      with variable_scope.variable_scope("scope0", custom_getter=3):
        variable_scope.get_variable("name0")
    with self.assertRaisesRegexp(ValueError,
                                 r"custom_getter .* not callable:"):
      variable_scope.get_variable("name0", custom_getter=3)
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testNoSideEffectsWithIdentityCustomGetter(self):
    """An identity custom_getter does not change variable sharing."""
    called = [0]
    def custom_getter(getter, *args, **kwargs):
      called[0] += 1
      return getter(*args, **kwargs)
    with variable_scope.variable_scope(
        "scope", custom_getter=custom_getter) as scope:
      v = variable_scope.get_variable("v", [1])
    with variable_scope.variable_scope(scope, reuse=True):
      v2 = variable_scope.get_variable("v", [1])
    with variable_scope.variable_scope("new_scope") as new_scope:
      v3 = variable_scope.get_variable("v3", [1])
    with variable_scope.variable_scope(
        new_scope, reuse=True, custom_getter=custom_getter):
      v4 = variable_scope.get_variable("v3", [1])
    self.assertEqual(v, v2)
    self.assertEqual(v3, v4)
    self.assertEqual(3, called[0])  # skipped one in the first new_scope
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testSynchronizationAndAggregationWithCustomGetter(self):
    """synchronization/aggregation kwargs are forwarded to custom getters."""
    called = [0]
    synchronization = variable_scope.VariableSynchronization.AUTO
    aggregation = variable_scope.VariableAggregation.NONE
    def custom_getter(getter, *args, **kwargs):
      called[0] += 1
      # Verify synchronization and aggregation kwargs are as expected.
      self.assertEqual(kwargs["synchronization"], synchronization)
      self.assertEqual(kwargs["aggregation"], aggregation)
      return getter(*args, **kwargs)
    with variable_scope.variable_scope("scope", custom_getter=custom_getter):
      variable_scope.get_variable("v", [1])
    self.assertEqual(1, called[0])
    with variable_scope.variable_scope("scope", custom_getter=custom_getter):
      synchronization = variable_scope.VariableSynchronization.ON_READ
      aggregation = variable_scope.VariableAggregation.MEAN
      variable_scope.get_variable(
          "v1", [1], synchronization=synchronization, aggregation=aggregation)
    self.assertEqual(2, called[0])
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testCustomGetterWithReuse(self):
    """Custom getters can observe the `reuse` kwarg."""
    # Custom getter can choose to behave differently on reused variables.
    def custom_getter(getter, *args, **kwargs):
      var = getter(*args, **kwargs)
      if kwargs["reuse"]:
        # This can be used, e.g., for changing the caching device if needed.
        return array_ops.identity(var, name="reused")
      else:
        return array_ops.identity(var, name="not_reused")
    with variable_scope.variable_scope(
        "scope", custom_getter=custom_getter) as scope:
      v = variable_scope.get_variable("v", [1])
    with variable_scope.variable_scope(scope, reuse=True):
      v2 = variable_scope.get_variable("v", [1])
    self.assertEqual(v.name, "not_reused:0")
    self.assertEqual(v2.name, "reused:0")
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # ValueError: Fetch argument <tf.Tensor 'custom_getter/add:0' shape=(1, 2, 3)
  # dtype=float32> cannot be interpreted as a Tensor. (Tensor
  # Tensor("custom_getter/add:0", shape=(1, 2, 3), dtype=float32) is not an
  # element of this graph.)
  def testGetterThatCreatesTwoVariablesAndSumsThem(self):
    """A custom getter may create several variables and return a tensor."""
    def custom_getter(getter, name, *args, **kwargs):
      g_0 = getter("%s/0" % name, *args, **kwargs)
      g_1 = getter("%s/1" % name, *args, **kwargs)
      with ops.name_scope("custom_getter"):
        return g_0 + g_1
    with variable_scope.variable_scope("scope", custom_getter=custom_getter):
      v = variable_scope.get_variable("v", [1, 2, 3])
    self.assertEqual([1, 2, 3], v.get_shape())
    true_vars = variables_lib.trainable_variables()
    self.assertEqual(2, len(true_vars))
    self.assertEqual("scope/v/0:0", true_vars[0].name)
    self.assertEqual("scope/v/1:0", true_vars[1].name)
    self.assertEqual("custom_getter/add:0", v.name)
    with self.cached_session() as sess:
      variables_lib.global_variables_initializer().run()
      np_vars, np_v = sess.run([true_vars, v])
      self.assertAllClose(np_v, sum(np_vars))
  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # ValueError: Fetch argument <tf.Tensor 'sum_getter_2/add:0' shape=(1, 2, 3)
  # dtype=float32> cannot be interpreted as a Tensor. (Tensor
  # Tensor("sum_getter_2/add:0", shape=(1, 2, 3), dtype=float32) is not an
  # element of this graph.)
  def testNestedCustomGetters(self):
    """Nested custom getters compose outermost-last."""
    def sum_getter(getter, name, *args, **kwargs):
      g_0 = getter("%s/sum_0" % name, *args, **kwargs)
      g_1 = getter("%s/sum_1" % name, *args, **kwargs)
      with ops.name_scope("sum_getter"):
        return g_0 + g_1
    def prod_getter(getter, name, *args, **kwargs):
      g_0 = getter("%s/prod_0" % name, *args, **kwargs)
      g_1 = getter("%s/prod_1" % name, *args, **kwargs)
      with ops.name_scope("prod_getter"):
        return g_0 * g_1
    with variable_scope.variable_scope("prod_scope", custom_getter=prod_getter):
      with variable_scope.variable_scope("sum_scope", custom_getter=sum_getter):
        with variable_scope.variable_scope(
            "inner_sum_scope", custom_getter=sum_getter):
          # take sums of sums of products
          v = variable_scope.get_variable("v", [1, 2, 3])
    self.assertEqual([1, 2, 3], v.get_shape())
    true_vars = variables_lib.trainable_variables()
    self.assertEqual(8, len(true_vars))
    template = (
        "prod_scope/sum_scope/inner_sum_scope/v/sum_%d/sum_%d/prod_%d:0")
    self.assertEqual(template % (0, 0, 0), true_vars[0].name)
    self.assertEqual(template % (0, 0, 1), true_vars[1].name)
    self.assertEqual(template % (0, 1, 0), true_vars[2].name)
    self.assertEqual(template % (0, 1, 1), true_vars[3].name)
    self.assertEqual(template % (1, 0, 0), true_vars[4].name)
    self.assertEqual(template % (1, 0, 1), true_vars[5].name)
    self.assertEqual(template % (1, 1, 0), true_vars[6].name)
    self.assertEqual(template % (1, 1, 1), true_vars[7].name)
    with self.cached_session() as sess:
      variables_lib.global_variables_initializer().run()
      np_vars, np_v = sess.run([true_vars, v])
      # take products of sums of products
      self.assertAllClose(
          np_v, (((np_vars[0] * np_vars[1]) + (np_vars[2] * np_vars[3])) + (
              (np_vars[4] * np_vars[5]) + (np_vars[6] * np_vars[7]))))
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVariableCreator(self):
    """variable_creator_scope observes and may rewrite creation kwargs."""
    variable_names = []
    def creator_a(next_creator, **kwargs):
      variable_names.append(kwargs.get("name", ""))
      return next_creator(**kwargs)
    def creator_b(next_creator, **kwargs):
      kwargs["name"] = "forced_name"
      return next_creator(**kwargs)
    with variable_scope.variable_creator_scope(creator_a):
      with variable_scope.variable_creator_scope(creator_b):
        variable_scope.variable(1.0, name="one_name")
    # creator_b runs first (innermost), so creator_a sees the forced name.
    self.assertAllEqual(variable_names, ["forced_name"])
    called = [False]
    # Renamed from the original misspelling "creater_c" for consistency
    # with creator_a / creator_b above.
    def creator_c(next_creator, **kwargs):
      called[0] = True
      self.assertEqual(kwargs["synchronization"],
                       variable_scope.VariableSynchronization.ON_WRITE)
      self.assertEqual(kwargs["aggregation"],
                       variable_scope.VariableAggregation.MEAN)
      return next_creator(**kwargs)
    with variable_scope.variable_creator_scope(creator_c):
      variable_scope.get_variable(
          "v", [],
          synchronization=variable_scope.VariableSynchronization.ON_WRITE,
          aggregation=variable_scope.VariableAggregation.MEAN)
    self.assertTrue(called[0])
class PartitionInfoTest(test.TestCase):
  """Tests for the internal variable_scope._PartitionInfo helper."""
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testConstructorChecks(self):
    """Constructor validates types, lengths, and offset bounds."""
    # Invalid arg types.
    with self.assertRaises(TypeError):
      variable_scope._PartitionInfo(full_shape=None, var_offset=[0, 1])
    with self.assertRaises(TypeError):
      variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=None)
    with self.assertRaises(TypeError):
      variable_scope._PartitionInfo(full_shape="foo", var_offset=[0, 1])
    with self.assertRaises(TypeError):
      variable_scope._PartitionInfo(full_shape=[0, 1], var_offset="foo")
    # full_shape and var_offset must have same length.
    with self.assertRaises(ValueError):
      variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=[0])
    # Offset must always be less than shape.
    with self.assertRaises(ValueError):
      variable_scope._PartitionInfo(full_shape=[1, 1], var_offset=[0, 1])
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testSingleOffset(self):
    """single_offset returns the offset along the single sliced dimension."""
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[4, 0])
    self.assertEqual(4, partition_info.single_offset([1, 3]))
    # Tests when the variable isn't partitioned at all.
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[0, 0])
    self.assertEqual(0, partition_info.single_offset([9, 3]))
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testSingleSliceDim(self):
    """single_slice_dim validates shapes and finds the unique sliced axis."""
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[4, 0])
    # Invalid shape.
    with self.assertRaises(TypeError):
      partition_info.single_slice_dim(None)
    # Rank of shape differs from full_shape.
    with self.assertRaises(ValueError):
      partition_info.single_slice_dim([1, 2, 3])
    # Shape is too large given var_offset (4+6 > 9).
    with self.assertRaises(ValueError):
      partition_info.single_slice_dim([6, 3])
    # Multiple possible slice dim from shape.
    with self.assertRaises(ValueError):
      partition_info.single_slice_dim([1, 1])
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[0, 0])
    self.assertEqual(1, partition_info.single_slice_dim([9, 2]))
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[4, 0])
    self.assertEqual(0, partition_info.single_slice_dim([2, 3]))
class VariableScopeMultithreadedTest(test.TestCase):
  """Tests for variable_scope behavior across multiple threads.

  Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12) is
  replaced with ``assertEqual`` throughout; behavior is unchanged.
  """
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testTwoThreadsDisjointScopeEntry(self):
    """Sequential threads entering the same scope: only the first creates."""
    def thread_fn(i, graph):
      with graph.as_default():
        with variable_scope.variable_scope("foo"):
          if i == 0:
            v = variable_scope.get_variable("v", [])
            self.assertEqual("foo/v:0", v.name)
          else:
            # Any thread after the first one should fail to create variable
            # with the same name.
            with self.assertRaises(ValueError):
              variable_scope.get_variable("v", [])
    graph = ops.get_default_graph()
    threads = [
        threading.Thread(target=thread_fn, args=(
            i,
            graph,
        )) for i in range(2)
    ]
    threads[0].start()
    # Allow thread 0 to finish before starting thread 1.
    threads[0].join()
    threads[1].start()
    threads[1].join()
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testTwoThreadsNestedScopeEntry(self):
    """Overlapping threads in the same scope: second still cannot create."""
    def thread_fn(i, graph, run_event, pause_event):
      with graph.as_default():
        with variable_scope.variable_scope("foo"):
          if i == 0:
            v = variable_scope.get_variable("v", [])
            self.assertEqual("foo/v:0", v.name)
          else:
            # Any thread after the first one should fail to create variable
            # with the same name.
            with self.assertRaises(ValueError):
              variable_scope.get_variable("v", [])
          # Signal the main thread, then wait inside the scope so both
          # threads are inside "foo" at the same time.
          pause_event.set()
          run_event.wait()
    graph = ops.get_default_graph()
    run_events = [threading.Event() for _ in range(2)]
    pause_events = [threading.Event() for _ in range(2)]
    threads = [
        threading.Thread(
            target=thread_fn, args=(i, graph, run_events[i], pause_events[i]))
        for i in range(2)
    ]
    # Start first thread.
    threads[0].start()
    pause_events[0].wait()
    # Start next thread once the first thread has paused.
    threads[1].start()
    pause_events[1].wait()
    # Resume both threads.
    run_events[0].set()
    run_events[1].set()
    threads[0].join()
    threads[1].join()
  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testReenterMainScope(self):
    """A worker thread can re-enter a scope created on the main thread."""
    def thread_fn(graph, main_thread_scope):
      with graph.as_default():
        # Variable created with main scope will have prefix "main".
        with variable_scope.variable_scope(main_thread_scope):
          with variable_scope.variable_scope("foo"):
            v = variable_scope.get_variable("v", [])
            self.assertEqual("main/foo/v:0", v.name)
        # Variable created outside main scope will not have prefix "main".
        with variable_scope.variable_scope("bar"):
          v = variable_scope.get_variable("v", [])
          self.assertEqual("bar/v:0", v.name)
    graph = ops.get_default_graph()
    with variable_scope.variable_scope("main") as main_thread_scope:
      thread = threading.Thread(
          target=thread_fn, args=(graph, main_thread_scope))
      thread.start()
      thread.join()
# Standard TensorFlow test entry point: discovers and runs the test cases.
if __name__ == "__main__":
  test.main()
|
test_insert_20.py | import threading
import numpy as np
import pandas as pd
import pytest
from pymilvus import Index
from base.client_base import TestcaseBase
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
# Prefix used when generating unique collection names for these tests.
prefix = "insert"
# Keys used in check-task expectation dicts.
exp_name = "name"
exp_schema = "schema"
exp_num = "num_entities"
exp_primary = "primary"
# Shared default schemas and index parameters built once per module.
default_schema = cf.gen_default_collection_schema()
default_binary_schema = cf.gen_default_binary_collection_schema()
default_index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}
default_binary_index_params = {"index_type": "BIN_IVF_FLAT", "metric_type": "JACCARD", "params": {"nlist": 64}}
class TestInsertParams(TestcaseBase):
""" Test case of Insert interface """
    @pytest.fixture(scope="function", params=ct.get_invalid_strs)
    def get_non_data_type(self, request):
        """Yield params that are invalid insert data types.

        list and None are skipped because they are accepted data types.
        """
        if isinstance(request.param, list) or request.param is None:
            pytest.skip("list and None type is valid data type")
        yield request.param
    @pytest.fixture(scope="module", params=ct.get_invalid_strs)
    def get_invalid_field_name(self, request):
        """Yield params that are invalid field names (list/dict skipped).

        NOTE(review): scope="module" differs from the function-scoped fixture
        above -- confirm this is intentional.
        """
        if isinstance(request.param, (list, dict)):
            pytest.skip()
        yield request.param
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_dataframe_data(self):
        """
        target: test insert DataFrame data
        method: 1.create 2.insert dataframe data
        expected: assert num entities
        """
        c_name = cf.gen_unique_str(prefix)
        collection_w = self.init_collection_wrap(name=c_name)
        df = cf.gen_default_dataframe_data(ct.default_nb)
        mutation_res, _ = collection_w.insert(data=df)
        assert mutation_res.insert_count == ct.default_nb
        # Returned primary keys should mirror the int64 PK column of the df.
        assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
        assert collection_w.num_entities == ct.default_nb
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_list_data(self):
        """
        target: test insert list-like data
        method: 1.create 2.insert list data
        expected: assert num entities
        """
        c_name = cf.gen_unique_str(prefix)
        collection_w = self.init_collection_wrap(name=c_name)
        data = cf.gen_default_list_data(ct.default_nb)
        mutation_res, _ = collection_w.insert(data=data)
        assert mutation_res.insert_count == ct.default_nb
        # data[0] is the primary-key column in the generated list-format data.
        assert mutation_res.primary_keys == data[0]
        assert collection_w.num_entities == ct.default_nb
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_non_data_type(self, get_non_data_type):
        """
        target: test insert with non-dataframe, non-list data
        method: insert with data (non-dataframe and non-list type)
        expected: raise exception
        """
        c_name = cf.gen_unique_str(prefix)
        collection_w = self.init_collection_wrap(name=c_name)
        # The SDK rejects any payload that is not a DataFrame or a list.
        error = {ct.err_code: 0, ct.err_msg: "Data type is not support"}
        collection_w.insert(data=get_non_data_type, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("data", [[], pd.DataFrame()])
def test_insert_empty_data(self, data):
"""
target: test insert empty data
method: insert empty
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
error = {ct.err_code: 0, ct.err_msg: "The data fields number is not match with schema"}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_dataframe_only_columns(self):
"""
target: test insert with dataframe just columns
method: dataframe just have columns
expected: num entities is zero
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
columns = [ct.default_int64_field_name, ct.default_float_vec_field_name]
df = pd.DataFrame(columns=columns)
error = {ct.err_code: 0, ct.err_msg: "Cannot infer schema from empty dataframe"}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_empty_field_name_dataframe(self):
"""
target: test insert empty field name df
method: dataframe with empty column
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(10)
df.rename(columns={ct.default_int64_field_name: ' '}, inplace=True)
error = {ct.err_code: 0, ct.err_msg: "The types of schema and data do not match"}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_invalid_field_name_dataframe(self, get_invalid_field_name):
"""
target: test insert with invalid dataframe data
method: insert with invalid field name dataframe
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(10)
df.rename(columns={ct.default_int64_field_name: get_invalid_field_name}, inplace=True)
error = {ct.err_code: 0, ct.err_msg: "The types of schema and data do not match"}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
def test_insert_dataframe_index(self):
"""
target: test insert dataframe with index
method: insert dataframe with index
expected: todo
"""
pass
@pytest.mark.tags(CaseLabel.L2)
def test_insert_none(self):
"""
target: test insert None
method: data is None
expected: return successfully with zero results
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
mutation_res, _ = collection_w.insert(data=None)
assert mutation_res.insert_count == 0
assert len(mutation_res.primary_keys) == 0
assert collection_w.is_empty
assert collection_w.num_entities == 0
@pytest.mark.tags(CaseLabel.L2)
def test_insert_numpy_data(self):
"""
target: test insert numpy.ndarray data
method: 1.create by schema 2.insert data
expected: assert num_entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_numpy_data(nb=10)
error = {ct.err_code: 0, ct.err_msg: "Data type not support numpy.ndarray"}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_binary_dataframe(self):
"""
target: test insert binary dataframe
method: 1. create by schema 2. insert dataframe
expected: assert num_entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=df)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_binary_data(self):
"""
target: test insert list-like binary data
method: 1. create by schema 2. insert data
expected: assert num_entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
data, _ = cf.gen_default_binary_list_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=data)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == data[0]
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_single(self):
"""
target: test insert single
method: insert one entity
expected: verify num
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(nb=1)
mutation_res, _ = collection_w.insert(data=data)
assert mutation_res.insert_count == 1
assert mutation_res.primary_keys == data[0]
assert collection_w.num_entities == 1
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="exception not Milvus Exception")
def test_insert_dim_not_match(self):
"""
target: test insert with not match dim
method: insert data dim not equal to schema dim
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
dim = 129
df = cf.gen_default_dataframe_data(ct.default_nb, dim=dim)
error = {ct.err_code: 1,
ct.err_msg: f'Collection field dim is {ct.default_dim}, but entities field dim is {dim}'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="exception not Milvus Exception")
def test_insert_binary_dim_not_match(self):
"""
target: test insert binary with dim not match
method: insert binary data dim not equal to schema
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
dim = 120
df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb, dim=dim)
error = {ct.err_code: 1,
ct.err_msg: f'Collection field dim is {ct.default_dim}, but entities field dim is {dim}'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_field_name_not_match(self):
"""
target: test insert field name not match
method: data field name not match schema
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(10)
df.rename(columns={ct.default_float_field_name: "int"}, inplace=True)
error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_field_value_not_match(self):
"""
target: test insert data value not match
method: insert data value type not match schema
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
df = cf.gen_default_dataframe_data(nb)
new_float_value = pd.Series(data=[float(i) for i in range(nb)], dtype="float64")
df.iloc[:, 1] = new_float_value
error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_value_less(self):
"""
target: test insert value less than other
method: int field value less than vec-field value
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
int_values = [i for i in range(nb - 1)]
float_values = [np.float32(i) for i in range(nb)]
float_vec_values = cf.gen_vectors(nb, ct.default_dim)
data = [int_values, float_values, float_vec_values]
error = {ct.err_code: 0, ct.err_msg: 'Arrays must all be same length.'}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_vector_value_less(self):
"""
target: test insert vector value less than other
method: vec field value less than int field
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
int_values = [i for i in range(nb)]
float_values = [np.float32(i) for i in range(nb)]
float_vec_values = cf.gen_vectors(nb - 1, ct.default_dim)
data = [int_values, float_values, float_vec_values]
error = {ct.err_code: 0, ct.err_msg: 'Arrays must all be same length.'}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_fields_more(self):
"""
target: test insert with fields more
method: field more than schema fields
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(ct.default_nb)
new_values = [i for i in range(ct.default_nb)]
df.insert(3, 'new', new_values)
error = {ct.err_code: 0, ct.err_msg: 'The data fields number is not match with schema.'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_fields_less(self):
"""
target: test insert with fields less
method: fields less than schema fields
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(ct.default_nb)
df.drop(ct.default_float_vec_field_name, axis=1, inplace=True)
error = {ct.err_code: 0, ct.err_msg: 'The data fields number is not match with schema.'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_list_order_inconsistent_schema(self):
"""
target: test insert data fields order inconsistent with schema
method: insert list data, data fields order inconsistent with schema
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
int_values = [i for i in range(nb)]
float_values = [np.float32(i) for i in range(nb)]
float_vec_values = cf.gen_vectors(nb, ct.default_dim)
data = [float_values, int_values, float_vec_values]
error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_dataframe_order_inconsistent_schema(self):
"""
target: test insert with dataframe fields inconsistent with schema
method: insert dataframe, and fields order inconsistent with schema
expected: assert num entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
nb = 10
int_values = pd.Series(data=[i for i in range(nb)])
float_values = pd.Series(data=[float(i) for i in range(nb)], dtype="float32")
float_vec_values = cf.gen_vectors(nb, ct.default_dim)
df = pd.DataFrame({
ct.default_float_field_name: float_values,
ct.default_float_vec_field_name: float_vec_values,
ct.default_int64_field_name: int_values
})
error = {ct.err_code: 0, ct.err_msg: 'The types of schema and data do not match'}
collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_inconsistent_data(self):
"""
target: test insert with inconsistent data
method: insert with data that same field has different type data
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(nb=100)
data[0][1] = 1.0
error = {ct.err_code: 0, ct.err_msg: "The data in the same column must be of the same type"}
collection_w.insert(data, check_task=CheckTasks.err_res, check_items=error)
class TestInsertOperation(TestcaseBase):
    """
    ******************************************************************
      The following cases are used to test insert interface operations
    ******************************************************************
    """
    # Parametrized vector dimension: one small and one large (max-ish) value.
    @pytest.fixture(scope="function", params=[8, 4096])
    def dim(self, request):
        yield request.param
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_without_connection(self):
        """
        target: test insert without connection
        method: insert after remove connection
        expected: raise exception
        """
        c_name = cf.gen_unique_str(prefix)
        collection_w = self.init_collection_wrap(name=c_name)
        # Drop the default connection and verify it is really gone.
        self.connection_wrap.remove_connection(ct.default_alias)
        res_list, _ = self.connection_wrap.list_connections()
        assert ct.default_alias not in res_list
        data = cf.gen_default_list_data(10)
        error = {ct.err_code: 0, ct.err_msg: 'should create connect first'}
        collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.skip("https://github.com/milvus-io/milvus/issues/12680")
    @pytest.mark.parametrize("vec_fields", [[cf.gen_float_vec_field(name="float_vector1")],
                                            [cf.gen_binary_vec_field()],
                                            [cf.gen_binary_vec_field(), cf.gen_binary_vec_field("binary_vec")]])
    def test_insert_multi_float_vec_fields(self, vec_fields):
        """
        target: test insert into multi float vec fields collection
        method: create collection with different schema and insert
        expected: verify num entities
        """
        schema = cf.gen_schema_multi_vector_fields(vec_fields)
        collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)
        df = cf.gen_dataframe_multi_vec_fields(vec_fields=vec_fields)
        collection_w.insert(df)
        assert collection_w.num_entities == ct.default_nb
    @pytest.mark.tags(CaseLabel.L1)
    def test_insert_drop_collection(self):
        """
        target: test insert and drop
        method: insert data and drop collection
        expected: verify collection if exist
        """
        c_name = cf.gen_unique_str(prefix)
        collection_w = self.init_collection_wrap(name=c_name)
        collection_list, _ = self.utility_wrap.list_collections()
        assert collection_w.name in collection_list
        df = cf.gen_default_dataframe_data(ct.default_nb)
        collection_w.insert(data=df)
        collection_w.drop()
        collection_list, _ = self.utility_wrap.list_collections()
        assert collection_w.name not in collection_list
    @pytest.mark.tags(CaseLabel.L1)
    def test_insert_create_index(self):
        """
        target: test insert and create index
        method: 1. insert 2. create index
        expected: verify num entities and index
        """
        collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
        df = cf.gen_default_dataframe_data(ct.default_nb)
        collection_w.insert(data=df)
        assert collection_w.num_entities == ct.default_nb
        collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
        assert collection_w.has_index()[0]
        index, _ = collection_w.index()
        assert index == Index(collection_w.collection, ct.default_float_vec_field_name, default_index_params)
        assert collection_w.indexes[0] == index
    @pytest.mark.tags(CaseLabel.L1)
    def test_insert_after_create_index(self):
        """
        target: test insert after create index
        method: 1. create index 2. insert data
        expected: verify index and num entities
        """
        collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
        collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
        assert collection_w.has_index()[0]
        index, _ = collection_w.index()
        assert index == Index(collection_w.collection, ct.default_float_vec_field_name, default_index_params)
        assert collection_w.indexes[0] == index
        df = cf.gen_default_dataframe_data(ct.default_nb)
        collection_w.insert(data=df)
        assert collection_w.num_entities == ct.default_nb
    @pytest.mark.tags(CaseLabel.L1)
    def test_insert_binary_after_index(self):
        """
        target: test insert binary after index
        method: 1.create index 2.insert binary data
        expected: 1.index ok 2.num entities correct
        """
        schema = cf.gen_default_binary_collection_schema()
        collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)
        collection_w.create_index(ct.default_binary_vec_field_name, default_binary_index_params)
        assert collection_w.has_index()[0]
        index, _ = collection_w.index()
        assert index == Index(collection_w.collection, ct.default_binary_vec_field_name, default_binary_index_params)
        assert collection_w.indexes[0] == index
        df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb)
        collection_w.insert(data=df)
        assert collection_w.num_entities == ct.default_nb
    @pytest.mark.tags(CaseLabel.L1)
    def test_insert_auto_id_create_index(self):
        """
        target: test create index in auto_id=True collection
        method: 1.create auto_id=True collection and insert 2.create index
        expected: index correct
        """
        schema = cf.gen_default_collection_schema(auto_id=True)
        collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix), schema=schema)
        df = cf.gen_default_dataframe_data()
        # auto_id collections must not receive primary-key values.
        df.drop(ct.default_int64_field_name, axis=1, inplace=True)
        mutation_res, _ = collection_w.insert(data=df)
        assert cf._check_primary_keys(mutation_res.primary_keys, ct.default_nb)
        assert collection_w.num_entities == ct.default_nb
        # create index
        collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
        assert collection_w.has_index()[0]
        index, _ = collection_w.index()
        assert index == Index(collection_w.collection, ct.default_float_vec_field_name, default_index_params)
        assert collection_w.indexes[0] == index
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_auto_id_true(self):
        """
        target: test insert ids fields values when auto_id=True
        method: 1.create collection with auto_id=True 2.insert without ids
        expected: verify primary_keys and num_entities
        """
        c_name = cf.gen_unique_str(prefix)
        schema = cf.gen_default_collection_schema(auto_id=True)
        collection_w = self.init_collection_wrap(name=c_name, schema=schema)
        df = cf.gen_default_dataframe_data()
        df.drop(ct.default_int64_field_name, axis=1, inplace=True)
        mutation_res, _ = collection_w.insert(data=df)
        assert cf._check_primary_keys(mutation_res.primary_keys, ct.default_nb)
        assert collection_w.num_entities == ct.default_nb
    @pytest.mark.tags(CaseLabel.L1)
    def test_insert_twice_auto_id_true(self):
        """
        target: test insert ids fields twice when auto_id=True
        method: 1.create collection with auto_id=True 2.insert twice
        expected: verify primary_keys unique
        """
        c_name = cf.gen_unique_str(prefix)
        schema = cf.gen_default_collection_schema(auto_id=True)
        nb = 10
        collection_w = self.init_collection_wrap(name=c_name, schema=schema)
        df = cf.gen_default_dataframe_data(nb)
        df.drop(ct.default_int64_field_name, axis=1, inplace=True)
        mutation_res, _ = collection_w.insert(data=df)
        primary_keys = mutation_res.primary_keys
        assert cf._check_primary_keys(primary_keys, nb)
        # Second insert of the same rows must generate fresh, unique ids.
        mutation_res_1, _ = collection_w.insert(data=df)
        primary_keys.extend(mutation_res_1.primary_keys)
        assert cf._check_primary_keys(primary_keys, nb * 2)
        assert collection_w.num_entities == nb * 2
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_auto_id_true_list_data(self):
        """
        target: test insert ids fields values when auto_id=True
        method: 1.create collection with auto_id=True 2.insert list data with ids field values
        expected: assert num entities
        """
        c_name = cf.gen_unique_str(prefix)
        schema = cf.gen_default_collection_schema(auto_id=True)
        collection_w = self.init_collection_wrap(name=c_name, schema=schema)
        data = cf.gen_default_list_data()
        # data[1:] strips the generated pk column; the server assigns ids.
        mutation_res, _ = collection_w.insert(data=data[1:])
        assert mutation_res.insert_count == ct.default_nb
        assert cf._check_primary_keys(mutation_res.primary_keys, ct.default_nb)
        assert collection_w.num_entities == ct.default_nb
    @pytest.mark.tags(CaseLabel.L1)
    def test_insert_auto_id_true_with_dataframe_values(self):
        """
        target: test insert with auto_id=True
        method: create collection with auto_id=True
        expected: 1.verify num entities 2.verify ids
        """
        c_name = cf.gen_unique_str(prefix)
        schema = cf.gen_default_collection_schema(auto_id=True)
        collection_w = self.init_collection_wrap(name=c_name, schema=schema)
        # Dataframe still contains explicit pk values -> rejected.
        df = cf.gen_default_dataframe_data(nb=100)
        error = {ct.err_code: 0, ct.err_msg: 'Auto_id is True, primary field should not have data'}
        collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
        assert collection_w.is_empty
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_auto_id_true_with_list_values(self):
        """
        target: test insert with auto_id=True
        method: create collection with auto_id=True
        expected: 1.verify num entities 2.verify ids
        """
        c_name = cf.gen_unique_str(prefix)
        schema = cf.gen_default_collection_schema(auto_id=True)
        collection_w = self.init_collection_wrap(name=c_name, schema=schema)
        data = cf.gen_default_list_data(nb=100)
        error = {ct.err_code: 0, ct.err_msg: 'The data fields number is not match with schema'}
        collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
        assert collection_w.is_empty
    @pytest.mark.tags(CaseLabel.L1)
    def test_insert_auto_id_false_same_values(self):
        """
        target: test insert same ids with auto_id false
        method: 1.create collection with auto_id=False 2.insert same int64 field values
        expected: insert succeeds and returns the duplicated primary keys
        """
        # NOTE(review): the original docstring said "raise exception", but the
        # body asserts a successful insert — duplicate pks are accepted here.
        c_name = cf.gen_unique_str(prefix)
        collection_w = self.init_collection_wrap(name=c_name)
        nb = 100
        data = cf.gen_default_list_data(nb=nb)
        # All rows share the same primary key value 1.
        data[0] = [1 for i in range(nb)]
        mutation_res, _ = collection_w.insert(data)
        assert mutation_res.insert_count == nb
        assert mutation_res.primary_keys == data[0]
    @pytest.mark.tags(CaseLabel.L1)
    def test_insert_auto_id_false_negative_values(self):
        """
        target: test insert negative ids with auto_id false
        method: auto_id=False, primary field values is negative
        expected: verify num entities
        """
        c_name = cf.gen_unique_str(prefix)
        collection_w = self.init_collection_wrap(name=c_name)
        nb = 100
        data = cf.gen_default_list_data(nb)
        # Primary keys 0, -1, ..., -(nb-1).
        data[0] = [i for i in range(0, -nb, -1)]
        mutation_res, _ = collection_w.insert(data)
        assert mutation_res.primary_keys == data[0]
        assert collection_w.num_entities == nb
    @pytest.mark.tags(CaseLabel.L1)
    def test_insert_multi_threading(self):
        """
        target: test concurrent insert
        method: multi threads insert
        expected: verify num entities
        """
        collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
        df = cf.gen_default_dataframe_data(ct.default_nb)
        thread_num = 4
        threads = []
        primary_keys = df[ct.default_int64_field_name].values.tolist()
        def insert(thread_i):
            log.debug(f'In thread-{thread_i}')
            mutation_res, _ = collection_w.insert(df)
            assert mutation_res.insert_count == ct.default_nb
            assert mutation_res.primary_keys == primary_keys
        for i in range(thread_num):
            x = threading.Thread(target=insert, args=(i,))
            threads.append(x)
            x.start()
        for t in threads:
            t.join()
        # All threads inserted the same rows; total row count is additive.
        assert collection_w.num_entities == ct.default_nb * thread_num
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.skip(reason="Currently primary keys are not unique")
    def test_insert_multi_threading_auto_id(self):
        """
        target: test concurrent insert auto_id=True collection
        method: 1.create auto_id=True collection 2.concurrent insert
        expected: verify primary keys unique
        """
        pass
    @pytest.mark.tags(CaseLabel.L1)
    def test_insert_multi_times(self, dim):
        """
        target: test insert multi times
        method: insert data multi times
        expected: verify num entities
        """
        step = 120
        nb = 12000
        collection_w = self.init_collection_general(prefix, dim=dim)[0]
        # Insert nb rows in nb//step batches of `step` rows each.
        for _ in range(nb // step):
            df = cf.gen_default_dataframe_data(step, dim)
            mutation_res, _ = collection_w.insert(data=df)
            assert mutation_res.insert_count == step
            assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
        assert collection_w.num_entities == nb
    @pytest.mark.tags(CaseLabel.L1)
    def test_insert_all_datatype_collection(self):
        """
        target: test insert into collection that contains all datatype fields
        method: 1.create all datatype collection 2.insert data
        expected: verify num entities
        """
        self._connect()
        nb = 100
        df = cf.gen_dataframe_all_data_type(nb=nb)
        self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
                                                      primary_field=ct.default_int64_field_name)
        assert self.collection_wrap.num_entities == nb
class TestInsertAsync(TestcaseBase):
    """
    ******************************************************************
      The following cases are used to test insert async
    ******************************************************************
    """
    @pytest.mark.tags(CaseLabel.L1)
    def test_insert_sync(self):
        """
        target: test async insert
        method: insert with async=True
        expected: verify num entities
        """
        # NOTE(review): name says "sync" but the case exercises _async=True;
        # consider renaming for clarity.
        collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
        df = cf.gen_default_dataframe_data()
        future, _ = collection_w.insert(data=df, _async=True)
        future.done()
        # result() blocks until the insert completes.
        mutation_res = future.result()
        assert mutation_res.insert_count == ct.default_nb
        assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
        assert collection_w.num_entities == ct.default_nb
    @pytest.mark.tags(CaseLabel.L1)
    def test_insert_async_false(self):
        """
        target: test insert with false async
        method: async = false
        expected: verify num entities
        """
        collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
        df = cf.gen_default_dataframe_data()
        mutation_res, _ = collection_w.insert(data=df, _async=False)
        assert mutation_res.insert_count == ct.default_nb
        assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
        assert collection_w.num_entities == ct.default_nb
    @pytest.mark.tags(CaseLabel.L1)
    def test_insert_async_callback(self):
        """
        target: test insert with callback func
        method: insert with callback func
        expected: verify num entities
        """
        collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
        df = cf.gen_default_dataframe_data()
        # assert_mutation_result (module-level) is invoked by the future.
        future, _ = collection_w.insert(data=df, _async=True, _callback=assert_mutation_result)
        future.done()
        mutation_res = future.result()
        assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
        assert collection_w.num_entities == ct.default_nb
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_async_long(self):
        """
        target: test insert with async
        method: insert 5w entities with callback func
        expected: verify num entities
        """
        nb = 50000
        collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
        df = cf.gen_default_dataframe_data(nb)
        future, _ = collection_w.insert(data=df, _async=True)
        future.done()
        mutation_res = future.result()
        assert mutation_res.insert_count == nb
        assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
        assert collection_w.num_entities == nb
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_async_callback_timeout(self):
        """
        target: test insert async with callback
        method: insert 10w entities with timeout=1
        expected: raise exception
        """
        nb = 100000
        collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
        df = cf.gen_default_dataframe_data(nb)
        # 1-second timeout is expected to expire for 100k rows.
        future, _ = collection_w.insert(data=df, _async=True, _callback=assert_mutation_result, timeout=1)
        with pytest.raises(Exception):
            future.result()
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_async_invalid_data(self):
        """
        target: test insert async with invalid data
        method: insert async with invalid data
        expected: raise exception
        """
        collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
        columns = [ct.default_int64_field_name, ct.default_float_vec_field_name]
        df = pd.DataFrame(columns=columns)
        error = {ct.err_code: 0, ct.err_msg: "Cannot infer schema from empty dataframe"}
        collection_w.insert(data=df, _async=True, check_task=CheckTasks.err_res, check_items=error)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_async_invalid_partition(self):
        """
        target: test insert async with invalid partition
        method: insert async with invalid partition
        expected: raise exception
        """
        collection_w = self.init_collection_wrap(name=cf.gen_unique_str(prefix))
        df = cf.gen_default_dataframe_data()
        err_msg = "partitionID of partitionName:p can not be find"
        # The error surfaces only when the future's result is requested.
        future, _ = collection_w.insert(data=df, partition_name="p", _async=True)
        future.done()
        with pytest.raises(Exception, match=err_msg):
            future.result()
def assert_mutation_result(mutation_res):
    """Async-insert callback: verify the default number of rows was inserted."""
    expected = ct.default_nb
    assert mutation_res.insert_count == expected
|
data_utils.py | # Lint as python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Utilities for file download and caching."""
import tensorflow.compat.v2 as tf
from abc import abstractmethod
from contextlib import closing
import functools
import hashlib
import multiprocessing.dummy
import os
import pathlib
import queue
import random
import shutil
import tarfile
import threading
import time
import typing
import urllib
import weakref
import zipfile
from six.moves.urllib.parse import urlsplit
import numpy as np
from six.moves.urllib.request import urlopen
from keras.utils import tf_inspect
from keras.utils.generic_utils import Progbar
from keras.utils import io_utils
from tensorflow.python.util.tf_export import keras_export
# Required to support google internal urlretrieve
# NOTE(review): the constant condition below is rewritten by an external OSS
# transform; do not "simplify" it.
if True: # This gets transformed to `if sys.version_info[0] == 2:` in OSS. # pylint: disable=using-constant-test

  def urlretrieve(url, filename, reporthook=None, data=None):
    """Replacement for `urlretrieve` for Python 2.

    Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
    `urllib` module, known to have issues with proxy management.

    Args:
        url: url to retrieve.
        filename: where to store the retrieved data locally.
        reporthook: a hook function that will be called once on establishment of
          the network connection and once after each block read thereafter. The
          hook will be passed three arguments; a count of blocks transferred so
          far, a block size in bytes, and the total size of the file.
        data: `data` argument passed to `urlopen`.
    """

    def chunk_read(response, chunk_size=8192, reporthook=None):
      # Total size comes from Content-Length when present; -1 means unknown.
      content_type = response.info().get('Content-Length')
      total_size = -1
      if content_type is not None:
        total_size = int(content_type.strip())
      count = 0
      while True:
        chunk = response.read(chunk_size)
        count += 1
        if reporthook is not None:
          # The hook fires even for the final empty read that ends the loop.
          reporthook(count, chunk_size, total_size)
        if chunk:
          yield chunk
        else:
          break

    response = urlopen(url, data)
    with open(filename, 'wb') as fd:
      for chunk in chunk_read(response, reporthook=reporthook):
        fd.write(chunk)
else:
  from urllib.request import urlretrieve # pylint: disable=g-importing-member
def is_generator_or_sequence(x):
  """Check if `x` is a Keras generator type."""
  # Concrete tensors/arrays and builtin containers are never treated as
  # generator-style inputs, even though several of them are iterable.
  concrete_types = (tf.Tensor, np.ndarray, str, list, tuple, dict, set,
                    frozenset)
  if isinstance(x, concrete_types):
    return False
  if tf_inspect.isgenerator(x):
    return True
  return isinstance(x, (Sequence, typing.Iterator))
def _extract_archive(file_path, path='.', archive_format='auto'):
  """Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.

  Args:
    file_path: path to the archive file
    path: path to extract the archive file
    archive_format: Archive format to try for extracting the file.
      Options are 'auto', 'tar', 'zip', and None.
      'tar' includes tar, tar.gz, and tar.bz files.
      The default 'auto' is ['tar', 'zip'].
      None or an empty list will return no matches found.

  Returns:
    True if a match was found and an archive extraction was completed,
    False otherwise.
  """
  if archive_format is None:
    return False
  if archive_format == 'auto':
    archive_format = ['tar', 'zip']
  if isinstance(archive_format, str):
    archive_format = [archive_format]

  file_path = io_utils.path_to_string(file_path)
  path = io_utils.path_to_string(path)

  for archive_type in archive_format:
    if archive_type == 'tar':
      open_fn = tarfile.open
      is_match_fn = tarfile.is_tarfile
    elif archive_type == 'zip':
      open_fn = zipfile.ZipFile
      is_match_fn = zipfile.is_zipfile
    else:
      # Previously an unrecognized entry (e.g. 'rar') left `open_fn` /
      # `is_match_fn` unbound on the first iteration (UnboundLocalError) or
      # silently reused the previous iteration's functions. Skip it instead.
      continue

    if is_match_fn(file_path):
      with open_fn(file_path) as archive:
        try:
          archive.extractall(path)
        except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
          # Remove any partially-extracted tree before propagating.
          if os.path.exists(path):
            if os.path.isfile(path):
              os.remove(path)
            else:
              shutil.rmtree(path)
          raise
      return True
  return False
@keras_export('keras.utils.get_file')
def get_file(fname=None,
             origin=None,
             untar=False,
             md5_hash=None,
             file_hash=None,
             cache_subdir='datasets',
             hash_algorithm='auto',
             extract=False,
             archive_format='auto',
             cache_dir=None):
  """Downloads a file from a URL if it not already in the cache.

  By default the file at the url `origin` is downloaded to the
  cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
  and given the filename `fname`. The final location of a file
  `example.txt` would therefore be `~/.keras/datasets/example.txt`.

  Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
  Passing a hash will verify the file after download. The command line
  programs `shasum` and `sha256sum` can compute the hash.

  Example:

  ```python
  path_to_downloaded_file = tf.keras.utils.get_file(
      "flower_photos",
      "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz",
      untar=True)
  ```

  Args:
    fname: Name of the file. If an absolute path `/path/to/file.txt` is
      specified the file will be saved at that location. If `None`, the
      name of the file at `origin` will be used.
    origin: Original URL of the file.
    untar: Deprecated in favor of `extract` argument.
      boolean, whether the file should be decompressed
    md5_hash: Deprecated in favor of `file_hash` argument.
      md5 hash of the file for verification
    file_hash: The expected hash string of the file after download.
      The sha256 and md5 hash algorithms are both supported.
    cache_subdir: Subdirectory under the Keras cache dir where the file is
      saved. If an absolute path `/path/to/folder` is
      specified the file will be saved at that location.
    hash_algorithm: Select the hash algorithm to verify the file.
      options are `'md5'`, `'sha256'`, and `'auto'`.
      The default 'auto' detects the hash algorithm in use.
    extract: True tries extracting the file as an Archive, like tar or zip.
    archive_format: Archive format to try for extracting the file.
      Options are `'auto'`, `'tar'`, `'zip'`, and `None`.
      `'tar'` includes tar, tar.gz, and tar.bz files.
      The default `'auto'` corresponds to `['tar', 'zip']`.
      None or an empty list will return no matches found.
    cache_dir: Location to store cached files, when None it
      defaults to the default directory `~/.keras/`.

  Returns:
    Path to the downloaded file

  Raises:
    ValueError: if `origin` is missing, `fname` cannot be derived from
      `origin`, or the downloaded file fails hash validation.
  """
  if origin is None:
    raise ValueError('Please specify the "origin" argument (URL of the file '
                     'to download).')

  if cache_dir is None:
    cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
  # Deprecated md5_hash is folded into the file_hash/hash_algorithm pair.
  if md5_hash is not None and file_hash is None:
    file_hash = md5_hash
    hash_algorithm = 'md5'
  datadir_base = os.path.expanduser(cache_dir)
  # Fall back to /tmp when the cache dir is not writable (e.g. read-only home).
  if not os.access(datadir_base, os.W_OK):
    datadir_base = os.path.join('/tmp', '.keras')
  datadir = os.path.join(datadir_base, cache_subdir)
  _makedirs_exist_ok(datadir)

  fname = io_utils.path_to_string(fname)
  if not fname:
    # Derive the local file name from the URL path component.
    fname = os.path.basename(urlsplit(origin).path)
    if not fname:
      raise ValueError(
          f"Can't parse the file name from the origin provided: '{origin}'."
          "Please specify the `fname` as the input param.")

  if untar:
    if fname.endswith('.tar.gz'):
      fname = pathlib.Path(fname)
      # The 2 `.with_suffix()` are because of `.tar.gz` as pathlib
      # considers it as 2 suffixes.
      fname = fname.with_suffix('').with_suffix('')
      fname = str(fname)
    untar_fpath = os.path.join(datadir, fname)
    fpath = untar_fpath + '.tar.gz'
  else:
    fpath = os.path.join(datadir, fname)

  download = False
  if os.path.exists(fpath):
    # File found; verify integrity if a hash was provided.
    if file_hash is not None:
      if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
        io_utils.print_msg(
            'A local file was found, but it seems to be '
            f'incomplete or outdated because the {hash_algorithm} '
            f'file hash does not match the original value of {file_hash} '
            'so we will re-download the data.')
        download = True
  else:
    download = True

  if download:
    io_utils.print_msg(f'Downloading data from {origin}')

    class DLProgbar:
      """Manage progress bar state for use in urlretrieve."""

      def __init__(self):
        self.progbar = None
        self.finished = False

      def __call__(self, block_num, block_size, total_size):
        if not self.progbar:
          if total_size == -1:
            total_size = None
          self.progbar = Progbar(total_size)
        current = block_num * block_size
        # NOTE(review): when the server sends no Content-Length, `total_size`
        # stays -1 (first call rebinds it to None locally) — `current <
        # total_size` then compares int to None/negative; confirm behavior for
        # size-less downloads.
        if current < total_size:
          self.progbar.update(current)
        elif not self.finished:
          self.progbar.update(self.progbar.target)
          self.finished = True

    error_msg = 'URL fetch failure on {}: {} -- {}'
    try:
      try:
        urlretrieve(origin, fpath, DLProgbar())
      except urllib.error.HTTPError as e:
        raise Exception(error_msg.format(origin, e.code, e.msg))
      except urllib.error.URLError as e:
        raise Exception(error_msg.format(origin, e.errno, e.reason))
    except (Exception, KeyboardInterrupt) as e:
      # Never leave a partial download in the cache.
      if os.path.exists(fpath):
        os.remove(fpath)
      raise

    # Validate download if succeeded and user provided an expected hash
    # Security conscious users would get the hash of the file from a separate
    # channel and pass it to this API to prevent MITM / corruption:
    if os.path.exists(fpath) and file_hash is not None:
      if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
        raise ValueError(
            f'Incomplete or corrupted file detected. The {hash_algorithm} '
            f'file hash does not match the provided value of {file_hash}.')

  if untar:
    if not os.path.exists(untar_fpath):
      _extract_archive(fpath, datadir, archive_format='tar')
    return untar_fpath

  if extract:
    _extract_archive(fpath, datadir, archive_format)

  return fpath
def _makedirs_exist_ok(datadir):
os.makedirs(datadir, exist_ok=True) # pylint: disable=unexpected-keyword-arg
def _resolve_hasher(algorithm, file_hash=None):
"""Returns hash algorithm as hashlib function."""
if algorithm == 'sha256':
return hashlib.sha256()
if algorithm == 'auto' and file_hash is not None and len(file_hash) == 64:
return hashlib.sha256()
# This is used only for legacy purposes.
return hashlib.md5()
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
Example:
```python
_hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
Args:
fpath: path to the file being validated
algorithm: hash algorithm, one of `'auto'`, `'sha256'`, or `'md5'`.
The default `'auto'` detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
The file hash
"""
if isinstance(algorithm, str):
hasher = _resolve_hasher(algorithm)
else:
hasher = algorithm
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
  """Validates a file against a sha256 or md5 hash.

  Args:
    fpath: path to the file being validated
    file_hash: The expected hash string of the file.
      The sha256 and md5 hash algorithms are both supported.
    algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
      The default 'auto' detects the hash algorithm in use.
    chunk_size: Bytes to read at a time, important for large files.

  Returns:
    Whether the file is valid
  """
  hasher = _resolve_hasher(algorithm, file_hash)
  # Return the comparison directly instead of the redundant
  # `if ...: return True / else: return False` branching.
  return str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash)
class ThreadsafeIter:
  """Wrap an iterator with a lock and propagate exceptions to all threads."""

  def __init__(self, it):
    self.it = it
    self.lock = threading.Lock()

    # After a generator throws an exception all subsequent next() calls raise
    # StopIteration, which can make a crashed generator look like a normal
    # exhaustion to the *other* threads consuming it. To keep the failure
    # visible everywhere, the first exception is remembered and re-raised by
    # every later call.
    self._exception = None

  def __iter__(self):
    return self

  def next(self):
    # Python-2-style alias for __next__.
    return self.__next__()

  def __next__(self):
    with self.lock:
      if self._exception:
        raise self._exception  # pylint: disable=raising-bad-type
      try:
        return next(self.it)
      except Exception as e:
        self._exception = e
        raise
def threadsafe_generator(f):
  """Decorator wrapping generator function `f` so its output is thread-safe."""

  @functools.wraps(f)
  def wrapper(*args, **kwargs):
    return ThreadsafeIter(f(*args, **kwargs))

  return wrapper
@keras_export('keras.utils.Sequence')
class Sequence:
  """Base object for fitting to a sequence of data, such as a dataset.

  Every `Sequence` must implement the `__getitem__` and the `__len__` methods.
  If you want to modify your dataset between epochs you may implement
  `on_epoch_end`. The method `__getitem__` should return a complete batch.

  Notes:

  `Sequence` is a safer way to do multiprocessing. This structure guarantees
  that the network will only train once on each sample per epoch, which is
  not the case with generators.

  Examples:

  ```python
  from skimage.io import imread
  from skimage.transform import resize
  import numpy as np
  import math

  # Here, `x_set` is list of path to the images
  # and `y_set` are the associated classes.

  class CIFAR10Sequence(tf.keras.utils.Sequence):

      def __init__(self, x_set, y_set, batch_size):
          self.x, self.y = x_set, y_set
          self.batch_size = batch_size

      def __len__(self):
          return math.ceil(len(self.x) / self.batch_size)

      def __getitem__(self, idx):
          low = idx * self.batch_size
          high = (idx + 1) * self.batch_size
          batch_x = self.x[low:high]
          batch_y = self.y[low:high]
          return np.array([
              resize(imread(name), (200, 200)) for name in batch_x
          ]), np.array(batch_y)
  ```
  """

  @abstractmethod
  def __getitem__(self, index):
    """Gets batch at position `index`.

    Args:
      index: position of the batch in the Sequence.

    Returns:
      A batch
    """
    raise NotImplementedError

  @abstractmethod
  def __len__(self):
    """Number of batch in the Sequence.

    Returns:
      The number of batches in the Sequence.
    """
    raise NotImplementedError

  def on_epoch_end(self):
    """Method called at the end of every epoch."""

  def __iter__(self):
    """Create a generator that iterate over the Sequence."""
    for batch_index in range(len(self)):
      yield self[batch_index]
def iter_sequence_infinite(seq):
  """Iterates indefinitely over a Sequence.

  Args:
    seq: `Sequence` instance.

  Yields:
    Batches of data from the `Sequence`, restarting from the beginning each
    time the sequence is exhausted.
  """
  while True:
    yield from seq
# Global variables to be shared across processes
_SHARED_SEQUENCES = {}
# We use a Value to provide unique id to different processes.
_SEQUENCE_COUNTER = None

# Because multiprocessing pools are inherently unsafe, starting from a clean
# state can be essential to avoiding deadlocks. In order to accomplish this, we
# need to be able to check on the status of Pools that we create.
_DATA_POOLS = weakref.WeakSet()
_WORKER_ID_QUEUE = None  # Only created if needed.
_WORKER_IDS = set()
# When True, `get_pool_class` always hands out a thread pool, regardless of
# the caller's `use_multiprocessing` choice (see
# `dont_use_multiprocessing_pool`).
_FORCE_THREADPOOL = False
_FORCE_THREADPOOL_LOCK = threading.RLock()
def dont_use_multiprocessing_pool(f):
  """Decorator forcing thread pools (not process pools) while `f` runs.

  Temporarily sets the module-level `_FORCE_THREADPOOL` flag so that
  `get_pool_class` returns a thread pool for the duration of the call.

  Args:
    f: the function to wrap.

  Returns:
    The wrapped function.
  """

  @functools.wraps(f)
  def wrapped(*args, **kwargs):
    with _FORCE_THREADPOOL_LOCK:
      global _FORCE_THREADPOOL
      old_force_threadpool, _FORCE_THREADPOOL = _FORCE_THREADPOOL, True
      try:
        return f(*args, **kwargs)
      finally:
        # Restore the flag even when `f` raises; previously an exception left
        # `_FORCE_THREADPOOL` stuck at True for the rest of the process.
        _FORCE_THREADPOOL = old_force_threadpool

  return wrapped
def get_pool_class(use_multiprocessing):
  """Return the pool class to use: process pool, or thread pool when disabled/forced."""
  if use_multiprocessing and not _FORCE_THREADPOOL:
    return multiprocessing.Pool
  return multiprocessing.dummy.Pool  # ThreadPool
def get_worker_id_queue():
  """Lazily create the queue to track worker ids."""
  global _WORKER_ID_QUEUE
  # Created on first use so that processes which never need it don't pay for a
  # multiprocessing.Queue.
  if _WORKER_ID_QUEUE is None:
    _WORKER_ID_QUEUE = multiprocessing.Queue()
  return _WORKER_ID_QUEUE
def init_pool(seqs):
  """Pool initializer: install `seqs` as this worker's shared-sequence registry."""
  global _SHARED_SEQUENCES
  _SHARED_SEQUENCES = seqs
def get_index(uid, i):
  """Get the value from the Sequence `uid` at index `i`.

  To allow multiple Sequences to be used at the same time, we use `uid` to
  get a specific one. A single Sequence would cause the validation to
  overwrite the training Sequence.

  Args:
    uid: int, Sequence identifier
    i: index

  Returns:
    The value at index `i`.
  """
  return _SHARED_SEQUENCES[uid][i]
@keras_export('keras.utils.SequenceEnqueuer')
class SequenceEnqueuer:
  """Base class to enqueue inputs.

  The task of an Enqueuer is to use parallelism to speed up preprocessing.
  This is done with processes or threads.

  Example:

  ```python
      enqueuer = SequenceEnqueuer(...)
      enqueuer.start()
      datas = enqueuer.get()
      for data in datas:
          # Use the inputs; training, evaluating, predicting.
          # ... stop sometime.
      enqueuer.stop()
  ```

  The `enqueuer.get()` should be an infinite stream of data.
  """

  def __init__(self, sequence,
               use_multiprocessing=False):
    self.sequence = sequence
    self.use_multiprocessing = use_multiprocessing

    # Allocate a unique id for this enqueuer so several of them (e.g. train
    # and validation) can coexist in the _SHARED_SEQUENCES registry.
    global _SEQUENCE_COUNTER
    if _SEQUENCE_COUNTER is None:
      try:
        _SEQUENCE_COUNTER = multiprocessing.Value('i', 0)
      except OSError:
        # In this case the OS does not allow us to use
        # multiprocessing. We resort to an int
        # for enqueuer indexing.
        _SEQUENCE_COUNTER = 0

    if isinstance(_SEQUENCE_COUNTER, int):
      self.uid = _SEQUENCE_COUNTER
      _SEQUENCE_COUNTER += 1
    else:
      # Doing Multiprocessing.Value += x is not process-safe.
      with _SEQUENCE_COUNTER.get_lock():
        self.uid = _SEQUENCE_COUNTER.value
        _SEQUENCE_COUNTER.value += 1

    self.workers = 0
    self.executor_fn = None
    self.queue = None
    self.run_thread = None
    self.stop_signal = None

  def is_running(self):
    # Running means start() has been called and stop() has not been signalled.
    return self.stop_signal is not None and not self.stop_signal.is_set()

  def start(self, workers=1, max_queue_size=10):
    """Starts the handler's workers.

    Args:
      workers: Number of workers.
      max_queue_size: queue size
        (when full, workers could block on `put()`)
    """
    if self.use_multiprocessing:
      self.executor_fn = self._get_executor_init(workers)
    else:
      # We do not need the init since it's threads.
      self.executor_fn = lambda _: get_pool_class(False)(workers)
    self.workers = workers
    self.queue = queue.Queue(max_queue_size)
    self.stop_signal = threading.Event()
    # Daemon thread: don't keep the process alive just for the producer.
    self.run_thread = threading.Thread(target=self._run)
    self.run_thread.daemon = True
    self.run_thread.start()

  def _send_sequence(self):
    """Sends current Iterable to all workers."""
    # For new processes that may spawn
    _SHARED_SEQUENCES[self.uid] = self.sequence

  def stop(self, timeout=None):
    """Stops running threads and wait for them to exit, if necessary.

    Should be called by the same thread which called `start()`.

    Args:
      timeout: maximum time to wait on `thread.join()`
    """
    self.stop_signal.set()
    # Drain the queue under its mutex so a producer blocked on put() wakes up
    # and can observe the stop signal; reaching into queue internals here is
    # deliberate.
    with self.queue.mutex:
      self.queue.queue.clear()
      self.queue.unfinished_tasks = 0
      self.queue.not_full.notify()
    self.run_thread.join(timeout)
    _SHARED_SEQUENCES[self.uid] = None

  def __del__(self):
    if self.is_running():
      self.stop()

  @abstractmethod
  def _run(self):
    """Submits request to the executor and queue the `Future` objects."""
    raise NotImplementedError

  @abstractmethod
  def _get_executor_init(self, workers):
    """Gets the Pool initializer for multiprocessing.

    Args:
      workers: Number of workers.

    Returns:
      Function, a Function to initialize the pool
    """
    raise NotImplementedError

  @abstractmethod
  def get(self):
    """Creates a generator to extract data from the queue.

    Skip the data if it is `None`.

    # Returns
        Generator yielding tuples `(inputs, targets)`
            or `(inputs, targets, sample_weights)`.
    """
    raise NotImplementedError
@keras_export('keras.utils.OrderedEnqueuer')
class OrderedEnqueuer(SequenceEnqueuer):
  """Builds a Enqueuer from a Sequence.

  Args:
    sequence: A `tf.keras.utils.data_utils.Sequence` object.
    use_multiprocessing: use multiprocessing if True, otherwise threading
    shuffle: whether to shuffle the data at the beginning of each epoch
  """

  def __init__(self, sequence, use_multiprocessing=False, shuffle=False):
    super(OrderedEnqueuer, self).__init__(sequence, use_multiprocessing)
    self.shuffle = shuffle

  def _get_executor_init(self, workers):
    """Gets the Pool initializer for multiprocessing.

    Args:
      workers: Number of workers.

    Returns:
      Function, a Function to initialize the pool
    """

    def pool_fn(seqs):
      pool = get_pool_class(True)(
          workers, initializer=init_pool_generator,
          initargs=(seqs, None, get_worker_id_queue()))
      # Track the pool so its status can be checked later (see _DATA_POOLS).
      _DATA_POOLS.add(pool)
      return pool

    return pool_fn

  def _wait_queue(self):
    """Wait for the queue to be empty."""
    while True:
      time.sleep(0.1)
      if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
        return

  def _run(self):
    """Submits request to the executor and queue the `Future` objects."""
    sequence = list(range(len(self.sequence)))
    self._send_sequence()  # Share the initial sequence
    while True:
      if self.shuffle:
        random.shuffle(sequence)

      with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
        for i in sequence:
          if self.stop_signal.is_set():
            return

          # Blocking put applies backpressure when the consumer falls behind.
          self.queue.put(
              executor.apply_async(get_index, (self.uid, i)), block=True)

        # Done with the current epoch, waiting for the final batches
        self._wait_queue()

        if self.stop_signal.is_set():
          # We're done
          return

      # Call the internal on epoch end.
      self.sequence.on_epoch_end()
      self._send_sequence()  # Update the pool

  def get(self):
    """Creates a generator to extract data from the queue.

    Skip the data if it is `None`.

    Yields:
      The next element in the queue, i.e. a tuple
      `(inputs, targets)` or
      `(inputs, targets, sample_weights)`.
    """
    while self.is_running():
      try:
        # Outer get() takes a future off the queue; inner .get() waits for the
        # worker's result. The timeout lets us re-check is_running().
        inputs = self.queue.get(block=True, timeout=5).get()
        if self.is_running():
          self.queue.task_done()
        if inputs is not None:
          yield inputs
      except queue.Empty:
        pass
      except Exception as e:  # pylint: disable=broad-except
        self.stop()
        raise e
def init_pool_generator(gens, random_seed=None, id_queue=None):
  """Initializer function for pool workers.

  Args:
    gens: State which should be made available to worker processes.
    random_seed: An optional value with which to seed child processes.
    id_queue: A multiprocessing Queue of worker ids. This is used to indicate
      that a worker process was created by Keras and can be terminated using
      the cleanup_all_keras_forkpools utility.
  """
  global _SHARED_SEQUENCES
  _SHARED_SEQUENCES = gens

  worker_proc = multiprocessing.current_process()

  # name isn't used for anything, but setting a more descriptive name is
  # helpful when diagnosing orphaned processes.
  worker_proc.name = 'Keras_worker_{}'.format(worker_proc.name)

  if random_seed is not None:
    # Offset by the worker pid so each worker draws a distinct random stream.
    np.random.seed(random_seed + worker_proc.ident)

  if id_queue is not None:
    # If a worker dies during init, the pool will just create a replacement.
    id_queue.put(worker_proc.ident, block=True, timeout=0.1)
def next_sample(uid):
  """Gets the next value from the generator `uid`.

  To allow multiple generators to be used at the same time, we use `uid` to
  get a specific one. A single generator would cause the validation to
  overwrite the training generator.

  Args:
    uid: int, generator identifier

  Returns:
    The next value of generator `uid`.
  """
  return next(_SHARED_SEQUENCES[uid])
@keras_export('keras.utils.GeneratorEnqueuer')
class GeneratorEnqueuer(SequenceEnqueuer):
  """Builds a queue out of a data generator.

  The provided generator can be finite in which case the class will throw
  a `StopIteration` exception.

  Args:
    generator: a generator function which yields data
    use_multiprocessing: use multiprocessing if True, otherwise threading
    random_seed: Initial seed for workers,
      will be incremented by one for each worker.
  """

  def __init__(self, generator,
               use_multiprocessing=False,
               random_seed=None):
    super(GeneratorEnqueuer, self).__init__(generator, use_multiprocessing)
    self.random_seed = random_seed

  def _get_executor_init(self, workers):
    """Gets the Pool initializer for multiprocessing.

    Args:
      workers: Number of works.

    Returns:
      A Function to initialize the pool
    """

    def pool_fn(seqs):
      pool = get_pool_class(True)(
          workers, initializer=init_pool_generator,
          initargs=(seqs, self.random_seed, get_worker_id_queue()))
      # Track the pool so its status can be checked later (see _DATA_POOLS).
      _DATA_POOLS.add(pool)
      return pool

    return pool_fn

  def _run(self):
    """Submits request to the executor and queue the `Future` objects."""
    self._send_sequence()  # Share the initial generator
    with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
      while True:
        if self.stop_signal.is_set():
          return

        # Blocking put applies backpressure when the consumer falls behind.
        self.queue.put(
            executor.apply_async(next_sample, (self.uid,)), block=True)

  def get(self):
    """Creates a generator to extract data from the queue.

    Skip the data if it is `None`.

    Yields:
      The next element in the queue, i.e. a tuple
      `(inputs, targets)` or
      `(inputs, targets, sample_weights)`.
    """
    try:
      while self.is_running():
        inputs = self.queue.get(block=True).get()
        self.queue.task_done()
        if inputs is not None:
          yield inputs
    except StopIteration:
      # Special case for finite generators
      last_ones = []
      while self.queue.qsize() > 0:
        last_ones.append(self.queue.get(block=True))
      # Wait for them to complete
      for f in last_ones:
        f.wait()
      # Keep the good ones
      last_ones = [future.get() for future in last_ones if future.successful()]
      for inputs in last_ones:
        if inputs is not None:
          yield inputs
    except Exception as e:  # pylint: disable=broad-except
      self.stop()
      if 'generator already executing' in str(e):
        raise RuntimeError(
            'Your generator is NOT thread-safe. '
            'Keras requires a thread-safe generator when '
            '`use_multiprocessing=False, workers > 1`. ')
      raise e
@keras_export('keras.utils.pad_sequences',
              'keras.preprocessing.sequence.pad_sequences')
def pad_sequences(sequences, maxlen=None, dtype='int32',
                  padding='pre', truncating='pre', value=0.):
  """Pads sequences to the same length.

  This function transforms a list (of length `num_samples`)
  of sequences (lists of integers)
  into a 2D Numpy array of shape `(num_samples, num_timesteps)`.
  `num_timesteps` is either the `maxlen` argument if provided,
  or the length of the longest sequence in the list.

  Sequences that are shorter than `num_timesteps`
  are padded with `value` until they are `num_timesteps` long.

  Sequences longer than `num_timesteps` are truncated
  so that they fit the desired length.

  The position where padding or truncation happens is determined by
  the arguments `padding` and `truncating`, respectively.
  Pre-padding or removing values from the beginning of the sequence is the
  default.

  >>> sequence = [[1], [2, 3], [4, 5, 6]]
  >>> tf.keras.preprocessing.sequence.pad_sequences(sequence)
  array([[0, 0, 1],
         [0, 2, 3],
         [4, 5, 6]], dtype=int32)

  >>> tf.keras.preprocessing.sequence.pad_sequences(sequence, value=-1)
  array([[-1, -1,  1],
         [-1,  2,  3],
         [ 4,  5,  6]], dtype=int32)

  >>> tf.keras.preprocessing.sequence.pad_sequences(sequence, padding='post')
  array([[1, 0, 0],
         [2, 3, 0],
         [4, 5, 6]], dtype=int32)

  >>> tf.keras.preprocessing.sequence.pad_sequences(sequence, maxlen=2)
  array([[0, 1],
         [2, 3],
         [5, 6]], dtype=int32)

  Args:
    sequences: List of sequences (each sequence is a list of integers).
    maxlen: Optional Int, maximum length of all sequences. If not provided,
      sequences will be padded to the length of the longest individual
      sequence.
    dtype: (Optional, defaults to `"int32"`). Type of the output sequences.
      To pad sequences with variable length strings, you can use `object`.
    padding: String, "pre" or "post" (optional, defaults to `"pre"`):
      pad either before or after each sequence.
    truncating: String, "pre" or "post" (optional, defaults to `"pre"`):
      remove values from sequences larger than
      `maxlen`, either at the beginning or at the end of the sequences.
    value: Float or String, padding value. (Optional, defaults to 0.)

  Returns:
    Numpy array with shape `(len(sequences), maxlen)`

  Raises:
    ValueError: In case of invalid values for `truncating` or `padding`,
      or in case of invalid shape for a `sequences` entry.
  """
  if not hasattr(sequences, '__len__'):
    raise ValueError('`sequences` must be iterable.')
  num_samples = len(sequences)

  lengths = []
  sample_shape = ()
  flag = True

  # take the sample shape from the first non empty sequence
  # checking for consistency in the main loop below.

  for x in sequences:
    try:
      lengths.append(len(x))
      if flag and len(x):
        sample_shape = np.asarray(x).shape[1:]
        flag = False
    except TypeError as e:
      raise ValueError('`sequences` must be a list of iterables. '
                       f'Found non-iterable: {str(x)}') from e

  if maxlen is None:
    # Guard the empty-input case: `np.max([])` raises; an empty `sequences`
    # now yields an empty (0, 0) array instead of crashing.
    maxlen = np.max(lengths) if lengths else 0

  # `np.unicode_` was an alias of `np.str_` on Python 3 and was removed in
  # NumPy 2.0, so a single `np.str_` check is equivalent and forward-compatible.
  is_dtype_str = np.issubdtype(dtype, np.str_)
  if isinstance(value, str) and dtype != object and not is_dtype_str:
    raise ValueError(
        f'`dtype` {dtype} is not compatible with `value`\'s type: '
        f'{type(value)}\nYou should set `dtype=object` for variable length '
        'strings.')

  x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)
  for idx, s in enumerate(sequences):
    if not len(s):  # pylint: disable=g-explicit-length-test
      continue  # empty list/array was found
    if truncating == 'pre':
      trunc = s[-maxlen:]  # pylint: disable=invalid-unary-operand-type
    elif truncating == 'post':
      trunc = s[:maxlen]
    else:
      raise ValueError(f'Truncating type "{truncating}" not understood')

    # check `trunc` has expected shape
    trunc = np.asarray(trunc, dtype=dtype)
    if trunc.shape[1:] != sample_shape:
      raise ValueError(f'Shape of sample {trunc.shape[1:]} of sequence at '
                       f'position {idx} is different from expected shape '
                       f'{sample_shape}')

    if padding == 'post':
      x[idx, :len(trunc)] = trunc
    elif padding == 'pre':
      x[idx, -len(trunc):] = trunc
    else:
      raise ValueError(f'Padding type "{padding}" not understood')
  return x
#!/usr/bin/python
def scheduler(dummy,state):
    """Scheduler loop: wakes/sleeps the machine at the configured times.

    Rebuilds the `schedule` jobs whenever wake/sleep times or the enable flag
    change, and immediately applies the correct awake/asleep state for "now".

    Args:
        dummy: unused placeholder (kept for the Process(target=...) signature).
        state: shared dict; reads 'wake_time' ("HH:MM"), 'sleep_time',
            'sched_enabled'; writes 'is_awake' via wakeup()/gotosleep().
    """
    import time
    import sys
    import schedule
    from datetime import datetime

    # Redirect this process's stdout/stderr to log files.
    sys.stdout = open("scheduler.log", "a")
    sys.stderr = open("scheduler.err.log", "a")

    # NOTE(review): scheduler.log is opened twice (as sys.stdout above and as
    # fsch here); interleaved buffered writes may collide — confirm intent.
    with open("scheduler.log", "a") as fsch:
        print("Starting scheduler thread ...", file=fsch)

    last_wake = 0
    last_sleep = 0
    last_sched_switch = 0

    while True:
        # Rebuild the schedule only when a relevant setting changed.
        if last_wake != state['wake_time'] or last_sleep != state['sleep_time'] or last_sched_switch != state['sched_enabled']:
            schedule.clear()

            if state['sched_enabled'] == True:
                schedule.every().day.at(state['sleep_time']).do(gotosleep,1,state)
                schedule.every().day.at(state['wake_time']).do(wakeup,1,state)

                # Apply the correct state right away based on where "now"
                # falls in the wake/sleep window (times as fractional hours).
                nowtm = float(datetime.now().hour) + float(datetime.now().minute)/60.
                sleeptm = state['sleep_time'].split(":")
                sleeptm = float(sleeptm[0]) + float(sleeptm[1])/60.
                waketm = state['wake_time'].split(":")
                waketm = float(waketm[0]) + float(waketm[1])/60.

                if waketm < sleeptm:
                    # Same-day window: awake between wake and sleep.
                    if nowtm >= waketm and nowtm < sleeptm:
                        wakeup(1,state)
                    else:
                        gotosleep(1,state)
                elif waketm > sleeptm:
                    # Overnight window: asleep between sleep and wake.
                    if nowtm < waketm and nowtm >= sleeptm:
                        gotosleep(1,state)
                    else:
                        wakeup(1,state)
                # NOTE(review): waketm == sleeptm falls through with no state
                # change — confirm that is intended.
            else:
                # Scheduling disabled: the machine stays awake.
                wakeup(1,state)

            last_wake = state['wake_time']
            last_sleep = state['sleep_time']
            last_sched_switch = state['sched_enabled']

        schedule.run_pending()
        time.sleep(1)
def wakeup(dummy, state):
    """Mark the machine awake in the shared state dict."""
    state['is_awake'] = True
def gotosleep(dummy, state):
    """Mark the machine asleep in the shared state dict."""
    state['is_awake'] = False
def he_control_loop(dummy,state):
    """Heating-element control loop (time-proportional GPIO switching).

    Once per ~1 s window, reads the averaged PID output from `state` and
    drives the heating-element pin with a matching duty cycle.

    Args:
        dummy: unused placeholder (kept for the Process(target=...) signature).
        state: shared dict; reads 'avgpid' (0-100 scale) and 'is_awake',
            writes 'heating'.
    """
    from time import sleep
    import RPi.GPIO as GPIO
    import config as conf

    GPIO.setmode(GPIO.BCM)
    GPIO.setup(conf.he_pin, GPIO.OUT)
    GPIO.output(conf.he_pin,0)

    heating = False  # NOTE(review): local flag is never read afterwards.

    try:
        while True:
            avgpid = state['avgpid']

            if state['is_awake'] == False :
                # Asleep: element stays off.
                state['heating'] = False
                GPIO.output(conf.he_pin,0)
                sleep(1)
            else:
                if avgpid >= 100 :
                    # Saturated output: full power for the whole window.
                    state['heating'] = True
                    GPIO.output(conf.he_pin,1)
                    sleep(1)
                elif avgpid > 0 and avgpid < 100:
                    # Proportional: on for avgpid% of the 1 s window.
                    state['heating'] = True
                    GPIO.output(conf.he_pin,1)
                    sleep(avgpid/100.)
                    GPIO.output(conf.he_pin,0)
                    sleep(1-(avgpid/100.))
                    state['heating'] = False
                else:
                    GPIO.output(conf.he_pin,0)
                    state['heating'] = False
                    sleep(1)
    finally:
        # Always leave the element off and release the GPIO on exit.
        GPIO.output(conf.he_pin,0)
        GPIO.cleanup()
def pid_loop(dummy, state):
    """PID control loop: samples the boiler thermocouple ~10x/s and publishes
    averaged temperature and PID output into the shared `state` dict.

    Uses a "cold boiler" gain set at startup and switches to the "warm" set
    (and back) after 15 minutes without a cold (<100) / warm (>200) reading.

    Args:
        dummy: unused placeholder (kept for the Process(target=...) signature).
        state: shared dict; reads 'settemp', writes 'i', 'temp', 'avgtemp',
            'pidval', 'avgpid', 'pterm', 'iterm', 'dterm', 'iscold'.
    """
    import sys
    from time import sleep, time
    from math import isnan
    import board, busio, digitalio, adafruit_max31855
    import PID as PID
    import config as conf
    from collections import deque

    # Redirect this process's stdout/stderr to log files.
    sys.stdout = open("pid.log", "a")
    sys.stderr = open("pid.err.log", "a")

    def c_to_f(c):
        """Convert Celsius to Fahrenheit."""
        return c * 9.0 / 5.0 + 32.0

    # MAX31855 thermocouple amplifier on SPI.
    spi = busio.SPI(board.SCK, MOSI=board.MOSI, MISO=board.MISO)
    cs = digitalio.DigitalInOut(board.D8)
    sensor = adafruit_max31855.MAX31855(spi=spi, cs=cs)

    # Start with the cold-boiler gain set.
    pid = PID.PID(conf.Pc, conf.Ic, conf.Dc)
    pid.SetPoint = state['settemp']
    pid.setSampleTime(conf.sample_time * 5)

    nanct = 0
    i = 0
    pidhist = deque([0.] * 10)  # rolling window of PID outputs
    avgpid = 0.
    temphist = deque([0.] * 5)  # rolling window of temperature samples
    avgtemp = 0.
    lastsettemp = state['settemp']
    lasttime = time()
    sleeptime = 0
    iscold = True
    iswarm = False
    lastcold = 0
    lastwarm = 0

    with open('pid.log', 'a') as fpid:
        try:
            while True:  # Loops 10x/second
                temp = sensor.temperature
                if isnan(temp):
                    # Transient sensor glitch: skip the sample; give up for
                    # good if the sensor never recovers.
                    nanct += 1
                    if nanct > 100000:
                        sys.exit()  # was `sys.exit` (never called — a no-op)
                    continue
                nanct = 0

                temphist.popleft()
                temphist.append(temp)
                avgtemp = sum(temphist) / len(temphist)

                if avgtemp < 100:
                    lastcold = i
                if avgtemp > 200:
                    lastwarm = i

                # Gain-schedule: switch to the warm gains after 15 min without
                # a cold reading, and back after 15 min without a warm one.
                if iscold and (i - lastcold) * conf.sample_time > 60 * 15:
                    pid = PID.PID(conf.Pw, conf.Iw, conf.Dw)
                    pid.SetPoint = state['settemp']
                    pid.setSampleTime(conf.sample_time * 5)
                    iscold = False
                    iswarm = True  # was never set, leaving the warm->cold
                                   # switch below permanently dead
                if iswarm and (i - lastwarm) * conf.sample_time > 60 * 15:
                    pid = PID.PID(conf.Pc, conf.Ic, conf.Dc)
                    pid.SetPoint = state['settemp']
                    pid.setSampleTime(conf.sample_time * 5)
                    iscold = True
                    iswarm = False

                if state['settemp'] != lastsettemp:
                    pid.SetPoint = state['settemp']
                    lastsettemp = state['settemp']

                if i % 10 == 0:
                    # Update the PID once per second (every 10th sample).
                    pid.update(avgtemp)
                    pidout = pid.output
                    pidhist.popleft()
                    pidhist.append(pidout)
                    avgpid = sum(pidhist) / len(pidhist)

                state['i'] = i
                state['temp'] = round(temp, 2)
                state['avgtemp'] = round(avgtemp, 2)
                state['pidval'] = round(pidout, 2)
                state['avgpid'] = round(avgpid, 2)
                state['pterm'] = round(pid.PTerm, 2)
                if iscold:
                    state['iterm'] = round(pid.ITerm * conf.Ic, 2)
                    state['dterm'] = round(pid.DTerm * conf.Dc, 2)
                else:
                    state['iterm'] = round(pid.ITerm * conf.Iw, 2)
                    state['dterm'] = round(pid.DTerm * conf.Dw, 2)
                state['iscold'] = iscold

                print(time(), state, file=fpid)

                # Sleep out the remainder of the sample window.
                sleeptime = lasttime + conf.sample_time - time()
                if sleeptime < 0:
                    sleeptime = 0
                sleep(sleeptime)
                i += 1
                lasttime = time()
        finally:
            pid.clear()  # was `pid.clear` (never called — a no-op)
def rest_server(dummy, state):
    """Bottle REST server exposing machine state and controls.

    Args:
        dummy: unused placeholder (kept for the Process(target=...) signature).
        state: shared dict read/written by the HTTP handlers below.
    """
    from bottle import route, run, get, post, request, static_file, abort
    from subprocess import call
    from datetime import datetime
    import config as conf
    import os

    @route('/')
    def docroot():
        return static_file('index.html', wwwdir)

    @route('/<filepath:path>')
    def servfile(filepath):
        return static_file(filepath, wwwdir)

    @route('/curtemp')
    def curtemp():
        return str(state['avgtemp'])

    @get('/settemp')
    def settemp():
        return str(state['settemp'])

    @post('/settemp')
    def post_settemp():
        # Keep only the conversion in the try block: the previous bare
        # `except:` also swallowed the HTTPError raised by the range-check
        # abort() below, masking its message with 'Invalid number...'.
        try:
            settemp = float(request.forms.get('settemp'))
        except (TypeError, ValueError):
            # Missing form field (None) or non-numeric input.
            abort(400, 'Invalid number for set temp.')
        if 0 <= settemp <= 160:
            state['settemp'] = settemp
            return str(settemp)
        else:
            abort(400, 'Set temp out of range 0-160.')

    @get('/is_awake')
    def get_is_awake():
        return str(state['is_awake'])

    @post('/scheduler')
    def set_sched():
        sched = request.forms.get('scheduler')
        if sched == "True":
            state['sched_enabled'] = True
        elif sched == "False":
            state['sched_enabled'] = False
            # Disabling the scheduler leaves the machine awake.
            state['is_awake'] = True
        else:
            abort(400, 'Invalid scheduler setting. Expecting True or False')

    @post('/setwake')
    def set_wake():
        wake = request.forms.get('wake')
        try:
            # Validate HH:MM format; TypeError covers a missing (None) field.
            datetime.strptime(wake, '%H:%M')
        except (TypeError, ValueError):
            abort(400, 'Invalid time format.')
        state['wake_time'] = wake
        return str(wake)

    @post('/setsleep')
    def set_sleep():
        sleep = request.forms.get('sleep')
        try:
            datetime.strptime(sleep, '%H:%M')
        except (TypeError, ValueError):
            abort(400, 'Invalid time format.')
        state['sleep_time'] = sleep
        return str(sleep)

    @get('/allstats')
    def allstats():
        return dict(state)

    @route('/restart')
    def restart():
        call(["reboot"])
        return ''

    @route('/shutdown')
    def shutdown():
        call(["shutdown", "-h", "now"])
        return ''

    @get('/healthcheck')
    def healthcheck():
        return 'OK'

    with open('webserver.log', 'a') as fweb:
        print('derp', file=fweb)
        basedir = os.path.dirname(os.path.realpath(__file__))
        print("basedir:", basedir, file=fweb)
        # `wwwdir` is a closure variable for the static-file handlers above;
        # it is bound here before run() starts serving requests.
        wwwdir = basedir + '/www/'
        print("wwwdir:", wwwdir, file=fweb)
        print("running the server now...", file=fweb)
        run(host='0.0.0.0', port=conf.port, server='cheroot')
if __name__ == '__main__':
    from multiprocessing import Process, Manager
    from time import sleep
    from urllib.request import urlopen
    import config as conf

    # Shared state dict visible to all worker processes
    manager = Manager()
    pidstate = manager.dict()
    pidstate['is_awake'] = True
    pidstate['sched_enabled'] = conf.sched_enabled
    pidstate['sleep_time'] = conf.sleep_time
    pidstate['wake_time'] = conf.wake_time
    pidstate['i'] = 0  # PID loop iteration counter, used below as a heartbeat
    pidstate['settemp'] = conf.set_temp
    pidstate['avgpid'] = 0.

    print("Starting Scheduler thread...")
    s = Process(target=scheduler,args=(1,pidstate))
    s.daemon = True
    s.start()
    print("Starting PID thread...")
    p = Process(target=pid_loop,args=(1,pidstate))
    p.daemon = True
    p.start()
    print("Starting HE Control thread...")
    h = Process(target=he_control_loop,args=(1,pidstate))
    h.daemon = True
    h.start()
    print("Starting REST Server thread...")
    r = Process(target=rest_server,args=(1,pidstate))
    r.daemon = True
    r.start()

    # Watchdog: once a second, check the PID heartbeat and the web server's
    # /healthcheck endpoint. A wedged child is terminated, which ends this
    # loop (all workers are daemons) so an external supervisor can restart us.
    print("Starting Watchdog...")
    piderr = 0      # consecutive ticks without PID heartbeat progress
    weberr = 0      # consecutive failed health checks
    weberrflag = 0  # set when the current tick's health check failed
    urlhc = 'http://localhost:'+str(conf.port)+'/healthcheck'
    lasti = pidstate['i']
    sleep(1)
    while p.is_alive() and h.is_alive() and r.is_alive() and s.is_alive():
        curi = pidstate['i']
        if curi == lasti:
            piderr = piderr + 1
        else:
            piderr = 0
        lasti = curi
        if piderr > 9:
            print('ERROR IN PID THREAD, RESTARTING')
            p.terminate()
        try:
            hc = urlopen(urlhc,timeout=2)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still stop the watchdog instead of counting as a web error.
            weberrflag = 1
        else:
            if hc.getcode() != 200:
                weberrflag = 1
        if weberrflag != 0:
            weberr = weberr + 1
        if weberr > 9:
            print('ERROR IN WEB SERVER THREAD, RESTARTING')
            r.terminate()
        weberrflag = 0
        sleep(1)
|
__init__.py | # -*- coding: utf-8 -*-
"""
This file is covered by the LICENSING file in the root of this project.
"""
from werkzeug.exceptions import HTTPException
__author__ = 'Junbo Wang'
__version__ = '2.0'
from flask import Flask
from flask_restful import Api
from flask_cors import CORS
from datetime import timedelta
from util import safe_get_config, get_class, Utility, Email, DisabledVoiceVerify, RonglianVoiceVerify, DisabledSms, \
ChinaTelecomSms
from hackathon_factory import factory, RequiredFeature
from hackathon_scheduler import HackathonScheduler
from hackathon_response import *
from hackathon_exception import *
from log import log
from context import Context
# Names exported by `from hackathon import *`
__all__ = [
    "app",
    "Context",
    "RequiredFeature",
    "Component",
]

# initialize flask and flask restful
app = Flask(__name__)
# Session-signing key; falls back to a placeholder when not configured
app.config['SECRET_KEY'] = safe_get_config("app.secret_key", "secret_key")
class HackathonApi(Api):
    """Customized flask-restful Api that handles exceptions at framework level.

    Our restful APIs always respond with HTTP code 200, even if an Exception
    is thrown and not caught in our code. HTTPException and its subclasses may
    be raised directly in components; they are caught here. Two equivalent
    ways to respond with an error:
        - return bad_request("some message")
        - raise BadRequest("some message")
    """

    def handle_error(self, e):
        # Map a raised HTTPException onto the matching hackathon_response
        # helper; everything is delivered with HTTP status 200 by convention.
        log.error(e)
        if isinstance(e, HTTPException):
            message = e.description
            # flask-restful attaches parsed payload details under e.data
            if hasattr(e, "data") and "message" in e.data:
                message = e.data["message"]
            if e.code == 400:
                return self.make_response(bad_request(message), 200)
            if e.code == 401:
                return self.make_response(unauthorized(message), 200)
            if e.code == 403:
                return self.make_response(forbidden(message), 200)
            if e.code == 404:
                return self.make_response(not_found(message), 200)
            if e.code == 409:
                return self.make_response(conflict(message), 200)
            if e.code == 412:
                return self.make_response(precondition_failed(message), 200)
            if e.code == 415:
                return self.make_response(unsupported_mediatype(message), 200)
            if e.code == 500:
                return self.make_response(internal_server_error(message), 200)
        # if exception cannot be handled, return error 500
        # (e.message is the Python-2 exception attribute used project-wide)
        return self.make_response(internal_server_error(e.message), 200)
# init restful API
api = HackathonApi(app)

# Enable CORS support. Currently requests of all methods from all domains are allowed
app.config['CORS_HEADERS'] = 'Content-Type, token, hackathon_name'
cors = CORS(app)

# initialize hackathon scheduler
scheduler = HackathonScheduler(app)
@app.errorhandler(400)
def bad_request_handler(error):
    # Flask-level 400 handler (for errors raised outside flask-restful views)
    log.error(error)
    return bad_request(error.message)


@app.errorhandler(412)
def precondition_failed_handler(error):
    # Flask-level 412 handler
    log.error(error)
    return precondition_failed(error.message)


@app.errorhandler(Exception)
def exception_handler(error):
    # Catch-all: any unhandled exception becomes a 500-style JSON response
    log.error(error)
    return internal_server_error(error.message)


@app.before_request
def before_request():
    # Touch the current user's last-operation timestamp on every request
    user_manager = RequiredFeature("user_manager")
    user_manager.update_user_operation_time()
class Component(object):
    """Base class of business objects.

    Subclasses can use self.log, self.db, self.util etc. directly without
    importing or instantiating them.
    """
    # Lazily-resolved shared services injected via the hackathon factory
    log = RequiredFeature("log")
    db = RequiredFeature("db")
    util = RequiredFeature("util")
    scheduler = RequiredFeature("scheduler")
    cache = RequiredFeature("cache")
def init_components():
    """Init hackathon factory.

    Registers every shared service and business manager with the DI factory.
    Registration order matters for the first few entries: util/log/db are
    dependencies of nearly everything else.
    """
    from hackathon.user import UserManager, UserProfileManager
    from hackathon.hack import HackathonManager, AdminManager, TeamManager, DockerHostManager, \
        AzureCertManager, RegisterManager, HackathonTemplateManager, Cryptor
    from hackathon.template import TemplateLibrary
    from hackathon.remote.guacamole import GuacamoleInfo
    from hackathon.cache.cache_mgr import CacheManagerExt

    # dependencies MUST be provided in advance
    factory.provide("util", Utility)
    factory.provide("log", log)
    init_db()

    # utils
    init_voice_verify()
    init_sms()
    factory.provide("email", Email)

    # cache
    factory.provide("cache", CacheManagerExt)

    # scheduler
    factory.provide("scheduler", scheduler)

    # business components
    factory.provide("user_manager", UserManager)
    factory.provide("user_profile_manager", UserProfileManager)
    factory.provide("hackathon_manager", HackathonManager)
    factory.provide("register_manager", RegisterManager)
    factory.provide("azure_cert_manager", AzureCertManager)
    factory.provide("cryptor", Cryptor)
    factory.provide("docker_host_manager", DockerHostManager)
    factory.provide("hackathon_template_manager", HackathonTemplateManager)
    factory.provide("template_library", TemplateLibrary)
    factory.provide("admin_manager", AdminManager)
    factory.provide("team_manager", TeamManager)
    factory.provide("guacamole", GuacamoleInfo)

    # experiment starter
    init_expr_components()

    # health check items (resolved lazily by dotted path)
    factory.provide("health_check_hosted_docker", get_class("hackathon.health.health_check.HostedDockerHealthCheck"))
    factory.provide("health_check_alauda_docker", get_class("hackathon.health.health_check.AlaudaDockerHealthCheck"))
    factory.provide("health_check_guacamole", get_class("hackathon.health.health_check.GuacamoleHealthCheck"))
    factory.provide("health_check_azure", get_class("hackathon.health.health_check.AzureHealthCheck"))
    factory.provide("health_check_mongodb", get_class("hackathon.health.health_check.MongoDBHealthCheck"))

    # docker
    factory.provide("hosted_docker_proxy", get_class("hackathon.docker.hosted_docker.HostedDockerFormation"))
    factory.provide("alauda_docker_proxy", get_class("hackathon.docker.alauda_docker.AlaudaDockerFormation"))

    # storage
    init_hackathon_storage()
def init_db():
    """Register the MongoDB handle with the factory."""
    from hmongo import db
    # suspend_callable=True: presumably defers resolution until first use —
    # confirm against hackathon_factory.provide
    factory.provide("db", db, suspend_callable=True)
def init_expr_components():
    """Register the experiment manager and per-platform experiment starters."""
    from expr import ExprManager, AzureVMExprStarter, AzureHostedDockerStarter, AlaudaDockerStarter, K8SExprStarter
    factory.provide("expr_manager", ExprManager)
    factory.provide("alauda_docker", AlaudaDockerStarter)
    factory.provide("azure_docker", AzureHostedDockerStarter)
    factory.provide("azure_vm", AzureVMExprStarter)
    factory.provide("k8s_service", K8SExprStarter)
def init_voice_verify():
    """Register the voice-verify service with the factory.

    A disabled stub is provided when "voice_verify.enabled" is False.
    Otherwise both a provider name and that provider's config section must
    exist, or a ConfigurationException is raised.

    Example for config.py:
        "voice_verify": {
            "enabled": True,
            "provider": "rong_lian",
            "rong_lian": {
                ... ...
            }
        }
    """
    provider_name = safe_get_config("voice_verify.provider", None)
    if not safe_get_config("voice_verify.enabled", False):
        log.warn("voice verify disabled")
        factory.provide("voice_verify", DisabledVoiceVerify)
        return
    if provider_name and safe_get_config("voice_verify." + provider_name, None):
        log.warn("Voice verify initialized to:" + provider_name)
        # if provider other than Ronglian is supported, update following lines
        factory.provide("voice_verify", RonglianVoiceVerify)
    else:
        log.warn("either voice verify provider name or provider config is missing, Please check your configuration")
        raise ConfigurationException("voice_verify.provider")
def init_sms():
    """Register the SMS service with the factory.

    A disabled stub is provided when "sms.enabled" is False; otherwise a
    configured provider (currently ChinaTelecom) is required.
    """
    provider_name = safe_get_config("sms.provider", None)
    if not safe_get_config("sms.enabled", False):
        log.warn("SMS service disabled")
        factory.provide("sms", DisabledSms)
        return
    if provider_name and safe_get_config("sms." + provider_name, None):
        log.warn("SMS initialized to:" + provider_name)
        # if provider other than ChinaTelecom is supported, update following lines
        factory.provide("sms", ChinaTelecomSms)
    else:
        log.warn("Either SMS provider name or provider config is missing, Please check your configuration")
        raise ConfigurationException("sms.provider")
def init_hackathon_storage():
    """Add storage implementation to hackathon factory.

    The type of storage is configured by "storage.type" in config.py;
    anything other than "azure" falls back to local disk storage.
    """
    from hackathon.storage import AzureStorage, LocalStorage
    storage_type = safe_get_config("storage.type", "azure")
    if storage_type == "azure":
        # init BlobServiceAdapter first since AzureStorage depends on it. And accountKey must be included in config file
        from hackathon.hazure import BlobServiceAdapter
        factory.provide("azure_blob_service", BlobServiceAdapter)
        factory.provide("storage", AzureStorage)
    else:
        factory.provide("storage", LocalStorage)
def init_schedule_jobs():
    """Init scheduled jobs.

    Note that scheduler jobs will NOT be enabled in the main thread, so the
    real initialization work is completed in a separate thread; otherwise
    there might be a deadlock in the main thread.
    """
    import threading
    t = threading.Thread(target=__init_schedule_jobs)
    t.start()
def __init_schedule_jobs():
    """Init scheduled jobs in fact (runs in a worker thread)."""
    log.debug("init scheduled jobs......")
    util = RequiredFeature("util")
    sche = RequiredFeature("scheduler")
    # Recurring jobs are only registered outside local development mode
    if not util.is_local():
        hackathon_manager = RequiredFeature("hackathon_manager")

        # schedule job to check recycle operation
        next_run_time = util.get_now() + timedelta(seconds=10)
        sche.add_interval(feature="expr_manager",
                          method="scheduler_recycle_expr",
                          id="scheduler_recycle_expr",
                          next_run_time=next_run_time,
                          minutes=10)

        # schedule job to pre-allocate environment
        hackathon_manager.schedule_pre_allocate_expr_job()

        # schedule job to pull docker images automatically
        #if not safe_get_config("docker.alauda.enabled", False):
        #    docker = RequiredFeature("hosted_docker_proxy")
        #    docker.ensure_images()

        # schedule job to pre-create a docker host server VM
        #host_server_manager.schedule_pre_allocate_host_server_job()

    # init the overtime-sessions detection to update users' online status
    sche.add_interval(feature="user_manager",
                      method="check_user_online_status",
                      id="check_user_online_status",
                      minutes=10)
def init_app():
    """Initialize the application.

    Work includes:
        - setting up the hackathon factory,
        - registering restful API routes,
        - initializing scheduled jobs.
    """
    init_components()

    from views import init_routes
    init_routes()
    init_schedule_jobs()

    # In local mode, report guacamole health once at startup for debugging
    health_check_guacamole = RequiredFeature("health_check_guacamole")
    u = RequiredFeature("util")
    if u.is_local():
        log.debug("guacamole status: %s" % health_check_guacamole.report_health())
init_app()
|
_testing.py | import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import os
from shutil import rmtree
import string
import tempfile
from typing import Any, Callable, List, Optional, Type, Union, cast
import warnings
import zipfile
import numpy as np
from numpy.random import rand, randn
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
import pandas._libs.testing as _testing
from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries
from pandas.compat import _get_lzma_file, _import_lzma
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_number,
is_numeric_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
period_array,
)
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
# lzma may be absent on some builds; _import_lzma handles that gracefully
lzma = _import_lzma()

# Default dimensions for random test frames/series
_N = 30
_K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False

# Dtype groupings used to parametrize tests ("EA" = extension arrays)
UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: List[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: List[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"]
COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: List[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: List[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: List[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES = [bool, "bool"]
BYTES_DTYPES = [bytes, "bytes"]
OBJECT_DTYPES = [object, "object"]
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
    ALL_REAL_DTYPES
    + COMPLEX_DTYPES
    + STRING_DTYPES
    + DATETIME64_DTYPES
    + TIMEDELTA64_DTYPES
    + BOOL_DTYPES
    + OBJECT_DTYPES
    + BYTES_DTYPES
)

# set testing_mode
# Warning categories toggled when PANDAS_TESTING_MODE contains "deprecate"
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
    """Enable "always" display of the testing-mode warning categories when
    the PANDAS_TESTING_MODE environment variable requests deprecations."""
    mode = os.environ.get("PANDAS_TESTING_MODE", "None")
    if "deprecate" in mode:
        warnings.simplefilter("always", _testing_mode_warnings)
def reset_testing_mode():
    """Silence the testing-mode warning categories again when the
    PANDAS_TESTING_MODE environment variable requests deprecations."""
    mode = os.environ.get("PANDAS_TESTING_MODE", "None")
    if "deprecate" in mode:
        warnings.simplefilter("ignore", _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
    """
    Restore every pandas ``display.*`` option to its default value.
    """
    pd.reset_option("^display.", silent=True)
def round_trip_pickle(
    obj: Any, path: Optional[FilePathOrBuffer] = None
) -> FrameOrSeries:
    """
    Pickle an object and then read it again.

    Parameters
    ----------
    obj : any object
        The object to pickle and then re-read.
    path : str, path object or file-like object, default None
        The path where the pickled object is written and then read.

    Returns
    -------
    pandas object
        The original object that was pickled and then re-read.
    """
    _path = path
    if _path is None:
        # Random temp name so parallel test runs don't collide
        _path = f"__{rands(10)}__.pickle"
    with ensure_clean(_path) as temp_path:
        pd.to_pickle(obj, temp_path)
        return pd.read_pickle(temp_path)
def round_trip_pathlib(writer, reader, path: Optional[str] = None):
    """
    Write an object to file specified by a pathlib.Path and read it back.

    Parameters
    ----------
    writer : callable bound to pandas object
        IO writing function (e.g. DataFrame.to_csv )
    reader : callable
        IO reading function (e.g. pd.read_csv )
    path : str, default None
        The path where the object is written and then read.

    Returns
    -------
    pandas object
        The original object that was serialized and then re-read.
    """
    import pytest

    # Skip gracefully if pathlib is unavailable (legacy environments)
    Path = pytest.importorskip("pathlib").Path
    if path is None:
        path = "___pathlib___"
    with ensure_clean(path) as path:
        writer(Path(path))
        obj = reader(Path(path))
    return obj
def round_trip_localpath(writer, reader, path: Optional[str] = None):
    """
    Write an object to file specified by a py.path LocalPath and read it back.

    Parameters
    ----------
    writer : callable bound to pandas object
        IO writing function (e.g. DataFrame.to_csv )
    reader : callable
        IO reading function (e.g. pd.read_csv )
    path : str, default None
        The path where the object is written and then read.

    Returns
    -------
    pandas object
        The original object that was serialized and then re-read.
    """
    import pytest

    # Skip gracefully if the py package is unavailable
    LocalPath = pytest.importorskip("py.path").local
    if path is None:
        path = "___localpath___"
    with ensure_clean(path) as path:
        writer(LocalPath(path))
        obj = reader(LocalPath(path))
    return obj
@contextmanager
def decompress_file(path, compression):
    """
    Open a compressed file and return a file object.

    Parameters
    ----------
    path : str
        The path where the file is read from.
    compression : {'gzip', 'bz2', 'zip', 'xz', None}
        Name of the decompression to use

    Raises
    ------
    ValueError
        If the compression type is unrecognized, or a ZIP archive does not
        contain exactly one member.

    Returns
    -------
    file object
    """
    zip_file = None
    if compression is None:
        f = open(path, "rb")
    elif compression == "gzip":
        f = gzip.open(path, "rb")
    elif compression == "bz2":
        f = bz2.BZ2File(path, "rb")
    elif compression == "xz":
        f = _get_lzma_file(lzma)(path, "rb")
    elif compression == "zip":
        zip_file = zipfile.ZipFile(path)
        zip_names = zip_file.namelist()
        if len(zip_names) == 1:
            f = zip_file.open(zip_names.pop())
        else:
            # BUG FIX: previously the archive handle leaked here because the
            # raise happened before the try/finally below was entered.
            zip_file.close()
            raise ValueError(f"ZIP file {path} error. Only one file per ZIP.")
    else:
        raise ValueError(f"Unrecognized compression type: {compression}")

    try:
        yield f
    finally:
        f.close()
        if zip_file is not None:
            zip_file.close()
def write_to_compressed(compression, path, data, dest="test"):
    """
    Write data to a compressed file.

    Parameters
    ----------
    compression : {'gzip', 'bz2', 'zip', 'xz'}
        The compression type to use.
    path : str
        The file path to write the data.
    data : str
        The data to write.
    dest : str, default "test"
        The destination file (for ZIP only)

    Raises
    ------
    ValueError : An invalid compression value was passed in.
    """
    if compression == "xz":
        compress_method = _get_lzma_file(lzma)
    elif compression == "zip":
        compress_method = zipfile.ZipFile
    elif compression == "gzip":
        compress_method = gzip.GzipFile
    elif compression == "bz2":
        compress_method = bz2.BZ2File
    else:
        raise ValueError(f"Unrecognized compression type: {compression}")

    if compression == "zip":
        # ZipFile needs an archive member name alongside the payload
        mode, method, args = "w", "writestr", (dest, data)
    else:
        mode, method, args = "wb", "write", (data,)

    with compress_method(path, mode=mode) as fh:
        getattr(fh, method)(*args)
def assert_almost_equal(
    left,
    right,
    check_dtype: Union[bool, str] = "equiv",
    check_less_precise: Union[bool, int] = False,
    **kwargs,
):
    """
    Check that the left and right objects are approximately equal.

    By approximately equal, we refer to objects that are numbers or that
    contain numbers which may be equivalent to specific levels of precision.

    Parameters
    ----------
    left : object
    right : object
    check_dtype : bool or {'equiv'}, default 'equiv'
        Check dtype if both a and b are the same type. If 'equiv' is passed in,
        then `RangeIndex` and `Int64Index` are also considered equivalent
        when doing type checking.
    check_less_precise : bool or int, default False
        Specify comparison precision. 5 digits (False) or 3 digits (True)
        after decimal points are compared. If int, then specify the number
        of digits to compare.

        When comparing two numbers, if the first number has magnitude less
        than 1e-5, we compare the two numbers directly and check whether
        they are equivalent within the specified precision. Otherwise, we
        compare the **ratio** of the second number to the first number and
        check whether it is equivalent to 1 within the specified precision.
    """
    # Dispatch on pandas container type; the type-specific helpers produce
    # friendlier diff messages than the generic C-level comparison below.
    if isinstance(left, pd.Index):
        assert_index_equal(
            left,
            right,
            check_exact=False,
            exact=check_dtype,
            check_less_precise=check_less_precise,
            **kwargs,
        )
    elif isinstance(left, pd.Series):
        assert_series_equal(
            left,
            right,
            check_exact=False,
            check_dtype=check_dtype,
            check_less_precise=check_less_precise,
            **kwargs,
        )
    elif isinstance(left, pd.DataFrame):
        assert_frame_equal(
            left,
            right,
            check_exact=False,
            check_dtype=check_dtype,
            check_less_precise=check_less_precise,
            **kwargs,
        )
    else:
        # Other sequences.
        if check_dtype:
            if is_number(left) and is_number(right):
                # Do not compare numeric classes, like np.float64 and float.
                pass
            elif is_bool(left) and is_bool(right):
                # Do not compare bool classes, like np.bool_ and bool.
                pass
            else:
                if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
                    obj = "numpy array"
                else:
                    obj = "Input"
                assert_class_equal(left, right, obj=obj)
        # Generic elementwise/approximate comparison in pandas._libs.testing
        _testing.assert_almost_equal(
            left,
            right,
            check_dtype=check_dtype,
            check_less_precise=check_less_precise,
            **kwargs,
        )
def _check_isinstance(left, right, cls):
    """
    Helper for the assert_* functions: verify that both comparands are
    instances of ``cls`` before the real comparison begins.

    Parameters
    ----------
    left : The first object being compared.
    right : The second object being compared.
    cls : The class type to check against.

    Raises
    ------
    AssertionError : Either `left` or `right` is not an instance of `cls`.
    """
    name = cls.__name__
    # Check left first, then right, to match the historical error ordering
    for obj in (left, right):
        if not isinstance(obj, cls):
            raise AssertionError(
                f"{name} Expected type {cls}, found {type(obj)} instead"
            )
def assert_dict_equal(left, right, compare_keys: bool = True):
    """Assert two dicts are equal; comparison is delegated to
    pandas._libs.testing after both arguments are verified to be dicts."""
    _check_isinstance(left, right, dict)
    _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p: float = 0.5):
    """Return a random boolean array of shape ``size``; each element is
    True with probability ``p``."""
    return np.random.rand(*size) <= p
# Alphanumeric character pool for random ASCII strings
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
# chr(1488)..chr(1513) plus digits: a non-ASCII pool to exercise unicode paths
RANDU_CHARS = np.array(
    list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
    dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
    """
    Generate an array of random nchars-long alphanumeric strings.
    """
    # Draw nchars characters per element, then view them as fixed-width
    # strings and reshape to the requested size
    retval = (
        np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
        .view((np.str_, nchars))
        .reshape(size)
    )
    return retval.astype(dtype)
def randu_array(nchars, size, dtype="O"):
    """
    Generate an array of random nchars-long non-ASCII unicode strings.
    """
    retval = (
        np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
        .view((np.unicode_, nchars))
        .reshape(size)
    )
    return retval.astype(dtype)
def rands(nchars):
    """
    Generate one random nchars-long alphanumeric string.

    See `rands_array` if you want to create an array of random strings.
    """
    return "".join(np.random.choice(RANDS_CHARS, nchars))
def close(fignum=None):
    """Close the matplotlib figure ``fignum``, or all open figures if None."""
    from matplotlib.pyplot import get_fignums, close as _close

    if fignum is None:
        for fignum in get_fignums():
            _close(fignum)
    else:
        _close(fignum)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False, **kwargs):
    """
    Gets a temporary path and agrees to remove on close.

    Parameters
    ----------
    filename : str (optional)
        if None, creates a temporary file which is then removed when out of
        scope. if passed, creates temporary file with filename as ending.
    return_filelike : bool (default False)
        if True, returns a file-like which is *always* cleaned. Necessary for
        savefig and other functions which want to append extensions.
    **kwargs
        Additional keywords passed in for creating a temporary file.
        :meth:`tempFile.TemporaryFile` is used when `return_filelike` is ``True``.
        :meth:`tempfile.mkstemp` is used when `return_filelike` is ``False``.
        Note that the `filename` parameter will be passed in as the `suffix`
        argument to either function.

    See Also
    --------
    tempfile.TemporaryFile
    tempfile.mkstemp
    """
    filename = filename or ""
    fd = None

    # The requested filename becomes the suffix of the generated temp file
    kwargs["suffix"] = filename

    if return_filelike:
        f = tempfile.TemporaryFile(**kwargs)

        try:
            yield f
        finally:
            f.close()
    else:
        # Don't generate tempfile if using a path with directory specified.
        if len(os.path.dirname(filename)):
            raise ValueError("Can't pass a qualified name to ensure_clean()")

        try:
            fd, filename = tempfile.mkstemp(**kwargs)
        except UnicodeEncodeError:
            import pytest

            pytest.skip("no unicode file names on this system")

        try:
            yield filename
        finally:
            # Cleanup is best-effort: close the descriptor and remove the
            # file, printing (not raising) on failure so test teardown
            # never masks the real test outcome.
            try:
                os.close(fd)
            except OSError:
                print(f"Couldn't close file descriptor: {fd} (file: (unknown))")
            try:
                if os.path.exists(filename):
                    os.remove(filename)
            except OSError as e:
                print(f"Exception on removing file: {e}")
@contextmanager
def ensure_clean_dir():
    """
    Yield the path of a freshly created temporary directory and remove it
    (recursively, best-effort) on exit.

    Yields
    ------
    Temporary directory path
    """
    dirpath = tempfile.mkdtemp(suffix="")
    try:
        yield dirpath
    finally:
        # Removal can fail (e.g. open handles on Windows); ignore quietly
        try:
            rmtree(dirpath)
        except OSError:
            pass
@contextmanager
def ensure_safe_environment_variables():
    """
    Context manager that snapshots ``os.environ`` on entry and restores it
    on exit, so environment mutations inside the block can neither persist
    nor change global state.
    """
    snapshot = dict(os.environ)
    try:
        yield
    finally:
        os.environ.clear()
        os.environ.update(snapshot)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
    """
    Check whether arr1 and arr2 contain the same set of unique elements.
    """
    return set(arr1) == set(arr2)
def assert_index_equal(
    left: Index,
    right: Index,
    exact: Union[bool, str] = "equiv",
    check_names: bool = True,
    check_less_precise: Union[bool, int] = False,
    check_exact: bool = True,
    check_categorical: bool = True,
    obj: str = "Index",
) -> None:
    """
    Check that left and right Index are equal.

    Parameters
    ----------
    left : Index
    right : Index
    exact : bool or {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical. If 'equiv', then RangeIndex can be substituted for
        Int64Index as well.
    check_names : bool, default True
        Whether to check the names attribute.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.
    check_exact : bool, default True
        Whether to compare number exactly.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    obj : str, default 'Index'
        Specify object name being compared, internally used to show appropriate
        assertion message.
    """
    __tracebackhide__ = True

    def _check_types(l, r, obj="Index"):
        # Compare class, dtype and inferred_type (honoring `exact`/'equiv')
        if exact:
            assert_class_equal(l, r, exact=exact, obj=obj)

            # Skip exact dtype checking when `check_categorical` is False
            if check_categorical:
                assert_attr_equal("dtype", l, r, obj=obj)

            # allow string-like to have different inferred_types
            # BUG FIX: `in ("string")` was a substring test against the
            # string "string"; the one-element tuple makes it the intended
            # membership test.
            if l.inferred_type in ("string",):
                assert r.inferred_type in ("string",)
            else:
                assert_attr_equal("inferred_type", l, r, obj=obj)

    def _get_ilevel_values(index, level):
        # accept level number only
        unique = index.levels[level]
        level_codes = index.codes[level]
        filled = take_1d(unique._values, level_codes, fill_value=unique._na_value)
        values = unique._shallow_copy(filled, name=index.names[level])
        return values

    # instance validation
    _check_isinstance(left, right, Index)

    # class / dtype comparison
    _check_types(left, right, obj=obj)

    # level comparison
    if left.nlevels != right.nlevels:
        msg1 = f"{obj} levels are different"
        msg2 = f"{left.nlevels}, {left}"
        msg3 = f"{right.nlevels}, {right}"
        raise_assert_detail(obj, msg1, msg2, msg3)

    # length comparison
    if len(left) != len(right):
        msg1 = f"{obj} length are different"
        msg2 = f"{len(left)}, {left}"
        msg3 = f"{len(right)}, {right}"
        raise_assert_detail(obj, msg1, msg2, msg3)

    # MultiIndex special comparison for little-friendly error messages
    if left.nlevels > 1:
        left = cast(MultiIndex, left)
        right = cast(MultiIndex, right)

        for level in range(left.nlevels):
            # cannot use get_level_values here because it can change dtype
            llevel = _get_ilevel_values(left, level)
            rlevel = _get_ilevel_values(right, level)

            lobj = f"MultiIndex level [{level}]"
            assert_index_equal(
                llevel,
                rlevel,
                exact=exact,
                check_names=check_names,
                check_less_precise=check_less_precise,
                check_exact=check_exact,
                obj=lobj,
            )
            # get_level_values may change dtype
            _check_types(left.levels[level], right.levels[level], obj=obj)

    # skip exact index checking when `check_categorical` is False
    if check_exact and check_categorical:
        if not left.equals(right):
            diff = np.sum((left.values != right.values).astype(int)) * 100.0 / len(left)
            msg = f"{obj} values are different ({np.round(diff, 5)} %)"
            raise_assert_detail(obj, msg, left, right)
    else:
        _testing.assert_almost_equal(
            left.values,
            right.values,
            check_less_precise=check_less_precise,
            check_dtype=exact,
            obj=obj,
            lobj=left,
            robj=right,
        )

    # metadata comparison
    if check_names:
        assert_attr_equal("names", left, right, obj=obj)
    if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
        assert_attr_equal("freq", left, right, obj=obj)
    if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex):
        assert_interval_array_equal(left._values, right._values)

    if check_categorical:
        if is_categorical_dtype(left) or is_categorical_dtype(right):
            assert_categorical_equal(left._values, right._values, obj=f"{obj} category")
def assert_class_equal(left, right, exact: Union[bool, str] = True, obj="Input"):
    """
    Checks classes are equal.

    With exact='equiv', Int64Index and RangeIndex count as the same class.
    """
    __tracebackhide__ = True

    def repr_class(x):
        if isinstance(x, Index):
            # return Index as it is to include values in the error message
            return x

        return type(x).__name__

    if exact == "equiv":
        if type(left) != type(right):
            # allow equivalence of Int64Index/RangeIndex
            types = {type(left).__name__, type(right).__name__}
            if len(types - {"Int64Index", "RangeIndex"}):
                msg = f"{obj} classes are not equivalent"
                raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
    elif exact:
        if type(left) != type(right):
            msg = f"{obj} classes are different"
            raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):
    """
    Check attributes are equal. Both objects must have attribute.

    Parameters
    ----------
    attr : str
        Attribute name being compared.
    left : object
    right : object
    obj : str, default 'Attributes'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    __tracebackhide__ = True

    left_attr = getattr(left, attr)
    right_attr = getattr(right, attr)

    # Identity short-circuit (also covers both attributes being None)
    if left_attr is right_attr:
        return True
    elif (
        is_number(left_attr)
        and np.isnan(left_attr)
        and is_number(right_attr)
        and np.isnan(right_attr)
    ):
        # np.nan compares unequal to itself, so treat two NaNs as equal here
        return True

    try:
        result = left_attr == right_attr
    except TypeError:
        # datetimetz on rhs may raise TypeError
        result = False
    if not isinstance(result, bool):
        # array-valued comparison: require all elements equal
        result = result.all()

    if result:
        return True
    else:
        msg = f'Attribute "{attr}" are different'
        raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
    """Assert that `objs` is a valid return value from a pandas plotting
    call: either an ndarray/Series whose elements are matplotlib Axes, or a
    single Artist, tuple, or dict."""
    import matplotlib.pyplot as plt

    if isinstance(objs, (pd.Series, np.ndarray)):
        for el in objs.ravel():
            msg = (
                "one of 'objs' is not a matplotlib Axes instance, "
                f"type encountered {repr(type(el).__name__)}"
            )
            assert isinstance(el, (plt.Axes, dict)), msg
    else:
        # BUG FIX: the message previously said "ArtistArtist"
        msg = (
            "objs is neither an ndarray of Artist instances nor a single "
            "Artist instance, tuple, or dict, 'objs' is a "
            f"{repr(type(objs).__name__)}"
        )
        assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def assert_is_sorted(seq):
    """Assert that the sequence is sorted."""
    if isinstance(seq, (Index, Series)):
        seq = seq.values
    # sorting does not change precisions
    # compare the sequence against a sorted copy of itself
    assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(
    left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
    """
    Test that Categoricals are equivalent.

    Parameters
    ----------
    left : Categorical
    right : Categorical
    check_dtype : bool, default True
        Check that integer dtype of the codes are the same
    check_category_order : bool, default True
        Whether the order of the categories should be compared, which
        implies identical integer codes. If False, only the resulting
        values are compared. The ordered attribute is
        checked regardless.
    obj : str, default 'Categorical'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    _check_isinstance(left, right, Categorical)
    if check_category_order:
        # Order-sensitive path: categories and integer codes must match exactly.
        assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
        assert_numpy_array_equal(
            left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes",
        )
    else:
        # Order-insensitive path: compare sorted categories, then the
        # realized values (categories taken at each code position).
        try:
            lc = left.categories.sort_values()
            rc = right.categories.sort_values()
        except TypeError:
            # e.g. '<' not supported between instances of 'int' and 'str'
            lc, rc = left.categories, right.categories
        assert_index_equal(
            lc, rc, obj=f"{obj}.categories",
        )
        assert_index_equal(
            left.categories.take(left.codes),
            right.categories.take(right.codes),
            obj=f"{obj}.values",
        )
    # `ordered` is compared regardless of check_category_order.
    assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
    """
    Test that two IntervalArrays are equivalent.

    Parameters
    ----------
    left, right : IntervalArray
        The IntervalArrays to compare.
    exact : bool or {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical. If 'equiv', then RangeIndex can be substituted for
        Int64Index as well.
    obj : str, default 'IntervalArray'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    _check_isinstance(left, right, IntervalArray)

    assert_index_equal(left.left, right.left, exact=exact, obj=f"{obj}.left")
    # BUG FIX: the failure label for the right endpoints previously read
    # "{obj}.left", making mismatch messages ambiguous about which side failed.
    assert_index_equal(left.right, right.right, exact=exact, obj=f"{obj}.right")
    assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
    """
    Test that two PeriodArrays are equivalent.

    Parameters
    ----------
    left, right : PeriodArray
        The PeriodArrays to compare.
    obj : str, default 'PeriodArray'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    # Hide this frame from pytest tracebacks, consistent with the other
    # assert_*_array_equal helpers in this module.
    __tracebackhide__ = True
    _check_isinstance(left, right, PeriodArray)

    assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
    assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray"):
    """
    Test that two DatetimeArrays are equivalent.

    Compares the underlying ``_data`` ndarray plus the ``freq`` and ``tz``
    attributes.
    """
    __tracebackhide__ = True
    _check_isinstance(left, right, DatetimeArray)
    assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
    assert_attr_equal("freq", left, right, obj=obj)
    assert_attr_equal("tz", left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray"):
    """
    Test that two TimedeltaArrays are equivalent.

    Compares the underlying ``_data`` ndarray plus the ``freq`` attribute.
    """
    __tracebackhide__ = True
    _check_isinstance(left, right, TimedeltaArray)
    assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
    assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None, index_values=None):
    """
    Raise an AssertionError with a standardized multi-line comparison message.

    Parameters
    ----------
    obj : str
        Name of the objects being compared; heads the message.
    message : str
        Description of how the objects differ.
    left, right : object
        The differing values. ndarrays are pretty-printed; categorical
        values are rendered via repr.
    diff : object, optional
        Extra detail appended as a ``[diff]`` line.
    index_values : np.ndarray, optional
        Shared index, shown as an ``[index]`` line.
    """
    __tracebackhide__ = True

    def _render(value):
        # ndarrays get the pandas pretty-printer; categoricals use repr.
        if isinstance(value, np.ndarray):
            return pprint_thing(value)
        if is_categorical_dtype(value):
            return repr(value)
        return value

    msg = f"{obj} are different\n{message}"
    if isinstance(index_values, np.ndarray):
        msg += f"\n[index]: {pprint_thing(index_values)}"
    msg += f"\n[left]: {_render(left)}\n[right]: {_render(right)}"
    if diff is not None:
        msg += f"\n[diff]: {diff}"
    raise AssertionError(msg)
def assert_numpy_array_equal(
    left,
    right,
    strict_nan=False,
    check_dtype=True,
    err_msg=None,
    check_same=None,
    obj="numpy array",
    index_values=None,
):
    """
    Check that 'np.ndarray' is equivalent.

    Parameters
    ----------
    left, right : numpy.ndarray or iterable
        The two arrays to be compared.
    strict_nan : bool, default False
        If True, consider NaN and None to be different.
    check_dtype : bool, default True
        Check dtype if both a and b are np.ndarray.
    err_msg : str, default None
        If provided, used as assertion message.
    check_same : None|'copy'|'same', default None
        Ensure left and right refer/do not refer to the same memory area.
    obj : str, default 'numpy array'
        Specify object name being compared, internally used to show appropriate
        assertion message.
    index_values : numpy.ndarray, default None
        optional index (shared by both left and right), used in output.
    """
    __tracebackhide__ = True
    # instance validation
    # Show a detailed error message when classes are different
    assert_class_equal(left, right, obj=obj)
    # both classes must be an np.ndarray
    _check_isinstance(left, right, np.ndarray)
    # A view's ``.base`` is the array that owns the memory; compare owners so
    # the "same"/"copy" checks see through slices/views.
    def _get_base(obj):
        return obj.base if getattr(obj, "base", None) is not None else obj
    left_base = _get_base(left)
    right_base = _get_base(right)
    if check_same == "same":
        if left_base is not right_base:
            raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
    elif check_same == "copy":
        if left_base is right_base:
            raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")
    # Build and raise the detailed failure message (shape mismatch, or the
    # percentage of differing positions) unless the caller supplied err_msg.
    def _raise(left, right, err_msg):
        if err_msg is None:
            if left.shape != right.shape:
                raise_assert_detail(
                    obj, f"{obj} shapes are different", left.shape, right.shape,
                )
            diff = 0
            for l, r in zip(left, right):
                # count up differences
                if not array_equivalent(l, r, strict_nan=strict_nan):
                    diff += 1
            diff = diff * 100.0 / left.size
            msg = f"{obj} values are different ({np.round(diff, 5)} %)"
            raise_assert_detail(obj, msg, left, right, index_values=index_values)
        raise AssertionError(err_msg)
    # compare shape and values
    if not array_equivalent(left, right, strict_nan=strict_nan):
        _raise(left, right, err_msg)
    if check_dtype:
        if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
            assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
    left,
    right,
    check_dtype=True,
    check_less_precise=False,
    check_exact=False,
    index_values=None,
):
    """
    Check that left and right ExtensionArrays are equal.

    Parameters
    ----------
    left, right : ExtensionArray
        The two arrays to compare.
    check_dtype : bool, default True
        Whether to check if the ExtensionArray dtypes are identical.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.
    check_exact : bool, default False
        Whether to compare number exactly.
    index_values : numpy.ndarray, default None
        optional index (shared by both left and right), used in output.

    Notes
    -----
    Missing values are checked separately from valid values.
    A mask of missing values is computed for each and checked to match.
    The remaining all-valid values are cast to object dtype and checked.
    """
    assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
    assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
    if check_dtype:
        assert_attr_equal("dtype", left, right, obj="ExtensionArray")
    # Datetime-like arrays expose asi8; compare the integer representation
    # directly to avoid slow object-dtype comparisons.
    if hasattr(left, "asi8") and type(right) == type(left):
        # Avoid slow object-dtype comparisons
        # np.asarray for case where we have a np.MaskedArray
        assert_numpy_array_equal(
            np.asarray(left.asi8), np.asarray(right.asi8), index_values=index_values
        )
        return
    # NA masks must match before the valid values are compared.
    left_na = np.asarray(left.isna())
    right_na = np.asarray(right.isna())
    assert_numpy_array_equal(
        left_na, right_na, obj="ExtensionArray NA mask", index_values=index_values
    )
    # Compare only the valid (non-NA) positions, cast to object dtype.
    left_valid = np.asarray(left[~left_na].astype(object))
    right_valid = np.asarray(right[~right_na].astype(object))
    if check_exact:
        assert_numpy_array_equal(
            left_valid, right_valid, obj="ExtensionArray", index_values=index_values
        )
    else:
        _testing.assert_almost_equal(
            left_valid,
            right_valid,
            check_dtype=check_dtype,
            check_less_precise=check_less_precise,
            obj="ExtensionArray",
            index_values=index_values,
        )
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
    left,
    right,
    check_dtype=True,
    check_index_type="equiv",
    check_series_type=True,
    check_less_precise=False,
    check_names=True,
    check_exact=False,
    check_datetimelike_compat=False,
    check_categorical=True,
    check_category_order=True,
    obj="Series",
):
    """
    Check that left and right Series are equal.

    Parameters
    ----------
    left : Series
    right : Series
    check_dtype : bool, default True
        Whether to check the Series dtype is identical.
    check_index_type : bool or {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical.
    check_series_type : bool, default True
        Whether to check the Series class is identical.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.
        When comparing two numbers, if the first number has magnitude less
        than 1e-5, we compare the two numbers directly and check whether
        they are equivalent within the specified precision. Otherwise, we
        compare the **ratio** of the second number to the first number and
        check whether it is equivalent to 1 within the specified precision.
    check_names : bool, default True
        Whether to check the Series and Index names attribute.
    check_exact : bool, default False
        Whether to compare number exactly.
    check_datetimelike_compat : bool, default False
        Compare datetime-like which is comparable ignoring dtype.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    check_category_order : bool, default True
        Whether to compare category order of internal Categoricals.
        .. versionadded:: 1.0.2
    obj : str, default 'Series'
        Specify object name being compared, internally used to show appropriate
        assertion message.
    """
    __tracebackhide__ = True
    # instance validation
    _check_isinstance(left, right, Series)
    if check_series_type:
        assert_class_equal(left, right, obj=obj)
    # length comparison
    if len(left) != len(right):
        msg1 = f"{len(left)}, {left.index}"
        msg2 = f"{len(right)}, {right.index}"
        raise_assert_detail(obj, "Series length are different", msg1, msg2)
    # index comparison
    assert_index_equal(
        left.index,
        right.index,
        exact=check_index_type,
        check_names=check_names,
        check_less_precise=check_less_precise,
        check_exact=check_exact,
        check_categorical=check_categorical,
        obj=f"{obj}.index",
    )
    # freq is not covered by assert_index_equal, so check it explicitly for
    # datetime-like indexes.
    if isinstance(left.index, (pd.DatetimeIndex, pd.TimedeltaIndex)):
        lidx = left.index
        ridx = right.index
        assert lidx.freq == ridx.freq, (lidx.freq, ridx.freq)
    if check_dtype:
        # We want to skip exact dtype checking when `check_categorical`
        # is False. We'll still raise if only one is a `Categorical`,
        # regardless of `check_categorical`
        if (
            is_categorical_dtype(left.dtype)
            and is_categorical_dtype(right.dtype)
            and not check_categorical
        ):
            pass
        else:
            assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
    # Value comparison: dispatch on the requested strictness and the dtypes.
    if check_exact:
        if not is_numeric_dtype(left.dtype):
            raise AssertionError("check_exact may only be used with numeric Series")
        assert_numpy_array_equal(
            left._values,
            right._values,
            check_dtype=check_dtype,
            obj=str(obj),
            index_values=np.asarray(left.index),
        )
    elif check_datetimelike_compat and (
        needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype)
    ):
        # we want to check only if we have compat dtypes
        # e.g. integer and M|m are NOT compat, but we can simply check
        # the values in that case
        # datetimelike may have different objects (e.g. datetime.datetime
        # vs Timestamp) but will compare equal
        if not Index(left._values).equals(Index(right._values)):
            msg = (
                f"[datetimelike_compat=True] {left._values} "
                f"is not equal to {right._values}."
            )
            raise AssertionError(msg)
    elif is_interval_dtype(left.dtype) and is_interval_dtype(right.dtype):
        assert_interval_array_equal(left.array, right.array)
    elif is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
        _testing.assert_almost_equal(
            left._values,
            right._values,
            check_less_precise=check_less_precise,
            check_dtype=check_dtype,
            obj=str(obj),
            index_values=np.asarray(left.index),
        )
    elif is_extension_array_dtype(left.dtype) and is_extension_array_dtype(right.dtype):
        assert_extension_array_equal(
            left._values, right._values, index_values=np.asarray(left.index)
        )
    elif needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype):
        # DatetimeArray or TimedeltaArray
        assert_extension_array_equal(
            left._values, right._values, index_values=np.asarray(left.index)
        )
    else:
        _testing.assert_almost_equal(
            left._values,
            right._values,
            check_less_precise=check_less_precise,
            check_dtype=check_dtype,
            obj=str(obj),
            index_values=np.asarray(left.index),
        )
    # metadata comparison
    if check_names:
        assert_attr_equal("name", left, right, obj=obj)
    if check_categorical:
        if is_categorical_dtype(left) or is_categorical_dtype(right):
            assert_categorical_equal(
                left._values,
                right._values,
                obj=f"{obj} category",
                check_category_order=check_category_order,
            )
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
    left,
    right,
    check_dtype=True,
    check_index_type="equiv",
    check_column_type="equiv",
    check_frame_type=True,
    check_less_precise=False,
    check_names=True,
    by_blocks=False,
    check_exact=False,
    check_datetimelike_compat=False,
    check_categorical=True,
    check_like=False,
    obj="DataFrame",
):
    """
    Check that left and right DataFrame are equal.

    This function is intended to compare two DataFrames and output any
    differences. Is is mostly intended for use in unit tests.
    Additional parameters allow varying the strictness of the
    equality checks performed.

    Parameters
    ----------
    left : DataFrame
        First DataFrame to compare.
    right : DataFrame
        Second DataFrame to compare.
    check_dtype : bool, default True
        Whether to check the DataFrame dtype is identical.
    check_index_type : bool or {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical.
    check_column_type : bool or {'equiv'}, default 'equiv'
        Whether to check the columns class, dtype and inferred_type
        are identical. Is passed as the ``exact`` argument of
        :func:`assert_index_equal`.
    check_frame_type : bool, default True
        Whether to check the DataFrame class is identical.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.
        When comparing two numbers, if the first number has magnitude less
        than 1e-5, we compare the two numbers directly and check whether
        they are equivalent within the specified precision. Otherwise, we
        compare the **ratio** of the second number to the first number and
        check whether it is equivalent to 1 within the specified precision.
    check_names : bool, default True
        Whether to check that the `names` attribute for both the `index`
        and `column` attributes of the DataFrame is identical.
    by_blocks : bool, default False
        Specify how to compare internal data. If False, compare by columns.
        If True, compare by blocks.
    check_exact : bool, default False
        Whether to compare number exactly.
    check_datetimelike_compat : bool, default False
        Compare datetime-like which is comparable ignoring dtype.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    check_like : bool, default False
        If True, ignore the order of index & columns.
        Note: index labels must match their respective rows
        (same as in columns) - same labels must be with the same data.
    obj : str, default 'DataFrame'
        Specify object name being compared, internally used to show appropriate
        assertion message.

    See Also
    --------
    assert_series_equal : Equivalent method for asserting Series equality.
    DataFrame.equals : Check DataFrame equality.

    Examples
    --------
    This example shows comparing two DataFrames that are equal
    but with columns of differing dtypes.

    >>> from pandas._testing import assert_frame_equal
    >>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    >>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})

    df1 equals itself.

    >>> assert_frame_equal(df1, df1)

    df1 differs from df2 as column 'b' is of a different type.

    >>> assert_frame_equal(df1, df2)
    Traceback (most recent call last):
    ...
    AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different
    Attribute "dtype" are different
    [left]:  int64
    [right]: float64

    Ignore differing dtypes in columns with check_dtype.

    >>> assert_frame_equal(df1, df2, check_dtype=False)
    """
    __tracebackhide__ = True
    # instance validation
    _check_isinstance(left, right, DataFrame)
    if check_frame_type:
        assert isinstance(left, type(right))
        # assert_class_equal(left, right, obj=obj)
    # shape comparison
    if left.shape != right.shape:
        raise_assert_detail(
            obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}",
        )
    # When order is to be ignored, align left to right's labels first.
    if check_like:
        left, right = left.reindex_like(right), right
    # index comparison
    assert_index_equal(
        left.index,
        right.index,
        exact=check_index_type,
        check_names=check_names,
        check_less_precise=check_less_precise,
        check_exact=check_exact,
        check_categorical=check_categorical,
        obj=f"{obj}.index",
    )
    # column comparison
    assert_index_equal(
        left.columns,
        right.columns,
        exact=check_column_type,
        check_names=check_names,
        check_less_precise=check_less_precise,
        check_exact=check_exact,
        check_categorical=check_categorical,
        obj=f"{obj}.columns",
    )
    # compare by blocks
    if by_blocks:
        rblocks = right._to_dict_of_blocks()
        lblocks = left._to_dict_of_blocks()
        # Every dtype block present on either side must exist on both, and
        # compare equal (recursively, as frames).
        for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
            assert dtype in lblocks
            assert dtype in rblocks
            assert_frame_equal(
                lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
            )
    # compare by columns
    else:
        for i, col in enumerate(left.columns):
            assert col in right
            lcol = left.iloc[:, i]
            rcol = right.iloc[:, i]
            assert_series_equal(
                lcol,
                rcol,
                check_dtype=check_dtype,
                check_index_type=check_index_type,
                check_less_precise=check_less_precise,
                check_exact=check_exact,
                check_names=check_names,
                check_datetimelike_compat=check_datetimelike_compat,
                check_categorical=check_categorical,
                obj=f'{obj}.iloc[:, {i}] (column name="{col}")',
            )
def assert_equal(left, right, **kwargs):
    """
    Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.

    Parameters
    ----------
    left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
        The two items to be compared.
    **kwargs
        All keyword arguments are passed through to the underlying assert method.
    """
    __tracebackhide__ = True

    if isinstance(left, pd.Index):
        assert_index_equal(left, right, **kwargs)
        # assert_index_equal does not compare freq, so check it explicitly
        # for datetime-like indexes.
        if isinstance(left, (pd.DatetimeIndex, pd.TimedeltaIndex)):
            assert left.freq == right.freq, (left.freq, right.freq)
        return
    if isinstance(left, pd.Series):
        assert_series_equal(left, right, **kwargs)
        return
    if isinstance(left, pd.DataFrame):
        assert_frame_equal(left, right, **kwargs)
        return
    if isinstance(left, IntervalArray):
        assert_interval_array_equal(left, right, **kwargs)
        return
    if isinstance(left, PeriodArray):
        assert_period_array_equal(left, right, **kwargs)
        return
    if isinstance(left, DatetimeArray):
        assert_datetime_array_equal(left, right, **kwargs)
        return
    if isinstance(left, TimedeltaArray):
        assert_timedelta_array_equal(left, right, **kwargs)
        return
    if isinstance(left, ExtensionArray):
        assert_extension_array_equal(left, right, **kwargs)
        return
    if isinstance(left, np.ndarray):
        assert_numpy_array_equal(left, right, **kwargs)
        return
    if isinstance(left, str):
        # plain string comparison accepts no extra options
        assert kwargs == {}
        assert left == right
        return
    raise NotImplementedError(type(left))
def box_expected(expected, box_cls, transpose=True):
    """
    Helper function to wrap the expected output of a test in a given box_class.

    Parameters
    ----------
    expected : np.ndarray, Index, Series
        The raw expected values.
    box_cls : {Index, Series, DataFrame}
        The container type to wrap ``expected`` in.
    transpose : bool, default True
        DataFrame only: return a single-row rather than single-column frame.

    Returns
    -------
    subclass of box_cls
    """
    if box_cls is pd.Index:
        return pd.Index(expected)
    if box_cls is pd.Series:
        return pd.Series(expected)
    if box_cls is pd.DataFrame:
        frame = pd.Series(expected).to_frame()
        if transpose:
            # for vector operations, we need a DataFrame to be a single-row,
            # not a single-column, in order to operate against non-DataFrame
            # vectors of the same length.
            frame = frame.T
        return frame
    if box_cls is PeriodArray:
        # the PeriodArray constructor is not as flexible as period_array
        return period_array(expected)
    if box_cls is DatetimeArray:
        return DatetimeArray(expected)
    if box_cls is TimedeltaArray:
        return TimedeltaArray(expected)
    if box_cls is np.ndarray:
        return np.array(expected)
    if box_cls is to_array:
        return to_array(expected)
    raise NotImplementedError(box_cls)
def to_array(obj):
    """
    Convert *obj* to the pandas array type matching its dtype.

    Falls back to ``np.array`` for anything that is not period,
    datetime64(tz), or timedelta64 dtyped.
    """
    # temporary implementation until we get pd.array in place
    dtype = getattr(obj, "dtype", None)

    if is_period_dtype(dtype):
        return period_array(obj)
    if is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
        return DatetimeArray._from_sequence(obj)
    if is_timedelta64_dtype(dtype):
        return TimedeltaArray._from_sequence(obj)
    return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right):
    """
    Check that the left and right SparseArray are equal.

    Parameters
    ----------
    left : SparseArray
    right : SparseArray
    """
    _check_isinstance(left, right, pd.arrays.SparseArray)

    assert_numpy_array_equal(left.sp_values, right.sp_values)

    # SparseIndex comparison
    assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
    assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)

    left_index = left.sp_index
    right_index = right.sp_index

    # BUG FIX: removed a dead ``else: pass`` branch with a truncated comment.
    if not left_index.equals(right_index):
        raise_assert_detail(
            "SparseArray.index", "index are not equal", left_index, right_index
        )

    assert_attr_equal("fill_value", left, right)
    assert_attr_equal("dtype", left, right)
    assert_numpy_array_equal(left.to_dense(), right.to_dense())
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
    """Assert that every element of *iterable* is contained in *dic*."""
    for item in iterable:
        assert item in dic, f"Did not contain item: {repr(item)}"
def assert_copy(iter1, iter2, **eql_kwargs):
    """
    iter1, iter2: iterables that produce elements
    comparable with assert_almost_equal

    Checks that the elements are equal, but not
    the same object. (Does not check that items
    in sequences are also not the same object)
    """
    for first, second in zip(iter1, iter2):
        assert_almost_equal(first, second, **eql_kwargs)
        identity_msg = (
            f"Expected object {repr(type(first))} and object {repr(type(second))} "
            "to be different objects, but they were the same object."
        )
        assert first is not second, identity_msg
def getCols(k):
    """Return the first *k* uppercase ASCII letters as a single string."""
    uppercase = string.ascii_uppercase
    return uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
    """Return an Index of *k* random 10-character strings."""
    values = rands_array(nchars=10, size=k)
    return Index(values, name=name)
def makeUnicodeIndex(k=10, name=None):
    """Return an Index of *k* random 10-character unicode strings."""
    values = randu_array(nchars=10, size=k)
    return Index(values, name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
    """ make a length k index or n categories """
    categories = rands_array(nchars=4, size=n)
    # cycle the n categories across k positions
    codes = np.arange(k) % n
    return CategoricalIndex(
        Categorical.from_codes(codes, categories=categories), name=name, **kwargs
    )
def makeIntervalIndex(k=10, name=None, **kwargs):
    """ make a length k IntervalIndex """
    # k intervals need k + 1 evenly spaced breakpoints over [0, 100]
    breaks = np.linspace(0, 100, num=(k + 1))
    return IntervalIndex.from_breaks(breaks, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
    """Return a boolean Index of length ``k`` (``k >= 1``)."""
    # special-case the short lengths, since the general pattern below
    # assumes at least two entries
    if k == 1:
        return Index([True], name=name)
    if k == 2:
        return Index([False, True], name=name)
    values = [False, True] + [False] * (k - 2)
    return Index(values, name=name)
def makeIntIndex(k=10, name=None):
    """Return an integer Index containing 0..k-1."""
    values = list(range(k))
    return Index(values, name=name)
def makeUIntIndex(k=10, name=None):
    """Return an Index of k unsigned ints starting at 2**63."""
    values = [2 ** 63 + i for i in range(k)]
    return Index(values, name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
    """Return a RangeIndex covering 0..k-1 with step 1."""
    return RangeIndex(start=0, stop=k, step=1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
    """Return a float Index of k sorted random values at a random scale."""
    base = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
    scale = 10 ** np.random.randint(0, 9)
    return Index(base * scale, name=name)
def makeDateIndex(k=10, freq="B", name=None, **kwargs):
    """Return a DatetimeIndex of k periods at *freq* starting 2000-01-01."""
    start = datetime(2000, 1, 1)
    dr = bdate_range(start, periods=k, freq=freq, name=name)
    return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
    """Return a TimedeltaIndex of k periods at *freq* starting at 1 day."""
    return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
    """Return a business-daily PeriodIndex of length k starting 2000-01-01."""
    start = datetime(2000, 1, 1)
    return pd.period_range(start=start, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
    """Return a fixed 4-entry product MultiIndex (note: *k* is ignored)."""
    levels = (("foo", "bar"), (1, 2))
    return MultiIndex.from_product(levels, names=names, **kwargs)
# Pool of first names used by _make_timeseries for its "name" column.
_names = [
    "Alice",
    "Bob",
    "Charlie",
    "Dan",
    "Edith",
    "Frank",
    "George",
    "Hannah",
    "Ingrid",
    "Jerry",
    "Kevin",
    "Laura",
    "Michael",
    "Norbert",
    "Oliver",
    "Patricia",
    "Quinn",
    "Ray",
    "Sarah",
    "Tim",
    "Ursula",
    "Victor",
    "Wendy",
    "Xavier",
    "Yvonne",
    "Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
    """
    Make a DataFrame with a DatetimeIndex named "timestamp".

    Parameters
    ----------
    start : str or Timestamp, default "2000-01-01"
        The start of the index. Passed to date_range with `freq`.
    end : str or Timestamp, default "2000-12-31"
        The end of the index. Passed to date_range with `freq`.
    freq : str or Freq
        The frequency to use for the DatetimeIndex.
    seed : int, optional
        The random state seed.

    Returns
    -------
    DataFrame
        Columns (sorted alphabetically):

        * id : int dtype, Poisson-distributed around 1000
        * name : object dtype, drawn from the ``_names`` pool
        * x, y : float dtype, uniform on [-1, 1)
    """
    index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
    n = len(index)
    state = np.random.RandomState(seed)
    # NOTE: the draw order (name, id, x, y) is part of the reproducible
    # random stream for a given seed.
    data = {
        "name": state.choice(_names, size=n),
        "id": state.poisson(1000, size=n),
        "x": state.rand(n) * 2 - 1,
        "y": state.rand(n) * 2 - 1,
    }
    df = pd.DataFrame(data, index=index, columns=sorted(data))
    # date_range includes the endpoint; drop the final row when it lands
    # exactly on `end`.
    if df.index[-1] == end:
        df = df.iloc[:-1]
    return df
def index_subclass_makers_generator():
    """Yield the maker function for each Index subclass, one at a time."""
    yield makeDateIndex
    yield makePeriodIndex
    yield makeTimedeltaIndex
    yield makeRangeIndex
    yield makeIntervalIndex
    yield makeCategoricalIndex
    yield makeMultiIndex
def all_timeseries_index_generator(k=10):
    """
    Generator which can be iterated over to get instances of all the classes
    which represent time-series.

    Parameters
    ----------
    k: length of each of the index instances
    """
    for maker in (makeDateIndex, makePeriodIndex, makeTimedeltaIndex):
        yield maker(k=k)
# make series
def makeFloatSeries(name=None):
    """Return a Series of random floats indexed by random strings."""
    idx = makeStringIndex(_N)
    values = randn(_N)
    return Series(values, index=idx, name=name)
def makeStringSeries(name=None):
    """Return a Series of random floats with a random string index."""
    idx = makeStringIndex(_N)
    values = randn(_N)
    return Series(values, index=idx, name=name)
def makeObjectSeries(name=None):
    """Return a Series of random strings (object dtype) with a string index."""
    raw = makeStringIndex(_N)
    data = Index(raw, dtype=object)
    idx = makeStringIndex(_N)
    return Series(data, index=idx, name=name)
def getSeriesData():
    """Return a dict of _K random-float Series sharing one string index."""
    idx = makeStringIndex(_N)
    return {col: Series(randn(_N), index=idx) for col in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
    """Return a Series of random floats with a DatetimeIndex of *nper* periods."""
    if nper is None:
        nper = _N
    values = randn(nper)
    return Series(values, index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
    """Return a Series of random floats with a PeriodIndex of *nper* periods."""
    if nper is None:
        nper = _N
    values = randn(nper)
    return Series(values, index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
    """Return a dict of _K time-series Series keyed by column letter."""
    return {col: makeTimeSeries(nper, freq) for col in getCols(_K)}
def getPeriodData(nper=None):
    """Return a dict of _K period Series keyed by column letter."""
    return {col: makePeriodSeries(nper) for col in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
    """Return a DataFrame of random floats with a DatetimeIndex."""
    return DataFrame(getTimeSeriesData(nper, freq))
def makeDataFrame():
    """Return a DataFrame of random floats with a random string index."""
    return DataFrame(getSeriesData())
def getMixedTypeDict():
    """Return an (index, data) pair for building a small mixed-dtype frame."""
    idx = Index(["a", "b", "c", "d", "e"])
    columns = {
        "A": [0.0, 1.0, 2.0, 3.0, 4.0],
        "B": [0.0, 1.0, 0.0, 1.0, 0.0],
        "C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
        "D": bdate_range("1/1/2009", periods=5),
    }
    return idx, columns
def makeMixedDataFrame():
    """Return a small DataFrame with float, string and datetime columns."""
    _, data = getMixedTypeDict()
    return DataFrame(data)
def makePeriodFrame(nper=None):
    """Return a DataFrame of random floats with a PeriodIndex."""
    return DataFrame(getPeriodData(nper))
def makeCustomIndex(
    nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
    """
    Create an index/multindex with given dimensions, levels, names, etc'

    nentries - number of entries in index
    nlevels - number of levels (> 1 produces multindex)
    prefix - a string prefix for labels
    names - (Optional), bool or list of strings. if True will use default
       names, if false will use no names, if a list is given, the name of
       each level in the index will be taken from the list.
    ndupe_l - (Optional), list of ints, the number of rows for which the
       label will repeated at the corresponding level, you can specify just
       the first few, the rest will use the default ndupe_l of 1.
       len(ndupe_l) <= nlevels.
    idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
       If idx_type is not None, `idx_nlevels` must be 1.
       "i"/"f" creates an integer/float index,
       "s"/"u" creates a string/unicode index
       "dt" create a datetime index.
       "td" create a timedelta index.

       if unspecified, string labels will be generated.
    """
    if ndupe_l is None:
        ndupe_l = [1] * nlevels
    assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
    # BUG FIX: compare lengths with ``==`` rather than ``is``; identity
    # comparison of ints only works by accident for small cached values.
    assert names is None or names is False or names is True or len(names) == nlevels
    assert idx_type is None or (
        idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
    )

    if names is True:
        # build default names
        names = [prefix + str(i) for i in range(nlevels)]
    if names is False:
        # pass None to index constructor for no name
        names = None

    # make singleton case uniform
    if isinstance(names, str) and nlevels == 1:
        names = [names]

    # specific 1D index type requested?
    idx_func = dict(
        i=makeIntIndex,
        f=makeFloatIndex,
        s=makeStringIndex,
        u=makeUnicodeIndex,
        dt=makeDateIndex,
        td=makeTimedeltaIndex,
        p=makePeriodIndex,
    ).get(idx_type)
    if idx_func:
        idx = idx_func(nentries)
        # but we need to fill in the name
        if names:
            idx.name = names[0]
        return idx
    elif idx_type is not None:
        raise ValueError(
            f"{repr(idx_type)} is not a legal value for `idx_type`, "
            "use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
        )

    # pad ndupe_l with the default multiplicity of 1 for unspecified levels
    if len(ndupe_l) < nlevels:
        ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
    assert len(ndupe_l) == nlevels
    assert all(x > 0 for x in ndupe_l)

    # hoisted out of the level loop: sort labels like "#_l0_g12" numerically
    # by their level/group components rather than lexicographically
    def keyfunc(x):
        import re

        numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
        return [int(num) for num in numeric_tuple]

    tuples = []
    for i in range(nlevels):
        # build a list of lists to create the index from
        div_factor = nentries // ndupe_l[i] + 1
        cnt = Counter()
        for j in range(div_factor):
            label = f"{prefix}_l{i}_g{j}"
            cnt[label] = ndupe_l[i]
        # cute Counter trick
        result = sorted(cnt.elements(), key=keyfunc)[:nentries]
        tuples.append(result)

    tuples = list(zip(*tuples))

    # convert tuples to index
    if nentries == 1:
        # we have a single level of tuples, i.e. a regular Index
        index = Index(tuples[0], name=names[0])
    elif nlevels == 1:
        name = None if names is None else names[0]
        index = Index((x[0] for x in tuples), name=name)
    else:
        index = MultiIndex.from_tuples(tuples, names=names)
    return index
def makeCustomDataframe(
    nrows,
    ncols,
    c_idx_names=True,
    r_idx_names=True,
    c_idx_nlevels=1,
    r_idx_nlevels=1,
    data_gen_f=None,
    c_ndupe_l=None,
    r_ndupe_l=None,
    dtype=None,
    c_idx_type=None,
    r_idx_type=None,
):
    """
    Create a DataFrame with custom-built row and column indexes.

    Parameters
    ----------
    nrows, ncols : int
        Number of data rows / columns.
    c_idx_names, r_idx_names : bool or list of str
        False for no names, True for default names, or a list of explicit
        names for the index levels (a single string is allowed when the
        corresponding nlevels == 1).
    c_idx_nlevels, r_idx_nlevels : int
        Number of levels in the column / row index; > 1 yields a MultiIndex.
    data_gen_f : callable, optional
        ``f(row, col)`` returning the value for that position; the default
        produces strings of the form "RxCy".
    c_ndupe_l, r_ndupe_l : list of int, optional
        Per-level label multiplicities for the corresponding index. ``None``
        gives multiplicity 1 everywhere (a unique index). A partial list
        covers just the first N levels; if a multiplicity does not divide
        nrows/ncols, the last label may appear fewer times.
    dtype : optional
        Passed straight through to the DataFrame constructor.
    c_idx_type, r_idx_type : {"i", "f", "s", "u", "dt", "p", "td"}, optional
        Requests a specific 1-D index type ("i"/"f" integer/float,
        "s"/"u" string/unicode, "dt" datetime, "p" period, "td" timedelta).
        Requires the corresponding nlevels == 1. Unspecified means string
        labels are generated.

    Examples
    --------
    >> makeCustomDataframe(5, 3)
    >> mkdf(5, 3, data_gen_f=lambda r, c: randint(1, 100))
    >> a = makeCustomDataframe(5, 3, r_idx_nlevels=2, r_ndupe_l=[2])
    >> a = makeCustomDataframe(5, 3, c_idx_names=False, r_idx_names=False,
                               r_idx_type="dt", c_idx_type="u")
    >> a = makeCustomDataframe(5, 3, r_idx_nlevels=4,
                               r_idx_names=["FEE", "FI", "FO", "FAM"],
                               c_idx_nlevels=2)
    """
    assert c_idx_nlevels > 0
    assert r_idx_nlevels > 0
    # A specific index type can only be requested for a single-level index.
    legal_idx_types = ("i", "f", "s", "u", "dt", "p", "td")
    assert r_idx_type is None or (
        r_idx_type in legal_idx_types and r_idx_nlevels == 1
    )
    assert c_idx_type is None or (
        c_idx_type in legal_idx_types and c_idx_nlevels == 1
    )

    columns = makeCustomIndex(
        ncols,
        nlevels=c_idx_nlevels,
        prefix="C",
        names=c_idx_names,
        ndupe_l=c_ndupe_l,
        idx_type=c_idx_type,
    )
    index = makeCustomIndex(
        nrows,
        nlevels=r_idx_nlevels,
        prefix="R",
        names=r_idx_names,
        ndupe_l=r_ndupe_l,
        idx_type=r_idx_type,
    )

    # Default data generator encodes the cell position as "R{row}C{col}".
    if data_gen_f is None:
        def data_gen_f(r, c):
            return f"R{r}C{c}"

    data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
    return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
    """
    Return a makeDataFrame() frame with roughly (1 - density) of its cells
    replaced by NaN at random positions.
    """
    frame = makeDataFrame()
    rows, cols = _create_missing_idx(
        *frame.shape, density=density, random_state=random_state
    )
    # NOTE(review): writes through .values — assumes the frame is a single
    # consolidated float block so the write is reflected in the frame.
    frame.values[rows, cols] = np.nan
    return frame
def optional_args(decorator):
    """
    Allow ``decorator`` to be applied with or without arguments.

    A single positional, callable argument and no keyword arguments is
    interpreted as bare usage, i.e.::

        @my_decorator
        def function(): pass

    In every case the wrapped decorator is invoked as
    ``decorator(f, *args, **kwargs)``.
    """
    @wraps(decorator)
    def wrapper(*args, **kwargs):
        def dec(f):
            return decorator(f, *args, **kwargs)

        # Bare @decorator usage: the decorated function is the sole argument.
        if not kwargs and len(args) == 1 and callable(args[0]):
            f, args = args[0], []
            return dec(f)
        # Parameterised usage: return the inner decorator to be applied.
        return dec

    return wrapper
# skip tests on exceptions with this message
# Substrings of exception messages that identify transient network failures;
# the @network decorator below skips (rather than fails) tests whose
# exception text contains any of them.
_network_error_messages = (
    # 'urlopen error timed out',
    # 'timeout: timed out',
    # 'socket.timeout: timed out',
    "timed out",
    "Server Hangup",
    "HTTP Error 503: Service Unavailable",
    "502: Proxy Error",
    "HTTP Error 502: internal error",
    "HTTP Error 502",
    "HTTP Error 503",
    "HTTP Error 403",
    "HTTP Error 400",
    "Temporary failure in name resolution",
    "Name or service not known",
    "Connection refused",
    "certificate verify",
)

# or this e.errno/e.reason.errno
# errno values (on the exception itself or on err.reason) treated the same way.
_network_errno_vals = (
    101,  # Network is unreachable
    111,  # Connection refused
    110,  # Connection timed out
    104,  # Connection reset Error
    54,  # Connection reset by peer
    60,  # urllib.error.URLError: [Errno 60] Connection timed out
)

# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.

# and conditionally raise on exception types in _get_default_network_errors
def _get_default_network_errors():
# Lazy import for http.client because it imports many things from the stdlib
import http.client
return (IOError, http.client.HTTPException, TimeoutError)
def can_connect(url, error_classes=None):
    """
    Check whether a connection to ``url`` can be opened.

    Parameters
    ----------
    url : str
        The URL to try to connect to.
    error_classes : tuple of Exception, optional
        Exception types interpreted as "cannot connect"; defaults to
        ``_get_default_network_errors()``.

    Returns
    -------
    bool
        True if the URL opened without raising one of ``error_classes``,
        False otherwise.
    """
    if error_classes is None:
        error_classes = _get_default_network_errors()

    try:
        with urlopen(url):
            pass
    except error_classes:
        return False
    return True
@optional_args
def network(
    t,
    url="http://www.google.com",
    raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
    check_before_test=False,
    error_classes=None,
    skip_errnos=_network_errno_vals,
    _skip_on_messages=_network_error_messages,
):
    """
    Label a test as requiring network connectivity and, if an error is
    encountered, only raise if it does not look like a connectivity problem.

    This assumes an added contract for your test: under normal conditions it
    must ONLY fail because network connectivity is unavailable.

    Usable three ways: as a bare decorator, with keyword arguments, or with
    a single positional argument that is the url to check.

    Parameters
    ----------
    t : callable
        The test requiring network connectivity.
    url : str
        URL checked via ``can_connect`` to decide between raising and
        skipping. Defaults to 'http://www.google.com'.
    raise_on_error : bool
        If True, never catches errors (supersedes ``check_before_test``).
    check_before_test : bool
        If True, checks connectivity before running the test and skips when
        the check fails.
    error_classes : tuple or Exception, optional
        Exception classes considered connectivity-related; anything else is
        re-raised. Defaults to ``_get_default_network_errors()``.
    skip_errnos : iterable of int
        Exceptions whose ``.errno`` (or ``.reason.errno``) is in this set are
        skipped with an appropriate message.
    _skip_on_messages : iterable of str
        Exceptions whose text contains one of these substrings are skipped;
        used where no errno is available.

    Returns
    -------
    callable
        The decorated test with connectivity-error handling.

    Examples
    --------
    >>> from pandas._testing import network
    >>> from pandas.io.common import urlopen
    >>> @network
    ... def test_network():
    ...     with urlopen("rabbit://bonanza.com"):
    ...         pass

    >>> @network("http://www.yahoo.com")
    ... def test_something_with_yahoo():
    ...     raise IOError("Failure Message")

    Errors unrelated to networking are always raised.
    """
    from pytest import skip

    if error_classes is None:
        error_classes = _get_default_network_errors()

    # Mark the test so suites can select/deselect network tests.
    t.network = True

    @wraps(t)
    def wrapper(*args, **kwargs):
        if check_before_test and not raise_on_error:
            if not can_connect(url, error_classes):
                skip()
        try:
            return t(*args, **kwargs)
        except Exception as err:
            errno = getattr(err, "errno", None)
            # BUG FIX: this previously tested ``hasattr(errno, "reason")``,
            # which is always False, so urllib errors that wrap an OSError
            # (exposing err.reason.errno) were never recognised for skipping.
            if not errno and hasattr(err, "reason"):
                errno = getattr(err.reason, "errno", None)

            if errno in skip_errnos:
                skip(f"Skipping test due to known errno and error {err}")

            e_str = str(err)
            if any(m.lower() in e_str.lower() for m in _skip_on_messages):
                skip(
                    f"Skipping test because exception message is known and error {err}"
                )

            # Non-network exceptions are genuine failures: re-raise.
            if not isinstance(err, error_classes):
                raise

            # Network-class exception: raise only when connectivity exists
            # (so the failure is real) or the caller demanded it.
            if raise_on_error or can_connect(url, error_classes):
                raise
            else:
                skip(f"Skipping test due to lack of connectivity and error {err}")

    return wrapper
# Alias: both names refer to the same decorator defined above.
with_connectivity_check = network
@contextmanager
def assert_produces_warning(
    expected_warning=Warning,
    filter_level="always",
    check_stacklevel=True,
    raise_on_extra_warnings=True,
):
    """
    Context manager checking that the enclosed code raises the expected
    warning (or, with ``expected_warning=False``/``None``, no warning at
    all) and no unexpected ones. A thin layer over
    ``warnings.catch_warnings``.

    Parameters
    ----------
    expected_warning : {Warning, False, None}, default Warning
        The warning class that must be emitted. ``False`` or ``None``
        asserts that no warning is emitted.
    filter_level : str or None, default "always"
        Warning filter applied inside the context; one of "error",
        "ignore", "always", "default", "module", "once".
    check_stacklevel : bool, default True
        For FutureWarning/DeprecationWarning, additionally assert that the
        warning points at the caller's file (i.e. ``stacklevel`` was set
        correctly).
    raise_on_extra_warnings : bool, default True
        Whether warnings other than ``expected_warning`` fail the check.

    Examples
    --------
    >>> import warnings
    >>> with assert_produces_warning():
    ...     warnings.warn(UserWarning())
    ...
    >>> with assert_produces_warning(False):
    ...     warnings.warn(RuntimeWarning())
    ...
    Traceback (most recent call last):
        ...
    AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
    >>> with assert_produces_warning(UserWarning):
    ...     warnings.warn(RuntimeWarning())
    Traceback (most recent call last):
        ...
    AssertionError: Did not see expected warning of class 'UserWarning'.

    ..warn:: This is *not* thread-safe.
    """
    __tracebackhide__ = True

    with warnings.catch_warnings(record=True) as w:
        saw_warning = False
        warnings.simplefilter(filter_level)
        yield w

        extra_warnings = []
        for recorded in w:
            if expected_warning and issubclass(recorded.category, expected_warning):
                saw_warning = True

                if check_stacklevel and issubclass(
                    recorded.category, (FutureWarning, DeprecationWarning)
                ):
                    from inspect import getframeinfo, stack

                    # stack()[2] is the user frame that entered this
                    # context manager; the warning should blame that file.
                    caller = getframeinfo(stack()[2][0])
                    assert recorded.filename == caller.filename, (
                        "Warning not set with correct stacklevel. "
                        f"File where warning is raised: {recorded.filename} != "
                        f"{caller.filename}. Warning message: {recorded.message}"
                    )
            else:
                extra_warnings.append(
                    (
                        recorded.category.__name__,
                        recorded.message,
                        recorded.filename,
                        recorded.lineno,
                    )
                )

        if expected_warning:
            assert saw_warning, (
                f"Did not see expected warning of class "
                f"{repr(expected_warning.__name__)}"
            )
        if raise_on_extra_warnings and extra_warnings:
            raise AssertionError(
                f"Caused unexpected warning(s): {repr(extra_warnings)}"
            )
class RNGContext:
    """
    Context manager that seeds numpy's global RNG on entry and restores the
    previous generator state on exit.

    Parameters
    ----------
    seed : int
        Seed passed to ``numpy.random.seed``.

    Examples
    --------
    with RNGContext(42):
        np.random.randn()
    """

    def __init__(self, seed):
        self.seed = seed

    def __enter__(self):
        # Snapshot the global RNG state so it can be restored on exit.
        self.start_state = np.random.get_state()
        np.random.seed(self.seed)

    def __exit__(self, exc_type, exc_value, traceback):
        np.random.set_state(self.start_state)
@contextmanager
def with_csv_dialect(name, **kwargs):
    """
    Temporarily register a CSV dialect for parsing CSV.

    Parameters
    ----------
    name : str
        The name of the dialect.
    kwargs : mapping
        The parameters for the dialect.

    Raises
    ------
    ValueError
        The name of the dialect conflicts with a builtin one.

    See Also
    --------
    csv : Python's CSV library.
    """
    import csv

    _BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}
    if name in _BUILTIN_DIALECTS:
        raise ValueError("Cannot override builtin dialect.")

    csv.register_dialect(name, **kwargs)
    try:
        yield
    finally:
        # BUG FIX: unregister even when the body raises, so the dialect
        # does not leak into subsequent tests.
        csv.unregister_dialect(name)
@contextmanager
def use_numexpr(use, min_elements=None):
    """
    Temporarily toggle pandas' numexpr engine and its minimum-size threshold.

    Parameters
    ----------
    use : bool
        Whether expressions should use numexpr inside the context.
    min_elements : int, optional
        Minimum number of elements for numexpr to kick in; defaults to the
        current ``expr._MIN_ELEMENTS``.
    """
    from pandas.core.computation import expressions as expr

    if min_elements is None:
        min_elements = expr._MIN_ELEMENTS

    olduse = expr._USE_NUMEXPR
    oldmin = expr._MIN_ELEMENTS
    expr.set_use_numexpr(use)
    expr._MIN_ELEMENTS = min_elements
    try:
        yield
    finally:
        # BUG FIX: restore prior settings even if the body raises, so a
        # failing test cannot leave numexpr misconfigured for later tests.
        expr._MIN_ELEMENTS = oldmin
        expr.set_use_numexpr(olduse)
def test_parallel(num_threads=2, kwargs_list=None):
    """
    Decorator running the wrapped function ``num_threads`` times in parallel.

    Parameters
    ----------
    num_threads : int, optional
        How many threads call the function concurrently.
    kwargs_list : list of dict, optional
        Per-thread keyword-argument overrides; must have exactly
        ``num_threads`` entries when given.

    Notes
    -----
    The decorated function's return value is discarded.
    Original from scikit-image:
    https://github.com/scikit-image/scikit-image/pull/1519
    """
    assert num_threads > 0
    has_kwargs_list = kwargs_list is not None
    if has_kwargs_list:
        assert len(kwargs_list) == num_threads
    import threading

    def wrapper(func):
        @wraps(func)
        def inner(*args, **kwargs):
            # Per-thread kwargs: merge overrides on top of the call kwargs.
            if has_kwargs_list:
                def build_kwargs(i):
                    return dict(kwargs, **kwargs_list[i])
            else:
                def build_kwargs(i):
                    return kwargs

            workers = [
                threading.Thread(target=func, args=args, kwargs=build_kwargs(i))
                for i in range(num_threads)
            ]
            for worker in workers:
                worker.start()
            for worker in workers:
                worker.join()

        return inner

    return wrapper
class SubclassedSeries(Series):
    """Series subclass used in tests to verify that subclassing survives
    pandas operations (constructor propagation and metadata)."""

    # Attribute names preserved on results — presumably consumed by pandas'
    # __finalize__ machinery; confirm against the pandas subclassing docs.
    _metadata = ["testattr", "name"]

    @property
    def _constructor(self):
        # Same-dimension results remain SubclassedSeries.
        return SubclassedSeries

    @property
    def _constructor_expanddim(self):
        # Dimension-expanding results (e.g. to_frame) use the DataFrame subclass.
        return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
    """DataFrame subclass used in tests to verify that subclassing survives
    pandas operations (constructor propagation and metadata)."""

    # Attribute name preserved on results — presumably consumed by pandas'
    # __finalize__ machinery; confirm against the pandas subclassing docs.
    _metadata = ["testattr"]

    @property
    def _constructor(self):
        # Same-dimension results remain SubclassedDataFrame.
        return SubclassedDataFrame

    @property
    def _constructor_sliced(self):
        # Dimension-reducing results (e.g. column selection) use the Series subclass.
        return SubclassedSeries
class SubclassedCategorical(Categorical):
    """Categorical subclass used in tests to verify constructor propagation."""

    @property
    def _constructor(self):
        return SubclassedCategorical
@contextmanager
def set_timezone(tz: str):
    """
    Temporarily set the process timezone via the ``TZ`` environment variable.

    Parameters
    ----------
    tz : str
        A string representing a valid timezone.

    Examples
    --------
    >>> from datetime import datetime
    >>> from dateutil.tz import tzlocal
    >>> tzlocal().tzname(datetime.now())
    'IST'

    >>> with set_timezone('US/Eastern'):
    ...     tzlocal().tzname(datetime.now())
    ...
    'EDT'
    """
    import os
    import time

    def _apply(zone):
        # ``None`` clears TZ entirely, reverting to the system default.
        if zone is None:
            os.environ.pop("TZ", None)
        else:
            os.environ["TZ"] = zone
        time.tzset()

    previous = os.environ.get("TZ")
    _apply(tz)
    try:
        yield
    finally:
        _apply(previous)
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
def convert_rows_list_to_csv_str(rows_list: List[str]) -> str:
    """
    Join CSV rows into one CSV-formatted string for the current OS.

    Used to build the expected value when testing ``to_csv()``.

    Parameters
    ----------
    rows_list : List[str]
        One element per CSV row.

    Returns
    -------
    str
        The rows joined with the OS line separator; every row, including
        the last, is terminated with it.
    """
    line_sep = os.linesep
    return line_sep.join(rows_list) + line_sep
def external_error_raised(
    expected_exception: Type[Exception],
) -> Callable[[Type[Exception], None], None]:
    """
    Build a ``pytest.raises`` context for an externally-produced error.

    Parameters
    ----------
    expected_exception : Exception
        Expected error to raise.

    Returns
    -------
    Callable
        A regular ``pytest.raises`` context with ``match=None``, because the
        message text comes from an external library and is not under our
        control.
    """
    import pytest

    return pytest.raises(expected_exception, match=None)
# (function, name) pairs from pandas' internal table mapping callables
# (e.g. builtins / numpy reductions) to cython-implemented operation names.
cython_table = pd.core.base.SelectionMixin._cython_table.items()
def get_cython_table_params(ndframe, func_names_and_expected):
    """
    Combine a frame with cython-table function names and expected results.

    Parameters
    ----------
    ndframe : DataFrame or Series
    func_names_and_expected : Sequence of two items
        The first item is the name of an NDFrame method ('sum', 'prod', ...);
        the second is the expected return value.

    Returns
    -------
    list
        Triples ``(ndframe, function-or-name, expected)``: one per supplied
        name, plus one per cython-table callable registered under that name.
    """
    results = []
    for func_name, expected in func_names_and_expected:
        results.append((ndframe, func_name, expected))
        # Also parametrise over every callable the cython table maps to
        # this operation name.
        matching = [
            (ndframe, func, expected)
            for func, name in cython_table
            if name == func_name
        ]
        results.extend(matching)
    return results
# ---- sensordata.py (SleekXMPP XEP-0323 plugin) ----
"""
SleekXMPP: The Sleek XMPP Library
Implementation of xeps for Internet of Things
http://wiki.xmpp.org/web/Tech_pages/IoT_systems
Copyright (C) 2013 Sustainable Innovation, Joachim.lindborg@sust.se, bjorn.westrom@consoden.se
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import logging
import time
import datetime
from threading import Thread, Lock, Timer
from sleekxmpp.plugins.xep_0323.timerreset import TimerReset
from sleekxmpp.xmlstream import JID
from sleekxmpp.xmlstream.handler import Callback
from sleekxmpp.xmlstream.matcher import StanzaPath
from sleekxmpp.plugins.base import BasePlugin
from sleekxmpp.plugins.xep_0323 import stanza
from sleekxmpp.plugins.xep_0323.stanza import Sensordata
log = logging.getLogger(__name__)
class XEP_0323(BasePlugin):
"""
XEP-0323: IoT Sensor Data
This XEP provides the underlying architecture, basic operations and data
structures for sensor data communication over XMPP networks. It includes
a hardware abstraction model, removing any technical detail implemented
in underlying technologies.
Also see <http://xmpp.org/extensions/xep-0323.html>
Configuration Values:
threaded -- Indicates if communication with sensors should be threaded.
Defaults to True.
Events:
Sensor side
-----------
Sensordata Event:Req -- Received a request for data
Sensordata Event:Cancel -- Received a cancellation for a request
Client side
-----------
Sensordata Event:Accepted -- Received a accept from sensor for a request
Sensordata Event:Rejected -- Received a reject from sensor for a request
Sensordata Event:Cancelled -- Received a cancel confirm from sensor
Sensordata Event:Fields -- Received fields from sensor for a request
This may be triggered multiple times since
the sensor can split up its response in
multiple messages.
Sensordata Event:Failure -- Received a failure indication from sensor
for a request. Typically a comm timeout.
Attributes:
threaded -- Indicates if command events should be threaded.
Defaults to True.
sessions -- A dictionary or equivalent backend mapping
session IDs to dictionaries containing data
relevant to a request's session. This dictionary is used
both by the client and sensor side. On client side, seqnr
is used as key, while on sensor side, a session_id is used
as key. This ensures that the two will not collide, so
one instance can be both client and sensor.
Sensor side
-----------
nodes -- A dictionary mapping sensor nodes that are serviced through
this XMPP instance to their device handlers ("drivers").
Client side
-----------
last_seqnr -- The last used sequence number (integer). One sequence of
communication (e.g. -->request, <--accept, <--fields)
between client and sensor is identified by a unique
sequence number (unique between the client/sensor pair)
Methods:
plugin_init -- Overrides base_plugin.plugin_init
post_init -- Overrides base_plugin.post_init
plugin_end -- Overrides base_plugin.plugin_end
Sensor side
-----------
register_node -- Register a sensor as available from this XMPP
instance.
Client side
-----------
request_data -- Initiates a request for data from one or more
sensors. Non-blocking, a callback function will
be called when data is available.
"""
name = 'xep_0323'
description = 'XEP-0323 Internet of Things - Sensor Data'
dependencies = set(['xep_0030'])
stanza = stanza
default_config = {
'threaded': True
# 'session_db': None
}
def plugin_init(self):
""" Start the XEP-0323 plugin """
self.xmpp.register_handler(
Callback('Sensordata Event:Req',
StanzaPath('iq@type=get/req'),
self._handle_event_req))
self.xmpp.register_handler(
Callback('Sensordata Event:Accepted',
StanzaPath('iq@type=result/accepted'),
self._handle_event_accepted))
self.xmpp.register_handler(
Callback('Sensordata Event:Rejected',
StanzaPath('iq@type=error/rejected'),
self._handle_event_rejected))
self.xmpp.register_handler(
Callback('Sensordata Event:Cancel',
StanzaPath('iq@type=get/cancel'),
self._handle_event_cancel))
self.xmpp.register_handler(
Callback('Sensordata Event:Cancelled',
StanzaPath('iq@type=result/cancelled'),
self._handle_event_cancelled))
self.xmpp.register_handler(
Callback('Sensordata Event:Fields',
StanzaPath('message/fields'),
self._handle_event_fields))
self.xmpp.register_handler(
Callback('Sensordata Event:Failure',
StanzaPath('message/failure'),
self._handle_event_failure))
self.xmpp.register_handler(
Callback('Sensordata Event:Started',
StanzaPath('message/started'),
self._handle_event_started))
# Server side dicts
self.nodes = {};
self.sessions = {};
self.last_seqnr = 0;
self.seqnr_lock = Lock();
## For testning only
self.test_authenticated_from = ""
    def post_init(self):
        """Complete initialisation: advertise the sensordata namespace via
        service discovery (XEP-0030)."""
        BasePlugin.post_init(self)
        self.xmpp['xep_0030'].add_feature(Sensordata.namespace)
        # Publish an empty item list under the namespace node.
        self.xmpp['xep_0030'].set_items(node=Sensordata.namespace, items=tuple())
def _new_session(self):
""" Return a new session ID. """
return str(time.time()) + '-' + self.xmpp.new_id()
def session_bind(self, jid):
logging.debug("setting the Disco discovery for %s" % Sensordata.namespace)
self.xmpp['xep_0030'].add_feature(Sensordata.namespace)
self.xmpp['xep_0030'].set_items(node=Sensordata.namespace, items=tuple())
def plugin_end(self):
""" Stop the XEP-0323 plugin """
self.sessions.clear();
self.xmpp.remove_handler('Sensordata Event:Req')
self.xmpp.remove_handler('Sensordata Event:Accepted')
self.xmpp.remove_handler('Sensordata Event:Rejected')
self.xmpp.remove_handler('Sensordata Event:Cancel')
self.xmpp.remove_handler('Sensordata Event:Cancelled')
self.xmpp.remove_handler('Sensordata Event:Fields')
self.xmpp['xep_0030'].del_feature(feature=Sensordata.namespace)
# =================================================================
# Sensor side (data provider) API
def register_node(self, nodeId, device, commTimeout, sourceId=None, cacheType=None):
"""
Register a sensor/device as available for serving of data through this XMPP
instance.
The device object may by any custom implementation to support
specific devices, but it must implement the functions:
has_field
request_fields
according to the interfaces shown in the example device.py file.
Arguments:
nodeId -- The identifier for the device
device -- The device object
commTimeout -- Time in seconds to wait between each callback from device during
a data readout. Float.
sourceId -- [optional] identifying the data source controlling the device
cacheType -- [optional] narrowing down the search to a specific kind of node
"""
self.nodes[nodeId] = {"device": device,
"commTimeout": commTimeout,
"sourceId": sourceId,
"cacheType": cacheType};
def _set_authenticated(self, auth=''):
""" Internal testing function """
self.test_authenticated_from = auth;
def _handle_event_req(self, iq):
"""
Event handler for reception of an Iq with req - this is a request.
Verifies that
- all the requested nodes are available
- at least one of the requested fields is available from at least
one of the nodes
If the request passes verification, an accept response is sent, and
the readout process is started in a separate thread.
If the verification fails, a reject message is sent.
"""
seqnr = iq['req']['seqnr'];
error_msg = '';
req_ok = True;
# Authentication
if len(self.test_authenticated_from) > 0 and not iq['from'] == self.test_authenticated_from:
# Invalid authentication
req_ok = False;
error_msg = "Access denied";
# Nodes
process_nodes = [];
if len(iq['req']['nodes']) > 0:
for n in iq['req']['nodes']:
if not n['nodeId'] in self.nodes:
req_ok = False;
error_msg = "Invalid nodeId " + n['nodeId'];
process_nodes = [n['nodeId'] for n in iq['req']['nodes']];
else:
process_nodes = self.nodes.keys();
# Fields - if we just find one we are happy, otherwise we reject
process_fields = [];
if len(iq['req']['fields']) > 0:
found = False
for f in iq['req']['fields']:
for node in self.nodes:
if self.nodes[node]["device"].has_field(f['name']):
found = True;
break;
if not found:
req_ok = False;
error_msg = "Invalid field " + f['name'];
process_fields = [f['name'] for n in iq['req']['fields']];
req_flags = iq['req']._get_flags();
request_delay_sec = None
if 'when' in req_flags:
# Timed request - requires datetime string in iso format
# ex. 2013-04-05T15:00:03
dt = None
try:
dt = datetime.datetime.strptime(req_flags['when'], "%Y-%m-%dT%H:%M:%S")
except ValueError:
req_ok = False;
error_msg = "Invalid datetime in 'when' flag, please use ISO format (i.e. 2013-04-05T15:00:03)."
if not dt is None:
# Datetime properly formatted
dtnow = datetime.datetime.now()
dtdiff = dt - dtnow
request_delay_sec = dtdiff.seconds + dtdiff.days * 24 * 3600
if request_delay_sec <= 0:
req_ok = False;
error_msg = "Invalid datetime in 'when' flag, cannot set a time in the past. Current time: " + dtnow.isoformat();
if req_ok:
session = self._new_session();
self.sessions[session] = {"from": iq['from'], "to": iq['to'], "seqnr": seqnr};
self.sessions[session]["commTimers"] = {};
self.sessions[session]["nodeDone"] = {};
#print("added session: " + str(self.sessions))
iq.reply();
iq['accepted']['seqnr'] = seqnr;
if not request_delay_sec is None:
iq['accepted']['queued'] = "true"
iq.send(block=False);
self.sessions[session]["node_list"] = process_nodes;
if not request_delay_sec is None:
# Delay request to requested time
timer = Timer(request_delay_sec, self._event_delayed_req, args=(session, process_fields, req_flags))
self.sessions[session]["commTimers"]["delaytimer"] = timer;
timer.start();
return
if self.threaded:
#print("starting thread")
tr_req = Thread(target=self._threaded_node_request, args=(session, process_fields, req_flags))
tr_req.start()
#print("started thread")
else:
self._threaded_node_request(session, process_fields, req_flags);
else:
iq.reply();
iq['type'] = 'error';
iq['rejected']['seqnr'] = seqnr;
iq['rejected']['error'] = error_msg;
iq.send(block=False);
def _threaded_node_request(self, session, process_fields, flags):
"""
Helper function to handle the device readouts in a separate thread.
Arguments:
session -- The request session id
process_fields -- The fields to request from the devices
flags -- [optional] flags to pass to the devices, e.g. momentary
Formatted as a dictionary like { "flag name": "flag value" ... }
"""
for node in self.sessions[session]["node_list"]:
self.sessions[session]["nodeDone"][node] = False;
for node in self.sessions[session]["node_list"]:
timer = TimerReset(self.nodes[node]['commTimeout'], self._event_comm_timeout, args=(session, node));
self.sessions[session]["commTimers"][node] = timer;
#print("Starting timer " + str(timer) + ", timeout: " + str(self.nodes[node]['commTimeout']))
timer.start();
self.nodes[node]['device'].request_fields(process_fields, flags=flags, session=session, callback=self._device_field_request_callback);
    def _event_comm_timeout(self, session, nodeId):
        """
        Triggered if any of the readout operations timeout.
        Sends a failure message back to the client, stops communicating
        with the failing device.
        Arguments:
            session -- The request session id
            nodeId  -- The id of the device which timed out
        """
        # Build a 'failure' message addressed back to the requesting client.
        msg = self.xmpp.Message();
        msg['from'] = self.sessions[session]['to'];
        msg['to'] = self.sessions[session]['from'];
        msg['failure']['seqnr'] = self.sessions[session]['seqnr'];
        msg['failure']['error']['text'] = "Timeout";
        msg['failure']['error']['nodeId'] = nodeId;
        # Timestamp trimmed to whole seconds for the wire format.
        msg['failure']['error']['timestamp'] = datetime.datetime.now().replace(microsecond=0).isoformat();

        # Drop communication with this device and check if we are done
        self.sessions[session]["nodeDone"][nodeId] = True;
        if (self._all_nodes_done(session)):
            msg['failure']['done'] = 'true';
        msg.send();
        # The session is complete, delete it
        # NOTE(review): the session is deleted even when other nodes are
        # still pending; late device callbacks are silently dropped by the
        # session-existence guard in _device_field_request_callback —
        # confirm this abort-whole-session behaviour is intended.
        #print("del session " + session + " due to timeout")
        del self.sessions[session];
def _event_delayed_req(self, session, process_fields, req_flags):
"""
Triggered when the timer from a delayed request fires.
Arguments:
session -- The request session id
process_fields -- The fields to request from the devices
flags -- [optional] flags to pass to the devices, e.g. momentary
Formatted as a dictionary like { "flag name": "flag value" ... }
"""
msg = self.xmpp.Message();
msg['from'] = self.sessions[session]['to'];
msg['to'] = self.sessions[session]['from'];
msg['started']['seqnr'] = self.sessions[session]['seqnr'];
msg.send();
if self.threaded:
tr_req = Thread(target=self._threaded_node_request, args=(session, process_fields, req_flags))
tr_req.start()
else:
self._threaded_node_request(session, process_fields, req_flags);
def _all_nodes_done(self, session):
"""
Checks wheter all devices are done replying to the readout.
Arguments:
session -- The request session id
"""
for n in self.sessions[session]["nodeDone"]:
if not self.sessions[session]["nodeDone"][n]:
return False;
return True;
def _device_field_request_callback(self, session, nodeId, result, timestamp_block, error_msg=None):
    """
    Callback function called by the devices when they have any additional data.
    Composes a message with the data and sends it back to the client, and resets
    the timeout timer for the device.

    Arguments:
        session -- The request session id
        nodeId -- The device id which initiated the callback
        result -- The current result status of the readout. Valid values are:
                  "error"  - Readout failed.
                  "fields" - Contains readout data.
                  "done"   - Indicates that the readout is complete. May contain
                             readout data.
        timestamp_block -- [optional] Only applies when result != "error"
                  The readout data. Structured as a dictionary:
                  {
                      timestamp: timestamp for this datablock,
                      fields: list of field dictionary (one per readout field).
                              readout field dictionary format:
                              {
                                  type: The field type (numeric, boolean, dateTime, timeSpan, string, enum)
                                  name: The field name
                                  value: The field value
                                  unit: The unit of the field. Only applies to type numeric.
                                  dataType: The datatype of the field. Only applies to type enum.
                                  flags: [optional] data classifier flags for the field, e.g. momentary
                                         Formatted as a dictionary like { "flag name": "flag value" ... }
                              }
                  }
        error_msg -- [optional] Only applies when result == "error".
                     Error details when a request failed.
    """
    if not session in self.sessions:
        # This can happen if a session was deleted, like in a cancellation. Just drop the data.
        return
    if result == "error":
        # Device failed: stop its timeout timer and report the failure.
        self.sessions[session]["commTimers"][nodeId].cancel();
        msg = self.xmpp.Message();
        msg['from'] = self.sessions[session]['to'];
        msg['to'] = self.sessions[session]['from'];
        msg['failure']['seqnr'] = self.sessions[session]['seqnr'];
        msg['failure']['error']['text'] = error_msg;
        msg['failure']['error']['nodeId'] = nodeId;
        msg['failure']['error']['timestamp'] = datetime.datetime.now().replace(microsecond=0).isoformat();
        # Drop communication with this device and check if we are done
        self.sessions[session]["nodeDone"][nodeId] = True;
        if (self._all_nodes_done(session)):
            msg['failure']['done'] = 'true';
            # The session is complete, delete it
            # print("del session " + session + " due to error")
            del self.sessions[session];
        # Send after the (possible) deletion: msg no longer needs the session.
        msg.send();
    else:
        # "fields" or "done": forward whatever data arrived in this callback.
        msg = self.xmpp.Message();
        msg['from'] = self.sessions[session]['to'];
        msg['to'] = self.sessions[session]['from'];
        msg['fields']['seqnr'] = self.sessions[session]['seqnr'];
        if timestamp_block is not None and len(timestamp_block) > 0:
            # One node element with one timestamp element per callback.
            node = msg['fields'].add_node(nodeId);
            ts = node.add_timestamp(timestamp_block["timestamp"]);
            for f in timestamp_block["fields"]:
                data = ts.add_data( typename=f['type'],
                                    name=f['name'],
                                    value=f['value'],
                                    unit=f['unit'],
                                    dataType=f['dataType'],
                                    flags=f['flags']);
        if result == "done":
            # This device is finished: stop its timer and mark it done.
            self.sessions[session]["commTimers"][nodeId].cancel();
            self.sessions[session]["nodeDone"][nodeId] = True;
            msg['fields']['done'] = 'true';
            if (self._all_nodes_done(session)):
                # The session is complete, delete it
                # print("del session " + session + " due to complete")
                del self.sessions[session];
        else:
            # More data expected from this device: restart comm timer.
            self.sessions[session]["commTimers"][nodeId].reset();
        msg.send();
def _handle_event_cancel(self, iq):
    """ Received Iq with cancel - this is a cancel request.
    Delete the session and confirm. """
    seqnr = iq['cancel']['seqnr'];
    # Find the session matching this requester/device/seqnr triple.
    for s in self.sessions:
        if self.sessions[s]['from'] == iq['from'] and self.sessions[s]['to'] == iq['to'] and self.sessions[s]['seqnr'] == seqnr:
            # found it. Cancel all timers
            for n in self.sessions[s]["commTimers"]:
                self.sessions[s]["commTimers"][n].cancel();
            # Confirm
            iq.reply();
            iq['type'] = 'result';
            iq['cancelled']['seqnr'] = seqnr;
            iq.send(block=False);
            # Delete session. Mutating self.sessions while iterating it is
            # safe only because we return immediately afterwards.
            del self.sessions[s]
            return
    # Could not find session, send reject
    iq.reply();
    iq['type'] = 'error';
    iq['rejected']['seqnr'] = seqnr;
    iq['rejected']['error'] = "Cancel request received, no matching request is active.";
    iq.send(block=False);
# =================================================================
# Client side (data retriever) API
def request_data(self, from_jid, to_jid, callback, nodeIds=None, fields=None, flags=None):
    """
    Called on the client side to initiate a data readout.
    Composes a message with the request and sends it to the device(s).
    Does not block, the callback will be called when data is available.

    Arguments:
        from_jid -- The jid of the requester
        to_jid -- The jid of the device(s)
        callback -- The callback function to call when data is available.
            The callback function must support the following arguments:

            from_jid -- The jid of the responding device(s)
            result -- The current result status of the readout. Valid values are:
                      "accepted"  - Readout request accepted
                      "queued"    - Readout request accepted and queued
                      "rejected"  - Readout request rejected
                      "failure"   - Readout failed.
                      "cancelled" - Confirmation of request cancellation.
                      "started"   - Previously queued request is now started
                      "fields"    - Contains readout data.
                      "done"      - Indicates that the readout is complete.
            nodeId -- [optional] Mandatory when result == "fields" or "failure".
                      The node Id of the responding device. One callback will only
                      contain data from one device.
            timestamp -- [optional] Mandatory when result == "fields".
                      The timestamp of data in this callback. One callback will only
                      contain data from one timestamp.
            fields -- [optional] Mandatory when result == "fields".
                      List of field dictionaries representing the readout data.
                      Dictionary format:
                      {
                          typename: The field type (numeric, boolean, dateTime, timeSpan, string, enum)
                          name: The field name
                          value: The field value
                          unit: The unit of the field. Only applies to type numeric.
                          dataType: The datatype of the field. Only applies to type enum.
                          flags: [optional] data classifier flags for the field, e.g. momentary.
                                 Formatted as a dictionary like { "flag name": "flag value" ... }
                      }
            error_msg -- [optional] Mandatory when result == "rejected" or "failure".
                      Details about why the request is rejected or failed.
                      "rejected" means that the request is stopped, but note that the
                      request will continue even after a "failure". "failure" only means
                      that communication was stopped to that specific device, other
                      device(s) (if any) will continue their readout.
        nodeIds -- [optional] Limits the request to the node Ids in this list.
        fields -- [optional] Limits the request to the field names in this list.
        flags -- [optional] Limits the request according to the flags, or sets
                 readout conditions such as timing.

    Return value:
        session -- Session identifier. Client can use this as a reference to cancel
                   the request.
    """
    iq = self.xmpp.Iq();
    iq['from'] = from_jid;
    iq['to'] = to_jid;
    iq['type'] = "get";
    # The sequence number doubles as stanza id and session key.
    seqnr = self._get_new_seqnr();
    iq['id'] = seqnr;
    iq['req']['seqnr'] = seqnr;
    if nodeIds is not None:
        for nodeId in nodeIds:
            iq['req'].add_node(nodeId);
    if fields is not None:
        for field in fields:
            iq['req'].add_field(field);
    iq['req']._set_flags(flags);
    # Register the session before sending so the response handlers find it.
    self.sessions[seqnr] = {"from": iq['from'], "to": iq['to'], "seqnr": seqnr, "callback": callback};
    iq.send(block=False);
    return seqnr;
def cancel_request(self, session):
    """
    Client-side API: ask the device(s) to abort a running data readout.

    Builds a cancel Iq for the given session and sends it without blocking;
    the session callback fires once the device confirms (or rejects) the
    cancellation.

    Arguments:
        session -- The session id of the request to cancel
    """
    details = self.sessions[session]
    cancel_iq = self.xmpp.Iq()
    cancel_iq['type'] = "get"
    # The session id is the original sequence number; reuse it as stanza id.
    cancel_iq['id'] = session
    cancel_iq['from'] = details['from']
    cancel_iq['to'] = details['to']
    cancel_iq['cancel']['seqnr'] = session
    cancel_iq.send(block=False)
def _get_new_seqnr(self):
    """Return a new unique sequence number as a string (unique across threads).

    Returns:
        The next value of the shared counter, formatted as a string.
    """
    # BUG FIX: the original released seqnr_lock *before* reading
    # self.last_seqnr for the return value, so two threads could both
    # increment and then read the same final value, returning duplicate
    # sequence numbers. Reading inside the critical section fixes that.
    # Using "with" also guarantees the lock is released if the increment
    # raises, unlike a bare acquire()/release() pair.
    with self.seqnr_lock:
        self.last_seqnr += 1
        return str(self.last_seqnr)
def _handle_event_accepted(self, iq):
    """
    Handle an 'accepted' Iq: the readout request was taken on.

    Reports "queued" when the device flagged the request as queued,
    otherwise "accepted". The session stays alive either way.
    """
    accepted = iq['accepted']
    status = "queued" if accepted['queued'] == 'true' else "accepted"
    notify = self.sessions[accepted['seqnr']]["callback"]
    notify(from_jid=iq['from'], result=status)
def _handle_event_rejected(self, iq):
    """
    Handle a 'rejected' Iq: the readout request was refused.

    Forwards the rejection reason to the session callback, then removes
    the session — a rejected request is terminated.
    """
    seq = iq['rejected']['seqnr']
    notify = self.sessions[seq]["callback"]
    notify(from_jid=iq['from'], result="rejected", error_msg=iq['rejected']['error'])
    # The request will not proceed; forget the session.
    del self.sessions[seq]
def _handle_event_cancelled(self, iq):
    """
    Handle a 'cancelled' Iq: the device confirmed our cancel request.

    Notifies the session callback, then discards the session.
    """
    seq = iq['cancelled']['seqnr']
    self.sessions[seq]["callback"](from_jid=iq['from'], result="cancelled")
    # Cancellation confirmed; the session is finished.
    del self.sessions[seq]
def _handle_event_fields(self, msg):
    """
    Received Msg with fields - this is a data response to a request.
    Issues one "fields" callback per (node, timestamp) pair in the stanza.
    If this is the last data block, issue a "done" callback and delete
    the session.
    """
    seqnr = msg['fields']['seqnr'];
    callback = self.sessions[seqnr]["callback"];
    for node in msg['fields']['nodes']:
        for ts in node['timestamps']:
            # Re-pack each stanza data element into a plain field dict.
            fields = [];
            for d in ts['datas']:
                field_block = {};
                field_block["name"] = d['name'];
                field_block["typename"] = d._get_typename();
                field_block["value"] = d['value'];
                # unit/dataType/flags are optional; only forward when set.
                if not d['unit'] == "": field_block["unit"] = d['unit'];
                if not d['dataType'] == "": field_block["dataType"] = d['dataType'];
                flags = d._get_flags();
                if not len(flags) == 0:
                    field_block["flags"] = flags;
                fields.append(field_block);
            callback(from_jid=msg['from'], result="fields", nodeId=node['nodeId'], timestamp=ts['value'], fields=fields);
    if msg['fields']['done'] == "true":
        callback(from_jid=msg['from'], result="done");
        # Session done
        del self.sessions[seqnr];
def _handle_event_failure(self, msg):
    """
    Handle a 'failure' Msg: the readout failed on the device side.

    Relays node id, timestamp and error text to the session callback,
    then drops the session.
    """
    seq = msg['failure']['seqnr']
    err = msg['failure']['error']
    notify = self.sessions[seq]["callback"]
    notify(from_jid=msg['from'], result="failure", nodeId=err['nodeId'],
           timestamp=err['timestamp'], error_msg=err['text'])
    # Readout is over for this session.
    del self.sessions[seq]
def _handle_event_started(self, msg):
    """
    Handle a 'started' Msg: a queued readout request is now running.

    Only informs the session callback; the session itself continues.
    """
    seq = msg['started']['seqnr']
    self.sessions[seq]["callback"](from_jid=msg['from'], result="started")
|
clientchat.py |
import os
import sys
import threading
from socket import *
import warcode as wc
import configurationmanager as cm
import securedsocket as ss
import sys, traceback
class ClientChat:
    # Chat client for the battleship game: sends messages through one
    # RSA-secured UDP socket and receives room traffic (plus protocol
    # acknowledgements) on another, using two daemon worker threads.

    def __init__(self,send_udp_socket,recv_udp_socket,name):
        """Wrap both UDP sockets in RSA-secured sockets and start the
        reception and display worker threads.

        Arguments:
            send_udp_socket -- bound UDP socket for outgoing messages
            recv_udp_socket -- bound UDP socket for incoming messages
            name -- player name (surrounding whitespace is stripped)
        """
        self.name = name.strip()
        print("Hi " + self.name.upper() + ", welcome to the battleship game!")
        self.running = True
        # Spool of received, not-yet-displayed messages.
        self.messages = []
        self.threads = []
        self.host_name = cm.server_host
        self.udp_server_port = cm.udp_server_port
        # WarCode encodes/decodes the chat wire protocol.
        self.code = wc.WarCode()
        # prepare for UDP transmission (chat room)
        self.udp_send_socket = ss.RSASocket(send_udp_socket)
        self.udp_recv_socket = ss.RSASocket(recv_udp_socket)
        # thread to handle messages reception
        msg_reception_t = threading.Thread(target=self.handle_udp_msg_reception)
        msg_reception_t.daemon = True
        msg_reception_t.start()
        self.threads.append(msg_reception_t)
        # thread to handle messages display
        msg_handler_t = threading.Thread(target=self.handle_udp_msg_display)
        msg_handler_t.daemon = True
        msg_handler_t.start()
        self.threads.append(msg_handler_t)

    # finishes all the ports
    def quit(self):
        """Stop the worker threads and close every socket (best effort).

        NOTE(review): the join() calls can block if the reception thread is
        parked inside recvfrom(); the "Bye" datagram is sent to the *server*
        port, so it does not necessarily wake the local receive socket —
        confirm this shutdown path against the server behavior.
        """
        self.running = False
        temp_socket = socket(AF_INET, SOCK_DGRAM)
        temp_socket.sendto("Bye".encode(), (self.host_name, self.udp_server_port))
        for t in self.threads: # stops threads
            t.join()
        try:
            temp_socket.close()
            self.udp_send_socket.close()
            self.udp_recv_socket.close()
        except:
            # Best-effort cleanup: sockets may already be closed.
            pass

    # reads message from socket continuously
    def handle_udp_msg_reception(self):
        """Worker loop: receive datagrams, acknowledge them, spool them."""
        print("\tRECEPTION: TID = ", threading.current_thread()) ####################
        while self.running:
            decoded_msg, address = self.udp_recv_socket.recvfrom(1024)
            self.code.translate(decoded_msg)
            if (self.code.is_acknowledgement):
                continue # if it is an acknowledgement don't add it to the spool
            # Confirm reception to the sender before spooling the message.
            self.udp_recv_socket.sendto(self.code.acknowledgement(),address)
            self.messages.append(decoded_msg)
        print("\tRECEPTION: thread finished") ####################

    # pop messages from the messages spool
    def handle_udp_msg_display(self):
        """Worker loop: print spooled messages.

        NOTE(review): this busy-waits when the spool is empty, and pop()
        takes the *newest* message first (LIFO) — confirm that display
        order is intentional.
        """
        while self.running:
            if (self.messages != []):
                print(self.messages.pop() + "\n")

    # this will send a message to the chat room
    def send_msg(self, message):
        """Encode *message* according to its prefix and send it to the server.

        Prefixes: "-g" game-wide, "-t" team, "-p -<player>" private message;
        anything else is sent as a public message.
        """
        try:
            msg_tokens = message.split(" ")
            com = msg_tokens[0]
            msg = self.name + ": " + " ".join(msg_tokens[1:])
            if (com == "-g"):
                coded_message = self.code.game_message(msg)
            elif (com == "-t"):
                coded_message = self.code.team_message(msg)
            elif (com == "-p"):
                # "-p -bob hi" -> target "bob"; fall back to broadcast "*".
                player_name = msg_tokens[1][1:] if msg_tokens[1].find("-")==0 else "*"
                msg = self.name + ": " + " ".join(msg_tokens[2:])
                coded_message = self.code.player_message(player_name,msg)
            else:
                coded_message = self.code.public_message(self.name + ": " + message)
            self.udp_send_socket.sendto(coded_message, (self.host_name, self.udp_server_port))
        except Exception as e:
            print(e)
            traceback.print_exc(file=sys.stdout)
def main():
    """Entry point: read connection details from the one-shot hand-off file
    "client<code>", bind the two UDP sockets, then loop reading user input
    and forwarding each line to the chat room until the client stops.
    """
    code = input("Please enter the given code ")
    try:
        # The launcher writes name + socket endpoints into "client<code>";
        # the file is consumed once and then deleted.
        chat_file_name = "client" + code
        with open(chat_file_name, "r") as my_file:
            lines = my_file.readlines()
        os.remove(chat_file_name)
        name = lines[0]
        udp_send = lines[1].split(",")   # "host,port" for the sending socket
        print(" sending host " + udp_send[0] + "port " + udp_send[1])
        udp_recv = lines[2].split(",")   # "host,port" for the receiving socket
        print(" receiving host " + udp_recv[0] + "port " + udp_recv[1])
        print(str((udp_send[0][1:-1], int(udp_send[1]))))
        send_udp_socket = socket(AF_INET, SOCK_DGRAM)
        send_udp_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        # [1:-1] strips the quoting characters around the host string as
        # written by the launcher — presumably quotes; verify against writer.
        send_udp_socket.bind((udp_send[0][1:-1], int(udp_send[1])))
        recv_udp_socket = socket(AF_INET, SOCK_DGRAM)
        recv_udp_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        recv_udp_socket.bind((udp_recv[0][1:-1], int(udp_recv[1])))
        print("bound")
        client_chat = ClientChat(send_udp_socket,recv_udp_socket,name)
    except Exception as e:
        print(e)
        print("Unexpected error")
        return
    try:
        running = True
        print("Legend to send messages :")
        print("prefix -g\tto all players in this game : '-g Lets start guys ' ")
        print("prefix -t\tto all players in your team : '-t We won guys ' ")
        print("prefix -p\tto a -player in this server : '-p -bob finish him!! ' ")
        while running:
            running = cm.client_is_running
            msg = input(":")
            client_chat.send_msg(msg)
        client_chat.quit()
    except:
        # Ctrl-C / EOF on input() simply ends the session.
        pass
    finally:
        sys.exit(0)


if __name__ == "__main__":
    main()
test.py | #!/usr/bin/env python3
# Copyright 2020 Anapaya Systems
import http.server
import time
import threading
from plumbum import cmd
from acceptance.common import base
from acceptance.common import tools
from acceptance.common import scion
class Test(base.TestBase):
    """
    Constructs a simple Hidden Paths topology with one core, four leaf ASes and
    two hidden path groups.

    AS 1-ff00:0:1 is core.
    AS 1-ff00:0:2, 1-ff00:0:3, 1-ff00:0:4, 1-ff00:0:5 are leaves.
    We use the shortnames AS1, AS2, etc. for the ASes above.

    The two hidden paths groups are owned by the registry AS, and indexed
    according to who the writer AS is. The groups are as follows:

    Group ff00:0:2-3 contains the following roles:
        Registry: AS2
        Writer: AS3
        Client: AS5
    Group ff00:0:2-4 contains the following roles:
        Registry: AS2
        Writer: AS4
        Client: AS5

    We test for connectivity between all pairs of ASes in the same group.
    Testing is done using showpaths with JSON output.

    Additionally, we test that the ASes in different groups cannot talk
    to each other. Thus, the tests are:

    Expect connectivity:
        AS2 <-> AS3, AS2 <-> AS5, AS3 <-> AS5 (Group ff00:0:2-3)
        AS2 <-> AS4, AS2 <-> AS5, AS4 <-> AS5 (Group ff00:0:2-4)
    Expect no connectivity:
        AS3 <-> AS4 (Group ff00:0:2-3 to group ff00:0:2-4)
    """

    def main(self):
        # When not dispatching to a nested sub-command, run the whole
        # setup / test / teardown cycle ourselves.
        if not self.nested_command:
            try:
                self.setup()
                # Give the topology time to build hidden path segments.
                time.sleep(20)
                self._run()
            finally:
                self.teardown()

    def setup(self):
        """Prepare the topology.

        Rewrites the daemon and control-service configurations so they fetch
        their hidden-paths group files from a local HTTP server, exposes the
        hidden-segment services in the topology files, then starts everything.
        """
        self.setup_prepare()

        http_server_port = 9090
        as_numbers = ["2", "3", "4", "5"]

        # HTTP configuration server runs on 0.0.0.0 and needs to be reachable from
        # every daemon and control service. There is one host IP on every AS bridge.
        # We use this IP for the configuration download URLs.
        server_ips = {
            "2": "172.20.0.49",
            "3": "172.20.0.57",
            "4": "172.20.0.65",
            "5": "172.20.0.73",
        }
        # XXX(lukedirtwalker): The ports below are the dynamic QUIC server
        # ports. Thanks to the docker setup they are setup consistently so we
        # can use them. Optimally we would define a static server port inside
        # the CS and use that one instead.
        control_addresses = {
            "2": "172.20.0.51:32768",
            "3": "172.20.0.59:32768",
            "4": "172.20.0.67:32768",
            "5": "172.20.0.75:32768",
        }
        # Each AS participating in hidden paths has their own hidden paths configuration file.
        hp_configs = {
            "2": "hp_groups_as2_as5.yml",
            "3": "hp_groups_as3.yml",
            "4": "hp_groups_as4.yml",
            "5": "hp_groups_as2_as5.yml",
        }

        # Edit all the configuration files of daemons and control services with
        # the computed configuration URL.
        for as_number in as_numbers:
            hp_config_url = "http://%s:%d/acceptance/hidden_paths/testdata/%s" % (
                server_ips[as_number], http_server_port, hp_configs[as_number])

            as_dir = "ASff00_0_%s" % as_number
            as_dir_path = self.test_state.artifacts / "gen" / as_dir

            daemon_path = as_dir_path / "sd.toml"
            scion.update_toml({"sd.hidden_path_groups": hp_config_url}, [daemon_path])

            control_id = "cs1-ff00_0_%s-1" % as_number
            control_file = "%s.toml" % control_id
            control_path = as_dir_path / control_file
            scion.update_toml({"path.hidden_paths_cfg": hp_config_url}, [control_path])

            # For simplicity, expose the services in all hidden paths ASes,
            # even though some don't need the registration service.
            topology_update = {
                "hidden_segment_lookup_service.%s.addr" % control_id:
                    control_addresses[as_number],
                "hidden_segment_registration_service.%s.addr" % control_id:
                    control_addresses[as_number],
            }
            topology_file = as_dir_path / "topology.json"
            scion.update_json(topology_update, [topology_file])

        # FIX: bind the HTTP server to http_server_port instead of a second
        # hard-coded 9090, so the bound port can never drift from the port
        # used in the download URLs above.
        server = http.server.HTTPServer(
            ("0.0.0.0", http_server_port), http.server.SimpleHTTPRequestHandler)
        server_thread = threading.Thread(target=configuration_server, args=[server])
        server_thread.start()

        self.setup_start()
        time.sleep(4)  # Give applications time to download configurations

        self._testers = {
            "2": "tester_1-ff00_0_2",
            "3": "tester_1-ff00_0_3",
            "4": "tester_1-ff00_0_4",
            "5": "tester_1-ff00_0_5",
        }
        self._ases = {
            "2": "1-ff00:0:2",
            "3": "1-ff00:0:3",
            "4": "1-ff00:0:4",
            "5": "1-ff00:0:5",
        }
        self._daemons_api = {
            "2": "172.20.0.52:30255",
            "3": "172.20.0.60:30255",
            "4": "172.20.0.68:30255",
            "5": "172.20.0.76:30255",
        }
        self._dispatcher_ips = {
            "2": "172.20.0.51",
            "3": "172.20.0.59",
            "4": "172.20.0.67",
            "5": "172.20.0.75",
        }
        # Configurations have been served; stop the helper HTTP server.
        server.shutdown()

    def _run(self):
        """Exercise connectivity inside each group and isolation across groups."""
        # Group ff00:0:2-3: all pairs must connect (exit code 0).
        self._showpaths_bidirectional("2", "3", 0)
        self._showpaths_bidirectional("2", "5", 0)
        self._showpaths_bidirectional("3", "5", 0)

        # Group ff00:0:2-4: all pairs must connect (exit code 0).
        self._showpaths_bidirectional("2", "4", 0)
        self._showpaths_bidirectional("2", "5", 0)
        self._showpaths_bidirectional("4", "5", 0)

        # Across groups: AS3 and AS4 must NOT see each other (exit code 1).
        self._showpaths_bidirectional("3", "4", 1)

    def _showpaths_bidirectional(self, source: str, destination: str, retcode: int):
        """Run showpaths in both directions between two ASes."""
        self._showpaths_run(source, destination, retcode)
        self._showpaths_run(destination, source, retcode)

    def _showpaths_run(self, source_as: str, destination_as: str, retcode: int):
        """Run `scion sp` inside the source tester container.

        Asserts (via plumbum's retcode=) that the command exits with the
        expected return code.
        """
        print(cmd.docker("exec", "-t", self._testers[source_as], "./bin/scion",
                         "sp", self._ases[destination_as],
                         "--sciond", self._daemons_api[source_as],
                         "--local", self._dispatcher_ips[source_as],
                         "--timeout", "2s",
                         retcode=retcode))
def configuration_server(server):
    """Run *server* until shutdown() is called from another thread.

    Blocks inside serve_forever(); intended as the target of a helper
    thread that serves the hidden-paths configuration files over HTTP.
    """
    host, port = server.server_address
    print("HTTP configuration server starting on %s:%d." % (host, port))
    server.serve_forever()
    print("HTTP configuration server closed.")
if __name__ == "__main__":
    # Register the CLI sub-commands, wire up the docker-based SCION
    # topology helpers, then execute the acceptance test.
    base.register_commands(Test)
    Test.test_state = base.TestState(scion.SCIONDocker(), tools.DC())
    Test.run()
|
fastscan.py | from database import mushilogger
import socket
import threading
import sys
class FastScan():
    """Multi-threaded TCP connect() scanner for a fixed set of common ports."""

    def __init__(self):
        print("init NetworkScan..")
        self.mlogger = mushilogger.MushiLogger()

    def execute_fastscan(self, ipaddr):
        """Probe *ipaddr* on a list of well-known TCP ports and print the
        open ones.

        One thread per candidate port; a port counts as open when
        connect_ex() returns 0. Output order follows thread completion,
        exactly as before.

        Arguments:
            ipaddr -- target host (IP address or hostname)
        """
        # Well-known service ports: SSH, HTTP(S), Kerberos, RPC, NetBIOS,
        # LDAP, SMB, RDP.
        scan_port = [22, 80, 88, 135, 139, 389, 443, 445, 3389]
        threads = []
        open_ports = []

        def probe(port):
            # FIX: use the socket as a context manager so it is always
            # closed, even if connect_ex raises (the original leaked the
            # socket in that case), and catch only OSError instead of a
            # bare except that would also swallow KeyboardInterrupt.
            try:
                with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as con:
                    # connect_ex reports errors as a return code (0 == open)
                    # instead of raising.
                    if con.connect_ex((ipaddr, port)) == 0:
                        # list.append is atomic, so no lock is needed here.
                        open_ports.append(port)
            except OSError:
                pass  # unreachable host / resolution failure: not open

        for port in scan_port:
            thread = threading.Thread(target=probe, args=(port,))
            thread.start()
            threads.append(thread)
        for t in threads:
            t.join()
        print(open_ports)
|
program.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for TensorBoard command line program.
This is a lightweight module for bringing up a TensorBoard HTTP server
or emulating the `tensorboard` shell command.
Those wishing to create custom builds of TensorBoard can use this module
by swapping out `tensorboard.main` with the custom definition that
modifies the set of plugins and static assets.
This module does not depend on first-party plugins or the default web
server assets. Those are defined in `tensorboard.default`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import argparse
import atexit
from collections import defaultdict
import errno
import os
import signal
import socket
import sys
import threading
import time
import inspect
import absl.logging
import six
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
from werkzeug import serving
from tensorboard import manager
from tensorboard import version
from tensorboard.backend import application
from tensorboard.backend.event_processing import event_file_inspector as efi
from tensorboard.plugins import base_plugin
from tensorboard.plugins.core import core_plugin
from tensorboard.util import tb_logging
try:
from absl import flags as absl_flags
from absl.flags import argparse_flags
except ImportError:
# Fall back to argparse with no absl flags integration.
absl_flags = None
argparse_flags = argparse
logger = tb_logging.get_logger()
def setup_environment():
    """Apply the recommended process-wide tweaks for running TensorBoard.

    Mutates global state in the Python process (logging verbosity and the
    WSGI handler's HTTP protocol version), so call it from entry points,
    not from library code.
    """
    # Quiet absl's default INFO chatter.
    absl.logging.set_verbosity(absl.logging.WARNING)

    # Werkzeug defaults to HTTP/1.0, which creates a new TCP socket and
    # Python thread for every request. HTTP/1.1 keeps connections alive;
    # the trade-off is that responses must carry a Content-Length header
    # or use chunked encoding when streaming.
    serving.WSGIRequestHandler.protocol_version = 'HTTP/1.1'
def get_default_assets_zip_provider():
    """Locate the stock TensorBoard web assets collection.

    Returns:
        A zero-argument callable that returns a newly opened binary file
        handle to webfiles.zip (the caller must close it), or None when the
        archive is not found. Paths inside the zip are treated as absolute
        paths on the web server.
    """
    # NOTE(review): sys._getframe(1) resolves relative to the *caller's*
    # frame, so webfiles.zip is expected next to the module that invokes
    # this function — confirm that is the intended lookup location.
    caller_file = inspect.getfile(sys._getframe(1))
    path = os.path.join(os.path.dirname(caller_file), 'webfiles.zip')
    if os.path.exists(path):
        return lambda: open(path, 'rb')
    logger.warning('webfiles.zip static assets not found: %s', path)
    return None
class TensorBoard(object):
    """Class for running TensorBoard.

    Fields:
      plugin_loaders: Set from plugins passed to constructor.
      assets_zip_provider: Set by constructor.
      server_class: Set by constructor.
      flags: An argparse.Namespace set by the configure() method.
      cache_key: As `manager.cache_key`; set by the configure() method.
    """

    def __init__(self,
                 plugins=None,
                 assets_zip_provider=None,
                 server_class=None):
        """Creates new instance.

        Args:
          plugins: A list of TensorBoard plugins to load, as TBLoader instances or
            TBPlugin classes. If not specified, defaults to first-party plugins.
          assets_zip_provider: Delegates to TBContext or uses default if None.
          server_class: An optional factory for a `TensorBoardServer` to use
            for serving the TensorBoard WSGI app. If provided, its callable
            signature should match that of `TensorBoardServer.__init__`.

        :type plugins: list[Union[base_plugin.TBLoader, Type[base_plugin.TBPlugin]]]
        :type assets_zip_provider: () -> file
        :type server_class: class
        """
        if plugins is None:
            # Deferred import: tensorboard.default pulls in every first-party
            # plugin, which custom builds deliberately avoid.
            from tensorboard import default
            plugins = default.get_plugins()
        if assets_zip_provider is None:
            assets_zip_provider = get_default_assets_zip_provider()
        if server_class is None:
            server_class = create_port_scanning_werkzeug_server
        def make_loader(plugin):
            # Accept both ready-made loaders and bare TBPlugin classes.
            if isinstance(plugin, base_plugin.TBLoader):
                return plugin
            if issubclass(plugin, base_plugin.TBPlugin):
                return base_plugin.BasicLoader(plugin)
            raise ValueError("Not a TBLoader or TBPlugin subclass: %s" % plugin)
        self.plugin_loaders = [make_loader(p) for p in plugins]
        self.assets_zip_provider = assets_zip_provider
        self.server_class = server_class
        self.flags = None

    def configure(self, argv=('',), **kwargs):
        """Configures TensorBoard behavior via flags.

        This method will populate the "flags" property with an argparse.Namespace
        representing flag values parsed from the provided argv list, overridden by
        explicit flags from remaining keyword arguments.

        Args:
          argv: Can be set to CLI args equivalent to sys.argv; the first arg is
            taken to be the name of the path being executed.
          kwargs: Additional arguments will override what was parsed from
            argv. They must be passed as Python data structures, e.g.
            `foo=1` rather than `foo="1"`.

        Returns:
          Either argv[:1] if argv was non-empty, or [''] otherwise, as a mechanism
          for absl.app.run() compatibility.

        Raises:
          ValueError: If flag values are invalid.
        """
        parser = argparse_flags.ArgumentParser(
            prog='tensorboard',
            description=('TensorBoard is a suite of web applications for '
                         'inspecting and understanding your TensorFlow runs '
                         'and graphs. https://github.com/tensorflow/tensorboard '))
        # Each plugin loader contributes its own command line flags.
        for loader in self.plugin_loaders:
            loader.define_flags(parser)
        arg0 = argv[0] if argv else ''
        flags = parser.parse_args(argv[1:])  # Strip binary name from argv.
        self.cache_key = manager.cache_key(
            working_directory=os.getcwd(),
            arguments=argv[1:],
            configure_kwargs=kwargs,
        )
        if absl_flags and arg0:
            # Only expose main module Abseil flags as TensorBoard native flags.
            # This is the same logic Abseil's ArgumentParser uses for determining
            # which Abseil flags to include in the short helpstring.
            for flag in set(absl_flags.FLAGS.get_key_flags_for_module(arg0)):
                if hasattr(flags, flag.name):
                    raise ValueError('Conflicting Abseil flag: %s' % flag.name)
                setattr(flags, flag.name, flag.value)
        # Explicit keyword arguments override anything parsed from argv.
        for k, v in kwargs.items():
            if not hasattr(flags, k):
                raise ValueError('Unknown TensorBoard flag: %s' % k)
            setattr(flags, k, v)
        for loader in self.plugin_loaders:
            loader.fix_flags(flags)
        self.flags = flags
        return [arg0]

    def main(self, ignored_argv=('',)):
        """Blocking main function for TensorBoard.

        This method is called by `tensorboard.main.run_main`, which is the
        standard entrypoint for the tensorboard command line program. The
        configure() method must be called first.

        Args:
          ignored_argv: Do not pass. Required for Abseil compatibility.

        Returns:
          Process exit code, i.e. 0 if successful or non-zero on failure. In
          practice, an exception will most likely be raised instead of
          returning non-zero.

        :rtype: int
        """
        self._install_signal_handler(signal.SIGTERM, "SIGTERM")
        if self.flags.inspect:
            # --inspect short-circuits serving: just dump event-file stats.
            logger.info('Not bringing up TensorBoard, but inspecting event files.')
            event_file = os.path.expanduser(self.flags.event_file)
            efi.inspect(self.flags.logdir, event_file, self.flags.tag)
            return 0
        if self.flags.version_tb:
            print(version.VERSION)
            return 0
        try:
            server = self._make_server()
            sys.stderr.write('TensorBoard %s at %s (Press CTRL+C to quit)\n' %
                             (version.VERSION, server.get_url()))
            sys.stderr.flush()
            self._register_info(server)
            server.serve_forever()
            return 0
        except TensorBoardServerException as e:
            logger.error(e.msg)
            sys.stderr.write('ERROR: %s\n' % e.msg)
            sys.stderr.flush()
            return -1

    def launch(self):
        """Python API for launching TensorBoard.

        This method is the same as main() except it launches TensorBoard in
        a separate permanent thread. The configure() method must be called
        first.

        Returns:
          The URL of the TensorBoard web server.

        :rtype: str
        """
        # Make it easy to run TensorBoard inside other programs, e.g. Colab.
        server = self._make_server()
        thread = threading.Thread(target=server.serve_forever, name='TensorBoard')
        thread.daemon = True
        thread.start()
        return server.get_url()

    def _register_info(self, server):
        """Write a TensorBoardInfo file and arrange for its cleanup.

        Args:
          server: The result of `self._make_server()`.
        """
        server_url = urllib.parse.urlparse(server.get_url())
        info = manager.TensorBoardInfo(
            version=version.VERSION,
            start_time=int(time.time()),
            port=server_url.port,
            pid=os.getpid(),
            path_prefix=self.flags.path_prefix,
            logdir=self.flags.logdir,
            db=self.flags.db,
            cache_key=self.cache_key,
        )
        # Register cleanup before writing, so a crash mid-write still removes
        # the (possibly partial) info file at exit.
        atexit.register(manager.remove_info_file)
        manager.write_info_file(info)

    def _install_signal_handler(self, signal_number, signal_name):
        """Set a signal handler to gracefully exit on the given signal.

        When this process receives the given signal, it will run `atexit`
        handlers and then exit with `0`.

        Args:
          signal_number: The numeric code for the signal to handle, like
            `signal.SIGTERM`.
          signal_name: The human-readable signal name.
        """
        old_signal_handler = None  # set below
        def handler(handled_signal_number, frame):
            # In case we catch this signal again while running atexit
            # handlers, take the hint and actually die.
            signal.signal(signal_number, signal.SIG_DFL)
            sys.stderr.write("TensorBoard caught %s; exiting...\n" % signal_name)
            # The main thread is the only non-daemon thread, so it suffices to
            # exit hence.
            if old_signal_handler not in (signal.SIG_IGN, signal.SIG_DFL):
                # Chain to any previously installed Python-level handler.
                old_signal_handler(handled_signal_number, frame)
            sys.exit(0)
        old_signal_handler = signal.signal(signal_number, handler)

    def _make_server(self):
        """Constructs the TensorBoard WSGI app and instantiates the server."""
        app = application.standard_tensorboard_wsgi(self.flags,
                                                    self.plugin_loaders,
                                                    self.assets_zip_provider)
        return self.server_class(app, self.flags)
@six.add_metaclass(ABCMeta)
class TensorBoardServer(object):
    """Class for customizing TensorBoard WSGI app serving.

    Abstract interface: concrete servers (or factories such as
    `with_port_scanning(...)`) implement these three methods.
    """

    @abstractmethod
    def __init__(self, wsgi_app, flags):
        """Create a flag-configured HTTP server for TensorBoard's WSGI app.

        Args:
          wsgi_app: The TensorBoard WSGI application to create a server for.
          flags: argparse.Namespace instance of TensorBoard flags.
        """
        raise NotImplementedError()

    @abstractmethod
    def serve_forever(self):
        """Blocking call to start serving the TensorBoard server."""
        raise NotImplementedError()

    @abstractmethod
    def get_url(self):
        """Returns a URL at which this server should be reachable."""
        raise NotImplementedError()
class TensorBoardServerException(Exception):
    """Exception raised by TensorBoardServer for user-friendly errors.

    Subclasses of TensorBoardServer can raise this exception in order to
    generate a clean error message for the user rather than a stacktrace.
    """

    def __init__(self, msg):
        # FIX: forward the message to Exception so that str(e) and e.args
        # carry it; the original stored only .msg, leaving str(e) empty in
        # tracebacks and generic logging.
        super(TensorBoardServerException, self).__init__(msg)
        self.msg = msg
class TensorBoardPortInUseError(TensorBoardServerException):
    """Error raised when attempting to bind to a port that is in use.

    This should be raised when it is expected that binding to another
    similar port would succeed. It is used as a signal to indicate that
    automatic port searching should continue rather than abort.
    """
    pass
def with_port_scanning(cls):
    """Create a server factory that performs port scanning.

    This function returns a callable whose signature matches the
    specification of `TensorBoardServer.__init__`, using `cls` as an
    underlying implementation. It passes through `flags` unchanged except
    in the case that `flags.port is None`, in which case it repeatedly
    instantiates the underlying server with new port suggestions.

    Args:
      cls: A valid implementation of `TensorBoardServer`. This class's
        initializer should raise a `TensorBoardPortInUseError` upon
        failing to bind to a port when it is expected that binding to
        another nearby port might succeed.
        The initializer for `cls` will only ever be invoked with `flags`
        such that `flags.port is not None`.

    Returns:
      A function that implements the `__init__` contract of
      `TensorBoardServer`.
    """
    def init(wsgi_app, flags):
        # base_port: what's the first port to which we should try to bind?
        # should_scan: if that fails, shall we try additional ports?
        # max_attempts: how many ports shall we try?
        should_scan = flags.port is None
        base_port = core_plugin.DEFAULT_PORT if flags.port is None else flags.port
        max_attempts = 10 if should_scan else 1

        if base_port > 0xFFFF:
            raise TensorBoardServerException(
                'TensorBoard cannot bind to port %d > %d' % (base_port, 0xFFFF)
            )
        # Clamp base_port so base_port + max_attempts never runs past the
        # top of the valid port range.
        base_port = min(base_port + max_attempts, 0x10000) - max_attempts

        for port in range(base_port, base_port + max_attempts):
            # Each attempt uses a private copy of the flags with the
            # candidate port filled in, leaving the caller's flags intact.
            subflags = argparse.Namespace(**vars(flags))
            subflags.port = port
            try:
                return cls(wsgi_app=wsgi_app, flags=subflags)
            except TensorBoardPortInUseError:
                if not should_scan:
                    raise
        # All attempts failed to bind.
        raise TensorBoardServerException(
            'TensorBoard could not bind to any port around %s '
            '(tried %d times)'
            % (base_port, max_attempts))
    return init
class WerkzeugServer(serving.ThreadedWSGIServer, TensorBoardServer):
    """Implementation of TensorBoardServer using the Werkzeug dev server."""
    # ThreadedWSGIServer handles this in werkzeug 0.12+ but we allow 0.11.x.
    daemon_threads = True

    def __init__(self, wsgi_app, flags):
        """Bind the Werkzeug server, mapping bind failures to friendly errors.

        Args:
          wsgi_app: The TensorBoard WSGI application to serve.
          flags: argparse.Namespace of TensorBoard flags (host, port, ...).

        Raises:
          TensorBoardServerException: for identifiable user errors
            (permission denied, no free port, bad address).
          TensorBoardPortInUseError: when the requested port is taken and
            scanning a nearby port might succeed.
        """
        self._flags = flags
        host = flags.host
        port = flags.port
        # Without an explicit host, we default to serving on all interfaces,
        # and will attempt to serve both IPv4 and IPv6 traffic through one
        # socket.
        self._auto_wildcard = not host
        if self._auto_wildcard:
            host = self._get_wildcard_address(port)
        try:
            super(WerkzeugServer, self).__init__(host, port, wsgi_app)
        except socket.error as e:
            # The hasattr guards protect against platforms whose errno
            # module lacks a particular constant.
            if hasattr(errno, 'EACCES') and e.errno == errno.EACCES:
                raise TensorBoardServerException(
                    'TensorBoard must be run as superuser to bind to port %d' %
                    port)
            elif hasattr(errno, 'EADDRINUSE') and e.errno == errno.EADDRINUSE:
                if port == 0:
                    # Port 0 means "pick any free port"; failure here means
                    # the OS could not find one at all.
                    raise TensorBoardServerException(
                        'TensorBoard unable to find any open port')
                else:
                    # Signals with_port_scanning() to try the next port.
                    raise TensorBoardPortInUseError(
                        'TensorBoard could not bind to port %d, it was already in use' %
                        port)
            elif hasattr(errno, 'EADDRNOTAVAIL') and e.errno == errno.EADDRNOTAVAIL:
                raise TensorBoardServerException(
                    'TensorBoard could not bind to unavailable address %s' % host)
            elif hasattr(errno, 'EAFNOSUPPORT') and e.errno == errno.EAFNOSUPPORT:
                raise TensorBoardServerException(
                    'Tensorboard could not bind to unsupported address family %s' %
                    host)
            # Raise the raw exception if it wasn't identifiable as a user error.
            raise

    def _get_wildcard_address(self, port):
        """Returns a wildcard address for the port in question.

        This will attempt to follow the best practice of calling getaddrinfo() with
        a null host and AI_PASSIVE to request a server-side socket wildcard address.
        If that succeeds, this returns the first IPv6 address found, or if none,
        then returns the first IPv4 address. If that fails, then this returns the
        hardcoded address "::" if socket.has_ipv6 is True, else "0.0.0.0".
        """
        fallback_address = '::' if socket.has_ipv6 else '0.0.0.0'
        if hasattr(socket, 'AI_PASSIVE'):
            try:
                addrinfos = socket.getaddrinfo(None, port, socket.AF_UNSPEC,
                                               socket.SOCK_STREAM, socket.IPPROTO_TCP,
                                               socket.AI_PASSIVE)
            except socket.gaierror as e:
                logger.warn('Failed to auto-detect wildcard address, assuming %s: %s',
                            fallback_address, str(e))
                return fallback_address
            addrs_by_family = defaultdict(list)
            for family, _, _, _, sockaddr in addrinfos:
                # Format of the "sockaddr" socket address varies by address family,
                # but [0] is always the IP address portion.
                addrs_by_family[family].append(sockaddr[0])
            # Prefer IPv6 (can usually also carry mapped IPv4; see server_bind).
            if hasattr(socket, 'AF_INET6') and addrs_by_family[socket.AF_INET6]:
                return addrs_by_family[socket.AF_INET6][0]
            if hasattr(socket, 'AF_INET') and addrs_by_family[socket.AF_INET]:
                return addrs_by_family[socket.AF_INET][0]
        logger.warn('Failed to auto-detect wildcard address, assuming %s',
                    fallback_address)
        return fallback_address

    def server_bind(self):
        """Override to enable IPV4 mapping for IPV6 sockets when desired.

        The main use case for this is so that when no host is specified, TensorBoard
        can listen on all interfaces for both IPv4 and IPv6 connections, rather than
        having to choose v4 or v6 and hope the browser didn't choose the other one.
        """
        socket_is_v6 = (
            hasattr(socket, 'AF_INET6') and self.socket.family == socket.AF_INET6)
        has_v6only_option = (
            hasattr(socket, 'IPPROTO_IPV6') and hasattr(socket, 'IPV6_V6ONLY'))
        if self._auto_wildcard and socket_is_v6 and has_v6only_option:
            try:
                # Clearing IPV6_V6ONLY lets the v6 socket also accept
                # v4-mapped connections.
                self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
            except socket.error as e:
                # Log a warning on failure to dual-bind, except for EAFNOSUPPORT
                # since that's expected if IPv4 isn't supported at all (IPv6-only).
                if hasattr(errno, 'EAFNOSUPPORT') and e.errno != errno.EAFNOSUPPORT:
                    logger.warn('Failed to dual-bind to IPv4 wildcard: %s', str(e))
        super(WerkzeugServer, self).server_bind()

    def handle_error(self, request, client_address):
        """Override to get rid of noisy EPIPE errors."""
        del request  # unused
        # Kludge to override a SocketServer.py method so we can get rid of noisy
        # EPIPE errors. They're kind of a red herring as far as errors go. For
        # example, `curl -N http://localhost:6006/ | head` will cause an EPIPE.
        exc_info = sys.exc_info()
        e = exc_info[1]
        if isinstance(e, IOError) and e.errno == errno.EPIPE:
            logger.warn('EPIPE caused by %s in HTTP serving' % str(client_address))
        else:
            logger.error('HTTP serving error', exc_info=exc_info)

    def get_url(self):
        """Return the browsable URL, bracketing IPv6 hosts as needed."""
        if self._auto_wildcard:
            display_host = socket.gethostname()
        else:
            host = self._flags.host
            display_host = (
                '[%s]' % host if ':' in host and not host.startswith('[') else host)
        return 'http://%s:%d%s/' % (display_host, self.server_port,
                                    self._flags.path_prefix.rstrip('/'))
# Factory used by callers that want automatic port scanning on top of the
# Werkzeug-based server (see with_port_scanning above).
create_port_scanning_werkzeug_server = with_port_scanning(WerkzeugServer)
|
prepare.py | #!/usr/bin/env python2.7
# pylint: disable=bad-indentation, no-member, invalid-name, line-too-long
import os
import shutil
import random
import argparse
import multiprocessing
import cv2
import lmdb
import caffe
import numpy as np
from jfda.config import cfg
from jfda.utils import load_wider, load_celeba
from jfda.utils import get_logger, crop_face
from jfda.detector import JfdaDetector
import pyximport
pyximport.install(setup_args={'include_dirs': np.get_include()})
from bbox import bbox_overlaps
logger = get_logger()
G8 = 8*1024*1024*1024
G16 = 2*G8
G24 = 3*G8
G32 = 4*G8
def fill_queues(data, qs):
    """Distribute the items of `data` round-robin across the queues in `qs`."""
    n_queues = len(qs)
    for idx, item in enumerate(data):
        qs[idx % n_queues].put(item)
def remove_if_exists(db):
    """Delete the LMDB directory at `db` if it is present, logging the removal."""
    if not os.path.exists(db):
        return
    logger.info('remove %s'%db)
    shutil.rmtree(db)
def get_detector():
    """Build the proposal detector for the configured net type.

    Returns None when no proposal nets are configured or detection is
    disabled; otherwise configures caffe for GPU/CPU and returns a
    JfdaDetector over the configured nets.
    """
    nets = cfg.PROPOSAL_NETS[cfg.NET_TYPE]
    if nets is None or not cfg.USE_DETECT:
        return None
    if cfg.GPU_ID >= 0:
        caffe.set_mode_gpu()
        caffe.set_device(cfg.GPU_ID)
    else:
        caffe.set_mode_cpu()
    return JfdaDetector(nets)
# =========== region proposal =============================
def sliding_windows(x, y, width, height, kw, kh, sw, sh):
    '''Given a region (x, y, width, height), return sliding window locations
    as an (n, 4) float32 array of (x1, y1, x2, y2).

    x, y: region top left position
    width, height: region width and height
    kw, kh: window width and height
    sw, sh: stride width and height
    '''
    # All window top-left corners relative to the region origin.
    grid_x, grid_y = np.meshgrid(np.arange(0, width - kw, sw),
                                 np.arange(0, height - kh, sh))
    top_left = np.vstack([grid_x.ravel(), grid_y.ravel()]).transpose()
    # Every window has the same size (kw, kh).
    sizes = np.tile(np.array([kw, kh]), (len(top_left), 1))
    bbox = np.hstack([top_left, sizes])
    # Shift to absolute coordinates and convert (x1, y1, w, h) -> corners.
    bbox[:, 0] += x
    bbox[:, 1] += y
    bbox[:, 2] += bbox[:, 0]
    bbox[:, 3] += bbox[:, 1]
    return bbox.astype(np.float32)
def proposal(img, gt_bboxes, detector=None):
    '''Given an image with face bboxes, propose negatives, positives and
    part faces.

    For rNet and oNet a previous network (`detector`) proposes candidate
    bboxes; for pNet (detector is None) candidates come from randomized
    sliding windows around each ground-truth face plus global random crops.

    Args:
        img: BGR image array of shape (h, w, 3).
        gt_bboxes: (n, 4) array of ground-truth boxes [x1, y1, x2, y2].
        detector: optional JfdaDetector that generates candidate boxes.

    Return:
        (negatives, positives, part)
        negatives: list of (data, bbox)
        positives: list of (data, bbox, bbox_target)
        part: list of (data, bbox, bbox_target)
        where bbox_target is the gt offset normalized by the candidate's
        side length.
    '''
    # ======================= proposal for rnet and onet ==============
    if detector is not None:
        assert isinstance(detector, JfdaDetector)
        bboxes = detector.detect(img, **cfg.DETECT_PARAMS)
        # keep bbox info, drop score, offset and landmark
        bboxes = bboxes[:, :4]
        ovs = bbox_overlaps(bboxes, gt_bboxes)
        ovs_max = ovs.max(axis=1)
        ovs_idx = ovs.argmax(axis=1)
        pos_idx = np.where(ovs_max > cfg.FACE_OVERLAP)[0]
        neg_idx = np.where(ovs_max < cfg.NONFACE_OVERLAP)[0]
        part_idx = np.where(np.logical_and(ovs_max > cfg.PARTFACE_OVERLAP,
                                           ovs_max <= cfg.FACE_OVERLAP))[0]
        # positives: strong overlap with some gt box
        positives = []
        for idx in pos_idx:
            bbox = bboxes[idx].reshape(4)
            gt_bbox = gt_bboxes[ovs_idx[idx]]
            data = crop_face(img, bbox)
            if data is None:
                continue
            # regression target normalized by the candidate's side length
            k = bbox[2] - bbox[0]
            bbox_target = (gt_bbox - bbox) / k
            positives.append((data, bbox, bbox_target))
        # part faces: intermediate overlap
        part = []
        for idx in part_idx:
            bbox = bboxes[idx].reshape(4)
            gt_bbox = gt_bboxes[ovs_idx[idx]]
            data = crop_face(img, bbox)
            if data is None:
                continue
            k = bbox[2] - bbox[0]
            bbox_target = (gt_bbox - bbox) / k
            part.append((data, bbox, bbox_target))
        # negatives: low overlap; keep at most NEG_DETECT_PER_IMAGE of them
        negatives = []
        np.random.shuffle(neg_idx)
        for idx in neg_idx[:cfg.NEG_DETECT_PER_IMAGE]:
            bbox = bboxes[idx].reshape(4)
            data = crop_face(img, bbox)
            if data is None:
                continue
            negatives.append((data, bbox))
        return negatives, positives, part
    # ======================= proposal for pnet =======================
    height, width = img.shape[:-1]
    negatives, positives, part = [], [], []
    # ===== proposal positives =====
    for gt_bbox in gt_bboxes:
        x, y = gt_bbox[:2]
        w, h = gt_bbox[2]-gt_bbox[0], gt_bbox[3]-gt_bbox[1]
        this_positives = []
        for scale in cfg.POS_PROPOSAL_SCALES:
            k = max(w, h) * scale
            stride = cfg.POS_PROPOSAL_STRIDE
            s = k * stride
            # random jitter of the search region around the gt box
            offset_x = (0.5 + np.random.rand()) * k / 2.
            offset_y = (0.5 + np.random.rand()) * k / 2.
            candidates = sliding_windows(x-offset_x, y-offset_y, w+2*offset_x, h+2*offset_y, k, k, s, s)
            ovs = bbox_overlaps(candidates, gt_bbox.reshape((1, 4)))
            ovs = ovs.reshape((1, len(candidates)))[0]
            pos_bboxes = candidates[ovs > cfg.FACE_OVERLAP, :]
            if len(pos_bboxes) > 0:
                np.random.shuffle(pos_bboxes)
                for bbox in pos_bboxes[:cfg.POS_PER_FACE]:
                    data = crop_face(img, bbox)
                    if data is None:
                        continue
                    bbox_target = (gt_bbox - bbox) / k
                    this_positives.append((data, bbox, bbox_target))
        # cap the per-face total across all scales
        random.shuffle(this_positives)
        positives.extend(this_positives[:cfg.POS_PER_FACE])
    # ===== proposal part faces =====
    for gt_bbox in gt_bboxes:
        x, y = gt_bbox[:2]
        w, h = gt_bbox[2]-gt_bbox[0], gt_bbox[3]-gt_bbox[1]
        this_part = []
        for scale in cfg.PART_PROPOSAL_SCALES:
            k = max(w, h) * scale
            stride = cfg.PART_PROPOSAL_STRIDE
            s = k * stride
            offset_x = (0.5 + np.random.rand()) * k / 2.
            offset_y = (0.5 + np.random.rand()) * k / 2.
            candidates = sliding_windows(x-offset_x, y-offset_y, w+2*offset_x, h+2*offset_y, k, k, s, s)
            ovs = bbox_overlaps(candidates, gt_bbox.reshape((1, 4)))
            ovs = ovs.reshape((1, len(candidates)))[0]
            part_bboxes = candidates[np.logical_and(ovs > cfg.PARTFACE_OVERLAP, ovs <= cfg.FACE_OVERLAP), :]
            if len(part_bboxes) > 0:
                np.random.shuffle(part_bboxes)
                for bbox in part_bboxes[:cfg.PART_PER_FACE]:
                    data = crop_face(img, bbox)
                    if data is None:
                        continue
                    bbox_target = (gt_bbox - bbox) / k
                    this_part.append((data, bbox, bbox_target))
        random.shuffle(this_part)
        # BUG FIX: this previously truncated with cfg.POS_PER_FACE,
        # inconsistent with the per-scale cfg.PART_PER_FACE cap above.
        part.extend(this_part[:cfg.PART_PER_FACE])
    # ===== proposal negatives =====
    for gt_bbox in gt_bboxes:
        x, y = gt_bbox[:2]
        w, h = gt_bbox[2]-gt_bbox[0], gt_bbox[3]-gt_bbox[1]
        this_negatives = []
        for scale in cfg.NEG_PROPOSAL_SCALES:
            k = max(w, h) * scale
            stride = cfg.NEG_PROPOSAL_STRIDE
            s = k * stride
            offset_x = (0.5 + np.random.rand()) * k / 2.
            offset_y = (0.5 + np.random.rand()) * k / 2.
            candidates = sliding_windows(x-offset_x, y-offset_y, w+2*offset_x, h+2*offset_y, k, k, s, s)
            ovs = bbox_overlaps(candidates, gt_bboxes)
            neg_bboxes = candidates[ovs.max(axis=1) < cfg.NONFACE_OVERLAP, :]
            if len(neg_bboxes) > 0:
                np.random.shuffle(neg_bboxes)
                for bbox in neg_bboxes[:cfg.NEG_PER_FACE]:
                    data = crop_face(img, bbox)
                    if data is None:
                        continue
                    this_negatives.append((data, bbox))
        random.shuffle(this_negatives)
        negatives.extend(this_negatives[:cfg.NEG_PER_FACE])
    # negatives from global image random crop
    max_num_from_fr = int(cfg.NEG_PER_IMAGE * cfg.NEG_FROM_FR_RATIO)
    if len(negatives) > max_num_from_fr:
        random.shuffle(negatives)
        negatives = negatives[:max_num_from_fr]
    bbox_neg = []
    range_x, range_y = width - cfg.NEG_MIN_SIZE, height - cfg.NEG_MIN_SIZE
    for i in range(cfg.NEG_PROPOSAL_RATIO * cfg.NEG_PER_IMAGE):
        x1, y1 = np.random.randint(range_x), np.random.randint(range_y)
        # randint's upper bound is exclusive, so x2 <= width and y2 <= height
        # always holds (the old debug `print 'hhhh'` check was dead code).
        w = h = np.random.randint(low=cfg.NEG_MIN_SIZE, high=min(width-x1, height-y1))
        x2, y2 = x1 + w, y1 + h
        bbox_neg.append([x1, y1, x2, y2])
    bbox_neg = np.asarray(bbox_neg, dtype=gt_bboxes.dtype)
    ovs = bbox_overlaps(bbox_neg, gt_bboxes)
    bbox_neg = bbox_neg[ovs.max(axis=1) < cfg.NONFACE_OVERLAP]
    np.random.shuffle(bbox_neg)
    if not cfg.NEG_FORCE_BALANCE:
        remain = cfg.NEG_PER_IMAGE - len(negatives)
    else:
        # balance ratio from face region and global crop
        remain = len(negatives) * (1. - cfg.NEG_FROM_FR_RATIO) / cfg.NEG_FROM_FR_RATIO
        remain = int(remain)
    bbox_neg = bbox_neg[:remain]
    for bbox in bbox_neg:
        data = crop_face(img, bbox)
        # BUG FIX: crop_face can return None; previously the None was
        # appended unchecked, unlike every other sampling loop above.
        if data is None:
            continue
        negatives.append((data, bbox))
    return negatives, positives, part
# =========== WIDER ================
def gen_wider():
    """Generate train/val LMDBs (positive, negative, part) from WIDER.

    For each split, fans the image list out to cfg.WORKER_N reader
    processes (wider_reader_func) that feed one writer process
    (wider_writer_func) through a bounded queue.
    """
    logger.info('loading WIDER')
    train_data, val_data = load_wider()
    logger.info('total images, train: %d, val: %d', len(train_data), len(val_data))
    # Each entry is (img_path, bboxes); count the ground-truth faces.
    # (Replaces the old `reduce` + lambda, which also breaks on Python 3
    # where `reduce` is no longer a builtin.)
    train_faces = sum(len(x[1]) for x in train_data)
    val_faces = sum(len(x[1]) for x in val_data)
    logger.info('total faces, train: %d, val: %d', train_faces, val_faces)

    def gen(data, db_names):
        # One split: wipe old DBs, fill per-worker input queues, run the
        # reader pool in parallel with a single writer.
        for db_name in db_names: remove_if_exists(db_name)
        logger.info('fill queues')
        q_in = [multiprocessing.Queue() for i in range(cfg.WORKER_N)]
        q_out = multiprocessing.Queue(1024)
        fill_queues(data, q_in)
        readers = [multiprocessing.Process(target=wider_reader_func, args=(q_in[i], q_out)) \
                   for i in range(cfg.WORKER_N)]
        for p in readers:
            p.start()
        writer = multiprocessing.Process(target=wider_writer_func, args=(q_out, db_names))
        writer.start()
        for p in readers:
            p.join()
        # All readers finished: tell the writer to flush and stop.
        q_out.put(('finish', []))
        writer.join()

    logger.info('writing train data, %d images', len(train_data))
    db_names = ['data/%snet_positive_train'%cfg.NET_TYPE,
                'data/%snet_negative_train'%cfg.NET_TYPE,
                'data/%snet_part_train'%cfg.NET_TYPE]
    gen(train_data, db_names)
    logger.info('writing val data, %d images', len(val_data))
    db_names = ['data/%snet_positive_val'%cfg.NET_TYPE,
                'data/%snet_negative_val'%cfg.NET_TYPE,
                'data/%snet_part_val'%cfg.NET_TYPE]
    gen(val_data, db_names)
def wider_reader_func(q_in, q_out):
    """Worker process: read WIDER images from q_in, run `proposal`, and
    push resized, serialized samples onto q_out.

    Each q_out message is (kind, payload) where kind is 'negative',
    'positive' or 'part' and payload holds raw uint8 image bytes plus,
    for positives/parts, float32 bbox-regression-target bytes.
    """
    input_size = cfg.NET_INPUT_SIZE[cfg.NET_TYPE]
    detector = get_detector()
    counter = 0
    # q_in is fully filled before the workers start and nothing is added
    # afterwards, so empty() is a safe termination test here.
    while not q_in.empty():
        item = q_in.get()
        counter += 1
        if counter % 1000 == 0:
            logger.info('%s reads %d', multiprocessing.current_process().name, counter)
        img_path, bboxes = item
        img = cv2.imread(img_path, cv2.IMREAD_COLOR)
        if img is None:
            logger.warning('read %s failed', img_path)
            continue
        negatives, positives, part = proposal(img, bboxes, detector)
        for data, _ in negatives:
            data = cv2.resize(data, (input_size, input_size))
            data = data.tostring() # string for lmdb, uint8
            q_out.put(('negative', [data]))
        for data, _, bbox_target in positives:
            data = cv2.resize(data, (input_size, input_size))
            data = data.tostring() # string for lmdb, uint8
            bbox_target = bbox_target.astype(np.float32).tostring() # float32
            q_out.put(('positive', [data, bbox_target]))
        for data, _, bbox_target in part:
            data = cv2.resize(data, (input_size, input_size))
            data = data.tostring() # string for lmdb, uint8
            bbox_target = bbox_target.astype(np.float32).tostring() # float32
            q_out.put(('part', [data, bbox_target]))
def wider_writer_func(q_out, db_names):
    """Writer process: drain q_out into three LMDBs (positive, negative, part).

    Samples are buffered in memory and flushed in shuffled batches of
    cfg.SHUFFLE_SIZE so each database is locally shuffled on disk.
    Terminates on a ('finish', ...) message, flushing the remaining
    buffers and storing the final counts under the 'size' key.
    """
    db_pos = lmdb.open(db_names[0], map_size=G16)
    db_neg = lmdb.open(db_names[1], map_size=G16)
    db_part = lmdb.open(db_names[2], map_size=G16)
    txn_pos = db_pos.begin(write=True)
    txn_neg = db_neg.begin(write=True)
    txn_part = db_part.begin(write=True)
    idx_pos, idx_neg, idx_part = 0, 0, 0
    q_pos, q_neg, q_part = [], [], []

    def fill(txn, items, idx, has_bbox=True):
        # Shuffle the buffered batch, then write it under sequential keys
        # '%08d_data' (and '%08d_bbox' for positives/parts).
        random.shuffle(items)
        for item in items:
            data_key = '%08d_data'%idx
            txn.put(data_key, item[0])
            if has_bbox:
                bbox_key = '%08d_bbox'%idx
                txn.put(bbox_key, item[1])
            idx += 1
        return idx

    counter = 0
    pos_counter, neg_counter, part_counter = 0, 0, 0
    while True:
        stat, item = q_out.get()
        counter += 1
        if counter % 10000 == 0:
            logger.info('writes %d positives, %d negatives, %d part', pos_counter, neg_counter, part_counter)
        if stat == 'positive':
            pos_counter += 1
            q_pos.append(item)
            if len(q_pos) >= cfg.SHUFFLE_SIZE:
                idx_pos = fill(txn_pos, q_pos, idx_pos, True)
                q_pos = []
        elif stat == 'negative':
            neg_counter += 1
            q_neg.append(item)
            if len(q_neg) >= cfg.SHUFFLE_SIZE:
                idx_neg = fill(txn_neg, q_neg, idx_neg, False)
                q_neg = []
        elif stat == 'part':
            part_counter += 1
            q_part.append(item)
            if len(q_part) >= cfg.SHUFFLE_SIZE:
                idx_part = fill(txn_part, q_part, idx_part, True)
                q_part = []
        else:
            # stat == 'finish': flush partial buffers and record sizes
            idx_pos = fill(txn_pos, q_pos, idx_pos, True)
            txn_pos.put('size', str(idx_pos))
            idx_neg = fill(txn_neg, q_neg, idx_neg, False)
            txn_neg.put('size', str(idx_neg))
            idx_part = fill(txn_part, q_part, idx_part, True)
            txn_part.put('size', str(idx_part))
            break
    txn_pos.commit()
    txn_neg.commit()
    txn_part.commit()
    db_pos.close()
    db_neg.close()
    db_part.close()
    logger.info('Finish')
# =========== CelebA ===============
def gen_celeba():
    """Generate train/val landmark LMDBs from the CelebA dataset.

    Same fan-out/fan-in structure as gen_wider: cfg.WORKER_N reader
    processes (celeba_reader_func) feed one writer (celeba_writer_func)
    through a bounded queue.
    """
    logger.info('loading CelebA')
    train_data, val_data = load_celeba()
    logger.info('total images, train: %d, val: %d', len(train_data), len(val_data))

    def gen(data, db_name):
        # One split: wipe the old DB, distribute work, run readers + writer.
        remove_if_exists(db_name)
        logger.info('fill queues')
        q_in = [multiprocessing.Queue() for i in range(cfg.WORKER_N)]
        q_out = multiprocessing.Queue(1024)
        fill_queues(data, q_in)
        readers = [multiprocessing.Process(target=celeba_reader_func, args=(q_in[i], q_out)) \
                   for i in range(cfg.WORKER_N)]
        for p in readers:
            p.start()
        writer = multiprocessing.Process(target=celeba_writer_func, args=(q_out, db_name))
        writer.start()
        for p in readers:
            p.join()
        # All readers done: tell the writer to flush and stop.
        q_out.put(('finish', []))
        writer.join()

    logger.info('writing train data, %d images', len(train_data))
    gen(train_data, 'data/%snet_landmark_train'%cfg.NET_TYPE)
    logger.info('writing val data, %d images', len(val_data))
    gen(val_data, 'data/%snet_landmark_val'%cfg.NET_TYPE)
def celeba_reader_func(q_in, q_out):
    """Worker process: crop positive faces around each CelebA annotation
    and emit (image bytes, normalized landmark bytes) samples on q_out.
    """
    def vertify_bbox(bbox, landmark):
        # Placeholder: currently accepts every bbox. The intent (per the
        # comment below) is to reject crops whose landmarks fall outside.
        return True
    input_size = cfg.NET_INPUT_SIZE[cfg.NET_TYPE]
    detector = get_detector()
    counter = 0
    # q_in is pre-filled before the workers start, so empty() is a safe
    # termination test here.
    while not q_in.empty():
        item = q_in.get()
        counter += 1
        if counter%1000 == 0:
            logger.info('%s reads %d', multiprocessing.current_process().name, counter)
        img_path, bbox, landmark = item
        img = cv2.imread(img_path, cv2.IMREAD_COLOR)
        if img is None:
            logger.warning('read %s failed', img_path)
            continue
        bbox = np.asarray(bbox, dtype=np.float32).reshape((1, -1))
        # Only the positives (second return value of proposal) are used.
        _1, bboxes, _2 = proposal(img, bbox, detector)
        np.random.shuffle(bboxes)
        for data, bbox, _ in bboxes[:cfg.LANDMARK_PER_FACE]:
            # make sure landmark points are in bbox
            landmark1 = landmark.reshape((-1, 2)).copy()
            if not vertify_bbox(bbox, landmark1):
                continue
            # normalize landmark to [0, 1] relative to the crop
            w, h = bbox[2]-bbox[0], bbox[3]-bbox[1]
            landmark1[:, 0] = (landmark1[:, 0] - bbox[0]) / w
            landmark1[:, 1] = (landmark1[:, 1] - bbox[1]) / h
            landmark1 = landmark1.reshape(-1)
            # format data
            data = cv2.resize(data, (input_size, input_size))
            data = data.tostring() # string for lmdb, uint8
            landmark1 = landmark1.astype(np.float32).tostring() # float32
            q_out.put(('data', [data, landmark1]))
def celeba_writer_func(q_out, db_name):
    """Writer process: drain landmark samples from q_out into one LMDB.

    Each sample is stored under '%08d_data' / '%08d_landmark' keys; the
    total count is stored under 'size' when a 'finish' message arrives.
    """
    map_size = G16
    db = lmdb.open(db_name, map_size=map_size)
    counter = 0
    with db.begin(write=True) as txn:
        while True:
            stat, item = q_out.get()
            if stat == 'finish':
                txn.put('size', str(counter))
                break
            data, landmark = item
            data_key = '%08d_data'%counter
            landmark_key = '%08d_landmark'%counter
            txn.put(data_key, data)
            txn.put(landmark_key, landmark)
            counter += 1
            if counter%1000 == 0:
                logger.info('writes %d landmark faces', counter)
    db.close()
    logger.info('Finish')
def test():
    """Smoke test: run `proposal` on one random WIDER image and dump the
    resulting crops under tmp/{pos,neg,part}/ for visual inspection.
    """
    os.system('rm -rf tmp/pos/*')
    os.system('rm -rf tmp/neg/*')
    os.system('rm -rf tmp/part/*')
    logger.info('Load WIDER')
    train_data, val_data = load_wider()
    img_path, bboxes = train_data[np.random.choice(len(train_data))]
    bboxes = np.asarray(bboxes)
    img = cv2.imread(img_path, cv2.IMREAD_COLOR)
    detector = JfdaDetector(cfg.PROPOSAL_NETS['r'])
    negatives, positives, part = proposal(img, bboxes, detector)
    logger.info('%d gt_bboxes', len(bboxes))
    logger.info('%d negatives, %d positives, %d part', len(negatives), len(positives), len(part))
    # BUG FIX: positives/part are (data, bbox, bbox_target) triples and
    # negatives are (data, bbox) pairs; the old unpacking assumed 2-tuples
    # for positives/part (ValueError) and bound the whole tuple as `data`
    # for negatives (cv2.imwrite failure).
    for i, (data, bbox, bbox_target) in enumerate(positives):
        cv2.imwrite('tmp/pos/%03d.jpg'%i, data)
    for i, (data, bbox) in enumerate(negatives):
        cv2.imwrite('tmp/neg/%03d.jpg'%i, data)
    for i, (data, bbox, bbox_target) in enumerate(part):
        cv2.imwrite('tmp/part/%03d.jpg'%i, data)
    cv2.imwrite('tmp/test.jpg', img)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--net', type=str, default='p', help='net type')
    # BUG FIX: the --celeba/--wider help strings were swapped. gen_celeba()
    # produces landmark data and gen_wider() produces face detection data.
    parser.add_argument('--celeba', action='store_true', help='generate landmark data')
    parser.add_argument('--wider', action='store_true', help='generate face data')
    parser.add_argument('--gpu', type=int, default=0, help='gpu device')
    parser.add_argument('--detect', action='store_true', help='use previous network detection')
    parser.add_argument('--worker', type=int, default=8, help='workers to process the data')
    parser.add_argument('--test', action='store_true', help='just simple test')
    args = parser.parse_args()
    # Push the CLI choices into the global config before running.
    cfg.GPU_ID = args.gpu
    cfg.NET_TYPE = args.net
    cfg.USE_DETECT = args.detect
    cfg.WORKER_N = args.worker
    if args.test:
        test()
    if args.wider:
        gen_wider()
    if args.celeba:
        gen_celeba()
|
HiwinRA605_socket_ros_test_20190626130727.py | #!/usr/bin/env python3
# license removed for brevity
#接收策略端命令 用Socket傳輸至控制端電腦
import socket
##多執行序
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import enum
Socket = 0
data = '0' #設定傳輸資料初始值
Arm_feedback = 1 #假設手臂忙碌
state_feedback = 0
NAME = 'socket_server'
client_response = 0 #回傳次數初始值
point_data_flag = False
arm_mode_flag = False
speed_mode_flag = False
Socket_sent_flag = False
##------------class pos-------
class point():
    """Mutable container for an arm pose: position (x, y, z) plus
    orientation (pitch, roll, yaw)."""
    def __init__(self, x, y, z, pitch, roll, yaw):
        self.x = x
        self.y = y
        self.z = z
        self.pitch = pitch
        self.roll = roll
        self.yaw = yaw

# Global current target pose; overwritten by the point_data service handler.
pos = point(0,36.8,11.35,-90,0,0)
##------------class socket_cmd---------
class socket_cmd():
    """Container for one arm command (gripper, velocity, motion mode, ...).

    NOTE(review): the rest of this module assigns these attributes on the
    *class itself* (e.g. `socket_cmd.action = ...`), never on an instance,
    so this __init__ is effectively unused.
    """
    def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
        self.grip = grip
        self.setvel = setvel
        self.ra = ra
        self.delay = delay
        self.setboth = setboth
        self.action = action
        self.Speedmode = Speedmode
##-----------switch define------------##
class switch(object):
    """C-style switch/case helper used as `for case in switch(value):`.

    Iterating yields the `match` method exactly once; `case(x)` returns
    True when `value` matches (or always, after a previous match, to
    emulate fall-through).
    """
    def __init__(self, value):
        self.value = value
        self.fall = False

    def __iter__(self):
        """Yield the match method once, then stop."""
        yield self.match
        # BUG FIX: the original explicitly raised StopIteration here. Under
        # PEP 479 (Python 3.7+) that becomes a RuntimeError whenever a
        # `for case in switch(...)` loop finds no matching case and asks
        # for a second item. Returning normally ends the generator cleanly.

    def match(self, *args):
        """Indicate whether or not to enter a case suite."""
        if self.fall or not args:
            return True
        elif self.value in args: # changed for v1.5, see below
            self.fall = True
            return True
        else:
            return False
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
    """Forward the arm's busy/ready state to the ROS 'arm_state' service.

    Args:
        Arm_state: integer state code (0 ready, 1 busy, 6 strategy done).
    Returns:
        The service response, or None if the call failed.
    """
    global state_feedback
    rospy.wait_for_service('arm_state')
    try:
        Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
        state_feedback = Arm_state_client(Arm_state)
        #pos_feedback_times = pos_feedback.response
        return state_feedback
    except rospy.ServiceException as e:
        print ("Service call failed: %s"%e)
##----------socket sent data flag-------------
def socket_client_sent_flag(Sent_flag):
    """Forward the TCP sent-confirmation flag to the ROS 'sent_flag' service.

    Args:
        Sent_flag: bool, True when the control PC confirmed the last packet.
    Returns:
        The service response, or None if the call failed.
    """
    global sent_feedback
    rospy.wait_for_service('sent_flag')
    try:
        Sent_flag_client = rospy.ServiceProxy('sent_flag', sent_flag)
        sent_feedback = Sent_flag_client(Sent_flag)
        #pos_feedback_times = pos_feedback.response
        return sent_feedback
    except rospy.ServiceException as e:
        print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server 端-------
def point_data(req): ## receives the pose data sent by the strategy side
    """ROS service handler: store the requested pose into the global `pos`.

    NOTE(review): the fields are stored as *strings* ('%s' % req.x), not
    numbers — presumably because they are later interpolated into the TCP
    command string; confirm before changing.

    Returns:
        The running count of received pose requests.
    """
    global client_response,point_data_flag
    pos.x = '%s'%req.x
    pos.y = '%s'%req.y
    pos.z = '%s'%req.z
    pos.pitch = '%s'%req.pitch
    pos.roll = '%s'%req.roll
    pos.yaw = '%s'%req.yaw
    point_data_flag = True
    client_response = client_response + 1
    #Socket_command()
    return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req): ## receives the arm-mode data sent by the strategy side
    """ROS service handler: store the requested arm mode on the socket_cmd
    class attributes, then immediately dispatch it over TCP via
    Socket_command(). Always returns 1 as acknowledgement.
    """
    global arm_mode_flag
    socket_cmd.action = int('%s'%req.action)
    socket_cmd.grip = int('%s'%req.grip)
    socket_cmd.ra = int('%s'%req.ra)
    socket_cmd.setvel = int('%s'%req.vel)
    socket_cmd.setboth = int('%s'%req.both)
    arm_mode_flag = True
    Socket_command()
    print("cmd")
    return(1)
##-------Arm Speed Mode------------###
def Speed_Mode(req): ## receives the arm speed-mode data sent by the strategy side
    """ROS service handler: record the requested speed mode (fast/safe).

    The mode is only stored here; it is sent to the arm by the next
    Socket_command() dispatch. Always returns 1 as acknowledgement.
    """
    global speed_mode_flag
    socket_cmd.Speedmode = int('%s'%req.Speedmode)
    speed_mode_flag = True
    #Socket_command()
    return(1)
# def Grip_Mode(req): ##接收策略端傳送夾爪動作資料
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server(): ## create the server node
    """Register the ROS node and the three command services, then spin.

    Blocks until the node is shut down.
    """
    rospy.init_node(NAME)
    a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
    s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
    b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
    #c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
    print ("Ready to connect")
    rospy.spin() ## spin one
##------------server 端 end-------
##----------socket 封包傳輸--------------##
##---------------socket 傳輸手臂命令-----------------
def Socket_command():
    """Translate the last service-received command into a TCP packet and send it.

    Dispatches on socket_cmd.action (PtoP / Line / SetVel / Delay / Mode)
    using the switch helper, builds the packet via the TCP module, resets
    the action to the idle value 5 and sends the encoded packet.

    NOTE(review): indentation reconstructed from a whitespace-mangled
    source; `data` would be unbound if no case matched — confirm that
    socket_cmd.action is always one of the handled values.
    """
    global arm_mode_flag,speed_mode_flag,point_data_flag
    if arm_mode_flag == True:
        # consume the pending flags before dispatching
        arm_mode_flag = False
        speed_mode_flag = False
        point_data_flag = False
        for case in switch(socket_cmd.action):
            #-------PtP Mode--------
            if case(Taskcmd.Action_Type.PtoP):
                # inner switch selects position / euler / both control
                for case in switch(socket_cmd.setboth):
                    if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                break
            #-------Line Mode--------
            if case(Taskcmd.Action_Type.Line):
                for case in switch(socket_cmd.setboth):
                    if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                        break
                break
            #------- set arm speed --------
            if case(Taskcmd.Action_Type.SetVel):
                data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
                break
            #------- set arm delay time --------
            if case(Taskcmd.Action_Type.Delay):
                data = TCP.SetDelay(socket_cmd.grip,0)
                break
            #------- set arm fast / safe mode --------
            if case(Taskcmd.Action_Type.Mode):
                data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
                break
        socket_cmd.action= 5 ## reset to the idle mode state
        Socket.send(data.encode('utf-8'))# send over the socket (str must be encoded)
##-----------socket client--------
def socket_client():
    """Connect to the arm-control PC over TCP and poll its status forever.

    Runs in its own thread. Each received packet encodes the arm state in
    byte [2] ('F' ready / 'T' busy / '6' strategy done) and the
    sent-confirmation flag in byte [4] ('0' / '1'); both are forwarded to
    the ROS side through the arm_state / sent_flag services.

    NOTE(review): indentation reconstructed from a whitespace-mangled
    source — in particular the placement of the feedback-check ACK inside
    the receive loop should be confirmed against the original.
    """
    global Socket,Arm_feedback,data,Socket_sent_flag
    try:
        Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
        #s.connect(('192.168.1.102', 8080))#iclab computer
    except socket.error as msg:
        print(msg)
        sys.exit(1)
    print('Connection has been successful')
    print(Socket.recv(1024))
    # original prompt: press 1 to start transmitting, 3 to quit
    #start_input=int(input('...'))
    start_input = 1
    if start_input==1:
        while 1:
            feedback_str = Socket.recv(1024)
            # the arm side reports the arm state
            if str(feedback_str[2]) == '70':# 'F': arm is Ready for the next motion command
                Arm_feedback = 0
                socket_client_arm_state(Arm_feedback)
                #print("isbusy false")
            if str(feedback_str[2]) == '84':# 'T': arm busy, cannot run the next motion command
                Arm_feedback = 1
                socket_client_arm_state(Arm_feedback)
                #print("isbusy true")
            if str(feedback_str[2]) == '54':# '6': strategy finished
                Arm_feedback = 6
                socket_client_arm_state(Arm_feedback)
                print("shutdown")
            # confirm the sent flag
            if str(feedback_str[4]) == '48':# returned 0: false
                Socket_sent_flag = False
                socket_client_sent_flag(Socket_sent_flag)
            if str(feedback_str[4]) == '49':# returned 1: true
                print(111111111111)
                Socket_sent_flag = True
                socket_client_sent_flag(Socket_sent_flag)
            # acknowledge the feedback packet to the control PC
            feedback_check = TCP.feedback_check()
            Socket.send(feedback_check.encode('utf-8'))# send over the socket (str must be encoded)
            if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
                break
    if start_input == 3:
        rospy.on_shutdown(myhook)
    rospy.on_shutdown(myhook)
    Socket.close()
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
## 多執行緒
def thread_test():
    """Entry point for the worker thread: runs the blocking socket client."""
    socket_client()
## 多執行序 end
def myhook():
    """Shutdown callback registered via rospy.on_shutdown."""
    print("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5## reset to the initial (idle) mode state
    # run the TCP client in a background thread while the ROS services
    # block in the main thread
    t = threading.Thread(target=thread_test)
    t.start() # start the worker thread
    socket_server()
    t.join()
# Editor shortcut notes (VS Code):
# Ctrl+K Ctrl+C  add line comment
# Ctrl+K Ctrl+U  remove line comment
# Ctrl+] / Ctrl+[  indent / outdent line
classify_video.py | import cv2
import tensorflow as tf
import argparse
import threading
import multiprocessing
import time
import chunk_evaluator as evaluator
import slice_manager
import progressbar
def load_labels(labels_location='retrained_labels.txt'):
    """Read the retrained label file and return one stripped label per line."""
    proto_as_ascii_lines = tf.gfile.GFile(labels_location).readlines()
    return [line.rstrip() for line in proto_as_ascii_lines]
def load_vidcap(file_location):
    """Open the video at `file_location` and return a cv2.VideoCapture."""
    return cv2.VideoCapture(file_location)
def get_video_fps(vidcap):
    """Return the video's frame rate, rounded to the nearest integer."""
    return int(round(vidcap.get(cv2.CAP_PROP_FPS)))
def get_total_video_frames(vidcap):
    """Return the total frame count reported by the video container."""
    return int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
def skip_frames(vidcap, num_skip):
    """Read and discard frames, returning the last frame read.

    Args:
        vidcap: an opened capture — anything with a ``.read()`` method
            returning ``(success, frame)``.
        num_skip: number of frames to consume; floats are truncated, since
            the caller computes ``fps / 4`` which is a float on Python 3.

    Returns:
        The last frame read, or None when ``num_skip`` truncates to <= 0.
    """
    frame = None
    # int() guards against a float num_skip, which would make range()
    # raise TypeError on Python 3.
    for _ in range(int(num_skip)):
        _, frame = vidcap.read()
    return frame
def process_frame(frame_image, graph_location, labels):
    """Classify one JPEG-encoded frame with a retrained TF1 graph.

    Args:
        frame_image: JPEG bytes of the frame.
        graph_location: path to a frozen GraphDef (.pb) file.
        labels: list of class names aligned with the graph's output indices.

    Returns:
        Dict mapping label -> score, or None if the session run raised
        (the exception is printed and swallowed).
    """
    graph = tf.Graph()
    graph_def = tf.GraphDef()
    with open(graph_location, "rb") as f:
        graph_def.ParseFromString(f.read())
    with graph.as_default():
        tf.import_graph_def(graph_def)
    # Nodes created by tf.import_graph_def get the "import/" prefix.
    input_name = "import/input"
    output_name = "import/final_result"
    input_operation = graph.get_operation_by_name(input_name)
    output_operation = graph.get_operation_by_name(output_name)
    with tf.Session(graph=graph) as sess:
        image_reader = tf.image.decode_jpeg(frame_image, channels=3, name='jpeg_reader')
        float_caster = tf.cast(image_reader, tf.float32)
        dims_expander = tf.expand_dims(float_caster, 0)
        resized = tf.image.resize_bilinear(dims_expander, [224, 224])
        # Scales pixels to [0, 1] (mean 0, std 255).  NOTE(review): retrained
        # MobileNet/Inception graphs often expect mean/std 128 — confirm these
        # constants match the graph's training preprocessing.
        normalized = tf.divide(tf.subtract(resized, [0]), [255])
        try:
            predictions = sess.run(normalized)
            results = sess.run(output_operation.outputs[0],
                               {input_operation.outputs[0]: predictions})
            # NOTE(review): for a (1, n) result, [-5:] slices *rows* (there is
            # only one), not scores, so top_k[0] contains every class index in
            # ascending-score order and result_dict ends up holding all
            # labels, not the top 5 — confirm intent.
            top_k = results.argsort()[-5:][::-1]
            result_dict = {}
            for i in top_k[0]:
                result_dict[labels[i]] = results[0][i]
            return result_dict
        except Exception as e:
            # Best-effort: log the failure and fall through, returning None.
            print(e)
def load_and_process(frame_num, frame_to_load, all_frames):
    """Worker: JPEG-encode a frame, classify it, store the result by index.

    Relies on the module-level `graph_file` and `labels` set in __main__.
    Dict item assignment is atomic under the GIL, so concurrent workers can
    share `all_frames`.

    Args:
        frame_num: frame index used as the key into `all_frames`.
        frame_to_load: decoded frame (numpy image array).
        all_frames: shared dict receiving {frame_num: classification}.
    """
    _, encoded_frame = cv2.imencode('.jpg', frame_to_load)
    if encoded_frame.any():
        # .tobytes() replaces the deprecated numpy .tostring() (removed in
        # recent numpy releases); the produced bytes are identical.
        classification = process_frame(encoded_frame.tobytes(), graph_file, labels)
        all_frames[frame_num] = classification
if __name__ == "__main__":
    # Defaults, overridable from the command line below.
    label_file = 'retrained_labels.txt'
    graph_file = 'retrained_graph.pb'
    video_file = ''
    parser = argparse.ArgumentParser()
    parser.add_argument('--file', help="Video to be processed")
    parser.add_argument('--graph_file', help='The graph file used to process the video file')
    parser.add_argument('--label_file', help='The label file used to process the video file')
    args = parser.parse_args()
    if args.file:
        video_file = args.file
    if args.graph_file:
        graph_file = args.graph_file
    if args.label_file:
        label_file = args.label_file
    if video_file == '':
        raise Exception("Please provide a video file to process")
    labels = load_labels(label_file)
    vidcap = load_vidcap(video_file)
    fps = get_video_fps(vidcap)
    success, frame = vidcap.read()
    # BUG FIX: fps / 4 is a float on Python 3, which broke skip_frames()
    # (range() needs an int) and made `count` a float dict key.  Sample ~4
    # frames per second, advancing by at least one frame.
    frame_skip = max(1, fps // 4)
    total_frames = get_total_video_frames(vidcap)
    count = 0
    frames = {}
    threads = []
    frame_types = {}
    start = time.time()
    manager = slice_manager.SliceManager()
    with progressbar.ProgressBar(max_value=total_frames) as bar:
        while success:
            bar.update(count)
            # One worker thread per CPU; each gets its own frame/count pair.
            for i in range(0, multiprocessing.cpu_count()):
                process = threading.Thread(target=load_and_process, args=(count, frame, frames))
                process.start()
                threads.append(process)
                frame = skip_frames(vidcap, frame_skip)
                count += frame_skip
                # BUG FIX: `success` was never updated, so the loop never
                # terminated at end of video (workers then crashed on a None
                # frame).  Stop once the capture runs out of frames.
                if frame is None:
                    success = False
                    break
            for ii in range(len(threads)):
                threads[ii].join()
            evaluator.evaluate_chunk(frames, manager, count)
            frames = {}
            threads = []
            if manager.should_add_slice(count):
                manager.add_slice(manager.current_slice_start, count)
    print("Finished evaluating")
    print(manager.slices)
    end = time.time()
    print("Total time elapsed processing video: %f" %(end - start))
|
db_import_multiplexer.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""A loading-only EventMultiplexer that actually populates a SQLite DB."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import os
import threading
import time
import six
from six.moves import queue, xrange # pylint: disable=redefined-builtin
from tensorboard import data_compat
from tensorboard.backend.event_processing import directory_watcher
from tensorboard.backend.event_processing import event_file_loader
from tensorboard.backend.event_processing import io_wrapper
from tensorboard.backend.event_processing import sqlite_writer
from tensorboard.compat import tf
class DbImportMultiplexer(object):
  """A loading-only `EventMultiplexer` that populates a SQLite DB.

  This EventMultiplexer only loads data; it provides no read APIs.
  """

  def __init__(self,
               db_connection_provider,
               purge_orphaned_data,
               max_reload_threads,
               use_import_op):
    """Constructor for `DbImportMultiplexer`.

    Args:
      db_connection_provider: Provider function for creating a DB connection.
      purge_orphaned_data: Whether to discard any events that were "orphaned" by
        a TensorFlow restart.
      max_reload_threads: The max number of threads that TensorBoard can use
        to reload runs. Each thread reloads one run at a time. If not provided,
        reloads runs serially (one after another).
      use_import_op: If True, use TensorFlow's import_event() op for imports,
        otherwise use TensorBoard's own sqlite ingestion logic.
    """
    tf.logging.info('DbImportMultiplexer initializing')
    self._db_connection_provider = db_connection_provider
    self._purge_orphaned_data = purge_orphaned_data
    self._max_reload_threads = max_reload_threads
    self._use_import_op = use_import_op
    self._event_sink = None  # created lazily on first Reload()
    self._run_loaders = {}   # run subdir -> _RunLoader
    if self._purge_orphaned_data:
      tf.logging.warning(
          '--db_import does not yet support purging orphaned data')
    conn = self._db_connection_provider()
    # Extract the file path of the DB from the DB connection.
    rows = conn.execute('PRAGMA database_list').fetchall()
    db_name_to_path = {row[1]: row[2] for row in rows}
    self._db_path = db_name_to_path['main']
    tf.logging.info('DbImportMultiplexer using db_path %s', self._db_path)
    # Set the DB in WAL mode so reads don't block writes.
    conn.execute('PRAGMA journal_mode=wal')
    conn.execute('PRAGMA synchronous=normal')  # Recommended for WAL mode
    sqlite_writer.initialize_schema(conn)
    tf.logging.info('DbImportMultiplexer done initializing')

  def _CreateEventSink(self):
    # Choose the ingestion backend selected at construction time.
    if self._use_import_op:
      return _ImportOpEventSink(self._db_path)
    else:
      return _SqliteWriterEventSink(self._db_connection_provider)

  def AddRunsFromDirectory(self, path, name=None):
    """Load runs from a directory; recursively walks subdirectories.

    If path doesn't exist, no-op. This ensures that it is safe to call
    `AddRunsFromDirectory` multiple times, even before the directory is made.

    Args:
      path: A string path to a directory to load runs from.
      name: Optional, specifies a name for the experiment under which the
        runs from this directory hierarchy will be imported. If omitted, the
        path will be used as the name.

    Raises:
      ValueError: If the path exists and isn't a directory.
    """
    tf.logging.info('Starting AddRunsFromDirectory: %s (as %s)', path, name)
    for subdir in io_wrapper.GetLogdirSubdirectories(path):
      tf.logging.info('Processing directory %s', subdir)
      if subdir not in self._run_loaders:
        tf.logging.info('Creating DB loader for directory %s', subdir)
        names = self._get_exp_and_run_names(path, subdir, name)
        experiment_name, run_name = names
        self._run_loaders[subdir] = _RunLoader(
            subdir=subdir,
            experiment_name=experiment_name,
            run_name=run_name)
    tf.logging.info('Done with AddRunsFromDirectory: %s', path)

  def Reload(self):
    """Load events from every detected run."""
    tf.logging.info('Beginning DbImportMultiplexer.Reload()')
    # Defer event sink creation until needed; this ensures it will only exist in
    # the thread that calls Reload(), since DB connections must be thread-local.
    if not self._event_sink:
      self._event_sink = self._CreateEventSink()
    # Use collections.deque() for speed when we don't need blocking since it
    # also has thread-safe appends/pops.
    loader_queue = collections.deque(six.itervalues(self._run_loaders))
    loader_delete_queue = collections.deque()
    def batch_generator():
      # Drain loaders one at a time; loaders whose directory vanished are
      # queued for deletion, I/O errors are logged and the loader skipped.
      while True:
        try:
          loader = loader_queue.popleft()
        except IndexError:
          return
        try:
          for batch in loader.load_batches():
            yield batch
        except directory_watcher.DirectoryDeletedError:
          loader_delete_queue.append(loader)
        except (OSError, IOError) as e:
          tf.logging.error('Unable to load run %r: %s', loader.subdir, e)
    num_threads = min(self._max_reload_threads, len(self._run_loaders))
    if num_threads <= 1:
      tf.logging.info('Importing runs serially on a single thread')
      for batch in batch_generator():
        self._event_sink.write_batch(batch)
    else:
      # Fan out producers onto daemon threads; the single consumer (this
      # thread) owns the event sink, keeping DB access thread-local.
      output_queue = queue.Queue()
      sentinel = object()
      def producer():
        try:
          for batch in batch_generator():
            output_queue.put(batch)
        finally:
          output_queue.put(sentinel)
      tf.logging.info('Starting %d threads to import runs', num_threads)
      for i in xrange(num_threads):
        thread = threading.Thread(target=producer, name='Loader %d' % i)
        thread.daemon = True
        thread.start()
      num_live_threads = num_threads
      while num_live_threads > 0:
        output = output_queue.get()
        if output == sentinel:
          num_live_threads -= 1
          continue
        self._event_sink.write_batch(output)
    for loader in loader_delete_queue:
      tf.logging.warning('Deleting loader %r', loader.subdir)
      del self._run_loaders[loader.subdir]
    tf.logging.info('Finished with DbImportMultiplexer.Reload()')

  def _get_exp_and_run_names(self, path, subdir, experiment_name_override=None):
    # With an explicit experiment name, the run name is the whole relative
    # path; otherwise the first path component is the experiment and the
    # remainder (or '.') is the run.
    if experiment_name_override is not None:
      return (experiment_name_override, os.path.relpath(subdir, path))
    sep = io_wrapper.PathSeparator(path)
    path_parts = os.path.relpath(subdir, path).split(sep, 1)
    experiment_name = path_parts[0]
    run_name = path_parts[1] if len(path_parts) == 2 else '.'
    return (experiment_name, run_name)
# Struct holding a list of tf.Event serialized protos along with metadata about
# the associated experiment and run.
_EventBatch = collections.namedtuple('EventBatch',
                                     ['events', 'experiment_name', 'run_name'])
class _RunLoader(object):
"""Loads a single run directory in batches."""
_BATCH_COUNT = 5000
_BATCH_BYTES = 2**20 # 1 MiB
def __init__(self, subdir, experiment_name, run_name):
"""Constructs a `_RunLoader`.
Args:
subdir: string, filesystem path of the run directory
experiment_name: string, name of the run's experiment
run_name: string, name of the run
"""
self._subdir = subdir
self._experiment_name = experiment_name
self._run_name = run_name
self._directory_watcher = directory_watcher.DirectoryWatcher(
subdir,
event_file_loader.RawEventFileLoader,
io_wrapper.IsTensorFlowEventsFile)
@property
def subdir(self):
return self._subdir
def load_batches(self):
"""Returns a batched event iterator over the run directory event files."""
event_iterator = self._directory_watcher.Load()
while True:
events = []
event_bytes = 0
start = time.time()
for event_proto in event_iterator:
events.append(event_proto)
event_bytes += len(event_proto)
if len(events) >= self._BATCH_COUNT or event_bytes >= self._BATCH_BYTES:
break
elapsed = time.time() - start
tf.logging.debug('RunLoader.load_batch() yielded in %0.3f sec for %s',
elapsed, self._subdir)
if not events:
return
yield _EventBatch(
events=events,
experiment_name=self._experiment_name,
run_name=self._run_name)
class _EventSink(object):
  """Abstract sink for batches of serialized tf.Event data."""

  # NOTE(review): `__metaclass__` is the Python 2 spelling and is ignored on
  # Python 3, where @abc.abstractmethod is therefore not enforced — confirm
  # the targeted Python version.
  __metaclass__ = abc.ABCMeta

  @abc.abstractmethod
  def write_batch(self, event_batch):
    """Writes the given event batch to the sink.

    Args:
      event_batch: an _EventBatch of event data.
    """
    raise NotImplementedError()
class _ImportOpEventSink(_EventSink):
  """Implementation of EventSink using TF's import_event() op."""

  def __init__(self, db_path):
    """Constructs an ImportOpEventSink.

    Args:
      db_path: string, filesystem path of the DB file to open
    """
    self._db_path = db_path
    # Cache of (experiment, run) -> writer closure so the graph/session is
    # built only once per run.
    self._writer_fn_cache = {}

  def _get_writer_fn(self, event_batch):
    # Build (or fetch) a closure that imports one serialized event proto
    # into the DB for this batch's experiment/run.
    key = (event_batch.experiment_name, event_batch.run_name)
    if key in self._writer_fn_cache:
      return self._writer_fn_cache[key]
    with tf.Graph().as_default():
      placeholder = tf.placeholder(shape=[], dtype=tf.string)
      writer = tf.contrib.summary.create_db_writer(
          self._db_path,
          experiment_name=event_batch.experiment_name,
          run_name=event_batch.run_name)
      with writer.as_default():
        # TODO(nickfelt): running import_event() one record at a time is very
        # slow; we should add an op that accepts a vector of records.
        import_op = tf.contrib.summary.import_event(placeholder)
      session = tf.Session()
      session.run(writer.init())
      def writer_fn(event_proto):
        session.run(import_op, feed_dict={placeholder: event_proto})
    self._writer_fn_cache[key] = writer_fn
    return writer_fn

  def write_batch(self, event_batch):
    """Imports every event in `event_batch` via the cached writer closure."""
    start = time.time()
    writer_fn = self._get_writer_fn(event_batch)
    for event_proto in event_batch.events:
      writer_fn(event_proto)
    elapsed = time.time() - start
    tf.logging.debug(
        'ImportOpEventSink.WriteBatch() took %0.3f sec for %s events', elapsed,
        len(event_batch.events))
class _SqliteWriterEventSink(_EventSink):
  """Implementation of EventSink using SqliteWriter."""

  def __init__(self, db_connection_provider):
    """Constructs a SqliteWriterEventSink.

    Args:
      db_connection_provider: Provider function for creating a DB connection.
    """
    self._writer = sqlite_writer.SqliteWriter(db_connection_provider)

  def write_batch(self, event_batch):
    """Parses each serialized event and writes all summaries in one call."""
    start = time.time()
    tagged_data = {}  # tag -> sqlite_writer.TagData accumulated per batch
    for event_proto in event_batch.events:
      event = tf.Event.FromString(event_proto)
      self._process_event(event, tagged_data)
    if tagged_data:
      self._writer.write_summaries(
          tagged_data,
          experiment_name=event_batch.experiment_name,
          run_name=event_batch.run_name)
    elapsed = time.time() - start
    tf.logging.debug(
        'SqliteWriterEventSink.WriteBatch() took %0.3f sec for %s events',
        elapsed, len(event_batch.events))

  def _process_event(self, event, tagged_data):
    """Processes a single tf.Event and records it in tagged_data."""
    event_type = event.WhichOneof('what')
    # Handle the most common case first.
    if event_type == 'summary':
      for value in event.summary.value:
        value = data_compat.migrate_value(value)
        # The `values` list from an existing TagData entry is mutated in
        # place, so only the first event's metadata is stored below.
        tag, metadata, values = tagged_data.get(value.tag, (None, None, []))
        values.append((event.step, event.wall_time, value.tensor))
        if tag is None:
          # Store metadata only from the first event.
          tagged_data[value.tag] = sqlite_writer.TagData(
              value.tag, value.metadata, values)
    elif event_type == 'file_version':
      pass  # TODO: reject file version < 2 (at loader level)
    elif event_type == 'session_log':
      if event.session_log.status == tf.SessionLog.START:
        pass  # TODO: implement purging via sqlite writer truncation method
    elif event_type in ('graph_def', 'meta_graph_def'):
      pass  # TODO: support graphs
    elif event_type == 'tagged_run_metadata':
      pass  # TODO: support run metadata
|
read_from_device.py |
import serial
import time
import platform
import threading
import zephyr.message
import zephyr.protocol
import zephyr.signal
import zephyr.events
import zephyr.delayed_stream
import zephyr.testing
import zephyr.visualization
def callback(value_name, value):
    """Print formatted acceleration samples; ignore other value streams.

    Args:
        value_name: name of the signal stream, e.g. "acceleration".
        value: iterable of float samples for that stream.
    """
    if value_name == "acceleration":
        # BUG FIX: `print [...]` was Python 2 statement syntax; on Python 3
        # it subscripts the print function and raises TypeError.
        print(["%010s" % ("%1.3f" % v) for v in value])
def main():
    """Connect to a BioHarness over serial and visualize its signals live."""
    zephyr.configure_root_logger()
    serial_port_dict = {"Darwin": "/dev/cu.BHBHT001931-iSerialPort1",
                        "Windows": 25}
    serial_port = serial_port_dict[platform.system()]
    ser = serial.Serial(serial_port)
    # NOTE(review): MeasurementCollector, BioHarnessSignalAnalysis,
    # BioHarnessPacketHandler, MessagePayloadParser and DelayedRealTimeStream
    # are referenced unqualified but never imported — only the zephyr
    # submodules are.  Import them from their defining modules (or qualify
    # them) before running.
    collector = MeasurementCollector()
    rr_signal_analysis = BioHarnessSignalAnalysis([], [collector.handle_event])
    signal_packet_handlers = [collector.handle_signal, rr_signal_analysis.handle_signal]
    signal_packet_handler = BioHarnessPacketHandler(signal_packet_handlers, [collector.handle_event])
    payload_parser = MessagePayloadParser(signal_packet_handler.handle_packet)
    delayed_stream_thread = DelayedRealTimeStream(collector, callback)
    protocol = zephyr.protocol.BioHarnessProtocol(ser, payload_parser.handle_message)
    protocol.enable_periodic_packets()
    delayed_stream_thread.start()
    threading.Thread(target=protocol.read_and_handle_forever).start()
    # BUG FIX: the original passed the undefined name `signal_collector`;
    # the collector created above is the data source for the visualization.
    visualization = zephyr.visualization.VisualizationWindow(collector)
    visualization.run()
    # BUG FIX: `stream_thread` was undefined; stop and join the delayed
    # stream thread started above once the visualization window closes.
    delayed_stream_thread.terminate()
    delayed_stream_thread.join()
if __name__ == "__main__":
main()
|
themes.py | from tkinter import *
from tkinter import ttk, messagebox
from halo import Halo
from threading import Thread
from ttkthemes import themed_tk as tk
from time import sleep
# Themed Tk root window, 800x600, placed at screen offset +500+150.
root = tk.ThemedTk()
root.geometry('800x600+500+150')
# Names of all installed ttk themes; cycled by __change_theme().
themes = root.get_themes()
def __change_theme():
    """Cycle through every installed ttk theme forever, 3 s per theme.

    Runs on a worker thread; when Tk raises RuntimeError (the root window
    was destroyed), exit() raises SystemExit which ends only this thread.
    """
    while True:
        for theme in themes:
            try:
                root.set_theme(theme)
                lbltheme.config(text=f'Theme: {theme}')
                sleep(3)
            except RuntimeError:
                exit(0)
# Demo widgets so each theme's styling is visible while cycling.
lbltheme = ttk.Label(root, font=['Vendara', '16'])
btn1 = ttk.Button(root, text='Answer')
btn2 = ttk.Button(root, text='Decline')
btn3 = ttk.Button(root, text='Ignore')
btn4 = ttk.Button(root, text='Block')
btn5 = ttk.Button(root, text='Report')
lbltheme.pack()
btn1.pack(ipady=5, pady=5)
btn2.pack(ipady=5, pady=5)
btn3.pack(ipady=5, pady=5)
btn4.pack(ipady=5, pady=5)
btn5.pack(ipady=5, pady=5)
def start():
    """Start the demo: console spinner, theme-cycling thread, Tk main loop."""
    spinner = Halo(text='App is running', placement='right', text_color='green' , color='cyan')
    spinner.animation  # NOTE(review): bare attribute access — confirm intent
    ct = Thread(target=__change_theme)
    ct.start()
    t = Thread(target=lambda:spinner.start())
    t.start()
    root.mainloop()
    # BUG FIX: the original looped on `if root.quit:` — root.quit is a bound
    # method, hence always truthy.  mainloop() has returned here (the window
    # was closed), so simply stop the spinner and exit.
    spinner.stop()
    exit(0)
start() |
callback.py | import json
import pycurl
from io import BytesIO
import multiprocessing
from urllib.parse import urlencode, unquote
from db_report.config import cmd_args, logger, translate as _
from bottle import route, run, request
@route('/', method='POST')
def bottle_callback():
    """
    Callback handler
    :return: response
    :rtype: str
    """
    # Log the raw (url-decoded) POST body for debugging, then acknowledge.
    logger.debug('%s: \'%s\'' % (_('callback received data'), unquote(request.body.read().decode())))
    return '{"result": "Ok"}'
# Background HTTP server process that receives posted callbacks; started by
# the caller and terminated in callback_terminate().
server = multiprocessing.Process(target=lambda: run(host='localhost', port=8080, quiet=True))
def curl_request(url, raw=False, head=None, post_data=None, post_file=None, post_upload=None, credentials=None,
                 raw_data=False, patch=False):
    """
    Curl request
    :param url: request url
    :type url: str
    :param raw: return raw result
    :type raw: bool
    :param head: request http headers
    :type head: list
    :param post_data: post parameters
    :type post_data: dict
    :param post_file: name of file with post data in format p0=v0&p1=v1&...
    :type post_file: str
    :param post_upload: files for upload
    :type post_upload: list
    :param credentials: HTTP basic authentication credentials
    :type credentials: str
    :param raw_data: send raw (not urlencoded) data
    :type raw_data: bool
    :param patch: method PATCH
    :type patch: bool
    :return: response
    :rtype: dict
    """
    post_fields = None
    if post_file is not None:
        try:
            # NOTE(review): the file handle is never closed, and urlencode()
            # is applied to each value here *and* to the whole dict again
            # below (double encoding); urlencode() on a plain string also
            # raises TypeError — quote() was probably intended.  Confirm.
            post_data = {parm.split('=')[0]: urlencode(parm.split('=')[1]) for parm in
                         open(post_file, 'r').read().split('&')}
        except FileNotFoundError:
            print('File %s not found' % post_file)
            return None
    c = pycurl.Curl()
    buf = BytesIO()
    c.setopt(pycurl.URL, url)
    c.setopt(pycurl.WRITEFUNCTION, buf.write)
    # TLS verification disabled — only safe for trusted/internal endpoints.
    c.setopt(pycurl.SSL_VERIFYPEER, 0)
    c.setopt(pycurl.SSL_VERIFYHOST, 0)
    if head is not None:
        c.setopt(pycurl.HTTPHEADER, head)
    if post_data is not None:
        if not raw_data:
            post_fields = urlencode(post_data)
        else:
            # NOTE(review): with raw_data=True, post_fields keeps whatever
            # type post_data has; the .encode() calls below assume a str.
            post_fields = post_data
        if not patch:
            c.setopt(c.POSTFIELDS, post_fields.encode())
    if post_upload is not None:
        c.setopt(pycurl.POST, 1)
        c.setopt(pycurl.HTTPPOST, post_upload)
    if credentials is not None:
        c.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
        c.setopt(pycurl.USERPWD, credentials)
    if patch:
        # PATCH sends the body through READFUNCTION instead of POSTFIELDS.
        c.setopt(pycurl.UPLOAD, 1)
        c.setopt(pycurl.CUSTOMREQUEST, 'PATCH')
        c.setopt(pycurl.READFUNCTION, BytesIO(post_fields.encode()).read)
        c.setopt(pycurl.INFILESIZE, len(post_fields.encode()))
    c.perform()
    c.close()
    try:
        # Prefer parsed JSON; fall back to wrapping the raw text.
        response = json.loads(buf.getvalue().decode('UTF-8')) if not raw else buf.getvalue()
    except json.JSONDecodeError:
        response = {'result': buf.getvalue().decode('UTF-8')}
    return response
def callback(parameters):
    """
    Post callback
    :param parameters: post parameters for callback
    :type parameters: dict
    :return response
    :rtype: dict
    """
    # Best-effort: any failure is folded into the response dict.
    try:
        if not cmd_args.token:
            return None
        # A truthy token is necessarily non-None, so the request is
        # unconditional here (the original re-checked `is not None`).
        parameters['token'] = cmd_args.token
        return curl_request(cmd_args.callback_url, post_data=parameters)
    except Exception as exc:
        return {'result': str(exc)}
def callback_terminate(code, parameters):
    """
    Terminate program
    :param code: exit code
    :type code: int
    :param parameters: post parameters for callback
    :type parameters: dict
    """
    parameters['status'] = code
    if cmd_args.token:
        response = callback(parameters)
        # NOTE(review): callback() can return None (e.g. curl_request
        # returned None), in which case .get() here raises AttributeError.
        logger.debug('%s: %s' % (_('callback response'), response.get('result', 'None')))
    # Stop the background callback-receiver process before exiting.
    if server.is_alive():
        server.terminate()
        server.join()
    logger.debug('%s: %s' % (_('reporter result'), json.dumps(parameters)))
    logger.info(_('reporter ended'))
    exit(code)
|
sandbox.py | # Copyright 2014 Google Inc. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import fcntl
import logging
import os
import pwd
import random
import signal
import subprocess
import sys
import threading
import time
from catnip import _ext
from catnip import util
SANDBOX_ROOT = '/var/cache/catnip-node'
SANDBOX_CHROOT_DIR = os.path.join(SANDBOX_ROOT, 'chroot')
SANDBOX_HOME_DIR_IN_CHROOT = 'home/catnip-sandbox'
SANDBOX_COMMAND_FILE_IN_CHROOT = 'tmp/catnip-sandbox.command'
SANDBOX_LOCK_BASEDIR = os.path.join(SANDBOX_ROOT, 'lock')
SANDBOX_RUN_BASEDIR = os.path.join(SANDBOX_ROOT, 'run')
SANDBOX_MASTER_LOCK_FILE = os.path.join(SANDBOX_ROOT, 'lock', 'master')
SANDBOX_HEALTH_CHECK_LOCK_FILE = os.path.join(SANDBOX_ROOT,
'lock', 'health-check')
SANDBOX_SETUP_FILE = os.path.join(SANDBOX_ROOT, 'state', 'setup')
SANDBOX_HEALTH_FILE = os.path.join(SANDBOX_ROOT, 'state', 'health')
SANDBOX_USER = 'catnip-sandbox'
SANDBOX_AVAILABLE_PATHS = (
'/bin', '/etc', '/lib', '/lib32', '/lib64', '/opt', '/sbin', '/usr',
'/var/lib')
SANDBOX_AVAILABLE_DEVS = (
'full', 'null', 'random', 'stderr', 'stdin', 'stdout', 'urandom', 'zero')
IPC_CONFIG_FILE = '/usr/lib/catnip-node/ipc.conf'
NEWPID_PATH = '/usr/lib/catnip-node/newpid'
CGROUP_ROOT = '/sys/fs/cgroup'
CGROUP_SUBSYSTEMS = ('cpuacct', 'cpuset', 'memory')
CATNIP_SANDBOX_INIT_PATH = '/usr/lib/catnip-node/catnip-sandbox-init'
_LOG_FORMATTER = logging.Formatter(
fmt='%(levelname)s %(asctime)-15s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
class InternalError(Exception):
    """Raised for any sandbox setup, locking or execution failure."""
    pass
class SandboxParams(util.ValidatedStruct):
    # Parameters controlling how a sandbox is acquired and configured.
    cpu = util.ValidatedProperty(type=int, validator=util.RangeValidator(0, 255))  # sandbox/CPU id
    share_cpu = util.ValidatedProperty(type=bool, default=False)  # shared lock instead of exclusive
    block = util.ValidatedProperty(type=bool, default=True)  # block waiting for the sandbox lock
    disk_limit_kb = util.ValidatedProperty(
        type=int, validator=util.RangeValidator(64, 4*1024*1024))  # tmpfs size of the chroot
    ignore_health = util.ValidatedProperty(type=bool, default=False)  # skip health-check gating
    debug = util.ValidatedProperty(type=bool, default=False)  # verbose logging to stderr
class RunRequest(util.ValidatedStruct):
    # Resource limits and the command for a single sandboxed execution.
    time_limit_sec = util.ValidatedProperty(
        type=float, validator=util.RangeValidator(0, 24*60*60))
    cpu_time_limit_sec = util.ValidatedProperty(
        type=float, validator=util.RangeValidator(0, 24*60*60))
    memory_limit_kb = util.ValidatedProperty(
        type=int, validator=util.RangeValidator(0, 4*1024*1024))
    output_limit_kb = util.ValidatedProperty(
        type=int, validator=util.RangeValidator(0, 4*1024*1024))
    command = util.ValidatedProperty(type=str)
class RunResponse(object):
    """Read-only result of a sandboxed run: a status plus its output file."""

    def __init__(self, status, output_file):
        self._status = status
        self._output_file = output_file

    @property
    def status(self):
        """Status of the run, as supplied by the caller."""
        return self._status

    @property
    def output_file(self):
        """Output file of the run, as supplied by the caller."""
        return self._output_file
class SandboxMaster(object):
    """Factory and status facade over the per-CPU sandboxes on this node."""

    def __init__(self):
        pass

    def Open(self, params, disk_image_stream):
        """Acquire a sandbox configured by `params`, seeded from the stream."""
        return Sandbox(params, disk_image_stream)

    def GetStatus(self):
        """Return master status plus ready/running state of each sandbox."""
        with util.Lock(SANDBOX_MASTER_LOCK_FILE, fcntl.LOCK_SH):
            status = {}
            status['status'] = self._GetMasterStatus()
            status['sandbox'] = []
            try:
                # Probe sandbox ids until the matching user is missing
                # (KeyError from getpwnam).  NOTE: xrange — Python 2 module.
                for sandbox_id in xrange(256):
                    status['sandbox'].append(self._GetSandboxStatus(sandbox_id))
            except KeyError:
                pass
            return status

    def _GetMasterStatus(self):
        # 'shutdown' until setup completed; 'warmup' while the health checker
        # holds its lock or has not yet written a result; otherwise the first
        # line of the health file (e.g. 'healthy').
        if not os.path.exists(SANDBOX_SETUP_FILE):
            return 'shutdown'
        try:
            with util.Lock(
                SANDBOX_HEALTH_CHECK_LOCK_FILE, fcntl.LOCK_SH|fcntl.LOCK_NB):
                with open(SANDBOX_HEALTH_FILE) as f:
                    return f.readline().strip()
        except util.LockFailed:
            return 'warmup'
        except IOError:
            return 'warmup'

    def _GetSandboxStatus(self, sandbox_id):
        # Throws KeyError when the user does not exist.
        pwd.getpwnam('catnip%d' % sandbox_id)
        try:
            with util.Lock(os.path.join(SANDBOX_LOCK_BASEDIR, '%d' % sandbox_id),
                           fcntl.LOCK_SH|fcntl.LOCK_NB):
                pass
        except util.LockFailed:
            # Lock is held, so a sandbox is currently running.
            return {'status': 'running'}
        else:
            return {'status': 'ready'}
class Sandbox(object):
def __init__(self, params, disk_image_stream):
    """Validate params, configure logging/ids, and run the setup phase.

    Args:
        params: a SandboxParams (validated here).
        disk_image_stream: file-like stream with the tar image used to seed
            the sandbox home directory.
    """
    assert isinstance(params, SandboxParams)
    if not params.Validate():
        raise InternalError('Invalid SandboxParams')
    self._params = params
    self._sandbox_mount_id = 'catnip-sandbox%d' % params.cpu
    self._sandbox_lock_file = os.path.join(
        SANDBOX_LOCK_BASEDIR, '%d' % params.cpu)
    self._sandbox_output_file = os.path.join(
        SANDBOX_RUN_BASEDIR, '%d.output' % params.cpu)
    self._cgroup_id = self._GenerateRandomID()
    self._cgroup_dir = os.path.join(
        CGROUP_ROOT, 'cpuset', self._cgroup_id)
    self._cgroup_dirs = {}
    for subsystem in CGROUP_SUBSYSTEMS:
        self._cgroup_dirs[subsystem] = os.path.join(
            CGROUP_ROOT, subsystem, self._cgroup_id)
    self._log = logging.getLogger('catnip.sandbox')
    if not self._params.debug:
        self._log.setLevel(logging.CRITICAL)
    else:
        self._log.setLevel(logging.INFO)
        log_handler = logging.StreamHandler(sys.stderr)
        log_handler.setFormatter(_LOG_FORMATTER)
        self._log.addHandler(log_handler)
    # Run the setup half of the _Context generator now; the teardown half
    # runs in Close().  (.next() — Python 2 generator protocol.)
    self._context = self._Context(disk_image_stream)
    self._context.next()
##############################################################################
## Setup
def __enter__(self):
    # Context-manager support: `with SandboxMaster().Open(...) as sandbox:`
    return self
def __exit__(self, exc_type, exc_value, exc_tb):
    # Always tear down; returning False never suppresses exceptions.
    self.Close()
    return False
def Close(self):
    """Run the teardown half of _Context; safe to call more than once."""
    if self._context:
        try:
            self._context.next()
        except StopIteration:
            pass
        self._context = None
def _Context(self, disk_image_stream):
    # Generator framing setup (everything before the yield) and teardown
    # (the nested context managers unwinding after the yield).
    self._SetCpuAffinity(self._params.cpu)
    with self._ConnectionWatcher():
        with self._EnvironmentSwap():
            with self._SandboxLock():
                self._Unshare()
                self._ConfigureIpc()
                self._UnmountAll()
                self._SetupChroot()
                self._InitializeDisk(disk_image_stream)
                yield
def _SetCpuAffinity(self, cpu):
    """Pin this process to `cpu` (an int or list of ints) via taskset."""
    if isinstance(cpu, int):
        cpu = [cpu]
    cpu = ','.join(map(str, cpu))
    if 0 != self._RunSilently(['taskset', '-cp', cpu, str(os.getpid())]):
        raise InternalError('Invalid CPU specification')
def _ConnectionWatcher(self):
    """Context manager that kills the cgroup if the SSH connection drops."""
    @contextlib.contextmanager
    def Watcher():
        self._log.info('starting connection watcher...')
        lead_pid = self._FindLeadProcess()
        self._log.info('lead_pid: %s', lead_pid)
        # NOTE(review): returning before the yield makes contextlib raise
        # "generator didn't yield"; the lead_pid=None (debug, non-SSH) path
        # looks broken — confirm.
        if not lead_pid:
            return
        cancelled = False
        def WatchThread():
            # Once the lead process is reparented (parent pid drops to 1,
            # i.e. the connection closed), repeatedly kill the cgroup.
            while self._GetParentPID(lead_pid) > 1 and not cancelled:
                time.sleep(0.5)
            while not cancelled:
                self._KillallCgroup()
                time.sleep(0.5)
        watcher = threading.Thread(target=WatchThread)
        watcher.daemon = True
        watcher.start()
        self._log.info('connection watcher started.')
        yield
        cancelled = True
        self._log.info('connection watcher stopped.')
    return Watcher()
def _FindLeadProcess(self):
    """Locate the sudo process spawned by sshd that leads this session.

    Returns its pid, or None in debug mode when not invoked via SSH.
    Raises InternalError when the process chain can't be resolved.
    """
    sudo_pid = os.getpgid(0)
    self._log.info('sudo_pid: %d', sudo_pid)
    ssh_pid = self._GetParentPID(sudo_pid)
    self._log.info('ssh_pid: %d', ssh_pid)
    if not ssh_pid:
        raise InternalError('Could not find the lead process')
    try:
        with open('/proc/%d/cmdline' % ssh_pid) as f:
            ssh_cmdline = f.read()
    except IOError:
        raise InternalError('Could not find the lead process')
    self._log.info('ssh_cmdline: %s', ssh_cmdline)
    if not ssh_cmdline.startswith('sshd: '):
        if self._params.debug:
            self._log.info('skipping SSH invocation check by --debug')
            return None
        raise InternalError('Not invoked from SSH')
    return sudo_pid
def _EnvironmentSwap(self):
    """Context manager: set a safe umask/PATH, restoring both on exit."""
    @contextlib.contextmanager
    def Swapper():
        self._log.info('setting umask/PATH')
        last_umask = os.umask(022)  # Python 2 octal literal (0o22)
        last_path = os.environ['PATH']
        os.environ['PATH'] = '/usr/sbin:/usr/bin:/sbin:/bin'
        yield
        self._log.info('recovering umask/PATH')
        os.environ['PATH'] = last_path
        os.umask(last_umask)
    return Swapper()
def _SandboxLock(self):
    """Context manager: gate on node health, then lock this sandbox.

    Shared lock when params.share_cpu, exclusive otherwise; non-blocking
    when params.block is False (InternalError if already locked).
    """
    @contextlib.contextmanager
    def Locker():
        self._log.info('locking sandbox %d', self._params.cpu)
        with util.Lock(SANDBOX_MASTER_LOCK_FILE, fcntl.LOCK_SH):
            if not os.path.exists(SANDBOX_SETUP_FILE):
                raise InternalError('Sandbox is not ready')
            if not self._params.ignore_health:
                try:
                    with util.Lock(
                        SANDBOX_HEALTH_CHECK_LOCK_FILE, fcntl.LOCK_SH|fcntl.LOCK_NB):
                        with open(SANDBOX_HEALTH_FILE) as f:
                            health = (f.readline().strip() == 'healthy')
                except util.LockFailed:
                    # A health checker is running.
                    health = None
                except IOError:
                    # No health checker has run yet.
                    health = None
                if health is None:
                    raise InternalError('Sandbox is not ready')
                if health is False:
                    raise InternalError('Sandbox is unhealthy')
            lock_mode = fcntl.LOCK_SH if self._params.share_cpu else fcntl.LOCK_EX
            if not self._params.block:
                lock_mode |= fcntl.LOCK_NB
            try:
                with util.Lock(self._sandbox_lock_file, lock_mode):
                    self._log.info('locked sandbox %d', self._params.cpu)
                    yield
                    self._log.info('unlocking sandbox %d', self._params.cpu)
            except util.LockFailed:
                raise InternalError('Sandbox is locked')
            self._log.info('unlocked sandbox %d', self._params.cpu)
    return Locker()
def _Unshare(self):
    # Detach IPC, network, mount and UTS namespaces from the host.
    self._log.info('unsharing...')
    _ext.unshare(_ext.CLONE_NEWIPC|_ext.CLONE_NEWNET|_ext.CLONE_NEWNS|
                 _ext.CLONE_NEWUTS)
def _ConfigureIpc(self):
    # Apply sysctl IPC settings inside the freshly unshared IPC namespace.
    if 0 != self._RunSilently(['sysctl', '-p', IPC_CONFIG_FILE]):
        raise InternalError('Failed to configure IPC')
def _UnmountAll(self):
    """Lazily unmount everything except /, /dev, /proc, /sys and cgroups."""
    self._log.info('unmounting filesystems...')
    paths = []
    path_whitelist = tuple(
        ['/', '/dev', '/proc', '/sys', CGROUP_ROOT] +
        [os.path.join(CGROUP_ROOT, subsystem)
         for subsystem in CGROUP_SUBSYSTEMS])
    with open('/proc/mounts') as f:
        for line in f:
            path = line.split()[1]
            if path not in path_whitelist:
                paths.append(path)
    # Longest paths first so nested mounts are unmounted before parents.
    paths.sort(key=lambda p: len(p), reverse=True)
    for path in paths:
        self._LazyUnmount(path)
def _SetupChroot(self):
    """Build the tmpfs chroot: /, /dev, read-only system dirs, /tmp, home."""
    self._log.info('setting up chroot...')
    # Create / (size-capped tmpfs enforces the sandbox's disk limit)
    self._Mount(
        'tmpfs',
        'nosuid,nodev,size=%dk,uid=0,gid=0,mode=755' % (
            self._params.disk_limit_kb),
        'none',
        SANDBOX_CHROOT_DIR)
    # Create /dev
    os.mkdir(os.path.join(SANDBOX_CHROOT_DIR, 'dev'))
    self._Mount(
        'tmpfs',
        'nosuid,size=64k,uid=0,gid=0,mode=755',
        'none',
        os.path.join(SANDBOX_CHROOT_DIR, 'dev'))
    for dev in SANDBOX_AVAILABLE_DEVS:
        oldpath = os.path.join('/dev', dev)
        newpath = os.path.join(SANDBOX_CHROOT_DIR, 'dev', dev)
        if os.path.islink(oldpath):
            # Recreate symlinked devices (e.g. stdin -> /proc/self/fd/0).
            linkpath = os.readlink(oldpath)
            os.symlink(linkpath, newpath)
        else:
            # Bind-mount real device nodes onto empty placeholder files.
            with open(newpath, 'w'):
                pass
            self._BindMount(oldpath, newpath)
    # Mount other available paths
    for path in SANDBOX_AVAILABLE_PATHS:
        if not os.path.isdir(path):
            continue
        path = path.strip('/')
        os.makedirs(os.path.join(SANDBOX_CHROOT_DIR, path))
        self._BindMount(
            os.path.join('/', path), os.path.join(SANDBOX_CHROOT_DIR, path),
            options='nosuid,nodev,ro')
    # Create /tmp, /var/tmp
    os.mkdir(os.path.join(SANDBOX_CHROOT_DIR, 'tmp'))
    self._RunSilently(
        ['chmod', '1777', os.path.join(SANDBOX_CHROOT_DIR, 'tmp')])
    os.symlink(os.path.join(os.pardir, 'tmp'),
               os.path.join(SANDBOX_CHROOT_DIR, 'var', 'tmp'))
    # Create /proc placeholder - mounted after CLONE_NEWPID
    os.mkdir(os.path.join(SANDBOX_CHROOT_DIR, 'proc'))
    # Create /home/catnip-sandbox
    os.makedirs(os.path.join(SANDBOX_CHROOT_DIR, SANDBOX_HOME_DIR_IN_CHROOT))
    self._RunSilently(
        ['chown', 'catnip-sandbox.',
         os.path.join(SANDBOX_CHROOT_DIR, SANDBOX_HOME_DIR_IN_CHROOT)])
def _InitializeDisk(self, disk_image_stream):
    """Populate the sandbox home directory from a tar stream.

    Runs `tar -x` inside the chroot (as the sandbox user, in a fresh
    PID namespace via NEWPID_PATH) with *disk_image_stream* as stdin.

    Raises InternalError if tar cannot be started or exits non-zero.
    """
    tar_shell_command = (
        'cd /%(sandbox_home)s; '
        'exec sudo -u %(sandbox_user)s tar --no-overwrite-dir -x') % {
        'sandbox_home': SANDBOX_HOME_DIR_IN_CHROOT,
        'sandbox_user': SANDBOX_USER,
    }
    if self._params.debug:
        # Verbose extraction listing in debug mode.
        tar_shell_command += ' -v'
    args = ['chroot', SANDBOX_CHROOT_DIR, NEWPID_PATH,
            'bash', '-c', tar_shell_command]
    self._log.info('extracting tarball: %s', ' '.join(args))
    try:
        with open(os.devnull, 'w') as null:
            # Tar's own output goes to stderr only when debugging.
            out = sys.stderr if self._params.debug else null
            returncode = subprocess.call(args,
                                         close_fds=True,
                                         stdin=disk_image_stream,
                                         stdout=out,
                                         stderr=out)
            self._log.info('return code: %d', returncode)
            if returncode != 0:
                raise InternalError('Tar command failed')
    except OSError:
        raise InternalError('Failed to run tar')
##############################################################################
## Execution
def Execute(self, request):
    """Run *request* inside the sandbox and return a RunResponse.

    Sets up the per-request cgroup, spawns the program plus its output
    limiter, optionally arms a timeout killer, and always (even on
    error) cancels the killer, kills every process left in the cgroup
    and waits for the limiter before building the response.

    Raises InternalError if the request is invalid or execution fails.
    """
    assert isinstance(request, RunRequest)
    if not request.Validate():
        raise InternalError('Invalid RunRequest')
    with self._Cgroup(request):
        main_proc, limiter_proc, start_time = self._SpawnProgram(request)
        killer = None
        end_time = None
        try:
            killer = self._MaybeLaunchTimeoutKiller(main_proc, start_time, request)
            self._log.info('waiting for the main program to finish...')
            main_proc.wait()
            end_time = time.time()
            self._log.info('program finished.')
        finally:
            # Cleanup must run even if wait() is interrupted: stop the
            # killer, reap stray children, then let the limiter drain.
            if killer:
                killer.Cancel()
            self._KillallCgroup()
            self._log.info('waiting for the limiter to finish...')
            limiter_proc.wait()
        if end_time is None:
            # Only possible if wait() raised; runtime is then unknown.
            runtime = None
        else:
            runtime = end_time - start_time
        self._log.info('success.')
        return self._BuildResponse(request, main_proc.returncode, runtime)
def _Cgroup(self, request):
    """Return a context manager creating and tearing down the cgroups.

    On entry, creates one directory per configured subsystem and pins
    the CPU/memory limits; on normal exit, removes the directories.

    NOTE(review): this is Python-2-only code (`iteritems`, the `0700`
    octal literal). Also, the `yield` is not wrapped in try/finally, so
    if the `with` body raises, the rmdir cleanup loop is skipped and
    the cgroup directories leak — confirm whether that is intentional.

    Raises InternalError if a cgroup cannot be created or configured.
    """
    @contextlib.contextmanager
    def Cgroup():
        self._log.info('setting up cgroup...')
        for subsystem, cgroup_dir in self._cgroup_dirs.iteritems():
            try:
                os.mkdir(cgroup_dir, 0700)
            except OSError:
                raise InternalError('Failed to create a cgroup: %s' % subsystem)
        try:
            # Pin the sandbox to its dedicated CPU and memory node 0.
            self._SetCgroupParam('cpuset.cpus', self._params.cpu)
            self._SetCgroupParam('cpuset.mems', 0)
            if request.memory_limit_kb > 0:
                self._SetCgroupParam('memory.limit_in_bytes',
                                     request.memory_limit_kb * 1024)
        except IOError:
            raise InternalError('Failed to set up a cgroup')
        yield
        for subsystem, cgroup_dir in self._cgroup_dirs.iteritems():
            try:
                # Best-effort removal; fails while tasks remain.
                os.rmdir(cgroup_dir)
            except OSError:
                pass
    return Cgroup()
def _SetCgroupParam(self, name, value):
    """Write *value* to the cgroup control file *name*.

    *name* is '<subsystem>.<param>' (e.g. 'memory.limit_in_bytes');
    the subsystem prefix selects which cgroup directory to write into.
    Raises IOError if the control file cannot be written (callers map
    this to InternalError). Python-2 `print >>` syntax.
    """
    self._log.info('%s = %s', name, value)
    subsystem = name.split('.')[0]
    with open(os.path.join(self._cgroup_dirs[subsystem], name), 'w') as f:
        print >>f, value
def _SpawnProgram(self, request):
    """Start the sandboxed program piped into an output limiter.

    Writes the request command to the in-chroot command file, then
    starts a `head`/`cat` limiter writing to the sandbox output file
    and a shell that joins the cgroups, lowers its OOM score, chroots
    and execs the sandbox init in a new PID namespace, with its stdout
    piped to the limiter.

    Returns (main_proc, limiter_proc, start_time).
    Raises InternalError on any preparation or spawn failure.
    Python-2 `print >>` / `0755` syntax.
    """
    try:
        with open(os.path.join(SANDBOX_CHROOT_DIR,
                               SANDBOX_COMMAND_FILE_IN_CHROOT), 'w') as f:
            print >>f, '#!/bin/bash'
            print >>f, request.command
            os.fchmod(f.fileno(), 0755)
    except IOError:
        raise InternalError('Failed to prepare command')
    except OSError:
        raise InternalError('Failed to prepare command')
    self._log.info('command line: %s', request.command)
    # The shell first registers itself in every cgroup (so all children
    # inherit the limits), then protects itself from the OOM killer
    # before entering the chroot + new PID namespace.
    main_shell_command = (
        'for cgroup_dir in %(cgroup_dirs)s; do '
        'echo $$ > $cgroup_dir/tasks; done;'
        'echo -1000 > /proc/self/oom_score_adj;'
        'chroot %(chroot_dir)s '
        '%(newpid_path)s '
        '%(catnip_sandbox_init_path)s') % {
        'cgroup_dirs': ' '.join(self._cgroup_dirs.values()),
        'chroot_dir': SANDBOX_CHROOT_DIR,
        'newpid_path': NEWPID_PATH,
        'catnip_sandbox_init_path': CATNIP_SANDBOX_INIT_PATH,
    }
    # Limiter truncates output at output_limit_kb; `cat` = unlimited.
    if request.output_limit_kb > 0:
        limiter_args = ['head', '-c', '%dK' % request.output_limit_kb]
    else:
        limiter_args = ['cat']
    try:
        os.unlink(self._sandbox_output_file)
    except OSError:
        pass
    try:
        with open(os.devnull, 'w') as null:
            with open(self._sandbox_output_file, 'w') as out:
                try:
                    self._log.info('invoking limiter: %s', ' '.join(limiter_args))
                    limiter_proc = subprocess.Popen(
                        limiter_args, close_fds=True, stdin=subprocess.PIPE, stdout=out,
                        stderr=null)
                except OSError:
                    raise InternalError('Failed to run limiter')
                start_time = time.time()
                try:
                    self._log.info('invoking main program: %s', main_shell_command)
                    main_proc = subprocess.Popen(
                        [main_shell_command], shell=True, close_fds=True, stdin=null,
                        stdout=limiter_proc.stdin, stderr=null)
                except OSError:
                    self._log.info('oops, main program invocation failed.')
                    limiter_proc.kill()
                    limiter_proc.wait()
                    raise InternalError('Failed to run the main program')
                finally:
                    # Parent must drop its copy of the pipe's write end
                    # so the limiter sees EOF when the program exits.
                    limiter_proc.stdin.close()
                self._log.info('spawned the main program')
                return (main_proc, limiter_proc, start_time)
    except IOError:
        raise InternalError('Failed to open the output file')
def _MaybeLaunchTimeoutKiller(self, proc, start_time, request):
    """Start a TimeoutKiller daemon thread for *proc* if limits are set.

    Returns the running killer, or None when both the wall-clock and
    CPU-time limits are zero (no enforcement requested). A wall-clock
    deadline of 0 passed to the killer disables that check.
    """
    if request.time_limit_sec == 0 and request.cpu_time_limit_sec == 0:
        self._log.info('timeout killer suppressed')
        return None
    deadline = 0
    if request.time_limit_sec > 0:
        deadline = start_time + request.time_limit_sec
    watchdog = TimeoutKiller(
        proc, deadline, request.cpu_time_limit_sec,
        os.path.join(self._cgroup_dirs['cpuacct'], 'cpuacct.usage'))
    watchdog.daemon = True
    watchdog.start()
    self._log.info('started timeout killer')
    return watchdog
def _BuildResponse(self, request, returncode, runtime):
    """Collect resource accounting and build the RunResponse.

    Raises InternalError if a cgroup accounting file cannot be read.
    """
    if returncode < 0:
        # Popen encodes death-by-signal as -signum; map to the shell
        # convention 128 + signum (128 - (-signum)).
        returncode = 128 - returncode
    try:
        # cpuacct.usage reports cumulative nanoseconds -> seconds.
        with open(os.path.join(
                self._cgroup_dirs['cpuacct'], 'cpuacct.usage')) as f:
            cpu_usage = float(f.read().strip()) / 1e9
    except IOError:
        raise InternalError('Failed to read cpuacct.usage')
    try:
        # Peak memory in bytes -> KiB. NOTE(review): on Python 2 this
        # is integer (floor) division; the rest of the file uses
        # Python-2-only syntax, so that appears intended — confirm.
        with open(os.path.join(
                self._cgroup_dirs['memory'], 'memory.max_usage_in_bytes')) as f:
            memory_usage = int(f.read().strip()) / 1024
    except IOError:
        raise InternalError('Failed to read memory.max_usage_in_bytes')
    try:
        output_length = os.stat(self._sandbox_output_file).st_size
    except OSError:
        # Missing output file counts as empty output.
        output_length = 0
    status = {
        'returncode': returncode,
        'time': runtime,
        'cputime': cpu_usage,
        'memory': memory_usage,
        'length': output_length,
        'command': request.command,
    }
    return RunResponse(status, self._sandbox_output_file)
##############################################################################
## Utilities
def _KillallCgroup(self):
    """SIGKILL every task in the sandbox cgroup until none remain.

    Re-reads the cgroup `tasks` file on each pass because killed
    processes may have spawned children between reads; stops once the
    file is empty.

    NOTE(review): Python-2 semantics assumed — `map` must return a
    list for `if not pids` to terminate the loop (a Python-3 map
    object is always truthy). A persistent IOError on the tasks file
    would also make this spin forever — confirm acceptable.
    """
    self._log.info('killing all cgroup processes...')
    while True:
        try:
            with open(os.path.join(self._cgroup_dir, 'tasks')) as f:
                pids = map(int, f.read().split())
            if not pids:
                break
            for pid in pids:
                try:
                    os.kill(pid, signal.SIGKILL)
                except OSError:
                    # Process already exited between read and kill.
                    pass
            self._log.info('killed %s', ', '.join(map(str, pids)))
        except IOError:
            pass
def _Mount(self, fstype, options, oldpath, newpath):
    """Mount *oldpath* on *newpath* as *fstype* with *options*.

    Raises InternalError when the mount command exits non-zero.
    """
    args = ['mount', '-n', '-t', fstype, '-o', options, oldpath, newpath]
    if self._RunSilently(args) != 0:
        raise InternalError('Failed to mount: %s' % newpath)
def _LazyUnmount(self, path):
    """Lazily detach the filesystem mounted at *path* (umount -l).

    Raises InternalError when the umount command exits non-zero.
    """
    if self._RunSilently(['umount', '-n', '-l', path]) != 0:
        raise InternalError('Failed to unmount: %s' % path)
def _BindMount(self, oldpath, newpath, options=''):
    """Bind-mount *oldpath* onto *newpath*, then remount with *options*.

    The two-step dance is required because mount(8) ignores most
    options (e.g. `ro`) on the initial --bind; they only take effect
    on a `remount,bind` pass.

    Raises InternalError as soon as either mount invocation fails.
    """
    for args in (
            ['mount', '-n', '--bind', oldpath, newpath],
            ['mount', '-n', '-o', 'remount,bind,%s' % options,
             oldpath, newpath]):
        if self._RunSilently(args) != 0:
            raise InternalError('Failed to bind mount: %s' % newpath)
def _RunSilently(self, args):
    """Run *args* as a subprocess and return its exit code.

    Output is discarded unless debug mode is on, in which case both
    stdout and stderr go to our stderr. stdin is always /dev/null.

    Raises InternalError if the executable cannot be launched.
    """
    self._log.info('running silently: %s', ' '.join(args))
    try:
        with open(os.devnull, 'w') as devnull:
            sink = sys.stderr if self._params.debug else devnull
            return subprocess.call(args, close_fds=True,
                                   stdin=devnull, stdout=sink, stderr=sink)
    except OSError:
        raise InternalError('Failed to run %s' % args[0])
def _GetParentPID(self, pid):
try:
with open('/proc/%d/stat' % pid) as f:
line = f.read().strip()
info = line.rsplit(') ', 1)[-1].split()
return int(info[1])
except:
return None
def _GenerateRandomID(self):
return ''.join([random.choice('0123456789abcdef') for _ in xrange(32)])
class TimeoutKiller(threading.Thread):
    """Background thread that kills a process exceeding its limits.

    Polls every 100 ms and kills *proc* once either the wall-clock
    deadline has passed or the cgroup's accumulated CPU time (read
    from *cpuacct_usage_path*, in nanoseconds) exceeds
    *cpu_time_limit* seconds. A limit of 0 disables that check.
    Call Cancel() to stop the loop without killing the process.
    """

    _POLL_INTERVAL_SEC = 0.1

    def __init__(self, proc, time_limit_deadline, cpu_time_limit,
                 cpuacct_usage_path):
        super(TimeoutKiller, self).__init__()
        self._proc = proc
        self._time_limit_deadline = time_limit_deadline
        self._cpu_time_limit = cpu_time_limit
        self._cpuacct_usage_path = cpuacct_usage_path
        self._running = True

    def run(self):
        # Keep the accounting file open and rewind each pass instead of
        # reopening it on every poll.
        with open(self._cpuacct_usage_path) as usage_file:
            while self._running:
                usage_file.seek(0)
                if self._Expired(usage_file):
                    try:
                        self._proc.kill()
                    except OSError:
                        # Process already gone; nothing to do.
                        pass
                    break
                time.sleep(self._POLL_INTERVAL_SEC)

    def _Expired(self, usage_file):
        """Return True once either enabled limit has been exceeded."""
        if (self._time_limit_deadline > 0 and
            time.time() > self._time_limit_deadline):
            return True
        if self._cpu_time_limit > 0:
            # cpuacct.usage is cumulative nanoseconds.
            return float(usage_file.read()) / 1e9 > self._cpu_time_limit
        return False

    def Cancel(self):
        """Ask the polling loop to exit without killing the process."""
        self._running = False
|
modelservicecontext.py | #Data : 2018-10-15
#Author : Fengyuan Zhang (Franklin)
#Email : franklinzhang@foxmail.com
from enum import Enum
import socket
import time
import threading
import json
import os
import sys
import zipfile
class EModelContextStatus(Enum):
    """Lifecycle states of a ModelServiceContext.

    Each protocol phase has BEGIN / in-progress / END members; the
    monitoring thread advances the context to the *_END member when
    the container acknowledges the corresponding command.

    BUG FIX: every member previously ended with a trailing comma,
    which made its value a one-element tuple such as ``(1,)`` instead
    of the intended integer. Member identity comparisons still worked,
    but ``.value`` was wrong.
    """
    EMCS_INIT_BEGIN = 1
    EMCS_INIT = 2
    EMCS_INIT_END = 3
    EMCS_STATE_ENTER_BEGIN = 4
    EMCS_STATE_ENTER = 5
    EMCS_STATE_ENTER_END = 6
    EMCS_EVENT_BEGIN = 7
    EMCS_EVENT = 8
    EMCS_EVENT_END = 9
    EMCS_REQUEST_BEGIN = 10
    EMCS_REQUEST = 11
    EMCS_REQUEST_END = 12
    EMCS_RESPONSE_BEGIN = 13
    EMCS_RESPONSE = 14
    EMCS_RESPONSE_END = 15
    EMCS_POST_BEGIN = 16
    EMCS_POST = 17
    EMCS_POST_END = 18
    EMCS_STATE_LEAVE_BEGIN = 19
    EMCS_STATE_LEAVE = 20
    EMCS_STATE_LEAVE_END = 21
    EMCS_FINALIZE_BEGIN = 22
    EMCS_FINALIZE = 23
    EMCS_FINALIZE_END = 24
    EMCS_COMMON_BEGIN = 25
    EMCS_COMMON_REQUEST = 26
    EMCS_COMMON_END = 27
    EMCS_INIT_CTRLPARAM_BEGIN = 28
    EMCS_INIT_CTRLPARAM = 29
    EMCS_INIT_CTRLPARAM_END = 30
    EMCS_UNKOWN = 0
class ERequestResponseDataFlag(Enum):
    """Status flag of a request/response payload exchange.

    BUG FIX: trailing commas made each member's value a one-element
    tuple (e.g. ``(1,)``) instead of the intended integer.
    """
    ERDF_OK = 1
    ERDF_NOTREADY = 2
    ERDF_ERROR = -1
    ERDF_UNKNOWN = 0
class ERequestResponseDataMIME(Enum):
    """Payload encoding: XML/ZIP/RAW, delivered inline or as a file.

    BUG FIX: most members ended with a trailing comma (making their
    value a one-element tuple) while ERDM_ZIP_STREAM did not — the
    values are now consistently plain integers.
    """
    ERDM_XML_STREAM = 1
    ERDM_ZIP_STREAM = 2
    ERDM_RAW_STREAM = 3
    ERDM_XML_FILE = 4
    ERDM_ZIP_FILE = 5
    ERDM_RAW_FILE = 6
    ERDM_UNKNOWN = 0
class ModelServiceContext:
    """Client-side context for talking to a Model Service Container.

    Connects over TCP, then drives a simple text protocol: each call
    sends a '{command}instanceID&...' message and blocks (via
    _wait4Status) until the background Monitoring_thread sees the
    matching '... Notified}' reply and advances mStatus.

    NOTE(review): messages are sent/received as `str`, which matches a
    Python 2 runtime (`sendall(str)`); under Python 3 payloads would
    need explicit encode/decode — confirm the target interpreter.
    """

    def __init__(self):
        self.mPort = 6000                      # container port (default)
        self.mHost = '127.0.0.1'
        self.mInstanceID = ''
        self.mClientSocket = None
        self.mMornitoringThread = None         # background receiver thread
        self.mDebugScriptFile = ''
        self.mStatus = EModelContextStatus.EMCS_UNKOWN
        self.mData = ''                        # last payload set by the monitor thread
        self.mMappingLibDir = ''
        self.mInstanceDir = ''
        self.mCurrentState = ''
        self.mCurrentEvent = ''
        self.mRequestDataFlag = ERequestResponseDataFlag.ERDF_UNKNOWN
        self.mRequestDataBody = ''
        self.mRequestDataMIME = ERequestResponseDataMIME.ERDM_UNKNOWN
        self.mResponseDataFlag = ERequestResponseDataFlag.ERDF_UNKNOWN
        self.mResponseDataBody = ''
        self.mResponseDataMIME = ERequestResponseDataMIME.ERDM_UNKNOWN
        self.mProcessParams = {}
        self.mControlParams = {}

    def _bindSocket(self):
        """Connect to the container; return 1 on success, -1 on failure."""
        self.mClientSocket = socket.socket()
        try:
            self.mClientSocket.connect((self.mHost, int(self.mPort)))
        # BUG FIX: previously caught ZeroDivisionError, which connect()
        # never raises, so every real connection failure propagated
        # instead of returning -1.
        except socket.error:
            return -1
        return 1

    def _sendMessage(self, message):
        """Send *message* verbatim on the container socket."""
        self.mClientSocket.sendall(message)

    def _receiveMessage(self):
        """Blocking receive of up to 10240 bytes, returned as str."""
        msg = str(self.mClientSocket.recv(10240))
        return msg

    def _wait4Status(self, status, timeout = 72000):
        """Poll until mStatus reaches *status* or *timeout* seconds pass.

        NOTE(review): always returns 1, so callers cannot distinguish
        success from timeout — confirm whether that is acceptable.
        """
        time_end = time.time() + timeout
        while True:
            time.sleep(0.01)
            if self.mStatus == status or time.time() > time_end:
                return 1

    def _resetRequestDataInfo(self):
        """Clear the request-side payload fields."""
        self.mRequestDataBody = ''
        self.mRequestDataFlag = ERequestResponseDataFlag.ERDF_UNKNOWN
        self.mRequestDataMIME = ERequestResponseDataMIME.ERDM_UNKNOWN

    def _resetResponseDataInfo(self):
        """Clear the response-side payload fields.

        BUG FIX: this reset the request-side fields (a copy-paste from
        _resetRequestDataInfo), leaving stale response data in place.
        """
        self.mResponseDataBody = ''
        self.mResponseDataFlag = ERequestResponseDataFlag.ERDF_UNKNOWN
        self.mResponseDataMIME = ERequestResponseDataMIME.ERDM_UNKNOWN

    def _sendProcessParam(self):
        """Push the full process-parameter dict to the container as JSON."""
        self._sendMessage('{ProcessParams}' + self.mInstanceID + '&' + json.dumps(self.mProcessParams))

    def onInitialize(self, host, port, instanceID):
        """Connect, start the monitor thread and receive the mapping
        library and instance directories; exits the process on failure."""
        self.mHost = host
        self.mPort = port
        self.mInstanceID = instanceID
        self.mStatus = EModelContextStatus.EMCS_INIT_BEGIN
        if self._bindSocket() == 1:
            # start monitoring thread
            self.mMornitoringThread = threading.Thread(target=ModelServiceContext.Monitoring_thread, name='Monitoring', args=(self,))
            self.mStatus = EModelContextStatus.EMCS_INIT
            if self.mMornitoringThread == None:
                print('error in create thread!')
                return exit()
            self.mMornitoringThread.start()
            self._sendMessage('{init}' + self.mInstanceID + '&' + self.mDebugScriptFile)
            self._wait4Status(EModelContextStatus.EMCS_INIT_END)
            # Reply payload layout: '[mappingLibDir][instanceDir]'.
            startPos = self.mData.index('[')
            endPos = self.mData.index(']')
            self.mMappingLibDir = self.mData[startPos + 1 : endPos]
            self.mData = self.mData[endPos + 1 : ]
            startPos = self.mData.index('[')
            endPos = self.mData.index(']')
            self.mInstanceDir = self.mData[startPos + 1 : endPos]
        else:
            print('Init Failed! Cannot Connect Model Service Container')
            return exit()

    def onEnterState(self, stateId):
        """Notify the container that *stateId* has been entered."""
        self.mStatus = EModelContextStatus.EMCS_STATE_ENTER_BEGIN
        self.mCurrentState = stateId
        self._sendMessage('{onEnterState}' + self.mInstanceID + '&' + stateId)
        self.mStatus = EModelContextStatus.EMCS_STATE_ENTER
        self._wait4Status(EModelContextStatus.EMCS_STATE_ENTER_END)
        return 0

    def onFireEvent(self, eventName):
        """Notify the container that *eventName* fired in the current state."""
        self.mStatus = EModelContextStatus.EMCS_EVENT_BEGIN
        self.mCurrentEvent = eventName
        self._sendMessage('{onFireEvent}' + self.mInstanceID + "&" + self.mCurrentState + "&" + eventName)
        self.mStatus = EModelContextStatus.EMCS_EVENT
        self._wait4Status(EModelContextStatus.EMCS_EVENT_END)

    def onRequestData(self):
        """Request the current state/event's input data from the container.

        On success, fills mRequestDataFlag/MIME/Body from the reply,
        whose payload is '[FLAG][MIME]body'. Returns -1 when no
        state/event is active; 0 on an ERROR flag.
        """
        self._resetRequestDataInfo()
        if self.mCurrentState == '' or self.mCurrentEvent == '':
            return -1
        self._resetRequestDataInfo()
        self.mStatus = EModelContextStatus.EMCS_REQUEST_BEGIN
        self._sendMessage('{onRequestData}' + self.mInstanceID + '&' + self.mCurrentState + '&' + self.mCurrentEvent)
        self._wait4Status(EModelContextStatus.EMCS_REQUEST_END)
        posBegin = self.mData.index('[')
        posEnd = self.mData.index(']')
        # BUG FIX: the slice end was `posEnd - posBegin`, which is only
        # correct when posBegin == 0; `posEnd` extracts the bracketed
        # token regardless of where it starts.
        dataFlag = self.mData[posBegin + 1 : posEnd]
        dataRemained = self.mData[posEnd + 1 : ]
        if dataFlag == 'OK':
            self.mRequestDataFlag = ERequestResponseDataFlag.ERDF_OK
        else:
            self.mRequestDataFlag = ERequestResponseDataFlag.ERDF_ERROR
            self.mRequestDataMIME = ERequestResponseDataMIME.ERDM_UNKNOWN
            return 0
        posBegin = dataRemained.index('[')
        posEnd = dataRemained.index(']')
        # BUG FIX: same slice-end fix as above for the MIME token.
        dataMIME = dataRemained[posBegin + 1 : posEnd]
        if dataMIME == 'XML|STREAM':
            self.mRequestDataMIME = ERequestResponseDataMIME.ERDM_XML_STREAM
        elif dataMIME == 'ZIP|STREAM':
            self.mRequestDataMIME = ERequestResponseDataMIME.ERDM_ZIP_STREAM
        elif dataMIME == 'RAW|STREAM':
            self.mRequestDataMIME = ERequestResponseDataMIME.ERDM_RAW_STREAM
        elif dataMIME == 'XML|FILE':
            self.mRequestDataMIME = ERequestResponseDataMIME.ERDM_XML_FILE
        elif dataMIME == 'ZIP|FILE':
            self.mRequestDataMIME = ERequestResponseDataMIME.ERDM_ZIP_FILE
        elif dataMIME == 'RAW|FILE':
            self.mRequestDataMIME = ERequestResponseDataMIME.ERDM_RAW_FILE
        else:
            self.mRequestDataMIME = ERequestResponseDataMIME.ERDM_UNKNOWN
        self.mRequestDataBody = dataRemained[posEnd + 1 : ]

    def onResponseData(self):
        """Send the prepared response payload back to the container.

        The message format depends on mResponseDataFlag; response
        fields are cleared afterwards.
        """
        self.mStatus = EModelContextStatus.EMCS_RESPONSE_BEGIN
        if self.mResponseDataFlag == ERequestResponseDataFlag.ERDF_OK:
            mime = ''
            if self.mResponseDataMIME == ERequestResponseDataMIME.ERDM_XML_STREAM:
                mime = '[XML|STREAM]'
            elif self.mResponseDataMIME == ERequestResponseDataMIME.ERDM_ZIP_STREAM:
                mime = '[ZIP|STREAM]'
            elif self.mResponseDataMIME == ERequestResponseDataMIME.ERDM_RAW_STREAM:
                mime = '[RAW|STREAM]'
            elif self.mResponseDataMIME == ERequestResponseDataMIME.ERDM_XML_FILE:
                mime = '[XML|FILE]'
            elif self.mResponseDataMIME == ERequestResponseDataMIME.ERDM_ZIP_FILE:
                mime = '[ZIP|FILE]'
            elif self.mResponseDataMIME == ERequestResponseDataMIME.ERDM_RAW_FILE:
                mime = '[RAW|FILE]'
            else:
                mime = '[UNKNOWN]'
            self._sendMessage('{onResponseData}' + self.mInstanceID + '&' + self.mCurrentState + '&' + self.mCurrentEvent + '&' + str(len(self.mResponseDataBody)) + '[OK]' + mime + self.mResponseDataBody )
            self.mStatus = EModelContextStatus.EMCS_RESPONSE
            self._wait4Status(EModelContextStatus.EMCS_RESPONSE_END)
        elif self.mResponseDataFlag == ERequestResponseDataFlag.ERDF_ERROR:
            self._sendMessage('{onResponseData}' + self.mInstanceID + '&' + self.mCurrentState + '&' + self.mCurrentEvent + '&0[ERROR]' )
            self.mStatus = EModelContextStatus.EMCS_RESPONSE
            self._wait4Status(EModelContextStatus.EMCS_RESPONSE_END)
        elif self.mResponseDataFlag == ERequestResponseDataFlag.ERDF_NOTREADY:
            # BUG FIX: len() was concatenated to a str without str(),
            # which raised TypeError on this branch.
            self._sendMessage('{onResponseData}' + self.mInstanceID + '&' + self.mCurrentState + '&' + self.mCurrentEvent + '&' + str(len(self.mResponseDataBody)))
            self.mStatus = EModelContextStatus.EMCS_RESPONSE
            self._wait4Status(EModelContextStatus.EMCS_RESPONSE_END)
        self._resetResponseDataInfo()

    def onPostErrorInfo(self, errinfo):
        """Report an error string to the container."""
        self.mStatus = EModelContextStatus.EMCS_POST_BEGIN
        self._sendMessage('{onPostErrorInfo}' + self.mInstanceID + '&' + errinfo)
        self.mStatus = EModelContextStatus.EMCS_POST
        self._wait4Status(EModelContextStatus.EMCS_POST_END)

    def onPostWarningInfo(self, warninginfo):
        """Report a warning string to the container.

        NOTE(review): this sends the '{onPostMessageInfo}' command,
        apparently a copy-paste from onPostMessageInfo. Left unchanged
        because the container may rely on it — confirm against the
        container's protocol handler.
        """
        self.mStatus = EModelContextStatus.EMCS_POST_BEGIN
        self._sendMessage('{onPostMessageInfo}' + self.mInstanceID + '&' + warninginfo)
        self.mStatus = EModelContextStatus.EMCS_POST
        self._wait4Status(EModelContextStatus.EMCS_POST_END)

    def onPostMessageInfo(self, messageinfo):
        """Report an informational message to the container."""
        self.mStatus = EModelContextStatus.EMCS_POST_BEGIN
        self._sendMessage('{onPostMessageInfo}' + self.mInstanceID + '&' + messageinfo)
        self.mStatus = EModelContextStatus.EMCS_POST
        self._wait4Status(EModelContextStatus.EMCS_POST_END)

    def onLeaveState(self):
        """Notify the container that the current state is being left."""
        self.mStatus = EModelContextStatus.EMCS_STATE_LEAVE_BEGIN
        self._sendMessage('{onLeaveState}' + self.mInstanceID + '&' + self.mCurrentState)
        self.mStatus = EModelContextStatus.EMCS_STATE_LEAVE
        self._wait4Status(EModelContextStatus.EMCS_STATE_LEAVE_END)

    def onFinalize(self):
        """Notify the container of shutdown, then exit the process."""
        self.mStatus = EModelContextStatus.EMCS_FINALIZE_BEGIN
        self._sendMessage('{onFinalize}' + self.mInstanceID)
        self.mStatus = EModelContextStatus.EMCS_FINALIZE
        self._wait4Status(EModelContextStatus.EMCS_FINALIZE_END)
        sys.exit()

    def onGetModelAssembly(self, methodName):
        """Ask the container for the model assembly of *methodName*."""
        self.mStatus = EModelContextStatus.EMCS_COMMON_BEGIN
        self._sendMessage('{onGetModelAssembly}' + self.mInstanceID + '&' + methodName)
        self.mStatus = EModelContextStatus.EMCS_COMMON_REQUEST
        self._wait4Status(EModelContextStatus.EMCS_COMMON_END)
        assembly = self.mData[0 : self.mData.index('}') + 1]
        return assembly

    def initControlParam(self):
        """Fetch the control-parameter dict (JSON) from the container."""
        self.mStatus = EModelContextStatus.EMCS_INIT_CTRLPARAM_BEGIN
        self._sendMessage('{onInitControlParam}' + self.mInstanceID)
        self.mStatus = EModelContextStatus.EMCS_INIT_CTRLPARAM
        self._wait4Status(EModelContextStatus.EMCS_INIT_CTRLPARAM_END)
        posEnd = self.mData.index('&')
        controlParamBuffer = self.mData[posEnd + 1 : ]
        try:
            self.mControlParams = json.loads(controlParamBuffer)
        # BUG FIX: previously caught ZeroDivisionError, which
        # json.loads never raises; malformed JSON raises ValueError
        # (JSONDecodeError is a subclass).
        except ValueError:
            pass

    # --- Payload accessors -------------------------------------------
    def getRequestDataFlag(self):
        return self.mRequestDataFlag

    def getRequestDataMIME(self):
        return self.mRequestDataMIME

    def getRequestDataBody(self):
        return self.mRequestDataBody

    def setResponseDataFlag(self, flag):
        self.mResponseDataFlag = flag

    def setResponseDataMIME(self, MIME):
        self.mResponseDataMIME = MIME

    def setResponseDataBody(self, body):
        self.mResponseDataBody = body

    def getResponseDataFlag(self):
        return self.mResponseDataFlag

    def getResponseDataMIME(self):
        return self.mResponseDataMIME

    def getResponseDataDody(self):
        # NOTE(review): 'Dody' is a typo for 'Body', kept for backward
        # compatibility with existing callers.
        return self.mResponseDataBody

    def getCurrentStatus(self):
        return self.mStatus

    def getProcessParam(self, key):
        """Return the process parameter for *key*, or None."""
        return self.mProcessParams.get(key, None)

    def setProcessParam(self, key, value):
        """Set a process parameter and push all parameters upstream."""
        self.mProcessParams[key] = value
        self._sendProcessParam()

    # --- Directory helpers (Windows-style '\\' separators) -----------
    def getCurrentDataDirectory(self):
        """Return (and create) '<instance>\\<state>\\<event>\\'."""
        instanceDir = self.getModelInstanceDirectory()
        if os.path.exists(instanceDir) == False:
            os.makedirs(instanceDir)
        stateDir = instanceDir + self.getCurrentRunningState() + '\\'
        if os.path.exists(stateDir) == False:
            os.makedirs(stateDir)
        eventDir = stateDir + self.getCurrentRunningEvent() + '\\'
        if os.path.exists(eventDir) == False:
            os.makedirs(eventDir)
        return eventDir

    def getMappingLibraryDirectory(self):
        """Return the mapping-library dir, guaranteed to end with '\\'."""
        # BUG FIX: the check used [:-1] (everything but the last
        # character); [-1:] tests the final character as intended, so
        # a trailing backslash is no longer appended twice.
        if self.mMappingLibDir[-1:] != '\\':
            self.mMappingLibDir = self.mMappingLibDir + '\\'
        return self.mMappingLibDir

    def getModelInstanceDirectory(self):
        """Return the instance dir, guaranteed to end with '\\'."""
        # BUG FIX: same [:-1] -> [-1:] last-character fix as above.
        if self.mInstanceDir[-1:] != '\\':
            self.mInstanceDir = self.mInstanceDir + '\\'
        return self.mInstanceDir

    def getCurrentRunningState(self):
        return self.mCurrentState

    def getCurrentRunningEvent(self):
        return self.mCurrentEvent

    def getDataFileByExt(self, ext):
        """Return all files in the current data dir with extension *ext*
        (case-insensitive, without the leading dot)."""
        ext = ext.lower()
        dire = self.getCurrentDataDirectory()
        list_files = os.listdir(dire)
        list_f = []
        for filepath in list_files:
            if ext == ModelServiceContext.getFileExtension(filepath).lower():
                list_f.append(dire + filepath)
        return list_f

    def mapZipToCurrentDataDirectory(self, zipf):
        """Extract zip archive *zipf* into the current data directory."""
        z = zipfile.ZipFile(zipf, 'r')
        dire = self.getCurrentDataDirectory()
        z.extractall(path=dire)
        z.close()

    @staticmethod
    def getFileExtension(path):
        """Return the extension of *path* without the dot ('' if none)."""
        return os.path.splitext(path)[1][1:]

    @staticmethod
    def Monitoring_thread(ms):
        """Receiver loop: parse '{Header}payload' commands and advance
        ms.mStatus so the blocked _wait4Status callers can proceed.
        Runs until a Finalize/kill command arrives."""
        while True:
            data = ms._receiveMessage()
            strCmds = data.split('\n')
            for cmd in strCmds:
                # BUG FIX: skip fragments without a '}' header (e.g.
                # the empty string after a trailing '\n'); cmd.index
                # would raise ValueError and kill this thread.
                if '}' not in cmd:
                    continue
                header = cmd[0 : cmd.index('}') + 1]
                ms.mData = cmd[cmd.index('}') + 1 : ]
                if header == '{Initialized}':
                    ms.mStatus = EModelContextStatus.EMCS_INIT_END
                elif header == '{Enter State Notified}':
                    ms.mStatus = EModelContextStatus.EMCS_STATE_ENTER_END
                elif header == '{Fire Event Notified}':
                    ms.mStatus = EModelContextStatus.EMCS_EVENT_END
                elif header == '{Request Data Notified}':
                    ms.mStatus = EModelContextStatus.EMCS_REQUEST_END
                elif header == '{Response Data Notified}':
                    ms.mStatus = EModelContextStatus.EMCS_RESPONSE_END
                elif header == '{Response Data Received}':
                    ms.mStatus = EModelContextStatus.EMCS_RESPONSE_END
                elif header == '{Post Error Info Notified}':
                    ms.mStatus = EModelContextStatus.EMCS_POST_END
                elif header == '{Post Warning Info Notified}':
                    ms.mStatus = EModelContextStatus.EMCS_POST_END
                elif header == '{Post Message Info Notified}':
                    ms.mStatus = EModelContextStatus.EMCS_POST_END
                elif header == '{Leave State Notified}':
                    ms.mStatus = EModelContextStatus.EMCS_STATE_LEAVE_END
                elif header == '{Finalize Notified}':
                    ms.mStatus = EModelContextStatus.EMCS_FINALIZE_END
                    return
                elif header == '{GetModelAssembly Notified}':
                    ms.mStatus = EModelContextStatus.EMCS_COMMON_END
                elif header == '{SetControlParams Notified}':
                    #TODO Control Parameter
                    pass
                elif header == '{kill}':
                    return sys.exit()
                else :
                    print('Unknown Command!')
                    pass
|
thread.py | import cv2, threading, queue
class ThreadingClass:
    """Threaded cv2.VideoCapture reader that keeps only the newest frame.

    A daemon thread continuously reads from the capture and replaces
    the single queued frame, bypassing OpenCV's internal buffer so
    consumers always get the most recent frame instead of a stale one.
    """
    # initiate threading class
    def __init__(self, name):
        # `name` is passed straight to cv2.VideoCapture (device index,
        # file path or stream URL).
        self.cap = cv2.VideoCapture(name)
        # define an empty queue and thread
        self.q = queue.Queue()
        t = threading.Thread(target=self._reader)
        t.daemon = True
        t.start()
    # read the frames as soon as they are available, discard any unprocessed frames;
    # this approach removes OpenCV's internal buffer and reduces the frame lag
    def _reader(self):
        while True:
            (ret, frame) = self.cap.read() # read the frames and ---
            if not ret:
                # Capture ended/failed: the thread exits.
                # NOTE(review): after this, read() blocks forever on an
                # empty queue — confirm callers handle end-of-stream.
                break
            if not self.q.empty():
                # Drop the previous frame so only the newest remains.
                # NOTE(review): empty()/get_nowait() is racy with the
                # consumer; the except below absorbs the lost race.
                try:
                    self.q.get_nowait()
                except queue.Empty:
                    pass
            self.q.put(frame) # --- store them in a queue (instead of the buffer)
    def read(self):
        # Blocks until the reader thread supplies the next frame.
        return self.q.get() # fetch frames from the queue one by one
|
main2_lps.py | from tensorflow import keras
import numpy as np
import cv2
import os
import random
import shutil
import pandas as pd
import csv
import zipfile
from tensorflow.keras import optimizers
from tensorflow.keras.models import Sequential,Model
from tensorflow.keras.layers import Dropout, Flatten, Dense,Input
from tensorflow.keras.applications.resnet_v2 import ResNet50V2
from keras.applications.xception import Xception
from keras.applications.resnet50 import ResNet50
from keras.applications.vgg16 import VGG16
from tensorflow.keras.callbacks import ModelCheckpoint
from keras.applications.imagenet_utils import preprocess_input
from keras import backend as K
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.initializers import RandomNormal
import keras.backend as k
from sklearn.utils import shuffle
import io
from PIL import Image as pil_image
from keras_retinanet import layers
import tensorflow.keras.backend as k
import keras_retinanet
import tensorflow as tf
from multiprocessing import Process, Queue, Value, Manager
from ctypes import c_char_p
import socket
import pickle
# --- Parameter-server / training configuration -----------------------
NUM_EPOCHS = 20       # training epochs; presumably used further below — confirm
TRAIN_BATCHES = 1246  # batches per epoch — presumably; confirm against the generators
NUM_ITERS = 24920     # iteration count handed to each handleWorker process
MAX_WORKERS = 1       # number of worker processes (one listening port each)
port = 17000          # base TCP port; worker i uses port + i + 1
TCP_IP = '127.0.0.1'  # parameter server binds to localhost only
fold_num=1 #Select Fold Number
# train_datagen = ImageDataGenerator(horizontal_flip=True,vertical_flip=True,zoom_range=0.05,rotation_range=360,width_shift_range=0.05,height_shift_range=0.05,shear_range=0.05)
# test_datagen = ImageDataGenerator()
# train_df =pd.read_csv('drive/My Drive/COVID-CTset/CSV/train{}.csv'.format(fold_num)) #raed train csv file
# train_df =pd.read_csv('data/CSV/train{}.csv'.format(fold_num)) #raed train csv file
# validation_df = pd.read_csv('drive/My Drive/COVID-CTset/CSV/validation{}.csv'.format(fold_num)) #raed validation csv file (Validation in the training process)
# validation_df = pd.read_csv('data/CSV/validation{}.csv'.format(fold_num)) #raed validation csv file (Validation in the training process)
# train_df = shuffle(train_df) #Shuffle the train data
# test_df = pd.read_csv('drive/My Drive/COVID-CTset/CSV/test{}.csv'.format(fold_num))#raed test csv file (For evaluating the final version of the trained network)
# test_df = pd.read_csv('data/CSV/test{}.csv'.format(fold_num))#raed test csv file (For evaluating the final version of the trained network)
shape=(512,512,1) #shape of the dataset images (in TIFF format)
print(shape[:2])
# #Create the generators
# train_generator = train_datagen.flow_from_dataframe(
# dataframe=train_df,
# directory='data',
# x_col="filename",
# y_col="class",
# target_size=shape[:2],
# batch_size=3,
# class_mode='categorical',color_mode="grayscale",shuffle=True)
# validation_generator = test_datagen.flow_from_dataframe(
# dataframe=validation_df,
# directory='data',
# x_col="filename",
# y_col="class",
# target_size=shape[:2],
# batch_size=2,
# class_mode='categorical',color_mode="grayscale",shuffle=True)
# test_generator = test_datagen.flow_from_dataframe(
# dataframe=test_df,
# directory='data',
# x_col="filename",
# y_col="class",
# target_size=shape[:2],
# batch_size=2,
# class_mode='categorical',color_mode="grayscale",shuffle=True)
def safe_recv(size, server_socket):
    """Receive up to exactly *size* bytes from *server_socket*.

    Loops until the requested number of bytes has accumulated and
    returns them as bytes. If the peer closes the connection early or
    a socket error occurs, the bytes received so far are returned.

    BUG FIX: the original spun forever when the peer closed the
    connection (recv() returning b'' never grows the buffer) and its
    bare `except` printed "Error" and kept looping on any failure;
    now an empty recv or a socket error ends the loop.
    """
    data = bytearray()
    while len(data) < size:
        try:
            chunk = server_socket.recv(size - len(data))
        except OSError:
            print("Error")
            break
        if not chunk:
            # Peer closed the connection; no more data will arrive.
            break
        data.extend(chunk)
    return bytes(data)
def handleWorker(port, gradients_q, done_flag, global_var_vals, ack_q, n):
    """Serve one worker connection for a synchronous parameter exchange.

    Listens on (TCP_IP, *port*), accepts a single worker, then for
    n + 1 rounds: receives a pickled gradient blob (17-byte pickled
    size header, then the payload), enqueues it on *gradients_q*,
    busy-waits until the aggregator sets *done_flag*, sends back the
    pickled global variable values from *global_var_vals*, and posts
    an ack to *ack_q*.

    NOTE(review): pickle.loads on network data executes arbitrary
    code if the peer is untrusted — acceptable only because both ends
    run on localhost here. The `while done_flag.value == 0: pass`
    spin-wait burns a full CPU core — confirm that is acceptable.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print("Connecting to port : ", port)
    s.bind((TCP_IP, port))
    s.listen(1)
    conn, addr = s.accept()
    print('Connection address:', addr)
    k = 0
    while 1:
        # 17-byte fixed-length pickled int: the payload size.
        size = safe_recv(17, conn)
        size = pickle.loads(size)
        data = safe_recv(size, conn)
        # print("Received size: ", size)
        local_worker_gradients = pickle.loads(data)
        # print(local_worker_gradients)
        gradients_q.put(local_worker_gradients)
        # Spin until the aggregator publishes the updated globals.
        while (done_flag.value == 0):
            pass
        size = len(global_var_vals.value)
        size = pickle.dumps(size, pickle.HIGHEST_PROTOCOL)
        conn.sendall(size)
        print("Send size: "+str(len(size)))
        conn.sendall(global_var_vals.value)
        ack_q.put(1)
        k = k + 1
        # print("Worker: ", k)
        if (k == (n + 1)):
            print("Working: Breaking from loop")
            break
    conn.close()
    s.close()
# global gradients_q
# global global_var_vals
# global ack_q
# global done_flag
# port = int(sys.argv[1])
# MAX_WORKERS = int(sys.argv[2])
# port = 17000
# MAX_WORKERS = 1
gradients_q = Queue()
ack_q = Queue()
manager = Manager()
global_var_vals = manager.Value(c_char_p, "")
done_flag = manager.Value('i', 0)
# n = int(FLAGS.max_steps / MAX_WORKERS)
# print("Each worker does ", n, " iterations")
process_list = []
for i in range(MAX_WORKERS):
process_port = port + i + 1
p = Process(target=handleWorker, args=(process_port, gradients_q, done_flag, global_var_vals, ack_q, NUM_ITERS))
p.start()
process_list.append(p)
# --- Build ResNet50V2 + Feature Pyramid Network classifier -------------------
k.clear_session() #Clear keras backend
# NOTE(review): bare except swallows any mkdir failure, not only "already
# exists" — os.makedirs('models', exist_ok=True) would be safer.
try:
    os.mkdir('models') #create folder for saving the trained networks
except:
    pass
full_name='ResNet50V2-FPN-fold{}'.format(fold_num)
classes_number=2 #Number of classes (normal and COVID-19)
input_tensor=Input(shape=shape)
# Load ImageNet weights into a throwaway model, save them, then reload by name
# into a model with our input shape so mismatched layers are skipped.
weight_model = ResNet50V2(weights='imagenet', include_top=False) #Load ResNet50V2 ImageNet pre-trained weights
weight_model.save_weights('weights.h5') #Save the weights
base_model = ResNet50V2(weights=None, include_top=False, input_tensor=input_tensor) #Load the ResNet50V2 model without weights
base_model.load_weights('weights.h5',skip_mismatch=True, by_name=True) #Load the ImageNet weights on the ResNet50V2 model except the first layer(because the first layer has one channel in our case)
#Create Feature Pyramid Network (FPN)
# We used some help for writing the Pyramid from the written code on https://github.com/fizyr/keras-retinanet/blob/master/keras_retinanet/models/retinanet.py
feature_size=256 #Set the feature channels of the FPN
layer_names = ["conv4_block1_preact_relu", "conv5_block1_preact_relu", "post_relu"] #Layers of ResNet50V2 with different scale features
layer_outputs = [base_model.get_layer(name).output for name in layer_names]
C3, C4, C5=layer_outputs #Features of different scales, extracted from ResNet50V2
# C3 = C3(base_model.input)
# C4 = C4(base_model.input)
# C5 = C5(base_model.input)
# P5: 1x1 reduce C5, keep an upsampled copy for merging into P4, then 3x3 smooth.
P5 = keras.layers.Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C5_reduced')(C5)
P5_upsampled = layers.UpsampleLike(name='P5_upsampled')([P5, C4])
P5 = keras.layers.Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P5')(P5)
# Concatenate P5 elementwise to C4
P4 = keras.layers.Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C4_reduced')(C4)
P4 = keras.layers.Concatenate(axis=3)([P5_upsampled, P4])
P4_upsampled = layers.UpsampleLike(name='P4_upsampled')([P4, C3])
# NOTE(review): unlike the P5/P6/P7 convs, this 3x3 conv (and P3's below) has no
# padding='same', so spatial size shrinks by 2 — confirm this is intended.
P4 = keras.layers.Conv2D(feature_size, kernel_size=3, strides=1, name='P4')(P4)
# Concatenate P4 elementwise to C3
P3 = keras.layers.Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C3_reduced')(C3)
P3 = keras.layers.Concatenate(axis=3)([P4_upsampled, P3])
P3 = keras.layers.Conv2D(feature_size, kernel_size=3, strides=1, name='P3')(P3)
# "P6 is obtained via a 3x3 stride-2 conv on C5"
P6 = keras.layers.Conv2D(feature_size, kernel_size=3, strides=2, padding='same', name='P6')(C5)
# "P7 is computed by applying ReLU followed by a 3x3 stride-2 conv on P6"
P7 = keras.layers.Activation('relu', name='C6_relu')(P6)
P7 = keras.layers.Conv2D(feature_size, kernel_size=3, strides=2, padding='same', name='P7')(P7)
# Run classification for each of the generated features from the pyramid
feature1 = Flatten()(P3)
dp1 = Dropout(0.5)(feature1)
preds1 = Dense(2, activation='relu',kernel_initializer=RandomNormal(mean=0.0, stddev=0.001))(dp1)
#################################################################
feature2 = Flatten()(P4)
dp2 = Dropout(0.5)(feature2)
preds2 = Dense(2, activation='relu',kernel_initializer=RandomNormal(mean=0.0, stddev=0.001))(dp2)
#################################################################
feature3 = Flatten()(P5)
dp3= Dropout(0.5)(feature3)
preds3 = Dense(2, activation='relu',kernel_initializer=RandomNormal(mean=0.0, stddev=0.001))(dp3)
#################################################################
feature4 = Flatten()(P6)
dp4 = Dropout(0.5)(feature4)
preds4 = Dense(2, activation='relu',kernel_initializer=RandomNormal(mean=0.0, stddev=0.001))(dp4)
#################################################################
feature5 = Flatten()(P7)
dp5 = Dropout(0.5)(feature5)
preds5 = Dense(2, activation='relu',kernel_initializer=RandomNormal(mean=0.0, stddev=0.001))(dp5)
#################################################################
concat=keras.layers.Concatenate(axis=1)([preds1,preds2,preds3,preds4,preds5]) #Concatenate the predictions(Classification results) of each of the pyramid features
out=keras.layers.Dense(2,activation='softmax',kernel_initializer=RandomNormal(mean=0.0, stddev=0.001))(concat) #Final Classification
# print(base_model.input)
model = Model(inputs=base_model.input, outputs=out) #Create the Training Model
#######################################################
for layer in model.layers:
    layer.trainable = True
# model.compile(optimizer=optimizers.Nadam(lr=0.0001), loss='categorical_crossentropy',metrics=['accuracy'])
filepath="models/%s-{epoch:02d}-{val_accuracy:.4f}.hdf5"%full_name # Path to save the trained models
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', save_best_only=True, mode='max') #creating checkpoint to save the best validation accuracy
callbacks_list = [checkpoint]
# model.fit_generator(train_generator, epochs=20,validation_data=validation_generator,shuffle=True,callbacks=callbacks_list) #start training
optimizer = optimizers.Nadam(lr=0.0001)
# --- Parameter-server training loop ------------------------------------------
# Each "batch" step: receive one gradient list from every worker, apply each
# sequentially (not averaged), then publish the updated weights and wait for
# all workers to acknowledge before the next round.
for epoch in range(NUM_EPOCHS):
    print("###############################################")
    # Iterate over the batches of a dataset.
    # for step, (x, y) in enumerate(train_batches):
    for tb in range(TRAIN_BATCHES):
        # with tf.GradientTape() as tape:
        # logits = model(x)
        # Compute the loss value for this batch.
        # loss_value = loss_fn(y, logits)
        # Update the state of the `accuracy` metric.
        # accuracy.update_state(y, logits)
        # Update the weights of the model to minimize the loss value.
        # gradients = tape.gradient(loss_value, model.trainable_weights)
        # print(gradients)
        # Blocks on gradients_q until each worker handler delivers its gradients.
        for w in range(MAX_WORKERS):
            recv_grads = gradients_q.get()
            # print(recv_grads[-1])
            # feed_dict = {}
            # for j, grad_var in enumerate(recv_grads):
            # feed_dict[self.placeholder_gradients[j][0]] = recv_grads[j]
            optimizer.apply_gradients(zip(recv_grads, model.trainable_weights))
            # res = self.sess.run(self.apply_grads, feed_dict=feed_dict)
        # var_val = []
        #
        # for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
        # # v_temp = self.sess.run(v, feed_dict=feed_dict)
        # v_temp = self.sess.run(v)
        # var_val.append(v_temp)
        # Snapshot the updated weights as numpy arrays and publish the pickled
        # list so the handler processes can forward it to their workers.
        weight_list = []
        for w in model.trainable_weights:
            weight_list.append(w.numpy())
        # print(var_val)
        global_var_vals.value = pickle.dumps(weight_list, pickle.HIGHEST_PROTOCOL)
        # print("New values of variables ready")
        # Handshake: raise the flag, wait for one ack per worker, then lower it.
        done_flag.value = 1
        for i in range(MAX_WORKERS):
            val = ack_q.get()
        done_flag.value = 0
# accuracy = tf.keras.metrics.CategoricalAccuracy()
# accuracy_val = tf.keras.metrics.CategoricalAccuracy()
# loss_fn = tf.keras.losses.CategoricalCrossentropy()
# optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001)
#
# STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size
# STEP_SIZE_VALID=validation_generator.n//validation_generator.batch_size
# print("Step size len")
# print(STEP_SIZE_TRAIN)
# print(STEP_SIZE_VALID)
#
# for epoch in range(20):
# print("###############################################")
# # Iterate over the batches of a dataset.
# for step, (x, y) in enumerate(train_generator):
# # model(x)
# # print(x)
# with tf.GradientTape() as tape:
# # print(k.constant(x))
# # logits = model(k.constant(x), run_eagerly=True)
# logits = model(x, training = True)
# print(logits)
# # Compute the loss value for this batch.
# loss_value = loss_fn(y, logits)
#
# # Update the state of the `accuracy` metric.
# # accuracy.update_state(y, logits)
#
# # Update the weights of the model to minimize the loss value.
# gradients = tape.gradient(loss_value, model.trainable_weights)
# print(gradients)
# print(len(model.trainable_weights))
# optimizer.apply_gradients(zip(gradients, model.trainable_weights))
#
# weight_list = []
# for w in model.trainable_weights:
# weight_list.append(w.numpy())
# i = 0
# for w in model.trainable_weights:
# w.assign(weight_list[i])
# i += 1
#
# # Logging the current accuracy value so far.
# if step % 2 == 0:
# print("Epoch:", epoch, "Step:", step, "Loss value:", loss_value.numpy())
# print("Total running accuracy so far: %.3f" % accuracy.result())
# if step > STEP_SIZE_TRAIN:
# break
#
# # Reset the metric's state at the end of an epoch
# accuracy.reset_states()
#
# total_val_loss = 0
# for step, (x, y) in enumerate(validation_generator):
# with tf.GradientTape() as tape:
# logits = model(tf.convert_to_tensor(x, dtype=tf.float32))
# # Compute the loss value for this batch.
# loss_value = loss_fn(y, logits)
#
# # Update the state of the `accuracy` metric.
# accuracy_val.update_state(y, logits)
# total_val_loss += loss_value.numpy()
# if step > STEP_SIZE_VALID:
# break
#
# # Logging the current accuracy value so far.
# print("Validation Loss value:", total_val_loss / STEP_SIZE_VALID)
# print("Total validation accuracy so far: %.3f" % (accuracy_val.result()))
# # Reset the metric's state at the end of an epoch
# accuracy_val.reset_states()
# #Model Evaluation
# trained_models=[]
# for r,d,f in os.walk('models'): #Take the path to the trained nets
# for file in f:
# if '.hdf5' in file:
# trained_models.append(os.path.join(r,file))
#
# reports=[]
# for trn_model in trained_models: #evaluate the network on each trained net
# k.clear_session()
# custom_object={'UpsampleLike': keras_retinanet.layers._misc.UpsampleLike}
# netpath=trn_model
# model_name=trn_model
# fold_num=trn_model[trn_model.index('fold')+4] #find the fold number
# net=keras.models.load_model(netpath, custom_objects=custom_object) #load model
#
# covid_label= test_generator.class_indices['covid'] #get the index of COVID-19 class
# normal_label= test_generator.class_indices['normal'] #get the index of normal class
# tp=0 #True Positives
# fp=0 #False Positives
# anum=0 #All the images numbers
# ###########
# ctp=0 #Correct classified COVID-19 cases
# cfp=0 #Wrong classified COVID-19 cases
# cfn=0 #Not classified COVID-19 cases
# ctn=0 #Correctly not classified COVID-19 cases
# cnum=0 #Number of COVID cases
# ################
# ntp=0 #Correct classified normal cases
# nfp=0 #Wrong classified normal cases
# nfn=0 #Not classified normal cases
# ntn=0 #Correctly not classified normal cases
# nnum=0 #Number of normal cases
# for num,img_name in enumerate(test_generator.filenames): #load image
# gt_ind=test_generator.classes[num] #get the loaded image class index
# img=cv2.imread(os.path.join('data',img_name),cv2.IMREAD_UNCHANGED) #load image
# pred_ind=np.argmax(net.predict(np.expand_dims(np.expand_dims(img,axis=0),axis=3))[0]) #get the predicted class index
# anum+=1 #count the number of images
# if gt_ind==covid_label:
# cnum+=1
# if pred_ind==covid_label:
# tp+=1
# ctp+=1
# ntn+=1
# else:
# fp+=1
# nfp+=1
# cfn+=1
# elif gt_ind==normal_label:
# nnum+=1
# if pred_ind==normal_label:
# ctn+=1
# ntp+=1
# tp+=1
# else:
# cfp+=1
# nfn+=1
# fp+=1
#
# overall_acc=tp/(tp+fp) #overall accuracy
# cacc=(ctp+ctn)/(ctp+ctn+cfp+cfn) #covid accurayc
# nacc=(ntp+ntn)/(ntp+ntn+nfp+nfn) #normal accuracy
# csens=ctp/(ctp+cfn) #covid sensitivity
# nsens=ntp/(ntp+nfn) #normal sensitivity
# cspec=ctn/(ctn+cfp) #covid specificity
# nspec=ntn/(ntn+nfp) #normal specificity
# cprec=ctp/(ctp+cfp) #covid precision
# nprec=ntp/(ntp+nfp) #normal precision
#
# reports.append([model_name,fold_num,tp,fp,ctp,cfn,cfp,ntp,nfn,nfp,overall_acc,cacc,nacc,csens,nsens,cspec,nspec,cprec,nprec])
#
#
# print(model_name)
# print('tp: ',tp,'fp: ',fp)
#
# with open('FPN.csv', mode='w',newline='') as csv_file:
# csvwriter = csv.writer(csv_file, delimiter=',', quotechar='"',quoting=csv.QUOTE_MINIMAL)
# csvwriter.writerow(['model_name','fold_num','tp','fp','ctp','cfn','cfp','ntp','nfn','nfp','overall_acc','cacc','nacc','csens','nsens','cspec','nspec','cprec','nprec'])
# for row in reports:
# csvwriter.writerow(row)
|
__init__.py | # This file is a part of ninfs.
#
# Copyright (c) 2017-2021 Ian Burgwin
# This file is licensed under The MIT License (MIT).
# You can find the full license text in LICENSE.md in the root of this project.
import sys
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.messagebox as mb
import webbrowser
from inspect import cleandoc
from os.path import dirname, join
from pprint import pformat
from subprocess import Popen, PIPE, STDOUT, TimeoutExpired, check_call
from threading import Thread
from traceback import format_exc
from typing import TYPE_CHECKING
from uuid import uuid4
from .about import NinfsAbout
from .confighandler import get_bool, set_bool
from .settings import NinfsSettings
from .updatecheck import thread_update_check
from .wizardcontainer import WizardContainer, WizardTypeSelector, WizardFailedMount
if TYPE_CHECKING:
from typing import Callable, Dict, List, Tuple
tutorial_url = 'https://gbatemp.net/threads/499994/'
is_windows = sys.platform == 'win32'
is_mac = sys.platform == 'darwin'
# cx_Freeze, PyInstaller, etc.
frozen = getattr(sys, 'frozen', None)
# Executable used to spawn mount subprocesses.
executable = sys.executable
# Platform-specific bits: how to detect that a mountpoint is live, and
# (Windows only) the signal/flags needed to stop mount subprocesses cleanly.
if is_windows:
    from os.path import isdir as check_mountpoint
    from signal import CTRL_BREAK_EVENT
    from subprocess import CREATE_NEW_PROCESS_GROUP
    if frozen:
        # Frozen builds ship a separate console exe next to the GUI exe;
        # subprocesses are spawned from that instead of sys.executable.
        print('Using console exe')
        executable = join(dirname(sys.executable), 'ninfs.exe')
        print(executable)
else:
    from os.path import ismount as check_mountpoint
def thread_output_reader(gui: 'NinfsGUI', proc: 'Popen', uuid: 'str', output_list: 'List[str]'):
    """Collect a mount subprocess's stdout into output_list until it exits.

    Runs on a background thread (one per mount). If the process exits with a
    nonzero code while still registered in gui.mounts — i.e. it was not
    terminated by ninfs itself — the mount entry is removed and a "crash"
    wizard window is shown with the captured output.
    """
    while proc.poll() is None:
        for line in proc.stdout:
            if line != '':
                line = line.rstrip('\r\n')
                output_list.append(line)
    # if the uuid is not in the mounts dict, then it was killed by this script
    if proc.returncode and uuid in gui.mounts:
        gui.remove_mount_info(uuid)
        wizard_window = WizardContainer(gui)
        wizard_window.change_frame(WizardFailedMount, returncode=proc.returncode, output=output_list, kind='crash')
        wizard_window.focus()
class NinfsGUI(tk.Tk):
    """Main ninfs window: lists active mounts and spawns/stops mount subprocesses."""

    def __init__(self):
        """Build the (initially hidden) main window and its mount list."""
        super().__init__()
        # uuid -> (subprocess, stdout-reader thread, captured output lines, mountpoint)
        self.mounts: Dict[str, Tuple[Popen, Thread, List[str], str]] = {}
        container = ttk.Frame(self)
        container.pack(fill=tk.BOTH, expand=True)
        # Hidden until mainloop() deiconifies it.
        self.wm_withdraw()
        self.wm_title('ninfs')
        self.ico_path = self.get_data_file(join('data', 'windows.ico'))
        self.wm_minsize(500, 300)
        self.create_menu_bar()
        style = ttk.Style(container)
        style.configure('TMenubutton', background='gainsboro')
        container.rowconfigure(0, weight=0)
        container.rowconfigure(1, weight=1)
        container.rowconfigure(2, weight=0)
        container.columnconfigure(0, weight=1)
        header = ttk.Label(container, text='Mounted contents', font=(None, 15, 'bold'), justify=tk.LEFT)
        header.grid(row=0, column=0, padx=10, pady=8, sticky=tk.W)
        mount_treeview_frame = ttk.Frame(container)
        mount_treeview_frame.grid(row=1, column=0, sticky=tk.NSEW, padx=10)
        mount_treeview_frame.rowconfigure(0, weight=1)
        mount_treeview_frame.columnconfigure(0, weight=1)
        mount_treeview_frame.columnconfigure(1, weight=0)
        # Treeview acts as the table of active mounts; rows are keyed by mount uuid.
        self.mount_treeview = ttk.Treeview(mount_treeview_frame)
        self.mount_treeview.grid(row=0, column=0, sticky=tk.NSEW)
        self.mount_treeview.configure(columns=('mount_path', 'mount_type', 'mounted_item'), show='headings')
        self.mount_treeview.column('mount_path', width=100, anchor=tk.W)
        self.mount_treeview.heading('mount_path', text='Mount Path')
        self.mount_treeview.column('mount_type', width=50, anchor=tk.W)
        self.mount_treeview.heading('mount_type', text='Type')
        self.mount_treeview.column('mounted_item', width=200, anchor=tk.W)
        self.mount_treeview.heading('mounted_item', text='Mounted Content')
        mount_treeview_scrollbar = ttk.Scrollbar(mount_treeview_frame, orient=tk.VERTICAL,
                                                 command=self.mount_treeview.yview)
        self.mount_treeview.configure(yscrollcommand=mount_treeview_scrollbar.set)
        mount_treeview_scrollbar.grid(row=0, column=1, sticky=tk.NS)
        actions_frame = ttk.Frame(container)
        actions_frame.grid(row=2, column=0, padx=10, pady=10, sticky=tk.W)
        new_mount_button = ttk.Button(actions_frame, text='New mount', command=self.show_wizard)
        new_mount_button.pack(side=tk.LEFT)
        unmount = ttk.Button(actions_frame, text='Unmount selected', command=self.unmount_selected)
        unmount.pack(side=tk.LEFT)
        # Intercept window close so mounts can be cleaned up first.
        self.wm_protocol('WM_DELETE_WINDOW', self.on_close)

    def get_data_file(self, path):
        """Return the absolute path of a data file shipped next to this module."""
        return join(dirname(__file__), path)

    def set_icon(self, wm: 'tk.Wm'):
        """Apply the ninfs icon to a window (currently Windows only)."""
        # just in case this also works on other platforms, but this assumption is likely wrong lol
        # TODO: fix this for Linux
        if is_windows:
            wm.wm_iconbitmap(self.ico_path)

    def check_fuse(self):
        """Try to import fusepy; prompt for the platform FUSE driver on failure.

        Returns True if fuse loaded successfully, False otherwise.
        """
        try:
            import fuse
        except EnvironmentError as e:
            if e.args[0] == 'Unable to find libfuse':
                if is_windows:
                    res = mb.askyesno('Failed to load libfuse',
                                      'Failed to load libfuse. WinFsp needs to be installed.\n\n'
                                      'Would you like to open the WinFsp download page?\n'
                                      'http://www.secfs.net/winfsp/rel/')
                    if res:
                        webbrowser.open('http://www.secfs.net/winfsp/rel/')
                elif is_mac:
                    res = mb.askyesno('Failed to load libfuse',
                                      'Failed to load libfuse. FUSE for macOS needs to be installed.\n\n'
                                      'Would you like to open the FUSE for macOS download page?\n'
                                      'https://osxfuse.github.io')
                    if res:
                        webbrowser.open('https://osxfuse.github.io')
                else:
                    # NOTE(review): showerror is given a single argument (used as the
                    # title) with no message body — confirm this is intended.
                    mb.showerror('Failed to load libfuse.')
            return False
        except Exception:
            mb.showerror('Failed to import fusepy', 'Failed to import fusepy.\n\n' + format_exc())
            return False
        return True

    def mainloop(self, n=0):
        """Show the window, run the first-launch update-check prompt, then loop."""
        self.wm_deiconify()
        if not get_bool('internal', 'askedonlinecheck'):
            message = '''
            Check for updates online?
            This will make a request to GitHub every time the ninfs gui is opened.
            This can be changed any time in Settings.
            '''
            if mb.askyesno('Check for updates', cleandoc(message)):
                set_bool('update', 'onlinecheck', True)
            set_bool('internal', 'askedonlinecheck', True)
        if get_bool('update', 'onlinecheck'):
            # Run the update check off the UI thread.
            update_thread = Thread(target=thread_update_check, args=(self,))
            update_thread.start()
        super().mainloop(n)

    def on_close(self):
        """Window-close handler: confirm, force-unmount everything, then exit."""
        if self.mounts:
            if mb.askokcancel('Exiting', 'Closing ninfs will unmount all contents.'):
                self.unmount_all(force=True)
                self.destroy()
        else:
            self.destroy()

    def show_settings(self):
        """Open the settings window."""
        settings_window = NinfsSettings(self)
        settings_window.focus()

    def show_wizard(self):
        """Open the new-mount wizard at the type-selection step."""
        wizard_window = WizardContainer(self)
        wizard_window.change_frame(WizardTypeSelector)
        wizard_window.focus()

    def mount(self, mounttype: 'str', cmdargs: 'List[str]', mountpoint: str, callback_success: 'Callable',
              callback_failed: 'Callable'):
        """Spawn a mount subprocess and poll until it mounts or dies.

        Calls callback_success once the mountpoint appears, or
        callback_failed(returncode, output_lines) if the subprocess exits first.
        """
        args = [executable]
        if not frozen:
            # Running from source: pass the package directory so the child runs ninfs.
            args.append(dirname(dirname(__file__)))
        args.extend(cmdargs)
        # '-f' keeps the mount process in the foreground so we can watch it.
        args.append('-f')
        args.append(mountpoint)
        popen_opts = {}
        if is_windows:
            # New process group so CTRL_BREAK_EVENT can be delivered on unmount.
            popen_opts['creationflags'] = CREATE_NEW_PROCESS_GROUP
        uuid = str(uuid4())
        output_list = ['Command: ' + pformat(args), '']
        proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=STDOUT, encoding='utf-8', **popen_opts)
        thread = Thread(target=thread_output_reader, args=(self, proc, uuid, output_list))
        mount_info = (proc, thread, output_list, mountpoint)
        def check_loop():
            # Re-scheduled every 500 ms until the mount appears or the child exits.
            if check_mountpoint(mountpoint):
                self.mount_treeview.insert('', tk.END, text=uuid, iid=uuid, values=(mountpoint, mounttype, cmdargs[1]))
                self.mounts[uuid] = mount_info
                callback_success()
                return
            if proc.poll() is not None:
                thread.join()
                callback_failed(proc.returncode, output_list)
                return
            self.after(500, check_loop)
        thread.start()
        self.after(500, check_loop)

    def unmount_selected(self):
        """Unmount every row currently selected in the treeview."""
        # the mounts dict gets modified during iteration, so the list of keys is cloned to prevent issues
        # it's also done in reverse, since later mounts might be based on earlier ones
        selection = self.mount_treeview.selection()
        for s in reversed(selection):
            self.unmount(s)

    def unmount_all(self, *, force: bool = False):
        """Unmount every active mount; force kills unresponsive subprocesses."""
        # the mounts dict gets modified during iteration, so the list of keys is cloned to prevent issues
        # it's also done in reverse, since later mounts might be based on earlier ones
        for uuid in reversed(self.mount_treeview.get_children()):
            self.unmount(uuid, force=force)

    def remove_mount_info(self, uuid: str):
        """Drop a mount from both the treeview and the mounts dict."""
        self.mount_treeview.delete(uuid)
        del self.mounts[uuid]

    def unmount(self, uuid: 'str', *, force: bool = False):
        """Unmount one mount by uuid, using the platform's unmount mechanism."""
        mount_info = self.mounts[uuid]
        if is_windows:
            # Ask the child (in its own process group) to shut down cleanly.
            mount_info[0].send_signal(CTRL_BREAK_EVENT)
            try:
                mount_info[0].wait(3)
            except TimeoutExpired:
                if force:
                    self.remove_mount_info(uuid)
                    mount_info[0].kill()
                else:
                    res = mb.askyesno('Mount not responding', 'The mount subprocess is not responding.\nTerminate it?')
                    if res:
                        self.remove_mount_info(uuid)
                        mount_info[0].kill()
            else:
                self.remove_mount_info(uuid)
        else:
            # I think this is cheating
            if is_mac:
                check_call(['diskutil', 'unmount', mount_info[3]])
                self.remove_mount_info(uuid)
            else:
                # assuming linux or bsd, which have fusermount
                check_call(['fusermount', '-u', mount_info[3]])
                self.remove_mount_info(uuid)

    @staticmethod
    def show_tutorial():
        """Open the GBAtemp tutorial thread in the default browser."""
        webbrowser.open(tutorial_url)

    def show_about(self):
        """Open the About window."""
        about_window = NinfsAbout(self)
        about_window.focus()

    def create_menu_bar(self):
        """Build the menu bar, with macOS-specific Apple-menu integration."""
        self.option_add('*tearOff', tk.FALSE)
        menubar = tk.Menu(self)
        if is_mac:
            # On macOS, About/Preferences live in the application menu.
            apple_menu = tk.Menu(menubar, name='apple')
            apple_menu.add_command(label='About ninfs', command=self.show_about)
            apple_menu.add_separator()
            menubar.add_cascade(menu=apple_menu)
            self.createcommand('tk::mac::ShowPreferences', self.show_settings)
        file_menu = tk.Menu(menubar)
        if not is_mac:
            file_menu.add_command(label='Settings', command=self.show_settings)
        help_menu = tk.Menu(menubar)
        help_menu.add_command(label='Open tutorial on GBAtemp', command=self.show_tutorial)
        if not is_mac:
            help_menu.add_command(label='About ninfs', command=self.show_about)
        menubar.add_cascade(label='File', menu=file_menu)
        menubar.add_cascade(label='Help', menu=help_menu)
        self.configure(menu=menubar)
def start_gui():
    """Create the GUI, do Windows console setup, and run the main loop.

    Returns a process exit code: 0 on normal exit, 70 if FUSE could not load.
    """
    window = NinfsGUI()
    if is_windows:
        from ctypes import windll, get_last_error
        from os import environ, getpid
        # Let child processes find the GUI's pid via the environment.
        environ['NINFS_GUI_PARENT_PID'] = str(getpid())
        if not windll.kernel32.GetConsoleWindow():
            # if there is no console, make one and hide it
            # this is not an elegant solution but it lets us use send_signal on subprocesses
            if not windll.kernel32.AllocConsole():
                # AllocConsole fails when I'm testing in PyCharm but get_last_error returns 0, meaning it succeeded.
                # I don't know why this happens.
                err = get_last_error()
                if err:
                    print('Failed to use AllocConsole:', err)
            else:
                windll.user32.ShowWindow(windll.kernel32.GetConsoleWindow(), 0)  # SW_HIDE
    if window.check_fuse():
        window.mainloop()
        return 0
    else:
        return 70
|
relay_integration.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable,invalid-name
"""
Integrate auto_scheduler into relay. It implements the following items:
1. Extract search tasks from a relay program
2. Provide auto-scheduling for all TOPI compute functions
"""
import logging
import threading
import tvm
from tvm import autotvm, transform
from tvm.ir.transform import PassContext
from tvm.runtime import convert_to_object
from tvm.te.tensor import ComputeOp, PlaceholderOp, Tensor
from tvm.tir import Reduce
from tvm.tir import expr as _expr
from tvm.target import Target
from . import _ffi_api
from .compute_dag import ComputeDAG, LayoutRewriteOption
from .dispatcher import DispatchContext
from .search_task import SearchTask
from .utils import get_const_tuple
from .workload_registry import register_workload_tensors
logger = logging.getLogger("auto_scheduler")
def call_all_topi_funcs(mod, params, target):
    """Call all TOPI compute to extract auto_scheduler tasks in a Relay program"""
    # pylint: disable=import-outside-toplevel
    from tvm import relay
    from tvm.relay.backend import graph_executor_codegen
    # Turn off AutoTVM config not found warnings
    old_autotvm_silent = autotvm.GLOBAL_SCOPE.silent
    autotvm.GLOBAL_SCOPE.silent = True
    # Compile with auto_scheduler enabled so every TOPI call is traced; layout
    # rewrite is disabled because we only want the original workloads here.
    with transform.PassContext(
        opt_level=3,
        config={
            "relay.backend.use_auto_scheduler": True,
            "relay.backend.disable_compile_engine_cache": True,
        },
        disabled_pass={"AutoSchedulerLayoutRewrite"},
    ):
        try:
            opt_mod, _ = relay.optimize(mod, target, params)
            grc = graph_executor_codegen.GraphExecutorCodegen(None, target)
            grc.codegen(opt_mod["main"])
        except tvm.TVMError:
            # Graph executor codegen cannot handle all programs (e.g. control
            # flow); fall back to the VM compiler for task extraction.
            print(
                "Get errors with GraphExecutorCodegen for task extraction. "
                "Fallback to VMCompiler."
            )
            compiler = relay.vm.VMCompiler()
            if params:
                compiler.set_params(params)
            mod = tvm.IRModule.from_expr(mod) if isinstance(mod, relay.Function) else mod
            compiler.lower(mod, target)
    # Restore the caller's AutoTVM warning setting.
    autotvm.GLOBAL_SCOPE.silent = old_autotvm_silent
def extract_tasks(
    mod, params, target, target_host=None, hardware_params=None, include_simple_tasks=False
):
    """Extract tuning tasks from a relay program.
    Parameters
    ----------
    mod: tvm.IRModule or relay.function.Function
        The module or function to tune
    params: dict of str to numpy array
        The associated parameters of the program
    target: Union[tvm.target.Target, str]
        The compilation target
    target_host: Optional[Union[tvm.target.Target, str]]
        The host compilation target
    hardware_params : Optional[HardwareParams]
        Hardware parameters used for the search tasks
    include_simple_tasks: bool
        Whether to extract simple tasks that do not include complicated ops.
    Returns
    -------
    tasks: List[SearchTask]
        The tasks in this network
    weights: List[int]
        The weight (i.e. the number of appearance) of extracted tasks
    """
    # pylint: disable=import-outside-toplevel
    target, target_host = Target.check_and_update_host_consist(target, target_host)
    # Run the compiler to collect all TOPI calls during compilation.
    env = TracingEnvironment(
        TracingMode.EXTRACT_TASK if include_simple_tasks else TracingMode.EXTRACT_COMPLEX_TASK_ONLY
    )
    # Silence the dispatcher while tracing; restored after extraction.
    dispatch_ctx = DispatchContext.current
    old_verbose = dispatch_ctx.verbose
    dispatch_ctx.verbose = 0
    with env:
        # Wrap build call in a new thread to avoid the conflict
        # between python's multiprocessing and tvm's thread pool
        build_thread = threading.Thread(target=call_all_topi_funcs, args=(mod, params, target))
        build_thread.start()
        build_thread.join()
    dispatch_ctx.verbose = old_verbose
    # create search tasks
    tasks = []
    weights = []
    for (func_name, wkl_key), weight in env.wkl_key_to_weight.items():
        tasks.append(
            SearchTask(
                workload_key=wkl_key,
                target=target,
                hardware_params=hardware_params,
                # When auto scheduler is used in end to end network, try to apply layout rewrite
                # to improve the overall performance
                layout_rewrite_option=LayoutRewriteOption.get_target_default(target, True),
                task_inputs=(
                    env.wkl_key_to_input_names[wkl_key]
                    if wkl_key in env.wkl_key_to_input_names
                    else None
                ),
                task_inputs_save_to_file=True,
                desc=func_name,
            )
        )
        weights.append(weight)
    return tasks, weights
class TracingMode:
    """Enumeration of the tracing modes used by TracingEnvironment."""

    # Trace every topi call to extract search tasks.
    EXTRACT_TASK = 0
    # Like EXTRACT_TASK, but skip tasks that contain no complex (reduce) ops.
    EXTRACT_COMPLEX_TASK_ONLY = 1
    # Trace topi calls in order to prepare a layout rewrite.
    PREPARE_LAYOUT_REWRITE = 2
class TracingEnvironment:
    """Global environment that records all topi function calls while active.

    Used as a context manager; the active instance is exposed through the
    class attribute ``current``.
    """

    current = None

    def __init__(self, tracing_mode):
        # One of the TracingMode values.
        self.tracing_mode = tracing_mode
        self.relay_disable_build_cache = "false"
        # (func_name, workload_key) -> number of times the workload appeared
        self.wkl_key_to_weight = {}
        # workload_key -> list of special task input names
        self.wkl_key_to_input_names = {}

    def __enter__(self):
        TracingEnvironment.current = self
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        TracingEnvironment.current = None

    def add_workload_key(self, func_name, workload_key):
        """Record one occurrence of a search task's workload key.

        Parameters
        ----------
        func_name: str
            The function name of the task.
        workload_key: str
            The workload key of a task.
        """
        key = (func_name, workload_key)
        self.wkl_key_to_weight[key] = self.wkl_key_to_weight.get(key, 0) + 1

    def add_workload_input_names(self, workload_key, input_names):
        """Associate special task input names with a workload.

        Parameters
        ----------
        workload_key : str
            The workload key of a task.
        input_names : List[str]
            A list of input names.
        """
        self.wkl_key_to_input_names[workload_key] = input_names
@tvm._ffi.register_func("auto_scheduler.enter_layout_rewrite")
def enter_layout_rewrite():
    """Enter layout rewrite tracing environment"""
    # Opened via __enter__ without a `with` block so the C++ side can hold it
    # open; the matching exit_layout_rewrite below closes it.
    env = TracingEnvironment(TracingMode.PREPARE_LAYOUT_REWRITE)
    env.__enter__()
@tvm._ffi.register_func("auto_scheduler.exit_layout_rewrite")
def exit_layout_rewrite():
    """Exit layout rewrite tracing environment"""
    # Assumes enter_layout_rewrite was called first (current is not None).
    env = TracingEnvironment.current
    env.__exit__(None, None, None)
def traverse_to_get_io_tensors(outs):
    """Traverse from a list of output tensors to get input/output tensors and
    other useful information.
    Parameters
    ----------
    outs: List[Tensor]
        The output tensors
    Returns
    -------
    io_tensors: List[Tensor]
        The input and output tensors with static shape
    has_layout_free: bool
        Whether the compute DAG has layout_free placeholders
    has_complex_op: bool
        Whether the topi compute function includes at least one complex (reduce) op
    """
    layout_free_ops = []
    inputs = []
    has_complex_op = False
    visited = set()

    def traverse(t):
        nonlocal has_complex_op
        # We cannot directly add tensors to the set, because the comparison of
        # two tensors with ndim=0 is ambiguous.
        assert t.handle is not None
        if t.handle.value in visited:
            return
        if isinstance(t.op, PlaceholderOp):
            inputs.append(t)
        elif isinstance(t.op, ComputeOp):
            # A reduce anywhere in the op body marks the compute as "complex".
            has_complex_op = has_complex_op or any(isinstance(e, Reduce) for e in t.op.body)
            if "layout_free_placeholders" in t.op.attrs:
                layout_free_ops.append(t.op)
            for x in t.op.input_tensors:
                traverse(x)
        visited.add(t.handle.value)

    for t in outs:
        traverse(t)
    io_tensors = inputs + list(outs)
    for tensor in io_tensors:
        # Reject the compute if any of its I/O tensors has dynamic shape.
        if any(not isinstance(v, int) for v in get_const_tuple(tensor.shape)):
            return ([], False, False)
    return (io_tensors, len(layout_free_ops) > 0, has_complex_op)
@tvm._ffi.register_func("auto_scheduler.relay_integration.auto_schedule_topi_compute")
def auto_schedule_topi(func_name, outs):
    """Use auto-scheduler to schedule any topi compute function.
    Note: This is used internally for relay integration. Do
    not use this as a general user-facing API.
    Parameters
    ----------
    func_name: str
        The name of the function being scheduled.
    outs: List[Tensor]
        The output tensors of topi compute functions
    Returns
    -------
    sch: Optional[te.Schedule]
        A tuned schedule or none (if not tuned) in the final build mode;
        None in the tracing mode so that the fallback topi schedule will be used.
    """
    # pylint: disable=import-outside-toplevel
    from tvm.auto_scheduler.measure import (
        prepare_input_map,
    )  # lazily import to avoid recursive dependency
    io_tensors, has_layout_free, has_complex_op = traverse_to_get_io_tensors(outs)
    if not io_tensors:  # The compute includes dynamic shapes which are not supported yet.
        return None
    try:
        dag = ComputeDAG(io_tensors)
    except tvm.error.TVMError as err:
        logger.info("Failed to create a ComputeDAG for auto_scheduler: %s", str(err))
        return None
    key = register_workload_tensors(dag.workload_key(), io_tensors)
    target = tvm.target.Target.current()
    dispatch_ctx = DispatchContext.current
    state = dispatch_ctx.query(target, key, has_complex_op, dag, func_name)
    schedule = None
    env = TracingEnvironment.current
    if env is None:
        # in the final build mode
        if state is None:
            return None
        schedule, _ = dag.apply_steps_from_state(state)
        return schedule
    if env.tracing_mode in [TracingMode.EXTRACT_TASK, TracingMode.EXTRACT_COMPLEX_TASK_ONLY]:
        # in the task extraction mode
        if has_complex_op or env.tracing_mode == TracingMode.EXTRACT_TASK:
            env.add_workload_key(func_name, key)
            input_map = prepare_input_map(io_tensors)
            if input_map:
                env.add_workload_input_names(key, list(input_map.values()))
    elif env.tracing_mode == TracingMode.PREPARE_LAYOUT_REWRITE:
        # in prepare_layout_rewrite mode
        if (
            LayoutRewriteOption.get_target_default(target, True) != LayoutRewriteOption.NO_REWRITE
            and has_layout_free
        ):
            if state is None:
                return None
            # rewrite the layout and update the context for the new dag
            new_dag = dag.rewrite_layout_from_state(state)
            new_key = new_dag.workload_key()
            if new_key != key:
                dispatch_ctx.update(target, new_key, state)
    else:
        # BUGFIX: tracing_mode is an int; concatenating it to a str raised
        # TypeError instead of the intended ValueError. Convert explicitly.
        raise ValueError("Invalid tracing mode: " + str(env.tracing_mode))
    return schedule
def tensor_no_check_call(self, *indices):
    """An indexing function without any check.
    This is the same as `tvm.te.Tensor::__call__` except that the safety
    check is removed.
    """
    indices = convert_to_object(indices)
    args = []
    for x in indices:
        if isinstance(x, _expr.PrimExpr):
            args.append(x)
        elif isinstance(x, _expr.IterVar):
            # IterVars are replaced by their underlying loop variable.
            args.append(x.var)
        else:
            raise ValueError("The indices must be expression")
    # Build the load directly, bypassing Tensor.__call__'s bounds/type checks.
    return _expr.ProducerLoad(self, args)
def remove_index_check(tensor):
    """Disable the index safety check on *tensor*.

    Monkey patches the tensor's `__call__` with `tensor_no_check_call`,
    so subsequent indexing may produce a temporarily wrong IR that is
    repaired elsewhere.

    Parameters
    ----------
    tensor: Tensor
        The tensor whose index check is removed.
    """
    # Bind the unchecked indexing function as a method of this instance.
    unchecked = tensor_no_check_call.__get__(tensor, Tensor)
    tensor.__call__ = unchecked
def rewrite_compute_body(compute_tensor, new_layout):
    """Rebuild a ComputeOp's body for a new layout of its single
    layout-free placeholder, returning the rewritten output tensor(s)."""
    op = compute_tensor.op

    # Exactly one layout-free placeholder is supported.
    free_placeholders = op.attrs["layout_free_placeholders"]
    assert len(free_placeholders) == 1, "Only support one layout free placeholder"
    placeholder_op = free_placeholders[0].op

    # Rewrite every index expression in the body for the new layout.
    new_body = [
        _ffi_api.RewriteIndexForNewLayout(placeholder_op, new_layout, expr)
        for expr in op.body
    ]
    rebuilt = tvm.te._ffi_api.ComputeOp(op.name, op.tag, op.attrs, op.axis, new_body)

    n_outputs = rebuilt.num_outputs
    if n_outputs == 1:
        return rebuilt.output(0)
    return tuple(rebuilt.output(i) for i in range(n_outputs))
def is_auto_scheduler_enabled():
    """Return whether the auto-scheduler is enabled.

    Reads the `relay.backend.use_auto_scheduler` flag from the current
    PassContext configuration; defaults to False when unset.

    Returns
    -------
    enabled: bool
        Whether the auto-scheduler is enabled
    """
    # Fix: the original docstring documented this as a "Parameters" section,
    # but the function takes no arguments -- it describes the return value.
    return PassContext.current().config.get("relay.backend.use_auto_scheduler", False)
|
roll_pair.py | # -*- coding: utf-8 -*-
# Copyright (c) 2019 - now, Eggroll Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import uuid
from concurrent.futures import wait, FIRST_EXCEPTION
from threading import Thread
from eggroll.core.aspects import _method_profile_logger
from eggroll.core.client import CommandClient
from eggroll.core.command.command_model import CommandURI
from eggroll.core.conf_keys import SessionConfKeys
from eggroll.core.constants import StoreTypes, SerdesTypes, PartitionerTypes
from eggroll.core.datastructure.broker import FifoBroker
from eggroll.core.meta_model import ErStoreLocator, ErJob, ErStore, ErFunctor, \
ErTask, ErPair, ErPartition
from eggroll.core.serdes import cloudpickle
from eggroll.core.session import ErSession
from eggroll.core.utils import generate_job_id, generate_task_id
from eggroll.core.utils import string_to_bytes, hash_code
from eggroll.roll_pair import create_serdes
from eggroll.roll_pair.transfer_pair import TransferPair, BatchBroker
from eggroll.roll_pair.utils.gc_utils import GcRecorder
from eggroll.roll_pair.utils.pair_utils import partitioner
from eggroll.utils.log_utils import get_logger
L = get_logger()
def runtime_init(session: ErSession):
    """Build and return a RollPairContext bound to *session*."""
    return RollPairContext(session=session)
class RollPairContext(object):
    """Per-session context for creating and managing RollPair stores.

    Holds session-wide defaults (store type, serdes), routes partitions to
    egg processors via the session, and registers a GC task that destroys
    recorded temporary stores when the session exits.
    """

    def __init__(self, session: ErSession):
        self.__session = session
        self.session_id = session.get_session_id()
        self.default_store_type = StoreTypes.ROLLPAIR_LMDB
        self.default_store_serdes = SerdesTypes.PICKLE
        self.deploy_mode = session.get_option(SessionConfKeys.CONFKEY_SESSION_DEPLOY_MODE)
        self.__session_meta = session.get_session_meta()
        # Ensure recorded stores are cleaned up when the session shuts down.
        self.__session.add_exit_task(self.context_gc)
        # When True, RollPair instances may destroy their stores on __del__.
        self.rpc_gc_enable = True
        self.gc_recorder = GcRecorder(self)

    def set_store_type(self, store_type: str):
        """Override the default store type used by load()."""
        self.default_store_type = store_type

    def set_store_serdes(self, serdes_type: str):
        """Override the default serdes used by load()."""
        self.default_store_serdes = serdes_type

    def set_session_gc_enable(self):
        """Allow RollPair instances created from this context to GC their stores."""
        self.rpc_gc_enable = True

    def set_session_gc_disable(self):
        """Prevent RollPair instances created from this context from GC'ing their stores."""
        self.rpc_gc_enable = False

    def get_session(self):
        """Return the underlying ErSession."""
        return self.__session

    def get_roll(self):
        """Return the first roll processor of the session.

        Raises
        ------
        ValueError
            If the processor's command endpoint has no host or port.
        """
        ret = self.__session._rolls[0]
        if not ret._command_endpoint._host or not ret._command_endpoint._port:
            L.error(f"invalid roll processor:{ret}, session_meta:{self.__session_meta}")
            raise ValueError(f"invalid roll endpoint:{ret}")
        return ret

    def context_gc(self):
        """Session-exit task: destroy every store recorded by the GC recorder."""
        if self.gc_recorder.gc_recorder is None:
            L.error("rp context gc_recorder is None!")
            return
        for item in list(self.gc_recorder.gc_recorder.get_all()):
            L.debug("cleanup item:{}".format(item))
            # item[0] is presumably the store name — confirm against GcRecorder.
            name = item[0]
            rp = self.load(namespace=self.session_id, name=name)
            rp.destroy()

    def route_to_egg(self, partition: ErPartition):
        """Resolve the egg processor that serves *partition*."""
        return self.__session.route_to_egg(partition)

    def populate_processor(self, store: ErStore):
        """Return a copy of *store* whose partitions carry their resolved egg processors."""
        populated_partitions = list()
        for p in store._partitions:
            pp = ErPartition(id=p._id, store_locator=p._store_locator, processor=self.route_to_egg(p))
            populated_partitions.append(pp)
        return ErStore(store_locator=store._store_locator, partitions=populated_partitions, options=store._options)

    def load(self, namespace=None, name=None, options: dict = None):
        """Open (or create) a store and wrap it in a RollPair.

        Options of interest: store_type, total_partitions, partitioner,
        serdes, create_if_missing (default True).
        """
        if options is None:
            options = {}
        store_type = options.get('store_type', self.default_store_type)
        total_partitions = options.get('total_partitions', 1)
        partitioner = options.get('partitioner', PartitionerTypes.BYTESTRING_HASH)
        store_serdes = options.get('serdes', self.default_store_serdes)
        create_if_missing = options.get('create_if_missing', True)
        # todo:1: add combine options to pass it through
        store_options = self.__session.get_all_options()
        store_options.update(options)
        final_options = store_options.copy()

        # Strip keys that must not leak into the ErStore options.
        # TODO:1: tostring in er model
        if 'create_if_missing' in final_options:
            del final_options['create_if_missing']
        # TODO:1: remove these codes by adding to string logic in ErStore
        if 'include_key' in final_options:
            del final_options['include_key']
        if 'total_partitions' in final_options:
            del final_options['total_partitions']
        if 'name' in final_options:
            del final_options['name']
        if 'namespace' in final_options:
            del final_options['namespace']
        # TODO:1: remove these codes by adding to string logic in ErStore
        if 'keys_only' in final_options:
            del final_options['keys_only']
        # TODO:0: add 'error_if_exist, persistent / default store type'
        L.info("final_options:{}".format(final_options))

        store = ErStore(
            store_locator=ErStoreLocator(
                store_type=store_type,
                namespace=namespace,
                name=name,
                total_partitions=total_partitions,
                partitioner=partitioner,
                serdes=store_serdes),
            options=final_options)

        if create_if_missing:
            result = self.__session._cluster_manager_client.get_or_create_store(store)
        else:
            result = self.__session._cluster_manager_client.get_store(store)
            if result is None:
                raise EnvironmentError(
                    "result is None, please check whether the store:{} has been created before".format(store))
        return RollPair(self.populate_processor(result), self)

    # TODO:1: separates load parameters and put all parameters
    def parallelize(self, data, options: dict = None):
        """Create a store (in-memory by default) and fill it with *data* via put_all."""
        if options is None:
            options = {}
        namespace = options.get("namespace", None)
        name = options.get("name", None)
        options['store_type'] = options.get("store_type", StoreTypes.ROLLPAIR_IN_MEMORY)
        create_if_missing = options.get("create_if_missing", True)

        if namespace is None:
            namespace = self.session_id
        if name is None:
            name = str(uuid.uuid1())
        rp = self.load(namespace=namespace, name=name, options=options)
        return rp.put_all(data, options=options)

    '''store name only supports full name and reg: *, *abc ,abc* and a*c'''
    def cleanup(self, namespace, name, options: dict = None):
        """Destroy every store in *namespace* whose name matches *name*.

        The *name* pattern supports full names and simple wildcards
        (``*``, ``*abc``, ``abc*``, ``a*c``) as noted above.
        """
        if options is None:
            options = {}
        total_partitions = options.get('total_partitions', 1)
        partitioner = options.get('partitioner', PartitionerTypes.BYTESTRING_HASH)
        store_serdes = options.get('serdes', self.default_store_serdes)

        # todo:1: add combine options to pass it through
        store_options = self.__session.get_all_options()
        store_options.update(options)
        final_options = store_options.copy()

        # Same option filtering as load() — duplicated logic; candidate for a shared helper.
        # TODO:1: tostring in er model
        if 'create_if_missing' in final_options:
            del final_options['create_if_missing']
        # TODO:1: remove these codes by adding to string logic in ErStore
        if 'include_key' in final_options:
            del final_options['include_key']
        if 'total_partitions' in final_options:
            del final_options['total_partitions']
        if 'name' in final_options:
            del final_options['name']
        if 'namespace' in final_options:
            del final_options['namespace']
        # TODO:1: remove these codes by adding to string logic in ErStore
        if 'keys_only' in final_options:
            del final_options['keys_only']
        # TODO:0: add 'error_if_exist, persistent / default store type'
        L.info("final_options:{}".format(final_options))

        store = ErStore(
            store_locator=ErStoreLocator(
                store_type=StoreTypes.ROLLPAIR_LMDB,
                namespace=namespace,
                name=name,
                total_partitions=total_partitions,
                partitioner=partitioner,
                serdes=store_serdes),
            options=final_options)
        results = self.__session._cluster_manager_client.get_store_from_namespace(store)
        L.debug('res:{}'.format(results._stores))

        if results._stores is not None:
            L.debug("item count:{}".format(len(results._stores)))
            for item in results._stores:
                L.debug("item namespace:{} name:{}".format(item._store_locator._namespace,
                                                          item._store_locator._name))
                rp = RollPair(er_store=item, rp_ctx=self)
                rp.destroy()
def default_partitioner(k):
    """Fallback partitioner: every key maps to partition 0."""
    return 0
def default_egg_router(k):
    """Fallback egg router: every key maps to egg 0."""
    return 0
class RollPair(object):
ROLL_PAIR_URI_PREFIX = 'v1/roll-pair'
EGG_PAIR_URI_PREFIX = 'v1/egg-pair'
RUN_JOB = 'runJob'
RUN_TASK = 'runTask'
AGGREGATE = 'aggregate'
COLLAPSE_PARTITIONS = 'collapsePartitions'
COUNT = 'count'
DELETE = "delete"
DESTROY = "destroy"
FILTER = 'filter'
FLAT_MAP = 'flatMap'
GET = "get"
GET_ALL = "getAll"
GLOM = 'glom'
JOIN = 'join'
MAP = 'map'
MAP_PARTITIONS = 'mapPartitions'
MAP_VALUES = 'mapValues'
PUT = "put"
PUT_ALL = "putAll"
REDUCE = 'reduce'
SAMPLE = 'sample'
SUBTRACT_BY_KEY = 'subtractByKey'
UNION = 'union'
SERIALIZED_NONE = cloudpickle.dumps(None)
def __setstate__(self, state):
self.gc_enable = None
pass
def __getstate__(self):
pass
def __init__(self, er_store: ErStore, rp_ctx: RollPairContext):
self.__store = er_store
self.ctx = rp_ctx
self.__command_serdes = SerdesTypes.PROTOBUF
self.__roll_pair_master = self.ctx.get_roll()
self.__command_client = CommandClient()
self.functor_serdes =create_serdes(SerdesTypes.CLOUD_PICKLE)
self.value_serdes = self.get_store_serdes()
self.key_serdes = self.get_store_serdes()
self.partitioner = partitioner(hash_code, self.__store._store_locator._total_partitions)
self.egg_router = default_egg_router
self.__session_id = self.ctx.session_id
self.gc_enable = rp_ctx.rpc_gc_enable
self.gc_recorder = rp_ctx.gc_recorder
self.gc_recorder.record(er_store)
def __del__(self):
if self.ctx.get_session().is_stopped():
L.info('session:{} has already been stopped'.format(self.__session_id))
return
if not hasattr(self, 'gc_enable') or not self.gc_enable:
return
if self.ctx.gc_recorder.check_gc_executable(self.__store):
self.ctx.gc_recorder.delete_record(self.__store)
L.debug(f'del rp: {self}')
self.destroy()
L.debug(f"running gc: {sys._getframe().f_code.co_name}. "
f"deleting store name: {self.__store._store_locator._name}, "
f"namespace: {self.__store._store_locator._namespace}")
def __repr__(self):
return f'<RollPair(_store={self.__store}) at {hex(id(self))}>'
def enable_gc(self):
self.gc_enable = True
def disable_gc(self):
self.gc_enable = False
def get_store_serdes(self):
return create_serdes(self.__store._store_locator._serdes)
def get_partitions(self):
return self.__store._store_locator._total_partitions
def get_name(self):
return self.__store._store_locator._name
def get_namespace(self):
return self.__store._store_locator._namespace
def get_store(self):
return self.__store
def get_store_type(self):
return self.__store._store_locator._store_type
def kv_to_bytes(self, **kwargs):
use_serialize = kwargs.get("use_serialize", True)
# can not use is None
if "k" in kwargs and "v" in kwargs:
k, v = kwargs["k"], kwargs["v"]
return (self.value_serdes.serialize(k), self.value_serdes.serialize(v)) if use_serialize \
else (string_to_bytes(k), string_to_bytes(v))
elif "k" in kwargs:
k = kwargs["k"]
return self.value_serdes.serialize(k) if use_serialize else string_to_bytes(k)
elif "v" in kwargs:
v = kwargs["v"]
return self.value_serdes.serialize(v) if use_serialize else string_to_bytes(v)
"""
storage api
"""
@_method_profile_logger
def get(self, k, options: dict = None):
if options is None:
options = {}
L.debug(f"get k: {k}")
k = create_serdes(self.__store._store_locator._serdes).serialize(k)
er_pair = ErPair(key=k, value=None)
outputs = []
value = None
partition_id = self.partitioner(k)
egg = self.ctx.route_to_egg(self.__store._partitions[partition_id])
L.info(f"partitions count: {self.__store._store_locator._total_partitions}, target partition: {partition_id}, endpoint: {egg._command_endpoint}")
inputs = [ErPartition(id=partition_id, store_locator=self.__store._store_locator)]
output = [ErPartition(id=partition_id, store_locator=self.__store._store_locator)]
job_id = generate_job_id(self.__session_id, RollPair.GET)
job = ErJob(id=job_id,
name=RollPair.GET,
inputs=[self.__store],
outputs=outputs,
functors=[ErFunctor(body=cloudpickle.dumps(er_pair))])
task = ErTask(id=generate_task_id(job_id, partition_id),
name=RollPair.GET,
inputs=inputs,
outputs=output,
job=job)
job_resp = self.__command_client.simple_sync_send(
input=task,
output_type=ErPair,
endpoint=egg._command_endpoint,
command_uri=CommandURI(f'{RollPair.EGG_PAIR_URI_PREFIX}/{RollPair.RUN_TASK}'),
serdes_type=self.__command_serdes
)
return self.value_serdes.deserialize(job_resp._value) if job_resp._value != b'' else None
@_method_profile_logger
def put(self, k, v, options: dict = None):
if options is None:
options = {}
k, v = create_serdes(self.__store._store_locator._serdes).serialize(k), \
create_serdes(self.__store._store_locator._serdes).serialize(v)
er_pair = ErPair(key=k, value=v)
outputs = []
partition_id = self.partitioner(k)
egg = self.ctx.route_to_egg(self.__store._partitions[partition_id])
inputs = [ErPartition(id=partition_id, store_locator=self.__store._store_locator)]
output = [ErPartition(id=0, store_locator=self.__store._store_locator)]
job_id = generate_job_id(self.__session_id, RollPair.PUT)
job = ErJob(id=job_id,
name=RollPair.PUT,
inputs=[self.__store],
outputs=outputs,
functors=[ErFunctor(body=cloudpickle.dumps(er_pair))])
task = ErTask(id=generate_task_id(job_id, partition_id),
name=RollPair.PUT,
inputs=inputs,
outputs=output,
job=job)
L.info("start send req")
job_resp = self.__command_client.simple_sync_send(
input=task,
output_type=ErPair,
endpoint=egg._command_endpoint,
command_uri=CommandURI(f'{RollPair.EGG_PAIR_URI_PREFIX}/{RollPair.RUN_TASK}'),
serdes_type=self.__command_serdes
)
L.info("get resp:{}".format((job_resp._value)))
value = job_resp._value
return value
@_method_profile_logger
def get_all(self, options: dict = None):
if options is None:
options = {}
L.info('get all functor')
job_id = generate_job_id(self.__session_id, RollPair.GET_ALL)
def send_command():
job = ErJob(id=job_id,
name=RollPair.GET_ALL,
inputs=[self.__store],
outputs=[self.__store],
functors=[])
result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=SerdesTypes.PROTOBUF)
return result
send_command()
populated_store = self.ctx.populate_processor(self.__store)
transfer_pair = TransferPair(transfer_id=job_id)
done_cnt = 0
for k, v in transfer_pair.gather(populated_store):
done_cnt += 1
yield self.key_serdes.deserialize(k), self.value_serdes.deserialize(v)
L.debug(f"get_all count:{done_cnt}")
@_method_profile_logger
def put_all(self, items, output=None, options: dict = None):
if options is None:
options = {}
include_key = options.get("include_key", True)
job_id = generate_job_id(self.__session_id, RollPair.PUT_ALL)
# TODO:1: consider multiprocessing scenario. parallel size should be sent to egg_pair to set write signal count
def send_command():
job = ErJob(id=job_id,
name=RollPair.PUT_ALL,
inputs=[self.__store],
outputs=[self.__store],
functors=[])
result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=SerdesTypes.PROTOBUF)
return result
th = Thread(target=send_command, name=f'roll_pair-send_command-{job_id}')
th.start()
populated_store = self.ctx.populate_processor(self.__store)
shuffler = TransferPair(job_id)
broker = FifoBroker()
bb = BatchBroker(broker)
scatter_future = shuffler.scatter(broker, self.partitioner, populated_store)
key_serdes = self.key_serdes
value_serdes = self.value_serdes
try:
if include_key:
for k, v in items:
bb.put(item=(key_serdes.serialize(k), value_serdes.serialize(v)))
else:
k = 0
for v in items:
bb.put(item=(key_serdes.serialize(k), value_serdes.serialize(v)))
k += 1
finally:
bb.signal_write_finish()
scatter_results = scatter_future.result()
L.debug(f"scatter_results: {scatter_results}")
th.join()
return RollPair(populated_store, self.ctx)
@_method_profile_logger
def count(self):
total_partitions = self.__store._store_locator._total_partitions
job_id = generate_job_id(self.__session_id, tag=RollPair.COUNT)
job = ErJob(id=job_id,
name=RollPair.COUNT,
inputs=[self.ctx.populate_processor(self.__store)])
args = list()
for i in range(total_partitions):
partition_input = job._inputs[0]._partitions[i]
task = ErTask(id=generate_task_id(job_id, i),
name=job._name,
inputs=[partition_input],
job=job)
args.append(([task], partition_input._processor._command_endpoint))
futures = self.__command_client.async_call(
args=args,
output_types=[ErPair],
command_uri=CommandURI(f'{RollPair.EGG_PAIR_URI_PREFIX}/{RollPair.RUN_TASK}'))
done = wait(futures, timeout=20, return_when=FIRST_EXCEPTION).done
result = 0
for future in done:
pair = future.result()[0]
result += self.functor_serdes.deserialize(pair._value)
return result
# todo:1: move to command channel to utilize batch command
@_method_profile_logger
def destroy(self):
total_partitions = self.__store._store_locator._total_partitions
job = ErJob(id=generate_job_id(self.__session_id, RollPair.DESTROY),
name=RollPair.DESTROY,
inputs=[self.__store],
outputs=[self.__store],
functors=[])
job_resp = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes)
self.ctx.get_session()._cluster_manager_client.delete_store(self.__store)
L.info(f'{RollPair.DESTROY}: {self.__store}')
@_method_profile_logger
def delete(self, k, options: dict = None):
if options is None:
options = {}
key = create_serdes(self.__store._store_locator._serdes).serialize(k)
er_pair = ErPair(key=key, value=None)
outputs = []
value = None
partition_id = self.partitioner(key)
egg = self.ctx.route_to_egg(self.__store._partitions[partition_id])
L.info(egg._command_endpoint)
L.info(f"count: {self.__store._store_locator._total_partitions}")
inputs = [ErPartition(id=partition_id, store_locator=self.__store._store_locator)]
output = [ErPartition(id=partition_id, store_locator=self.__store._store_locator)]
job_id = generate_job_id(self.__session_id, RollPair.DELETE)
job = ErJob(id=job_id,
name=RollPair.DELETE,
inputs=[self.__store],
outputs=outputs,
functors=[ErFunctor(body=cloudpickle.dumps(er_pair))])
task = ErTask(id=generate_task_id(job_id, partition_id), name=RollPair.DELETE, inputs=inputs, outputs=output, job=job)
L.info("start send req")
job_resp = self.__command_client.simple_sync_send(
input=task,
output_type=ErPair,
endpoint=egg._command_endpoint,
command_uri=CommandURI(f'{RollPair.EGG_PAIR_URI_PREFIX}/{RollPair.RUN_TASK}'),
serdes_type=self.__command_serdes
)
@_method_profile_logger
def take(self, n: int, options: dict = None):
if options is None:
options = {}
if n <= 0:
n = 1
keys_only = options.get("keys_only", False)
ret = []
count = 0
for item in self.get_all():
if keys_only:
if item:
ret.append(item[0])
else:
ret.append(None)
else:
ret.append(item)
count += 1
if count == n:
break
return ret
@_method_profile_logger
def first(self, options: dict = None):
if options is None:
options = {}
resp = self.take(1, options=options)
if resp:
return resp[0]
else:
return None
@_method_profile_logger
def save_as(self, name, namespace, partition, options: dict = None):
if options is None:
options = {}
store_type = options.get('store_type', self.ctx.default_store_type)
if partition == self.get_partitions():
store = ErStore(store_locator=ErStoreLocator(store_type=store_type, namespace=namespace,
name=name, total_partitions=self.get_partitions()))
return self.map_values(lambda v: v, output=store)
else:
store = ErStore(store_locator=ErStoreLocator(store_type=store_type, namespace=namespace,
name=name, total_partitions=partition))
return self.map(lambda k, v: (k, v), output=store)
"""
computing api
"""
@_method_profile_logger
def map_values(self, func, output=None, options: dict = None):
if options is None:
options = {}
functor = ErFunctor(name=RollPair.MAP_VALUES, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(func))
outputs = []
if output:
outputs.append(output)
# todo:1: options issues. refer to line 77
final_options = {}
final_options.update(self.__store._options)
final_options.update(options)
job = ErJob(id=generate_job_id(self.__session_id, RollPair.MAP_VALUES),
name=RollPair.MAP_VALUES,
inputs=[self.__store],
outputs=outputs,
functors=[functor],
options=final_options)
job_result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes)
er_store = job_result._outputs[0]
L.info(er_store)
return RollPair(er_store, self.ctx)
@_method_profile_logger
def map(self, func, output=None, options: dict = None):
if options is None:
options = {}
functor = ErFunctor(name=RollPair.MAP, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(func))
outputs = []
if output:
outputs.append(output)
job = ErJob(id=generate_job_id(self.__session_id, RollPair.MAP),
name=RollPair.MAP,
inputs=[self.__store],
outputs=outputs,
functors=[functor])
job_result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes)
er_store = job_result._outputs[0]
L.info(er_store)
L.info(er_store)
return RollPair(er_store, self.ctx)
@_method_profile_logger
def map_partitions(self, func, output=None, options: dict = None):
if options is None:
options = {}
functor = ErFunctor(name=RollPair.MAP_PARTITIONS, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(func))
outputs = []
if output:
outputs.append(output)
job = ErJob(id=generate_job_id(self.__session_id, RollPair.MAP_PARTITIONS),
name=RollPair.MAP_PARTITIONS,
inputs=[self.__store],
outputs=outputs,
functors=[functor])
job_result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes
)
er_store = job_result._outputs[0]
L.info(er_store)
return RollPair(er_store, self.ctx)
@_method_profile_logger
def collapse_partitions(self, func, output=None, options: dict = None):
if options is None:
options = {}
functor = ErFunctor(name=RollPair.COLLAPSE_PARTITIONS, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(func))
outputs = []
if output:
outputs.append(output)
job = ErJob(id=generate_job_id(self.__session_id, RollPair.COLLAPSE_PARTITIONS),
name=RollPair.COLLAPSE_PARTITIONS,
inputs=[self.__store],
outputs=outputs,
functors=[functor])
job_result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes
)
er_store = job_result._outputs[0]
L.info(er_store)
return RollPair(er_store, self.ctx)
@_method_profile_logger
def flat_map(self, func, output=None, options: dict = None):
if options is None:
options = {}
functor = ErFunctor(name=RollPair.FLAT_MAP, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(func))
outputs = []
if output:
outputs.append(output)
job = ErJob(id=generate_job_id(self.__session_id, RollPair.FLAT_MAP),
name=RollPair.FLAT_MAP,
inputs=[self.__store],
outputs=outputs,
functors=[functor])
job_result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes
)
er_store = job_result._outputs[0]
L.info(er_store)
return RollPair(er_store, self.ctx)
@_method_profile_logger
def reduce(self, func, output=None, options: dict = None):
total_partitions = self.__store._store_locator._total_partitions
job_id = generate_job_id(self.__session_id, tag=RollPair.REDUCE)
serialized_func = ErFunctor(name=RollPair.REDUCE, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(func))
job = ErJob(id=job_id,
name=RollPair.REDUCE,
inputs=[self.ctx.populate_processor(self.__store)],
functors=[serialized_func])
args = list()
for i in range(total_partitions):
partition_input = job._inputs[0]._partitions[i]
task = ErTask(id=generate_task_id(job_id, i),
name=job._name,
inputs=[partition_input],
job=job)
args.append(([task], partition_input._processor._command_endpoint))
futures = self.__command_client.async_call(
args=args,
output_types=[ErPair],
command_uri=CommandURI(f'{RollPair.EGG_PAIR_URI_PREFIX}/{RollPair.RUN_TASK}'))
done = wait(futures, return_when=FIRST_EXCEPTION).done
result = None
first = True
for future in done:
pair = future.result()[0]
seq_op_result = self.functor_serdes.deserialize(pair._value)
if seq_op_result is not None:
if not first:
result = func(result, seq_op_result)
else:
result = seq_op_result
first = False
return result
@_method_profile_logger
def aggregate(self, zero_value, seq_op, comb_op, output=None, options: dict = None):
total_partitions = self.__store._store_locator._total_partitions
job_id = generate_job_id(self.__session_id, tag=RollPair.AGGREGATE)
serialized_zero_value = ErFunctor(name=RollPair.AGGREGATE, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(zero_value))
serialized_seq_op = ErFunctor(name=RollPair.AGGREGATE, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(seq_op))
job = ErJob(id=job_id,
name=RollPair.AGGREGATE,
inputs=[self.ctx.populate_processor(self.__store)],
functors=[serialized_zero_value, serialized_seq_op])
args = list()
for i in range(total_partitions):
partition_input = job._inputs[0]._partitions[i]
task = ErTask(id=generate_task_id(job_id, i),
name=job._name,
inputs=[partition_input],
job=job)
args.append(([task], partition_input._processor._command_endpoint))
futures = self.__command_client.async_call(
args=args,
output_types=[ErPair],
command_uri=CommandURI(f'{RollPair.EGG_PAIR_URI_PREFIX}/{RollPair.RUN_TASK}'))
done = wait(futures, return_when=FIRST_EXCEPTION).done
result = None
first = True
for future in done:
pair = future.result()[0]
seq_op_result = self.functor_serdes.deserialize(pair._value)
if not first:
result = comb_op(result, seq_op_result)
else:
result = seq_op_result
first = False
return result
@_method_profile_logger
def glom(self, output=None, options: dict = None):
if options is None:
options = {}
functor = ErFunctor(name=RollPair.GLOM, serdes=SerdesTypes.CLOUD_PICKLE)
outputs = []
if output:
outputs.append(output)
job = ErJob(id=generate_job_id(self.__session_id, RollPair.GLOM),
name=RollPair.GLOM,
inputs=[self.__store],
outputs=outputs,
functors=[functor])
job_result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes
)
er_store = job_result._outputs[0]
L.info(er_store)
return RollPair(er_store, self.ctx)
@_method_profile_logger
def sample(self, fraction, seed=None, output=None, options: dict = None):
if options is None:
options = {}
er_fraction = ErFunctor(name=RollPair.REDUCE, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(fraction))
er_seed = ErFunctor(name=RollPair.REDUCE, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(seed))
outputs = []
if output:
outputs.append(output)
job = ErJob(id=generate_job_id(self.__session_id, RollPair.SAMPLE),
name=RollPair.SAMPLE,
inputs=[self.__store],
outputs=outputs,
functors=[er_fraction, er_seed])
job_result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes)
er_store = job_result._outputs[0]
L.info(er_store)
return RollPair(er_store, self.ctx)
@_method_profile_logger
def filter(self, func, output=None, options: dict = None):
if options is None:
options = {}
functor = ErFunctor(name=RollPair.FILTER, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(func))
outputs = []
if output:
outputs.append(output)
job = ErJob(id=generate_job_id(self.__session_id, RollPair.FILTER),
name=RollPair.FILTER,
inputs=[self.__store],
outputs=outputs,
functors=[functor])
job_result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes)
er_store = job_result._outputs[0]
L.info(er_store)
return RollPair(er_store, self.ctx)
@_method_profile_logger
def subtract_by_key(self, other, output=None, options: dict = None):
if options is None:
options = {}
functor = ErFunctor(name=RollPair.SUBTRACT_BY_KEY, serdes=SerdesTypes.CLOUD_PICKLE)
outputs = []
if output:
outputs.append(output)
job = ErJob(id=generate_job_id(self.__session_id, RollPair.SUBTRACT_BY_KEY),
name=RollPair.SUBTRACT_BY_KEY,
inputs=[self.__store, other.__store],
outputs=outputs,
functors=[functor])
job_result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes)
er_store = job_result._outputs[0]
L.info(er_store)
return RollPair(er_store, self.ctx)
@_method_profile_logger
def union(self, other, func=lambda v1, v2: v1, output=None, options: dict = None):
if options is None:
options = {}
functor = ErFunctor(name=RollPair.UNION, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(func))
outputs = []
if output:
outputs.append(output)
job = ErJob(id=generate_job_id(self.__session_id, RollPair.UNION),
name=RollPair.UNION,
inputs=[self.__store, other.__store],
outputs=outputs,
functors=[functor])
job_result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes)
er_store = job_result._outputs[0]
L.info(er_store)
return RollPair(er_store, self.ctx)
@_method_profile_logger
def join(self, other, func, output=None, options: dict = None):
if options is None:
options = {}
functor = ErFunctor(name=RollPair.JOIN, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(func))
outputs = []
if output:
outputs.append(output)
final_options = {}
final_options.update(self.__store._options)
final_options.update(options)
job = ErJob(id=generate_job_id(self.__session_id, RollPair.JOIN),
name=RollPair.JOIN,
inputs=[self.__store, other.__store],
outputs=outputs,
functors=[functor],
options=final_options)
job_result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes)
er_store = job_result._outputs[0]
L.info(er_store)
return RollPair(er_store, self.ctx)
@_method_profile_logger
def with_stores(self, func, others=None, options: dict = None):
    """Run *func* once per partition across this store and *others*.

    All participating stores must share the same partition count. Returns
    a list of deserialized (key, value) results, one per partition.
    """
    opts = {} if options is None else options
    tag = "withStores"
    other_rps = [] if others is None else others
    total_partitions = self.get_partitions()
    # every participating store must be partitioned identically
    for other in other_rps:
        if other.get_partitions() != total_partitions:
            raise ValueError(f"diff partitions: expected:{total_partitions}, actual:{other.get_partitions()}")
    job_id = generate_job_id(self.__session_id, tag=tag)
    job = ErJob(id=job_id,
                name=tag,
                inputs=[self.ctx.populate_processor(rp.get_store()) for rp in [self] + other_rps],
                functors=[ErFunctor(name=tag, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(func))],
                options=opts)
    # one task per partition, routed to the processor owning our partition
    call_args = []
    for part_idx in range(total_partitions):
        own_partition = job._inputs[0]._partitions[part_idx]
        task = ErTask(id=generate_task_id(job_id, part_idx),
                      name=job._name,
                      inputs=[store._partitions[part_idx] for store in job._inputs],
                      job=job)
        call_args.append(([task], own_partition._processor._command_endpoint))
    futures = self.__command_client.async_call(
            args=call_args,
            output_types=[ErPair],
            command_uri=CommandURI(f'{RollPair.EGG_PAIR_URI_PREFIX}/{RollPair.RUN_TASK}'))
    # gather results, deserializing both halves of each returned ErPair
    results = []
    for fut in futures:
        pair = fut.result()[0]
        results.append((self.functor_serdes.deserialize(pair._key),
                        self.functor_serdes.deserialize(pair._value)))
    return results
|
_processing.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
| @purpose: Hub for running processing and analyzing raw data.
| @date: Created on Sat May 1 15:12:38 2019
| @author: Semeon Risom
| @email: semeon.risom@gmail.com
| @url: https://semeon.io/d/R33-analysis
"""
# available classes and functions
__all__ = ['Processing']
# required external libraries
__required__ = ['distutils','importlib','nslr']
# core
from pdb import set_trace as breakpoint
from pathlib import Path
import numpy as np
import pandas as pd
import math as m
import sys
import os
import glob
## logging
import logging
import datetime
# local libraries
from .. import settings
from . import Metadata
class Processing():
"""Hub for running processing and analyzing raw data."""
def __init__(self, config, isLibrary=True):
    """Hub for running processing and analyzing raw data.

    Parameters
    ----------
    config : :class:`dict`
        Configuration data. i.e. trial number, location.
    isLibrary : :obj:`bool`
        Check if required libraries are available. Default `True`.
    """
    #check libraries
    if isLibrary:
        settings.library(__required__)
    #set current subject (use for iterations)
    self.current_subject = ''
    # classified gaze coordinates; populated elsewhere (e.g. by run()) before use
    self.cgxy = ''
    # logging.Logger instance; assigned in run() — presumably before any warning is emitted (TODO confirm)
    self.log = ''
    # index of the multiprocessing core handling this instance
    self.thisCore = 0
    # configuration dict shared by all processing steps
    self.config = config
    # default smoothing filters as [full name, short-hand] pairs
    self.filters = [['SavitzkyGolay', 'sg']]
def getEstimatedMonitor(self, diagonal, window):
    """Estimate physical monitor size (width, height; cm) from its diagonal.

    Parameters
    ----------
    diagonal : :obj:`float`
        Estimated diagonal monitor size (hypotenuse) in inches.
    window : :obj:`tuple`
        Horizontal, vertical resolution in pixels; used only for the
        aspect ratio.

    Returns
    -------
    monitor : :obj:`list`
        [width, height] of the monitor in centimeters.
    """
    ratio = window[0]/window[1]
    # height (inches) from the diagonal: h = d / sqrt(ratio^2 + 1);
    # computed once instead of twice as before
    height_in = m.sqrt(m.pow(diagonal, 2) / (m.pow(ratio, 2) + 1))
    # width = aspect ratio * height; 2.54 converts inches to cm
    monitor_x = float(ratio * height_in * 2.54)
    monitor_y = float(height_in * 2.54)
    return [monitor_x, monitor_y]
def preprocess(self, df, window):
    """Initial data cleaning for a single trial.

    Parameters
    ----------
    df : :class:`pandas.DataFrame`
        Pandas dataframe of raw data for one trial.
    window : :class:`tuple`
        Horizontal, vertical resolution in pixels.

    Returns
    -------
    df : :class:`pandas.DataFrame`
        Cleaned dataframe with 'RT' and 'marker' columns added, and a
        'bad' flag column when a removal step is disabled in config.

    Notes
    ----------
    Each step is controlled by a ``self.config`` flag; when disabled the
    offending samples are flagged instead of dropped:

    remove_missing:
        Remove samples with null values.
    remove_bounds:
        Remove samples outside of window bounds.
    remove_spikes:
        Remove one-sample spikes whose x/y delta exceeds
        ``config['spike_delta']``.
    """
    #sort samples chronologically within each trial
    df = df.sort_values(['TrialNum','sampleNum'],ascending=[True, True])
    #1) remove fixation cross samples #!!!
    #print(self.console['green'] + '///////////Preprocessing: Drop Fixation Cross samples' + self.console['ENDC'])
    #df = df[df['event'] != 'Fixation'].reset_index(drop=True)
    #2) get RT: span between the first and last DotLoc timestamps
    d_loc = df[df['event'] == "DotLoc"]
    #check if participant responded
    if d_loc.shape[0] >=1:
        df["RT"] = d_loc['timestamp'][d_loc.index[-1]] - d_loc['timestamp'][d_loc.index[0]]
    else:
        self.log.warning('event: preprocess(self, df, window), core: %s, subject: %s, trial: %s, dotloc rows: %s'
        %(self.thisCore, self.config['subject'], self.config['trial'], d_loc.shape[0]))
    #3) set event markers #!!!
    df['marker'] = "."
    #fixation event
    ##set marker on the first sample of the trial
    #df['marker'][df.index[0]] = "Fixation Onset"
    df.loc[df.index[0], 'marker'] = "Fixation Onset"
    #stim event
    d_stim = df[df['event'] == "Stim"]
    ##set marker on the first stimulus sample
    df.loc[d_stim.index[0], 'marker'] = "Stimulus Onset"
    #dotloc event #!!!
    ##set marker
    ##check if any samples exist within dotloc onset
    if d_loc.shape[0] >=1:
        df.loc[d_loc.index[0], 'marker'] = "Dotloc Onset"
    else:
        self.log.warning('event: preprocess(self, df, window), core: %s, subject: %s, trial: %s, dotloc rows: %s'
        %(self.thisCore, self.config['subject'], self.config['trial'], d_loc.shape[0]))
    #end trial marker on the last sample
    #df['marker'][df.index[-1]] = "End Trial"
    df.loc[df.index[-1], 'marker'] = "End Trial"
    #4) remove missing data #!!!
    ## convert '.' to null
    if self.config['remove_missing']:
        print(self.console['green'] + 'Preprocessing: Remove null samples' + self.console['ENDC'])
        ## drop null
        df = df[(df["x"].notnull()) & (df["y"].notnull())].reset_index(drop=True)
    else:
        #mark as bad sample ('.' is the raw-data placeholder for missing)
        df['bad'] = df.apply(lambda x: True if ((x['x'] == '.') or (x['y'] == '.')) else False, axis=1)
    #5) remove samples outside of window bounds (i.e. 0>x>1920, 0>y>1080) #!!!
    if self.config['remove_bounds']:
        print(self.console['green']+'Preprocessing: Remove samples outside of window bounds (i.e. 0 > x > 1920, 0 > y > 1080)'\
        +self.console['ENDC'])
        df = df[(df['x'] <= window[0]) & (df['y'] <= window[1]) &
        (df['x'] >= 0) & (df['y'] >= 0)].reset_index(drop=True)
    else:
        #mark as bad sample (preserve any earlier 'bad' flag)
        df['bad'] = df.apply(lambda x: True if ((x['x'] <= 0) or (x['y'] <= 0) or
        (x['x'] > window[0]) or (x['y'] > window[1])
        ) else x['bad'], axis=1)
    #6) remove one-sample spikes #!!!
    if self.config['remove_spikes']:
        print(self.console['green'] + 'Preprocessing: Remove one-sample spikes' + self.console['ENDC'])
        max_delta = self.config['spike_delta']
        #x: forward one-sample change; last delta duplicated from its neighbor
        delta = df['x'].diff().shift(-1).abs()
        delta.iloc[-1] = delta.iloc[-2]
        df = df[(delta < max_delta)]
        #y
        delta = df['y'].diff().shift(-1).abs()
        delta.iloc[-1] = delta.iloc[-2]
        df = df[(delta < max_delta)]
    # else mark spikes as bad
    else:
        max_delta = self.config['spike_delta']
        # NOTE(review): this branch selects samples with delta < max_delta —
        # i.e. the NON-spike samples — and then flags them as bad below,
        # which looks inverted relative to the removal branch above;
        # confirm intent before relying on the 'bad' flag here.
        #x
        delta = df['x'].diff().shift(-1).abs()
        delta.iloc[-1] = delta.iloc[-2]
        df_spike = df[(delta < max_delta)]
        #y
        delta = df['y'].diff().shift(-1).abs()
        delta.iloc[-1] = delta.iloc[-2]
        df_spike = df_spike.loc[(delta < max_delta)]
        #mark samples as bad
        spike = df_spike.index
        df.loc[spike,'bad'] = True
    #set x,y to NaN
    #df["x"] = df["x"].replace(".", np.NaN)
    #df["y"] = df["y"].replace(".", np.NaN)
    return df
#getting data for analysis
def getData(self, path=None):
    """Load raw subject data for use in analysis.

    Parameters
    ----------
    path : :obj:`str`
        The file path of the subject data. Ignored when
        ``config['single_subject']`` is set, in which case the path is
        rebuilt from the config's path, subject and session values.

    Returns
    -------
    df : :class:`pandas.DataFrame`
        Pandas dataframe of raw data.
    path : :obj:`str`
        The file path actually read.

    Examples
    --------
    >>> df_raw, path_used = getData(path=self.config['path'])
    """
    #if single subject get path from config
    #NOTE(review): filename pattern hard-codes an 'abc' suffix — confirm this matches on-disk naming
    if (self.config['single_subject']):
        path = '%s/%s_%sabc.csv'%(self.config['path'],self.config['subject'],self.config['session'])
    #read csv as dataframe; float_precision='high' keeps gaze coordinates exact
    df = pd.read_csv(path, float_precision='high')
    ##rename webgazer columns to the canonical names used downstream
    if self.config['source'] != 'eyelink':
        df = df.rename(columns={"trialNumTask": "TrialNum", "sample_time": "timestamp",
        "isWindowSuccess": "is_calibrated",
        "LEmotion": "left_mood","REmotion": "right_mood"})
    #sort chronologically within each trial
    df = df.sort_values(['TrialNum','timestamp'],ascending=[True, True]).reset_index(drop=True)
    #set as eyetracking or behavioral
    if self.config['source'] != 'eyelink':
        ##create column for type (eyetracking, behavioral)
        df['type'] = np.where((df['isWebcamUsed']==True), 'eyetracking', 'behavioral')
    return df, path
def filter_data(self, df, filter_type, config, **kwargs):
"""
Butterworth: Design an Nth-order digital or analog Butterworth filter and return
the filter coefficients.
Parameters
----------
df : :class:`pandas.DataFrame`
Pandas dataframe of raw data.
filter_type : :obj:`str`, optional
Type of filter.
config : :class:`dict`
Configuration data. i.e. trial number, location.
Attributes
----------
filter_type : :obj:`str`
Filter type: 'butterworth'
"""
from scipy.ndimage.filters import gaussian_filter1d
from scipy.signal import butter,filtfilt,medfilt,savgol_filter
g_t= df['timestamp']
g_x = df['x']
g_y= df['y']
"""filters"""
#http://scipy-cookbook.readthedocs.io/items/ButterworthBandpass.html
#Butterworth filter
#note: butterworth cant have NaN data
if filter_type=='butterworth':
wn=self.config['f_b']['Wn']
order=self.config['f_b']['N']
b, a = butter(order, wn, 'lowpass', output='ba')
#b, a = butter(order, wn, 'low')
#converting pandas series to numpy ndarray
bg_t=df['timestamp']
bg_x=df['x']
bg_y=df['y']
f_x = filtfilt(b, a, bg_x)
f_y = filtfilt(b, a, bg_y)
#print(f_x)
fxy_df = pd.DataFrame({'x':f_x, 'y':f_y, 'timestamp':bg_t})
#breakpoint() #TODO!
#gaussian filter
elif filter_type=='gauss':
sigma=self.config['f_g']['sigma']
print('sigma: %s'%(sigma))
f_x = gaussian_filter1d(g_x,sigma)
f_y = gaussian_filter1d(g_y,sigma)
fxy_df = pd.DataFrame({'x':f_x, 'y':f_y, 'timestamp':g_t})
#median filter
elif filter_type=='median':
size=self.config['f_m']['size']
print('size: %s'%(size))
f_x=medfilt(g_x,size)
f_y=medfilt(g_y,size)
fxy_df = pd.DataFrame({'x':f_x, 'y':f_y, 'timestamp':g_t})
#http://scipy-cookbook.readthedocs.io/items/SavitzkyGolay.html
#savitzky golay filter
elif filter_type=='SavitzkyGolay':
window=self.config['f_sg']['window']
order=self.config['f_sg']['order']
print(self.console['orange'] + 'window: %s'%(window) + self.console['ENDC'])
print(self.console['orange'] + 'order: %s'%(order) + self.console['ENDC'])
#breakpoint() #TODO!
#f_x=analysis.savitzky_golay(g_x,window_size=window, order=order)
#f_y=analysis.savitzky_golay(g_y,window_size=window, order=order)
try:
f_x=savgol_filter(g_x, window_length=window, polyorder=order)
f_y=savgol_filter(g_y, window_length=window, polyorder=order)
fxy_df = pd.DataFrame({'x':f_x, 'y':f_y, 'timestamp':g_t})
except:
fxy_df = None
#mean filter
elif filter_type=='moving average':
weights=self.config['f_a']['weights']
print('weights: %s'%(weights))
weights=weights/np.sum(weights)
print('weights: %s'%(weights))
f_x=np.convolve(g_x, weights,'same')
f_y=np.convolve(g_y, weights,'same')
fxy_df = pd.DataFrame({'x':f_x, 'y':f_y, 'timestamp':g_t})
else:
raise ValueError('Unknown Filter Type: %s. Must be one of %s'%(filter_type,str(
['sg','butter','gauss','median','moving average'])))
return fxy_df
def classify(self, config, df, ctype='ivt', filter_type=None,
             v_th=None, dr_th=None, di_th=None,
             missing=None, maxdist=None, mindur=None):
    """Dispatch gaze samples to a fixation-classification algorithm.

    In the I-VT model a velocity value is computed for every eye-position
    sample and compared to ``v_th``: below the threshold the sample is part
    of a fixation, otherwise a saccade. The I-DT algorithm uses the spatial
    dispersion (``di_th``) and duration (``dr_th``) of consecutive samples.
    The simple model detects fixations as consecutive samples with an
    inter-sample distance below ``maxdist`` (disregarding ``missing`` data)
    lasting at least ``mindur``. 'hmm' delegates to a hidden-Markov-model
    classifier.

    Parameters
    ----------
    config : :class:`dict`
        Configuration data. i.e. trial number, location. Note the 'ivt'
        and 'hmm' paths read ``self.config`` rather than this argument.
    df : :class:`pandas.DataFrame`
        Pandas dataframe of samples to classify.
    ctype : :obj:`str`
        Classification type: 'ivt', 'idt', 'hmm', or 'simple'.
    filter_type : :obj:`str`, optional
        Filter short-hand forwarded to the hmm classifier.
    v_th : :obj:`int`, optional
        Velocity threshold in pix/sec (ivt).
    dr_th : :obj:`int`, optional
        Fixation duration threshold in pix/msec (idt).
    di_th : :obj:`int`, optional
        Dispersion threshold in pixels (idt).
    missing : optional
        Value to be used for missing data (simple).
    maxdist : :obj:`int`, optional
        Maximal inter-sample distance in pixels (simple).
    mindur : :obj:`int`, optional
        Minimal duration of a fixation in milliseconds; shorter candidates
        are disregarded (simple).

    Raises
    ------
    ValueError
        Unknown classification type.

    Returns
    -------
    df : :class:`pandas.DataFrame` or :obj:`list`
        Classified data; for 'hmm' a two-item list ``[df, cxy_df]``.
    """
    from . import Classify
    if ctype == 'ivt':
        cnfg = self.config
        df = Classify.ivt(df, v_th, config=cnfg)
    elif ctype == 'idt':
        df = Classify.idt(df, di_th, dr_th)
    elif ctype == 'hmm':
        cnfg = self.config
        df_, cxy_df = Classify.hmm(data=df, config=cnfg, filter_type=filter_type)
        df = [df_, cxy_df]
    elif ctype == 'simple':
        df = Classify.simple(df, missing, maxdist, mindur)
    else:
        # list every supported type (the previous message omitted
        # 'hmm' and 'simple')
        raise ValueError('Unknown classification type: %s. Must be one of %s' % (
            ctype, str(['ivt', 'idt', 'hmm', 'simple'])))
    return df
def roi(self, filters=None, flt=None, df=None, manual=False, monitorSize=None):
    """Check whether gaze samples and fixations fall within stimulus ROIs.

    Parameters
    ----------
    filters : :class:`list`, optional
        Filter parameters as [name, short-hand] pairs. Defaults to
        ``[['SavitzkyGolay', 'sg']]``; a ``None`` sentinel is used to
        avoid a shared mutable default argument.
    flt : :obj:`str`, optional
        Filter short-hand; overwritten on each iteration from *filters*.
    df : :class:`pandas.DataFrame`
        Pandas dataframe of classified data.
    manual : :obj:`bool`
        Whether processing.roi() is being run manually (posthoc).
    monitorSize : :class:`list`, optional
        Monitor size; recomputed internally from config (or from *df*
        when ``manual`` is True).

    Returns
    -------
    df : :class:`pandas.DataFrame`
        When ``manual`` is False: input dataframe with bound/fixation
        columns added.
    stim_bounds, roi_bounds : :obj:`list`
        When ``manual`` is True: the computed stimulus and ROI bounds.
    """
    # avoid the shared-mutable-default pitfall: build the default per call
    if filters is None:
        filters = [['SavitzkyGolay', 'sg']]
    #timestamp
    t0 = datetime.datetime.now()
    function = self.debug(message='t', source="timestamp")
    for idx, itm in enumerate(filters):
        #filter type
        flt = itm[1]
        print(self.console['orange'] + 'bounds: %s'%(itm) + self.console['ENDC'])
        #if not running model.roi manually (posthoc)
        if (manual == False):
            #real bounding box
            monitorSize=self.config['resolution.px']
        else:
            monitorSize = [int(i) for i in (df['monitorSize.px'].values[0]).split('x')]
        #if x-resolution is smaller than 1400, scale container and stimulus
        if monitorSize[0] < 1400:
            scaleImage = self.config['scaleImage']
            #scale container
            c = [1366*scaleImage,768*scaleImage]
            #scale stimulus
            image_x = 600*scaleImage #image size
            image_y = 600*scaleImage #image size
        #dont scale stimulus but create bounds larger than stimulus
        else:
            #scale roi #percentage of screen devoted to stim if window<=1400
            #scaleROI = (1 + (600/1400))
            #container
            c = [1366,768]
            #scale stimulus
            image_x = 600 #image size
            image_y = 600 #image size
        #centers
        cx_center = c[0]/2 #container x-center
        image_y_c = image_y/2 #roi bound y-center
        #bound_y_c = bound_y/2 #image y-center
        resx_c = monitorSize[0]/2 #resolution x-center
        resy_c = monitorSize[1]/2 #resolution y-center
        #------------------------------------------creating stim bounds
        #----------left stim bound
        lsbx1, lsbx2 = (resx_c-cx_center, resx_c-cx_center+image_x)
        lsby1, lsby2 = (resy_c+image_y_c, resy_c-image_y_c)
        #----------right stim bound
        rsbx1, rsbx2 = (resx_c+cx_center-image_x, resx_c+cx_center)
        rsby1, rsby2 = (resy_c+image_y_c, resy_c-image_y_c)
        #add to list
        stim_bounds = [dict(bID='l', bx1=lsbx1,by1=lsby1,bx2=lsbx2,by2=lsby2),
                       dict(bID='r', bx1=rsbx1,by1=rsby1,bx2=rsbx2,by2=rsby2)]
        #if not running processing.roi() manually (posthoc)
        if (manual == False):
            self.config['stim_bounds'] = stim_bounds
        #------------------------------------------creating roi bounds
        #----------left roi bound
        ##---region
        lbx1, lbx2 = (0, lsbx2)
        lby1, lby2 = (monitorSize[1], 0)
        ##---stim
        #lbx1, lbx2 = (lsbx1, lsbx2)
        #lby1, lby2 = (lsby2, lsby1)
        #----------right roi bound
        ##---region
        rbx1, rbx2 = (rsbx1, monitorSize[0])
        rby1, rby2 = (monitorSize[1], 0)
        ##---stim
        #rbx1, rbx2 = (rsbx1, rsbx2)
        #rby1, rby2 = (rsby2, rsby1)
        #add to list
        roi_bounds = [dict(bID='l', bx1=lbx1,by1=lby1,bx2=lbx2,by2=lby2),
                      dict(bID='r', bx1=rbx1,by1=rby1,bx2=rbx2,by2=rby2)]
        #if not running processing.roi() manually (posthoc)
        if (manual == False):
            self.config['roi_bounds'] = roi_bounds
        #if using eyelink data and eyelink classification
        if ((self.config['source'] == 'eyelink') and (self.config['classify_eyelink_data'] == False)):
            pts = np.array(df[['x', 'y']])
        #else use xy coordintes used to classify
        else:
            pts = np.array(df[['%s_x'%(flt), '%s_y'%(flt)]])
        #---------------------------------------------------------------------------------------------left bounds
        itm0 = roi_bounds[0] #x,y coordinates
        L_Bll = np.array([itm0['bx1'], itm0['by2']]) # lower-left
        L_Bur = np.array([itm0['bx2'], itm0['by1']]) # upper-right
        #bool of coordinates within and outside of bounds
        left_bound = np.all(np.logical_and(L_Bll <= pts, pts <= L_Bur), axis=1)
        #-------------------------------------------------------------------------------------------right bounds
        itm1 = roi_bounds[1] #x,y coordinates
        R_Bll = np.array([itm1['bx1'], itm1['by2']]) # lower-left
        R_Bur = np.array([itm1['bx2'], itm1['by1']]) # upper-right
        #bool of coordinates within and outside of bounds
        right_bound = np.all(np.logical_and(R_Bll <= pts, pts <= R_Bur), axis=1)
        #if not running processing.roi() manually (posthoc)
        if (manual == False):
            #-----------------------------------------------------------------get roi
            df = pd.concat([df, pd.DataFrame(np.vstack((left_bound, right_bound)).T,\
                columns=['left_bound','right_bound'])], axis=1, sort=False)
            #if eyelink data
            if ((self.config['source'] == 'eyelink') and (self.config['classify_eyelink_data'] == False)):
                #all fixation events
                df['%s_fix_all'%(flt)] = df.apply(lambda x: True if (isinstance(x['fixation'], int))\
                    else False, axis=1)
                #-------roi
                #gaze and fixations within roi: 1 = left, 2 = right, False = neither
                df['%s_all_bounds'%(flt)] = df.apply(lambda x: 1 if (x['left_bound'] == True)\
                    else (2 if (x['right_bound'] == True) else False), axis=1)
                ##check if sample is within bounds and part of a fixation event
                df['%s_fix_bounds'%(flt)] = df.apply(lambda x: x['%s_all_bounds'%(flt)]\
                    if (isinstance(x['fixation'], int)) else False, axis=1)
            #webgazer data
            else:
                #If enough samples
                if (self.config['too_few_samples'] == False):
                    #-----------------------------------------------------------------all fixation events
                    df['%s_fix_all'%(flt)] = df.apply(lambda x: True if (x['%s_class'%(flt)]==1) else False, axis=1)
                    #fixation index - count of fixation events per trial (i.e. fixation: 1, 2, 3, 4)
                    ##drop non fixations and get index
                    p_df = df.drop(df[df['%s_fix_all'%(flt)] == False].index)
                    #-----------------------------------------------------------------fixation index
                    ##add index to dataframe #subset dataframe to single column
                    p_df['%s_fix_index'%(flt)] = range(len(p_df))
                    p_df = p_df[['%s_fix_index'%(flt)]]
                    ##add column to original dataframe
                    df = df.join(p_df)
                    #replace np.nan with "." (for R)
                    # df['%s_fix_index'%(flt)].where((df['%s_fix_index'%(flt)].notnull()), ".", inplace=True)
                    #------------------------------------fixation counter
                    #enumerate runs of identical fix_all values
                    df['enum'] = ((df['%s_fix_all'%(flt)] != df['%s_fix_all'%(flt)].shift(1)).cumsum()-1)
                    #reset non-fixations as None
                    df['fix_num'] = df.apply(lambda x: float(x['enum']) if (x['%s_fix_index'%(flt)] != '.') else None, axis=1)
                    #factorize and reset #finished
                    df['fix_num'] = pd.factorize(df['fix_num'])[0]
                    df['fix_num'] = df.apply(lambda x: float(x['fix_num']) if (x['%s_fix_index'%(flt)] != '.') else None, axis=1)
                    #------------------------------------roi
                    ##gaze and fixations within roi: 1 = left, 2 = right, "." = neither
                    df['%s_all_bounds'%(flt)] = df.apply(lambda x: 1 if (x['left_bound'] == True)\
                        else (2 if (x['right_bound'] == True) else "."), axis=1)
                    ##only fixation within roi
                    df['%s_fix_bounds'%(flt)] = df.apply(lambda x: x['%s_all_bounds'%(flt)]\
                        if (x['%s_class'%(flt)]!=None) else ".", axis=1)
                    ##only fixation within roi
                    df['%s_fix_bounds_old'%(flt)] = df.apply(lambda x: x['%s_all_bounds'%(flt)]\
                        if (x['%s_class'%(flt)]==1) else ".", axis=1)
                    #------------------------------------total samples (left and right dwell) counter
                    df['sample_total'] = df.shape[0]
                    #----calculate dwell time
                    df['dwell'] = df.apply(lambda x: True if (x['left_bound']==True or x['right_bound']==True) else False, axis=1)
                #not enough samples
                else:
                    not_enough_samples = 'subject: %s, trial: %s; not enough samples'%(self.config['subject'], self.config['trial'])
                    self.debug(not_enough_samples)
                    print(self.console['red'] + not_enough_samples + self.console['ENDC'])
                    #add blank for fixation
                    df['%s_fix_index'%(flt)] = "."
                    df['%s_fix_all'%(flt)] = "."
                    df['fix_num'] = "."
                    df['%s_all_bounds'%(flt)] = "."
                    df['%s_fix_bounds'%(flt)] = "."
                    df['%s_fix_bounds_old'%(flt)] = "."
                    df['dwell'] = "."
                    df['sample_total'] = df.shape[0]
        #----------------------------------------------------------------------------------------test confirmation
        # disabled debug plot; flip `test` to True to render and save it
        test = False
        if test and (self.config['subject'] != self.current_subject):
            self.current_subject = self.config['subject']
            import matplotlib
            matplotlib.use('Agg')
            import matplotlib.pyplot as plt
            import matplotlib.patches as patches
            print(self.console['green'] + 'Preprocessing: Test to ensure ROI' + self.console['ENDC'])
            #plot
            #draw cross ###af2f2c #draw square ##2222b2 #draw triangle ##198D40
            plt.figure(4, figsize=(9.6,5.4))
            test = plt.gca()
            #roi bounding boxes #lbx1, lbx2, lby1, lby2 #rbx1, rbx2, rby1, rby2
            test.add_patch(patches.Rectangle((lbx1,lby2),width=lbx2,height=lby1,linewidth=1,facecolor='#396cbd33'))
            test.add_patch(patches.Rectangle((rbx1,rby2),width=rbx2,height=rby1,linewidth=1,facecolor='#396cbd33'))
            #gaze coordinates
            test.plot(df['%s_x'%(flt)], df['%s_y'%(flt)], marker='+', markerfacecolor="#ffffff00",
                      markersize=6, markeredgecolor='#af2f2c', linewidth=0)
            #all fixations
            p_df = df.drop(df[df['%s_fix_all'%(flt)] == False].index)
            test.plot(p_df['%s_x'%(flt)], p_df['%s_y'%(flt)], marker='s', markerfacecolor="#ffffff00",
                      markersize=6, markeredgecolor='#2222b2', linewidth=0)
            #only fixations within roi
            p_df = df[(df['%s_fix_all'%(flt)] == True) & (df['%s_fix_bounds'%(flt)] != ".")]
            test.plot(p_df['%s_x'%(flt)], p_df['%s_y'%(flt)], marker='^', markerfacecolor="#ffffff00",
                      markersize=6, markeredgecolor='#198D40', linewidth=0)
            #set limits
            test.set_xlim([0,monitorSize[0]])
            test.set_ylim([0,monitorSize[1]])
            #position
            ###########left_image x0, left_image x1, center, right_image x0, right_image x1
            x_ticks = [roi_bounds[0]['bx1'], roi_bounds[0]['bx2'], resx_c, roi_bounds[1]['bx1'], roi_bounds[1]['bx2']]
            ##########image y0, center, image y1
            y_ticks = [roi_bounds[0]['by1'], resy_c, roi_bounds[0]['by2']]
            #set ticks
            test.set_xticks(x_ticks)
            test.set_yticks(y_ticks)
            x_ticks.sort()
            y_ticks.sort()
            test.grid(linestyle='--')
            plt.gca().invert_yaxis()
            #plt.show()
            #save
            dpi = self.config['fig_dpi']
            emotion = self.config['emotion']
            resolution = self.config['resolution.px']
            img_title = 'gaze coordinates [subject:%s, session:%s, trial:%s, isCongruent:%s, left:%s, right:%s (%s, %s)]'\
                %(self.config['subject'],self.config['session'],self.config['trial'],\
                self.config['isCongruent'],emotion[0],emotion[1],\
                resolution[0],resolution[1])
            plt.suptitle(img_title, fontsize=12)
            save_folder = os.path.abspath(self.config['path']['output']+ '/test/img/%s_%s_%s-roiPlot.png'\
                %(self.config['subject'],self.config['session'],self.config['trial']))
            plt.savefig(save_folder, dpi=dpi,transparent=False)
            plt.close()
    #--------finished
    #timestamp
    print(self.console['blue']+'%s finished in %s msec'%(function,
        ((datetime.datetime.now()-t0).total_seconds()*1000))+self.console['ENDC'])
    #if not running processing.roi() manually (posthoc)
    if (manual == False):
        return df
    else:
        return stim_bounds, roi_bounds
def process(self, window, filters, gxy_df, trial, _classify=True, ctype='simple', _param='', log=False,
draw_plot=False, draw_heatmap=False, draw_gazeplot=False, draw_fixplot=False,
v_th=20,dr_th=200,di_th=20,
_missing=0.0, _maxdist=25, _mindur=50):
"""
Plotting and preparing data for classification. Combined plot of each filter.
Parameters
----------
window : :obj:`list`
horizontal, vertical resolution
filters : :obj:`list`
List of filters along with short-hand names.
gxy_df : :class:`pandas.DataFrame`
Pandas dataframe of raw data. Unfiltered raw data.
trial : :obj:`str`
Trial number.
_classify : :obj:`bool`
parameter to include classification
ctype : :obj:`str`
classification type. simple, idt, ivt
_param : :obj:`str`
[description] (the default is '', which [default_description])
log : :obj:`bool`
[description] (the default is False, which [default_description])
draw_plot : bool, optional
[description] (the default is False, which [default_description])
draw_heatmap : bool, optional
[description] (the default is False, which [default_description])
draw_gazeplot : bool, optional
[description] (the default is False, which [default_description])
draw_fixplot : bool, optional
[description] (the default is False, which [default_description])
v_th : :obj:`str`
Velocity threshold in px/sec (ivt)
dr_th : :obj:`str`
Fixation duration threshold in px/msec (idt)
di_th : :obj:`str`
Dispersion threshold in px (idt)
_missing : :obj:`bool`
value to be used for missing data (simple)
_maxdist : :obj:`str`
maximal inter sample distance in pixels (simple)
_mindur : :obj:`str`
minimal duration of a fixation in milliseconds; detected fixation
cadidates will be disregarded if they are below this duration (simple)
(default = 100)
Attributes
----------
_fxy_df : :class:`pandas.DataFrame`
Pandas dataframe of raw data. Filtered data. Subset of _fgxy_df.
Returns
-------
_fgxy_df : :class:`pandas.DataFrame`
Pandas dataframe of filtered data.
c_xy : :class:`pandas.DataFrame`
Pandas dataframe of classified data.
"""
#%matplotlib inline
#append filtered and raw data to list being prepared for export
l_gf = []
l_cxy = []
#append raw data
l_gf.append(gxy_df)
for idx, itm in enumerate(filters):
print(self.console['orange'] + 'filter: %s' % (itm[0]) + self.console['ENDC'])
#-------------------------------------------------------------------------parameters
c_xy = [] # classify data
#if using filter
if itm[0] != 'none':
fxy_df = self.filter_data(gxy_df, itm[0], self.config)
else:
fxy_df = gxy_df
#too few samples, trial should be passed
if fxy_df is None:
print(self.console['orange'] + 'too few samples' + self.console['ENDC'])
fxy_df = gxy_df
self.config['too_few_samples'] = True
fxy_df = fxy_df.reset_index(drop=True)
#------------------------------------------------------------fixation classification techniques
#store fixations from eyelink
if (self.config['source'] == 'eyelink'):
eyelink_c_xy = self.cgxy
#use eyelink calculated fixations if config.classify_eyelink = False
if ((self.config['source'] == 'eyelink') and (self.config['classify_eyelink_data'] == False)):
c_xy = self.cgxy
ctype = 'eyelink'
if ctype == 'idt':
# if eyelink and using original fixations
if ((self.config['source'] == 'eyelink') and (self.config['classify_eyelink_data'] == False)):
pass
else:
c_xy = self.classify(self, df=fxy_df, dr_th=dr_th, di_th=di_th, ctype=ctype)
if ctype == 'ivt':
# if eyelink and using original fixations
if ((self.config['source'] == 'eyelink') and (self.config['classify_eyelink_data'] == False)):
pass
else:
c_xy = self.classify(self, self.config, fxy_df, v_th=v_th, ctype=ctype)
if ctype == 'hmm':
# if eyelink and using original fixations
if ((self.config['source'] == 'eyelink') and (self.config['classify_eyelink_data'] == False)):
pass
# if if samples are too few
elif (self.config['too_few_samples']):
pass
else:
dfc_xy = self.classify(self.config, df=fxy_df, ctype=ctype, filter_type=itm[1])
fxy_df = dfc_xy[0]
c_xy = dfc_xy[1]
del dfc_xy
elif ctype == 'simple':
# if eyelink and using original fixations
if ((self.config['source'] == 'eyelink') and (self.config['classify_eyelink_data'] == False)):
pass
else:
c_xy = self.classify(self.config, fxy_df, ctype=ctype,missing=_missing, maxdist=_maxdist, mindur=_mindur)
c_xy = pd.DataFrame(c_xy[1], columns=['start', 'end', 'duration', 'cx', 'cy'])
#print eyelink orignal fixations
if (self.config['source'] == 'eyelink'):
print(self.console['green'] + 'eyelink fixations' + self.console['ENDC'])
print(eyelink_c_xy)
#print calculated fixations if from webgazer or calculating eyelink data
if ((self.config['source'] == 'webgazer') or
(self.config['source'] == 'eyelink') and (self.config['classify_eyelink_data'] == True)):
print(self.console['orange'] + 'calculated fixations' + self.console['ENDC'])
#print(c_xy)
#append filted data
#_fgxy_df = append_data(gxy_df, fxy_df, itm[1])
l_cxy.append(c_xy)
if itm[0] != 'none':
f_x = "%s_x" % (itm[1])
f_y = "%s_y" % (itm[1])
else:
f_x = 'x'
f_y = 'y'
#class variable is created from hmm classification
if (ctype == 'hmm') and (self.config['too_few_samples'] == False)\
and (set(["%s_class" % (itm[1])]).issubset(fxy_df.columns)):
fxy_df = fxy_df[['x', 'y', "%s_class" % (itm[1])]]
else:
fxy_df["%s_class" % (itm[1])] = None
fxy_df = fxy_df[['x', 'y', "%s_class" % (itm[1])]]
#rename and append filtered x and y-axis
fxy_df = fxy_df.rename(index=str, columns={"x": f_x, "y": f_y})
l_gf.append(fxy_df)
#combine list of all filtered and raw data
for indx, item in enumerate(l_gf):
l_gf[indx].index = range(len(l_gf[indx].index))
fgxy_df = pd.concat(l_gf, axis=1, join_axes=[l_gf[0].index])
return l_cxy, fgxy_df
def append_classify(self, df, cg_df):
"""Appending classification to Dataframe.
Parameters
----------
df : :obj:`list`
Pandas dataframe of raw data.
gxy_df : :class:`pandas.DataFrame`
Pandas dataframe of raw data of classification events.
"""
for index, item in enumerate(cg_df):
count = self.filters[index][1] + '_FID'
for idx, rw in item.iterrows():
start = rw['start']
end = rw['end']
#interval
df.loc[df['timestamp'].between(start, end, inclusive=True), count] = idx
return df
def run(self, path, task_type="eyetracking", single_subject=False, single_trial=False, subject=0, trial=0, isMultiprocessing=True, cores=1):
"""Processing of data. Steps here include: cleaning data, fixation identification, and exporting data.
Parameters
----------
path : :obj:`string`
Path of raw data.
task_type : :obj:`string`
Running analysis on `eyetracking` or `behavioral` data.
single_subject : :obj:`bool`
Whether to run function with all or single subject.
single_trial : :obj:`bool`
Whether to run function with all or single trial.
subject : :obj:`int`
Subject number. Only if single_subject = True.
trial : :obj:`int`
Trial number. Only if single_trial = True.
isMultiprocessing : :obj:`bool`
Whether multiprocessing of data will be used. Only if single_subject = False.
cores : :obj:`int`
Number of cores to use for multiprocessing. Only if single_subject = False & isMultiprocessing=True.
Attributes
----------
process : :obj:`bool`
Process all data for export.
"""
#----set config
#set path
self.config['path'] = path + '/' + self.config['task']
# single_subject
self.config['single_subject'] = single_subject
# is single_trial
self.config['single_trial'] = single_trial
# subject and trial number
self.config['subject'] = subject
self.config['trial'] = trial
# classification type
ctype = self.config['ctype']
# classification parameters
missing=self.config['missing']
maxdist=self.config['maxdist']
mindur=self.config['mindur']
v_th=self.config['v_th']
dr_th=self.config['dr_th']
di_th=self.config['di_th']
#single subject
if (self.config['single_subject']):
print(self.console['orange'] + 'start-----------------' + self.console['ENDC'])
print(self.console['orange'] + 'single_subject = %s, single_trial = %s'%(single_subject, single_trial) + self.console['ENDC'])
print(self.console['orange'] + 'importing raw data' + self.console['ENDC'])
#get core number and set as global variable
self.thisCore = 0
#get logger and set as global variable
self.log=logging.getLogger(__name__)
#get data
gxy_df, df_path = self.getData()
#check if eyetracking, else go to next subject
isEyetracking = gxy_df['type'][0]
if isEyetracking != 'eyetracking':
print(self.console['orange'] + 'finished-----------------' + self.console['ENDC'])
return
#drop practice and sort
gxy_df = gxy_df.drop(gxy_df[(gxy_df['event']=='Prac')].index)
gxy_df = gxy_df.sort_values(['TrialNum','timestamp','sampleNum'], ascending=[True, True, True]).reset_index(drop=True)
#if single trial
if self.config['single_trial']:
l_trials = [trial]
else:
#get list of all possible trials to pull data from
l_trials = gxy_df['TrialNum'].unique()
#remove nan from behavioral list (nan occurs when row trial_type == instructions)
if (task_type == "behavioral"):
l_trials = [x for x in l_trials if not np.isnan(x)]
#------------------------------------------------for each trial
l_fgxy = []
#start
for idx in l_trials:
#reset blocker for too few trials in filtering
self.config['too_few_samples'] = False
#set config trial number
self.config['trial'] = idx
print(self.console['orange'] + 'subject: %s'%(self.config['subject']) + self.console['ENDC'])
print(self.console['orange'] + 'session: %s'%(self.config['session']) + self.console['ENDC'])
print(self.console['orange'] + 'trial: %s'%(self.config['trial']) + self.console['ENDC'])
#filter data to single trial
df = gxy_df[gxy_df['TrialNum'] == idx].reset_index(drop=True)
#get isCongruent
if df['isCongruent'][0]:
self.config['isCongruent'] = 'Congruent'
else:
self.config['isCongruent'] = 'Incongruent'
#get emotion of left and right stimuli
self.config['emotion'] = [df['left_mood'][0], df['right_mood'][0]]
#1. preprocess data
#get monitorSize
monitorSize = (df['monitorSize.px'][0]).split('x')
##remove devicePixelRatio from monitor size
monitorSize[0] = float(monitorSize[0]) / df['devicePixelRatio'][0]
monitorSize[1] = float(monitorSize[1]) / df['devicePixelRatio'][0]
#store
monitorSize = list(map(int, monitorSize))
self.config['resolution.px'] = monitorSize
#reset and store previous version as 'monitorSize_old'
df['monitorSize_old'] = df['monitorSize.px']
df['monitorSize.px'] = '%sx%s'%(monitorSize[0],monitorSize[1])
#get scale
##scale container and stimulus if x-resolution is smaller than 1400
if monitorSize[0] < 1400:
scaleImage = monitorSize[0]/1400
##dont scale
else:
scaleImage = 1
self.config['scaleImage'] = scaleImage
##preprocess
df = self.preprocess(df, monitorSize)
#get eyelink fixations
print(self.console['blue'] + 'importing eyelink fixations' + self.console['ENDC'])
if (self.config['source'] == 'eyelink'):
self.cgxy = self.eyelink_classify()
#process data
print(self.console['blue'] + 'processing data' + self.console['ENDC'])
cgxy_df, fgxy_df = self.process(monitorSize, self.filters, df, self.config['trial'],
_classify=self.config['classify'], ctype=ctype,
_missing=missing, _maxdist=maxdist, _mindur=mindur,
v_th=v_th, dr_th=dr_th, di_th=di_th)
#append classify to dataframe
if self.config['classify']:
if self.config['ctype'] != 'hmm':
fgxy_df = self.append_classify(fgxy_df, cgxy_df)
#apply bounds on fixations
else:
fgxy_df=self.roi(filters=self.filters, flt=self.filters[0][1], df=fgxy_df)
#check if all samples are flagged as fixations
flt = self.filters[0][1]
samples_fix_err = fgxy_df[fgxy_df["%s_fix_all"%(flt)]==True].count()["%s_fix_all"%(flt)]
samples = len(fgxy_df)
if samples_fix_err == samples:
fgxy_df['samples_fix_err'] = True
else:
fgxy_df['samples_fix_err'] = False
#sort
fgxy_df = fgxy_df.sort_values(['TrialNum','sampleNum'],ascending=[True, True]).reset_index(drop=True)
#append to ltrials, if there are at least 20 samples
#this will be used to rebuild new dataframe for subject
if (fgxy_df.shape[0] >= 1):
print(self.console['green'] + 'Preprocessing: trials with at least 20 samples' + self.console['ENDC'])
l_fgxy.append(fgxy_df)
#combine list of dataframes into new dataframe
df = pd.concat(l_fgxy, sort=False, ignore_index=True)
df = df.sort_values(['TrialNum','sampleNum'],ascending=[True, True]).reset_index(drop=True)
#save data
print(self.console['blue'] + 'saving data' + self.console['ENDC'])
subject = int(self.config['subject'])
session = self.config['session']
if self.config['save_data']:
if self.config['single_trial']:
f_path = self.config['path']['processed'] + '/data/' + self.config['type'] + '/%s_%s_%s.csv'%(subject,session,trial)
else:
f_path = self.config['path']['processed'] + '/data/' + self.config['type'] + '/%s_%s.csv'%(subject,session)
#save
df.to_csv(f_path, index=False)
#finish
return l_fgxy, cgxy_df, fgxy_df
#all subjects
elif (not self.config['single_subject']):
print(self.console['orange'] + 'start-----------------' + self.console['ENDC'])
print(self.console['orange'] + 'single_subject = %s, single_trial = %s'%(single_subject, single_trial) + self.console['ENDC'])
print(self.console['blue'] + 'importing raw data' + self.console['ENDC'])
#--------------------------------------------------for each subject
def all_subjects(fdir, core):
try:
#for each file
for sbj in fdir:
print(self.console['blue'] + 'subject: %s'%(sbj) + self.console['ENDC'])
#set subject name, session
p = Path(sbj)
_subject, _session = (p.name.replace('abc', '').replace('.csv', '')).split("_", 1)
self.config['subject'] = _subject
self.config['session'] = _session
#get data
gxy_df, df_path = self.getData(path=sbj)
#if looking at eyetracking data
if (task_type == "eyetracking"):
#check if behavioral, else skip and go to next subject
isEyetracking = gxy_df['type'][0]
if isEyetracking != 'eyetracking':
print(self.console['orange'] + 'finished subject' + self.console['ENDC'])
continue
else:
pass
#else looking at behavioral data
elif (task_type == "behavioral"):
#check if behavioral, else skip and go to next subject
isBehavioral= gxy_df['type'][0]
if isBehavioral != 'behavioral':
print(self.console['orange'] + 'finished subject' + self.console['ENDC'])
continue
else:
pass
#drop practice and sort
gxy_df = gxy_df.drop(gxy_df[(gxy_df['event']=='Prac')].index)
gxy_df = gxy_df.sort_values(['TrialNum','timestamp','sampleNum'],ascending=[True, True, True]).reset_index(drop=True)
#if single trial
if self.config['single_trial']:
l_trials = [trial]
#else all trials
else:
#get list of all possible trials to pull data from
l_trials = gxy_df['TrialNum'].unique()
#remove nan from behavioral list (nan occurs when row trial_type == instructions)
if (task_type == "behavioral"):
l_trials = [x for x in l_trials if not np.isnan(x)]
#------------------------------------------------for each trial
l_fgxy = []
#start
for idx in l_trials:
#reset blocker for too few trials in filtering
self.config['too_few_samples'] = False
#set config trial number
self.config['trial'] = idx
print(self.console['orange'] + 'subject: %s'%(_subject) + self.console['ENDC'])
print(self.console['orange'] + 'session: %s'%(_session) + self.console['ENDC'])
print(self.console['orange'] + 'trial: %s'%(self.config['trial']) + self.console['ENDC'])
#filter data to single trial
df = gxy_df[gxy_df['TrialNum'] == idx].reset_index(drop=True)
#get isCongruent
if df['isCongruent'][0]:
self.config['isCongruent'] = 'Congruent'
else:
self.config['isCongruent'] = 'Incongruent'
#get emotion of left and right stimuli
self.config['emotion'] = [df['left_mood'][0], df['right_mood'][0]]
#get monitorSize
monitorSize = (df['monitorSize.px'][0]).split('x')
##remove devicePixelRatio from monitor size
monitorSize[0] = float(monitorSize[0]) / df['devicePixelRatio'][0]
monitorSize[1] = float(monitorSize[1]) / df['devicePixelRatio'][0]
##store
monitorSize = list(map(int, monitorSize))
self.config['resolution.px'] = monitorSize
#reset and store previous version as 'monitorSize_old'
df['monitorSize_old'] = df['monitorSize.px']
df['monitorSize.px'] = '%sx%s'%(monitorSize[0],monitorSize[1])
#get scale
##scale container and stimulus if x-resolution is smaller than 1400
if monitorSize[0] < 1400:
scaleImage = monitorSize[0]/1400
##dont scale
else:
scaleImage = 1
self.config['scaleImage'] = scaleImage
#1. preprocess data
## only if eyetracking data
if (task_type == "eyetracking"):
##preprocess
df = self.preprocess(df, monitorSize)
#get eyelink fixations
print(self.console['blue'] + 'importing eyelink fixations' + self.console['ENDC'])
if (self.config['source'] == 'eyelink'):
self.cgxy = self.eyelink_classify()
#process data, if eyetracking
print(self.console['blue'] + 'processing data' + self.console['ENDC'])
cgxy_df, fgxy_df = self.process(monitorSize, self.filters, df, self.config['trial'],
_classify=self.config['classify'],
ctype=ctype,
draw_plot=self.config['draw_plot'],
draw_heatmap=self.config['draw_heatmap'],
draw_gazeplot=self.config['draw_gazeplot'],
draw_fixplot=self.config['draw_fixplot'],
_missing=missing, _maxdist=maxdist, _mindur=mindur,
v_th=v_th, dr_th=dr_th, di_th=di_th)
#append classify to dataframe
if self.config['classify']:
if self.config['ctype'] != 'hmm':
fgxy_df = self.append_classify(fgxy_df, cgxy_df)
#apply bounds on fixations
else:
fgxy_df=self.roi(filters=self.filters, flt=self.filters[0][1], df=fgxy_df)
#check if all samples are flagged as fixations
flt = self.filters[0][1]
samples_fix_err = fgxy_df[fgxy_df["%s_fix_all"%(flt)]==True].count()["%s_fix_all"%(flt)]
samples = len(fgxy_df)
if samples_fix_err == samples:
fgxy_df['samples_fix_err'] = True
else:
fgxy_df['samples_fix_err'] = False
#sort
fgxy_df = fgxy_df.sort_values(['TrialNum','sampleNum'],
ascending=[True, True]).reset_index(drop=True)
# else if behavioral data continue
elif (task_type == "behavioral"):
fgxy_df = df
#append to ltrials
#this will be used to rebuild new dataframe for subject
if (fgxy_df.shape[0] >= 1):
l_fgxy.append(fgxy_df)
#combine list of dataframes into new dataframe
df = pd.concat(l_fgxy, sort=False, ignore_index=True)
df = df.sort_values(['TrialNum','sampleNum'],ascending=[True, True])\
.reset_index(drop=True)
#save data
print(self.console['blue'] + 'saving data' + self.console['ENDC'])
subject = int(self.config['subject'])
session = self.config['session']
if self.config['save_data']:
if self.config['single_trial']:
f_path = self.config['path']['processed'] + '/data/' + self.config['type'] + '/%s_%s_%s.csv'%(subject,session,trial)
else:
f_path = self.config['path']['processed'] + '/data/' + self.config['type'] + '/%s_%s.csv'%(subject,session)
#save
df.to_csv(f_path, index=False)
#--------------------------------------------------------------------------------------------------end
#if all_subjects fails, save to log
except Exception as e:
self.log.error(e, exc_info=True)
#--------------------------------------------------prepare
#multithreading
#list of behavioral and eyetracking
import multiprocessing
#prepare collecting arguements
arg = []
#get directory
fdir = glob.glob(os.path.join(self.config['path'] + "/*.csv"))
#get cpu cores to be used
cores = self.config['cores']
#if requested cores is 1, run without multiprocessing
if (cores == 1):
isMultiprocessing = False
##if requested cores are less than/equal 7, and less than available cores plus 1
elif (cores <= 7) and (multiprocessing.cpu_count() >= cores + 1):
isMultiprocessing = True
fdir_chunk = np.array_split(fdir, cores)
for index in range(0, cores):
arg.append((fdir_chunk[index],index))
##else use less than half of total available cores
else:
isMultiprocessing = True
cores = int(self.config['cores']/2)
fdir_chunk = np.array_split(fdir, cores)
#breakpoint()
#------------------------------------------run multiprocessing
#if not multiprocessing
if not isMultiprocessing:
all_subjects(fdir, cores)
#else multiprocessing
else:
proc = [multiprocessing.Process(target=all_subjects,\
args=(fdir_chunk[x].tolist(), x)) for x in range(cores)]
for p in proc:
p.daemon = True
p.start()
#-----------------------------------------finished
return cores, arg, proc
def subject_metadata(self, fpath, spath):
    """
    Collect metadata from every participant's raw csv file.

    Parameters
    ----------
    fpath : :obj:`str`
        The directory path of all participant data.
    spath : :obj:`str`
        Path forwarded to ``Metadata.summary`` (presumably the location the
        formatted summary is saved to -- confirm against Metadata.summary).

    Returns
    -------
    df : :class:`pandas.DataFrame`
        Subject metadata, as returned by ``Metadata.summary``.
    """
    # timestamp for runtime reporting
    _t0 = datetime.datetime.now()
    _f = self.debug(message='t', source="timestamp")
    print(self.console['blue'] + 'running metadata.summary()' + self.console['ENDC'])
    # every participant csv in the source directory
    csv_files = glob.glob(fpath + "/*.csv")
    # one metadata row (a Series) per participant
    rows = []
    for csv_path in csv_files:
        participant = pd.read_csv(csv_path)
        # keep the originating filename alongside the data
        participant['file'] = csv_path
        # the first 'dotprobe-task' row carries the participant-level metadata
        rows.append(participant[participant['trial_type'] == 'dotprobe-task'].iloc[0])
    # rebuild a single dataframe: one row per participant
    df = pd.concat(rows, axis=1, keys=[s.name for s in rows], sort=False).T.reset_index(drop=True)
    # format metadata and save
    df = Metadata.summary(df=df, path=spath)
    # report elapsed time
    print(self.console['blue']+'%s finished in %s msec'%(_f,((datetime.datetime.now()-_t0).total_seconds()*1000))+self.console['ENDC'])
    return df
def variables(self, df):
    """Output list of variables for easy html viewing.

    Parameters
    ----------
    df : :class:`pandas.DataFrame`
        Pandas dataframe of raw data. This is used as a filter to prevent unused participants from being included in the data.

    Returns
    -------
    df_variable : :class:`pandas.DataFrame`
        One row per variable with columns
        ``['variable','group','type','example','definition']``.
    """
    # blank df for appending
    df_variable = pd.DataFrame()
    # variable names grouped by source
    source = {
        'bias': ['init_gaze_bias','final_gaze_bias','gaze_bias','n_gaze_valid','dp_bias','var_dp_bias','n_dp_valid'],
        'demographic':['race','is_normalvision','is_student'],
        'behavioral': ['m_rt','accuracy','m_diff_stim','m_diff_dotloc'],
        'clinical': ['cesd_score','cesd_group','rrs_brooding'],
        'device': ['os','os_version','gpu','gpu_type','browser','browser_version', 'devicePixelRatio','monitorSize',
        'windowSize','heap.used','heap.limit','WebcamMessage','webcamSize','webcam_brand','luminance',
        'isPageVisible','is_calibrated','is_eyetracking','isFullscreen'],
        'other': ['nested']
    }
    # for each source group build a (variable, type, example, group) table
    for key, row in source.items():
        # first data row supplies the example value for each variable
        df_ = df[row].iloc[:2].loc[0,:].reset_index().rename(columns={'index':'variable', 0:'example'})
        # add column for definitions, type; reorganize
        df_['type'] = df[row].dtypes.to_frame().reset_index().rename(columns={0:'type'})['type']
        df_['group'] = key
        df_ = df_.loc[:,['variable','type','example','group']]
        # if key == behavioral, add rows for the trial-level variables
        if key == 'behavioral':
            # BUG FIX: the original assigned all four rows to df_.loc[-1],
            # so each assignment overwrote the previous one and only the
            # last row ('trialType_') survived. Append all four instead.
            extra = pd.DataFrame(
                [['TrialNum', 'int64', 1, 'behavioral'],
                 ['TrialNum_', 'int64', 1, 'behavioral'],
                 ['trialType', 'int64', 1, 'behavioral'],
                 ['trialType_', 'object', 'iaps', 'behavioral']],
                columns=['variable','type','example','group'])
            df_ = pd.concat([df_, extra], ignore_index=True)
        # append (DataFrame.append was removed in pandas 2.0; use concat)
        df_variable = pd.concat([df_variable, df_])
    # reset index
    df_variable = df_variable.reset_index(level=0, drop=True)
    # import list of definitions and merge to variables list
    # (to initially get the variable list for filling in definitions:
    #  df_variable.to_csv(definitions_path, index=None))
    definitions_path = self.config['path']['output'] + "/analysis/definitions.csv"
    df_definitions = pd.read_csv(definitions_path, float_precision='high')
    df_variable = pd.merge(df_variable, df_definitions, on='variable')
    # change column order
    df_variable = df_variable[['variable','group','type','example','definition']]
    return df_variable
def dwell(self, df, cores=1):
    """
    Calculate dwell time for sad and neutral images.

    Parameters
    ----------
    df : :class:`pandas.DataFrame`
        Pandas dataframe of raw data. This is used as a filter to prevent unused participants from being included in the data.
    cores : :class:`int`
        Number of cores to use for multiprocessing.

    Returns
    -------
    df : :class:`pandas.DataFrame`
        Pandas dataframe with dwell time.
    error : :class:`pandas.DataFrame`
        Per-participant/aoi row counts, used to check that each participant
        contributed the expected number of trials.
    """
    import multiprocessing, itertools
    #----for timestamp
    _t0 = datetime.datetime.now()
    _f = self.debug(message='t', source="timestamp")
    print(self.console['blue'] + 'running dwell()' + self.console['ENDC'])
    #----worker: per-trial dwell samples/time for every subject in dir_
    def run(dir_=None, core=None, queue=None):
        print(self.console['green'] + 'processing.dwell.run(%s)'%(core) + self.console['ENDC'])
        dwell = []
        #----for each subject
        for index, row in dir_.iterrows():
            _path_ = row['path']
            _subject = row['participant']
            #----read csv as dataframe
            df_ = pd.read_csv(_path_, float_precision='high', low_memory=False)
            print('subject: %s; core: %s'%(_subject, core))
            #----if 198 trials continue else skip
            if (df_.drop_duplicates(subset="TrialNum", keep="first").shape[0] == 198):
                #----for each trial
                for _trial in range(0, 198):
                    try:
                        #----keep relevant trial
                        df_0 = df_.loc[df_['TrialNum'] == _trial].reset_index(drop=True)
                        #----get trialType
                        _trialType = df_0['trialType'][0]
                        #----drop columns
                        df_0 = df_0[['participant','TrialNum','timestamp','marker','dwell','trialType',
                                     'left_mood','right_mood','left_bound','right_bound']]
                        #----get range between "stimulus onset" and "dotloc onset"
                        #get start and end markers
                        start_m = df_0[df_0['marker'] == "Stimulus Onset"].index.item()
                        end_m = df_0[df_0['marker'] == "Dotloc Onset"].index.item()
                        #get dataframe restricted to that marker window
                        df_1 = df_0.iloc[start_m:end_m+1,:].reset_index(drop=True)
                        #get difference between consecutive timestamp values
                        df_1['difference'] = df_1['timestamp'].shift(-1) - df_1['timestamp']
                        #----get location of sad/neutral images
                        if ((df_1['left_mood'][0]=='Sad') and (df_1['right_mood'][0]=='Neutral')):
                            sad = 'left_bound'
                            neutral = 'right_bound'
                        else:
                            neutral = 'left_bound'
                            sad = 'right_bound'
                        #----get emotional dwell number of samples, if gaze is within dwell location
                        # nested by aoi (for within-group analysis)
                        ##neutral
                        df_neutral = df_1[(df_1[neutral]==True)]
                        ## subject, trial, trialType, aoi, dwell_num, dwell_time
                        dwell.append([_subject, _trial, _trialType, 'neutral', df_neutral.shape[0], df_neutral['difference'].sum()])
                        ##sad
                        df_sad = df_1[(df_1[sad]==True)]
                        ## subject, trial, trialType, aoi, dwell_num, dwell_time
                        dwell.append([_subject, _trial, _trialType, 'sad', df_sad.shape[0], df_sad['difference'].sum()])
                    #if exception: log the trial and keep going
                    except Exception as e:
                        line = sys.exc_info()[-1].tb_lineno
                        print('subject: %s; trial: %s; error: %s; line: %s'%(_subject, _trial, e, line))
            else:
                print('subject:%s; error:too few trials'%(_subject))
        #----add to multiprocessing queue (workers can't return directly)
        if isMultiprocessing:
            #queue
            queue.put(dwell)
        if not isMultiprocessing:
            return dwell
    #----collect worker output into output/error dataframes
    def finished(output, error=None):
        print(self.console['green'] + 'processing.dwell.finished()' + self.console['ENDC'])
        if isMultiprocessing:
            #----create output df (flatten the per-core lists first)
            df = pd.DataFrame(list(itertools.chain(*output)),
                              columns=['participant','trial','trialType','aoi','dwell_num','dwell_time'])
            #----create error df
            #check if data came out correctly by looking at amount of trials and participants outputted
            error = df.groupby(['participant','aoi']).agg(['count'])
        else:
            #----create output df
            df = pd.DataFrame(output, columns=['participant','trial','trialType','aoi','dwell_num','dwell_time'])
            #check if data came out correctly by looking at amount of trials and participants outputted
            error = df.groupby(['participant','aoi']).agg(['count'])
        #----end
        #timestamp
        print(self.console['blue']+'%s finished in %s msec'%(_f,((datetime.datetime.now()-_t0).total_seconds()*1000))+self.console['ENDC'])
        return df, error
    #--------------------get list of all participants
    ##get directory
    _dir = glob.glob(os.path.join(self.config['path']['processed'] + '/data/eyetracking' + "/*.csv"))
    _sbj_session = [(Path(x).name.replace('abc', '').replace('.csv', '')).split("_", 1) for x in _dir]
    dir_ = [[z,*x] for z,x in zip(_dir,_sbj_session)]
    dir_ = pd.DataFrame(dir_, columns=['path','participant','session'])
    #------------------------------------------check if running using multiprocessing
    #----get cores
    ##if requested cores is 0 or 1, run without multiprocessing
    if ((cores == 0) or (cores == 1)):
        isMultiprocessing = False
        print(self.console['green'] + 'processing.dwell() not multiprocessing' + self.console['ENDC'])
    ##else if requested cores are less than/equal 7, and less than available cores plus 1
    elif ((cores <= 7) and (multiprocessing.cpu_count() >= cores + 1)):
        isMultiprocessing = True
        dir_p = np.array_split(dir_, cores)
        print(self.console['green'] + 'processing.dwell() multiprocessing with %s cores'%(cores) + self.console['ENDC'])
    ##else use half of the requested cores
    else:
        isMultiprocessing = True
        cores = int(cores/2)
        # BUG FIX: split the participant DataFrame (dir_), not the raw path
        # list (_dir) -- run() iterates the chunks with .iterrows(), which
        # would fail on the ndarray-of-strings chunks produced from _dir.
        dir_p = np.array_split(dir_, cores)
        print(self.console['green'] + 'processing.dwell() multiprocessing with %s cores'%(cores) + self.console['ENDC'])
    #------------------------------------------multiprocessing
    #if not multiprocessing
    if not isMultiprocessing:
        #----start
        output = run(dir_, cores)
        #----after finished
        df_dwell, df_error = finished(output=output)
        #----merge
        df_dwell["participant"] = df_dwell[["participant"]].astype(np.int64)
        df = df_dwell.merge((df[['cesd_group','participant']].drop_duplicates(subset="participant", keep="first")), on='participant')
        #----finished
        return df, df_error
    #else multiprocessing
    else:
        #queue collects each worker's results (send and receive portions of output)
        queue = multiprocessing.Queue()
        #----collect each process
        process = [multiprocessing.Process(target=run,args=(dir_p[core], core, queue,)) for core in range(cores)]
        #start each process
        for p in process:
            p.daemon = True
            p.start()
        #drain queues before joining to avoid deadlock
        #note: see https://stackoverflow.com/a/45829852
        returns = []
        for p in process:
            returns.append(queue.get())
        #wait for each process to finish
        for p in process:
            p.join()
        #---- finished multiprocessing
        df_dwell, df_error = finished(output=returns)
        #----merge
        df_dwell["participant"] = df_dwell[["participant"]].astype(np.int64)
        df = df_dwell.merge((df[['cesd_group','participant']].drop_duplicates(subset="participant",keep="first")), on='participant')
        return df, df_error
def onset_diff(self, df0, merge=None, cores=1):
    """Calculate differences in onset presentation (stimulus, dotloc) using bokeh, seaborn, and pandas.

    Parameters
    ----------
    df0 : :class:`pandas.DataFrame`
        Pandas dataframe of raw data. This is used to merge variables that may be useful for analysis.
    merge : :class:`list` or `None`
        Variables to merge into returned df. If None, no merge is performed
        and the unmerged dataframe is returned.
    cores : :class:`int`
        Number of cores to use for multiprocessing.

    Returns
    -------
    df1 : :class:`pandas.DataFrame`
        Pandas dataframe.
    error : :class:`pandas.DataFrame`
        Dataframe of each participant and the amount of trials included in their data.
    drop : :class:`list`
        List of participants that are 3 SD from median.
    """
    import multiprocessing
    #timestamp
    _t0 = datetime.datetime.now()
    _f = self.debug(message='t', source="timestamp")
    #----worker: per-trial onset differences for every subject in dir_
    def run(dir_=None, core=None, queue=None):
        print(self.console['green'] + 'processing.onset_diff.run(%s)'%(core) + self.console['ENDC'])
        onset = []
        #----for each subject
        for index, row in dir_.iterrows():
            _path_ = row['path']
            _subject = row['participant']
            #----read csv as dataframe
            df = pd.read_csv(_path_, float_precision='high', low_memory=False)
            print('subject: %s; core: %s'%(_subject, core))
            #----if 198 trials continue else skip
            if (df.drop_duplicates(subset="TrialNum", keep="first").shape[0] == 198):
                #----format column
                df['RT'] = df['RT'].astype('float64')
                #----calculate number of samples in trial
                df['trial_samples'] = df.groupby('TrialNum')['type'].transform(len)
                #condense to subject:trial-level (one row per trial)
                df = df.loc[df.drop_duplicates(subset="TrialNum", keep="first").index].reset_index(level=0, drop=True)
                #----calculate scores
                # stimulus onset: absolute deviation from the expected 1500 msec
                df['diff_stim'] = df.apply(lambda x: abs(x['Stim_onset.t'] - 1500), axis=1)
                # dotloc onset: expected offset depends on trialType (iaps vs other)
                df['diff_dotloc'] = df.apply(lambda x: abs(x['DotLoc_onset.t'] - (1500 + 4500))
                if (x['trialType'] == 'iaps') else abs(x['DotLoc_onset.t'] - abs(1500 + 3000)), axis=1)
                #----relevant data only
                #keep important columns
                df = df[['participant', 'TrialNum', 'diff_dotloc', 'diff_stim', 'Key_Resp.rt','Key_Resp.acc','trialType']]
                #store to list
                onset.append(df)
            else:
                print('%s; error: too few trials'%(_subject))
        #----add to multiprocessing queue (workers can't return directly)
        if isMultiprocessing:
            #queue
            queue.put(onset)
        if not isMultiprocessing:
            return onset
    #--------collect worker output into output/error dataframes
    def finished(output, isMultiprocessing, error=None):
        #----concat list of dataframes
        if isMultiprocessing:
            #flatten the per-core lists first
            output_ = [itm for l in output for itm in l]
            df = pd.concat(output_)
        else:
            df = pd.concat(output)
        #rename (dots in column names interfere with attribute access/queries)
        df = df.rename(columns={'Key_Resp.rt':'Key_Resp_rt','Key_Resp.acc':'Key_Resp_acc'})
        #--get median onset error
        df['m_diff_dotloc'] = df.groupby(["participant"])['diff_dotloc'].transform('median')
        df['m_diff_stim'] = df.groupby(["participant"])['diff_stim'].transform('median')
        df['m_rt'] = df.groupby(["participant"])['Key_Resp_rt'].transform('median')
        #--get accuracy
        df['accuracy'] = df.groupby(["participant"])['Key_Resp_acc'].transform('sum')
        #--transform trial number to 0-1 for analysis
        df['TrialNum_'] = df.groupby(["participant"])['TrialNum'].transform(lambda x: x / 198)
        #----store participants with onset error 3 SD above the norm
        ##diff_dotloc
        error_std = df['diff_dotloc'].std() * 3
        drop_1 = list(df.loc[df['diff_dotloc'] > error_std].drop_duplicates(subset="participant", keep="first")['participant'])
        ##diff_stim
        error_std = df['diff_stim'].std() * 3
        drop_2 = list(df.loc[df['diff_stim'] > error_std].drop_duplicates(subset="participant", keep="first")['participant'])
        drop = list(np.unique(drop_1 + drop_2))
        #create new column where if onset diff greater than 500msec, then True, else False
        df['onset>500'] = np.where(df['participant'].isin(drop), True, False)
        #check if data came out correctly by looking at amount of trials and participants outputted
        error = df.groupby(['participant']).agg(['count'])
        #merge with df
        if merge is not None:
            df["participant"] = df[["participant"]].astype(np.int64)
            df1 = df.merge((df0[merge].drop_duplicates(subset="participant", keep="first")), on='participant')
        else:
            # BUG FIX: df1 was previously unbound when merge is None, so the
            # return below raised NameError; fall back to the unmerged df.
            df1 = df
        #----end
        print(self.console['blue']+'%s finished in %s msec'%(_f,((datetime.datetime.now()-_t0).total_seconds()*1000))+self.console['ENDC'])
        return df1, error, drop
    #--------------------get list of all participants
    ##get directory
    _dir = glob.glob(os.path.join(self.config['path']['processed'] + '/data/eyetracking' + "/*.csv"))
    _sbj_session = [(Path(x).name.replace('abc', '').replace('.csv', '')).split("_", 1) for x in _dir]
    dir_ = [[z,*x] for z,x in zip(_dir,_sbj_session)]
    dir_ = pd.DataFrame(dir_, columns=['path','participant','session'])
    #------------------------------------------check if running using multiprocessing
    #----get cores
    ##if requested cores is 0 or 1, run without multiprocessing
    if ((cores == 0) or (cores == 1)):
        isMultiprocessing = False
        print(self.console['green'] + 'processing.onset_diff() not multiprocessing' + self.console['ENDC'])
    ##else if requested cores are less than/equal 7, and less than available cores plus 1
    elif ((cores <= 7) and (multiprocessing.cpu_count() >= cores + 1)):
        isMultiprocessing = True
        dir_p = np.array_split(dir_, cores)
        print(self.console['green'] + 'processing.onset_diff() multiprocessing with %s cores'%(cores) + self.console['ENDC'])
    ##else use half of the requested cores
    else:
        isMultiprocessing = True
        cores = int(cores/2)
        # BUG FIX: split the participant DataFrame (dir_), not the raw path
        # list (_dir) -- run() iterates the chunks with .iterrows(), which
        # would fail on the ndarray-of-strings chunks produced from _dir.
        dir_p = np.array_split(dir_, cores)
        print(self.console['green'] + 'processing.onset_diff() multiprocessing with %s cores'%(cores) + self.console['ENDC'])
    #------------------------------------------multiprocessing
    #if not multiprocessing
    if not isMultiprocessing:
        #----start
        output = run(dir_, cores)
        #----after finished
        df_onset, df_error, drop = finished(isMultiprocessing=isMultiprocessing, output=output)
        return df_onset, df_error, drop
    #else multiprocessing
    else:
        #queue collects each worker's results (send and receive portions of output)
        queue = multiprocessing.Queue()
        #----collect each process
        process = [multiprocessing.Process(target=run,args=(dir_p[core], core, queue,)) for core in range(cores)]
        #start each process
        for p in process:
            p.daemon = True
            p.start()
        #drain queues before joining to avoid deadlock
        #note: queue.get() will block #https://stackoverflow.com/a/45829852
        returns = []
        for p in process:
            returns.append(queue.get())
        #wait for each process to finish
        for p in process:
            p.join()
        #---- finished multiprocessing
        df_onset, df_error, drop = finished(isMultiprocessing=isMultiprocessing, output=returns)
        return df_onset, df_error, drop
train_pg.py | import numpy as np
import tensorflow as tf
import gym
import logz
import scipy.signal
import os
import time
import inspect
from multiprocessing import Process
#============================================================================================#
# Utilities
#============================================================================================#
def build_mlp(
        input_placeholder,
        output_size,
        scope,
        n_layers=2,
        size=64,
        activation=tf.tanh,
        output_activation=None
        ):
    """Build a feedforward neural network (multilayer perceptron).

    Creates `n_layers` hidden dense layers of `size` units each (with
    `activation`), followed by a dense output layer of `output_size` units
    (with `output_activation`, None meaning linear).

    Parameters
    ----------
    input_placeholder : tf.Tensor
        Input tensor the network is applied to.
    output_size : int
        Width of the final (output) layer.
    scope : str
        TensorFlow variable scope the layers are created under.
    n_layers : int
        Number of hidden layers.
    size : int
        Width of each hidden layer.
    activation : callable
        Activation function for the hidden layers.
    output_activation : callable or None
        Activation function for the output layer.

    Returns
    -------
    tf.Tensor
        Output tensor of the network.
    """
    with tf.variable_scope(scope):
        # Chain the hidden layers directly instead of accumulating them in a
        # list, and avoid shadowing the builtin name `input`. This also fixes
        # n_layers=0: the original unconditionally built one hidden layer,
        # contradicting the "'n_layers' hidden layers" contract.
        hidden = input_placeholder
        for _ in range(n_layers):
            hidden = tf.layers.dense(hidden, size, activation=activation)
        return tf.layers.dense(hidden, output_size, activation=output_activation)
def pathlength(path):
    """Return the number of timesteps in a trajectory (length of its reward list)."""
    rewards = path["reward"]
    return len(rewards)
#============================================================================================#
# Policy Gradient
#============================================================================================#
def train_PG(exp_name='',
             env_name='CartPole-v0',
             n_iter=100,
             gamma=1.0,
             min_timesteps_per_batch=1000,
             max_path_length=None,
             learning_rate=5e-3,
             reward_to_go=True,
             animate=True,
             logdir=None,
             normalize_advantages=True,
             nn_baseline=False,
             seed=0,
             # network arguments
             n_layers=1,
             size=32
             ):
    """Train a policy with vanilla policy gradient (REINFORCE) on a gym env.

    Supports discrete and continuous action spaces, optional reward-to-go
    Q estimation, advantage normalization, and an optional neural-network
    state-value baseline. Diagnostics are written to *logdir* via logz.

    This fills in the assignment's TODO sections: the advantage placeholder,
    the policy distribution / sampling / log-prob ops, the surrogate loss,
    the baseline network and its update op, Monte-Carlo Q-value estimation,
    and the actual update calls in the training loop. The previous version
    did not run (empty `else:` branch, undefined `TODO` names, and
    `tf.multinomial(logits, None)`).
    """
    start = time.time()

    # Configure output directory for logging
    logz.configure_output_dir(logdir)

    # Log experimental parameters
    args = inspect.getargspec(train_PG)[0]
    locals_ = locals()
    params = {k: locals_[k] if k in locals_ else None for k in args}
    logz.save_params(params)

    # Set random seeds
    tf.set_random_seed(seed)
    np.random.seed(seed)

    # Make the gym environment
    env = gym.make(env_name)

    # Is this env continuous, or discrete?
    discrete = isinstance(env.action_space, gym.spaces.Discrete)

    # Maximum length for episodes
    max_path_length = max_path_length or env.spec.max_episode_steps

    # Observation and action sizes
    ob_dim = env.observation_space.shape[0]
    ac_dim = env.action_space.n if discrete else env.action_space.shape[0]

    # Placeholders for batch observations / actions / advantages.
    # (suffix _no: [batch, ob_dim]; _na: [batch, ac_dim]; _n: [batch])
    sy_ob_no = tf.placeholder(shape=[None, ob_dim], name="ob", dtype=tf.float32)
    if discrete:
        sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
    else:
        sy_ac_na = tf.placeholder(shape=[None, ac_dim], name="ac", dtype=tf.float32)
    sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)

    # Policy network, sampling op, and log-probability of the taken actions.
    if discrete:
        # The network emits unnormalized logits; softmax is folded into the
        # multinomial sampling and the cross-entropy below.
        sy_logits_na = build_mlp(sy_ob_no, ac_dim, "policy",
                                 n_layers=n_layers, size=size)
        # tf.multinomial samples 1 action per row -> shape [batch].
        sy_sampled_ac = tf.squeeze(tf.multinomial(sy_logits_na, 1), axis=[1])
        # log pi(a|s) = -cross_entropy(labels=a, logits)
        sy_logprob_n = -tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=sy_ac_na, logits=sy_logits_na)
    else:
        sy_mean = build_mlp(sy_ob_no, ac_dim, "policy",
                            n_layers=n_layers, size=size)
        # logstd is a free trainable variable, not a network output.
        sy_logstd = tf.get_variable("logstd", shape=[ac_dim], dtype=tf.float32)
        sy_std = tf.exp(sy_logstd)
        # Reparameterization trick: a = mu + sigma * z, z ~ N(0, I)
        sy_sampled_ac = sy_mean + sy_std * tf.random_normal(tf.shape(sy_mean))
        # Log density of the taken actions under a diagonal Gaussian.
        sy_z = (sy_ac_na - sy_mean) / sy_std
        sy_logprob_n = (-0.5 * tf.reduce_sum(tf.square(sy_z), axis=1)
                        - tf.reduce_sum(sy_logstd)
                        - 0.5 * ac_dim * np.log(2.0 * np.pi))

    # Surrogate loss: minimizing it ascends E[log pi(a|s) * advantage].
    loss = -tf.reduce_mean(sy_logprob_n * sy_adv_n)
    update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)

    # Optional neural-network state-value baseline.
    if nn_baseline:
        baseline_prediction = tf.squeeze(build_mlp(
            sy_ob_no,
            1,
            "nn_baseline",
            n_layers=n_layers,
            size=size))
        # Targets are normalized Q-values (Hint #bl2); predictions are
        # rescaled back to Q statistics at use time (Hint #bl1).
        sy_baseline_target_n = tf.placeholder(shape=[None],
                                              name="baseline_target",
                                              dtype=tf.float32)
        baseline_loss = tf.reduce_mean(
            tf.square(baseline_prediction - sy_baseline_target_n))
        baseline_update_op = tf.train.AdamOptimizer(learning_rate).minimize(
            baseline_loss)

    # Tensorflow engineering: config, session, variable initialization
    tf_config = tf.ConfigProto(inter_op_parallelism_threads=1,
                               intra_op_parallelism_threads=1)
    sess = tf.Session(config=tf_config)
    sess.__enter__()  # equivalent to `with sess:`
    tf.global_variables_initializer().run()  # pylint: disable=E1101

    # Training loop
    total_timesteps = 0
    for itr in range(n_iter):
        print("********** Iteration %i ************" % itr)

        # Collect paths until we have enough timesteps
        timesteps_this_batch = 0
        paths = []
        while True:
            ob = env.reset()
            obs, acs, rewards = [], [], []
            animate_this_episode = (len(paths) == 0 and (itr % 10 == 0) and animate)
            steps = 0
            while True:
                if animate_this_episode:
                    env.render()
                    time.sleep(0.05)
                obs.append(ob)
                ac = sess.run(sy_sampled_ac, feed_dict={sy_ob_no: ob[None]})
                ac = ac[0]
                acs.append(ac)
                ob, rew, done, _ = env.step(ac)
                rewards.append(rew)
                steps += 1
                if done or steps > max_path_length:
                    break
            path = {"observation": np.array(obs),
                    "reward": np.array(rewards),
                    "action": np.array(acs)}
            paths.append(path)
            timesteps_this_batch += pathlength(path)
            if timesteps_this_batch > min_timesteps_per_batch:
                break
        total_timesteps += timesteps_this_batch

        # Build arrays for observation, action for the policy gradient update
        # by concatenating across paths
        ob_no = np.concatenate([path["observation"] for path in paths])
        ac_na = np.concatenate([path["action"] for path in paths])

        # Monte Carlo estimation of Q-values, one entry per timestep.
        q_parts = []
        for path in paths:
            r = path["reward"]
            T = len(r)
            if reward_to_go:
                # Q_t = sum_{t'=t}^T gamma^(t'-t) r_{t'}
                q = np.zeros(T)
                running = 0.0
                for t in reversed(range(T)):
                    running = r[t] + gamma * running
                    q[t] = running
            else:
                # Trajectory-based PG: every timestep gets the full
                # discounted return Ret(tau) = sum_t gamma^t r_t.
                ret = np.sum((gamma ** np.arange(T)) * r)
                q = np.full(T, ret)
            q_parts.append(q)
        q_n = np.concatenate(q_parts)

        # Computing baselines
        if nn_baseline:
            # Network predicts normalized values; rescale to the statistics
            # of the current batch of Q-values (Hint #bl1).
            b_n = sess.run(baseline_prediction, feed_dict={sy_ob_no: ob_no})
            b_n = b_n * (np.std(q_n) + 1e-8) + np.mean(q_n)
            adv_n = q_n - b_n
        else:
            adv_n = q_n.copy()

        # Advantage normalization: mean zero, std one (variance reduction).
        if normalize_advantages:
            adv_n = (adv_n - np.mean(adv_n)) / (np.std(adv_n) + 1e-8)

        # Fit the baseline to normalized Q targets for the next iteration
        # (Hint #bl2).
        if nn_baseline:
            target_n = (q_n - np.mean(q_n)) / (np.std(q_n) + 1e-8)
            sess.run(baseline_update_op,
                     feed_dict={sy_ob_no: ob_no,
                                sy_baseline_target_n: target_n})

        # Policy gradient update; record the loss before/after for debugging.
        feed = {sy_ob_no: ob_no, sy_ac_na: ac_na, sy_adv_n: adv_n}
        loss_before = sess.run(loss, feed_dict=feed)
        sess.run(update_op, feed_dict=feed)
        loss_after = sess.run(loss, feed_dict=feed)

        # Log diagnostics
        returns = [path["reward"].sum() for path in paths]
        ep_lengths = [pathlength(path) for path in paths]
        logz.log_tabular("Time", time.time() - start)
        logz.log_tabular("Iteration", itr)
        logz.log_tabular("AverageReturn", np.mean(returns))
        logz.log_tabular("StdReturn", np.std(returns))
        logz.log_tabular("MaxReturn", np.max(returns))
        logz.log_tabular("MinReturn", np.min(returns))
        logz.log_tabular("EpLenMean", np.mean(ep_lengths))
        logz.log_tabular("EpLenStd", np.std(ep_lengths))
        logz.log_tabular("LossBefore", loss_before)
        logz.log_tabular("LossAfter", loss_after)
        logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
        logz.log_tabular("TimestepsSoFar", total_timesteps)
        logz.dump_tabular()
        logz.pickle_tf_vars()
def main():
    """Command-line entry point: parse flags, then run each experiment seed.

    Each run executes in its own child process because TensorFlow does not
    tolerate repeated graph construction in one thread.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('env_name', type=str)
    parser.add_argument('--exp_name', type=str, default='vpg')
    parser.add_argument('--render', action='store_true')
    parser.add_argument('--discount', type=float, default=1.0)
    parser.add_argument('--n_iter', '-n', type=int, default=100)
    parser.add_argument('--batch_size', '-b', type=int, default=1000)
    parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
    parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
    parser.add_argument('--reward_to_go', '-rtg', action='store_true')
    parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
    parser.add_argument('--nn_baseline', '-bl', action='store_true')
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--n_experiments', '-e', type=int, default=1)
    parser.add_argument('--n_layers', '-l', type=int, default=1)
    parser.add_argument('--size', '-s', type=int, default=32)
    args = parser.parse_args()

    # Timestamped run directory under ./data
    if not os.path.exists('data'):
        os.makedirs('data')
    stamp = time.strftime("%d-%m-%Y_%H-%M-%S")
    logdir = os.path.join('data', '{0}_{1}_{2}'.format(args.exp_name, args.env_name, stamp))
    if not os.path.exists(logdir):
        os.makedirs(logdir)

    # Negative/zero --ep_len means "use the env's own episode limit"
    max_path_length = args.ep_len if args.ep_len > 0 else None

    for e in range(args.n_experiments):
        seed = args.seed + 10 * e
        print('Running experiment with seed %d' % seed)

        def train_func(seed=seed):
            train_PG(
                exp_name=args.exp_name,
                env_name=args.env_name,
                n_iter=args.n_iter,
                gamma=args.discount,
                min_timesteps_per_batch=args.batch_size,
                max_path_length=max_path_length,
                learning_rate=args.learning_rate,
                reward_to_go=args.reward_to_go,
                animate=args.render,
                logdir=os.path.join(logdir, '%d' % seed),
                normalize_advantages=not args.dont_normalize_advantages,
                nn_baseline=args.nn_baseline,
                seed=seed,
                n_layers=args.n_layers,
                size=args.size
            )

        # Run in a fresh process: TensorFlow does not like repeatedly
        # calling train_PG in the same thread.
        runner = Process(target=train_func)
        runner.start()
        runner.join()
if __name__ == "__main__":
main()
|
mapper.py | import numpy as np
import matplotlib.pyplot as plt
import os
import serial
import time
import json
import datetime
from frghardware.components.stage import Thorlabs_LTS150_xy
from frghardware.components.mono import Mono
from frghardware.components.daq import DAQ
import datetime
from tqdm import tqdm
import h5py
import threading
from frghardware.components.nkt import Compact, Select
from frghardware.components.tec import Omega
from pymeasure.instruments.srs import SR830
# Root output directory tree for all mapper data: D:\frgmapper\Data\<date>\...
# os.makedirs(..., exist_ok=True) replaces the racy exists()-then-mkdir pattern
# (TOCTOU) and is a no-op when the directory already exists.
root = 'D:\\frgmapper'
os.makedirs(root, exist_ok=True)
datafolder = os.path.join(root, 'Data')
os.makedirs(datafolder, exist_ok=True)

# Number of scan points between h5 file flushes during long scans
# (see the scan methods of controlGeneric).
DEFAULT_FLUSH_INTERVAL = 10
### Settings for different PDA10DT Bandwidths:
# 1k:
# daq.countsPulseDuration = 50
# daq.countsPerTrigger = 100
# compact.setPulseFrequency(495)
#
# 10k:
# daq.countsPulseDuration = 20
# daq.countsPerTrigger = 30
# compact.setPulseFrequency(1665)
class controlGeneric(object):
def __init__(self, dwelltime = 0.2):
    """Initialize shared mapper state: output path, dwell time, baseline bookkeeping."""
    # Daily subfolder for saved scans, e.g. .../Data/20210202
    stamp = datetime.datetime.now().strftime('%Y%m%d')
    self.outputdir = os.path.join(root, datafolder, stamp)
    # Marks saved data as coming from the mono setup ('nkt' in the other setup)
    self.__hardwareSetup = 'mono'
    self.__dwelltime = dwelltime
    # Baseline bookkeeping, populated by takebaseline()
    self.__baselineTaken = False
    self.__baseline = {}
    # h5 flush throttling state used by the scan methods
    self.__flushinterval = DEFAULT_FLUSH_INTERVAL
    self.__flushcounter = 0
    # Non-blocking matplotlib so open figures don't pause scans
    plt.ion()
@property
def dwelltime(self):
    """Measurement dwell time (seconds) per wavelength point."""
    return self.__dwelltime
@dwelltime.setter
def dwelltime(self, x):
    """Set the dwell time (s), forwarding to the DAQ so its acquisition window matches."""
    # sets daq counts to match desired measurement time (x, in seconds)
    self.daq.dwelltime = x
    self.__dwelltime = x
# user methods
def generatewavelengths(self, wmin = 1700, wmax = 2000, wsteps = 151):
    """Return *wsteps* evenly spaced wavelengths (nm) from wmin to wmax, inclusive."""
    return np.linspace(wmin, wmax, wsteps)
def takebaseline(self, wavelengths):
    """Acquire the light and dark reference spectra used to baseline-correct later scans.

    Populates self.__baseline with raw/normalized light spectra over
    *wavelengths* plus a single long dark measurement, then marks the
    baseline as taken.
    """
    self.__flushinterval = np.inf  # nothing is saved during baseline; disable flushing
    # clean up wavelengths input
    wavelengths = np.array(wavelengths)
    # light baseline
    self.__baseline['Wavelengths'] = wavelengths
    self.__baseline['LightRaw'], self.__baseline['LightRefRaw'] = self._scanroutine(wavelengths)
    try:
        self.__baseline['Light'] = np.divide(self.__baseline['LightRaw'], self.__baseline['LightRefRaw'])
    except Exception:
        # was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # still propagate. Dump the raw arrays for debugging on failure.
        print(self.__baseline['LightRaw'])
        print(self.__baseline['LightRefRaw'])
    # dark baseline: single long acquisition with the internal clock,
    # restoring the previous dwell time and clock source afterwards
    storeddwelltime = self.__dwelltime
    storedUseExtClock = self.daq.useExtClock
    self.dwelltime = 5  # long acquisition for the dark baseline, as it is a single point measurement
    self.daq.useExtClock = False
    out = self.daq.read()
    self.dwelltime = storeddwelltime
    self.daq.useExtClock = storedUseExtClock
    self.__baseline['DarkRaw'] = out['IntSphere']['Mean']
    self.__baseline['DarkRefRaw'] = out['Reference']['Mean']
    self.__baseline['Dark'] = self.__baseline['DarkRaw'] / self.__baseline['DarkRefRaw']
    self.__baselineTaken = True
    self.__flushinterval = DEFAULT_FLUSH_INTERVAL
def scanpoint(self, label, wavelengths = None):
    """Acquire one reflectance spectrum at the current stage position, saved to an .h5 file.

    label: string used to build the output filename.
    wavelengths: wavelengths (nm) to scan; default handled by _cleanwavelengthinput.
    """
    def preparefile(f):
        # Lay out metadata groups and NaN-filled datasets that are overwritten
        # in place as the scan progresses; SWMR lets readers watch live.
        dummy1d = np.full(wavelengths.shape, np.nan)
        info, settings, baseline, completed = self._save_generic(f, label = label, scantype = 'scanpoint')
        # raw data
        rawdata = f.create_group('/data')
        rawdata.attrs['description'] = 'Data acquired during point scan.'
        temp = rawdata.create_dataset('wavelengths', data = np.array(wavelengths))
        temp.attrs['description'] = 'Wavelengths (nm) scanned.'
        reflectance = rawdata.create_dataset('reflectance', data = dummy1d)
        reflectance.attrs['description'] = 'Baseline-corrected reflectance measured. Stored as fraction (0-1), not percent!'
        signal = rawdata.create_dataset('signalRaw', data = dummy1d)
        signal.attrs['description'] = 'Raw signal for integrating sphere detector. (V)'
        reference = rawdata.create_dataset('referenceRaw', data = dummy1d)
        reference.attrs['description'] = 'Raw signal for reference detector. (V)'
        f.swmr_mode = True # Single Writer Multiple Reader, allows h5 file to be read during scan.
        f.flush()
        return signal, reference, reflectance, completed

    wavelengths = self._cleanwavelengthinput(wavelengths)
    fpath = self._getsavepath(label = label) #generate filepath for saving data
    with h5py.File(fpath, 'w', swmr = True, libver = 'latest') as self.f:
        signal, reference, reflectance, completed = preparefile(self.f)
        self.__flushcounter = 0
        self.__flushinterval = 5  # flush more often for this short scan
        signal[()], reference[()] = self._scanroutine(wavelengths)
        self.__flushinterval = DEFAULT_FLUSH_INTERVAL
        reflectance[()] = self._baselinecorrectionroutine(wavelengths, signal, reference)
        # NOTE(review): this rebinds the local name rather than writing into
        # the h5 'completed' object returned by _save_generic -- confirm intent.
        completed = 1
def scanline(self, label, axis, pmin, pmax, steps, wavelengths = None, p0 = None):
    """Scan reflectance spectra along one stage axis, streaming results to an .h5 file.

    label: output filename label.
    axis: 'x' or 'y', the axis to step along (case-insensitive).
    pmin, pmax: scan bounds (mm) relative to p0.
    steps: number of points between pmin and pmax.
    wavelengths: wavelengths (nm) per point; default handled by _cleanwavelengthinput.
    p0: absolute center coordinate (mm); defaults to the current position on `axis`.

    Fixes: the last-scan flag was previously assigned to a new name
    'lastScan' (capital S), so _scanroutine never saw lastscan=True.
    Also removed the unused xval/yval locals.
    """
    def preparefile(f):
        # Metadata + NaN-filled datasets filled in place; SWMR for live reads.
        dummy1d = np.full((steps, ), np.nan)
        dummy2d = np.full((steps, len(wavelengths)), np.nan)
        if axis == 'x':
            idlepos = self.stage.position[1]
            idleaxis = 'y'
        else:
            idlepos = self.stage.position[0]
            idleaxis = 'x'
        info, settings, baseline, completed = self._save_generic(f, label = label, scantype = 'scanline')
        ## add scan parameters to settings
        temp = settings.create_dataset('num', data = np.array(allpts.shape[0]))
        temp.attrs['description'] = f'Number of points scanned in {axis}'
        stepsize = np.abs(allpts[1] - allpts[0])
        temp = settings.create_dataset('stepsize', data = np.array(stepsize))
        temp.attrs['description'] = 'Step size (mm) in line scan.'
        temp = settings.create_dataset('axis', data = axis.encode('utf-8'))
        temp.attrs['description'] = 'Axis scanned (x or y)'
        temp = settings.create_dataset('idle_axis', data = idle_axis.encode('utf-8'))
        temp.attrs['description'] = 'Axis not scanned (x or y)'
        ## measured data
        rawdata = f.create_group('/data')
        rawdata.attrs['description'] = 'Data acquired during area scan.'
        temp = rawdata.create_dataset('idle_pos', data = idlepos)
        temp.attrs['description'] = f'{idleaxis} coordinate, did not change throughout line scan'
        temp = rawdata.create_dataset('pos', data = np.array(allpts))
        temp.attrs['description'] = f'Absolute {axis} coordinate (mm) per point'
        temp = rawdata.create_dataset('relpos', data = np.array(allpts - np.min(allpts)))
        temp.attrs['description'] = f'Relative {axis} coordinate (mm) per point'
        temp = rawdata.create_dataset('wavelengths', data = np.array(wavelengths))
        temp.attrs['description'] = 'Wavelengths (nm) scanned per point.'
        reflectance = rawdata.create_dataset('reflectance', data = dummy2d)
        reflectance.attrs['description'] = 'Baseline-corrected reflectance measured. Stored as [y, x, wl]. Stored as fraction (0-1), not percent!'
        signal = rawdata.create_dataset('signalRaw', data = dummy2d)
        signal.attrs['description'] = 'Raw signal for integrating sphere detector. (V)'
        reference = rawdata.create_dataset('referenceRaw', data = dummy2d)
        reference.attrs['description'] = 'Raw signal for reference detector. (V)'
        delay = rawdata.create_dataset('delay', data = dummy1d)
        delay.attrs['description'] = 'Time (seconds) that each scan was acquired at. Measured as seconds since first scan point.'
        f.swmr_mode = True # Single Writer Multiple Reader, allows h5 file to be read during scan.
        f.flush()
        return reflectance, signal, reference, delay, completed

    wavelengths = self._cleanwavelengthinput(wavelengths)
    if str.lower(axis) == 'x':
        axis = 'x'
        idle_axis = 'y'
    elif str.lower(axis) == 'y':
        axis = 'y'
        idle_axis = 'x'
    else:
        print('Error: axis must equal \'x\' or \'y\': user provided {0}'.format(axis))
        return

    x0, y0 = self.stage.position #if no p0 specified, assume coordinates are relative to current position
    if p0 is None:
        if axis == 'x':
            p0 = x0
        else:
            p0 = y0
    allpts = np.linspace(p0 + pmin, p0 + pmax, steps) #coordinates are relative to center coordinate p0

    fpath = self._getsavepath(label = label) #generate filepath for saving data
    self.__flushcounter = 0
    with h5py.File(fpath, 'w', swmr = True, libver = 'latest') as self.f:
        reflectance, signal, reference, delay, completed = preparefile(self.f)
        firstscan = True
        lastscan = False
        startTime = time.time()
        for idx, p in tqdm(enumerate(allpts), desc = 'Scanning {0}'.format(axis), total = steps, leave = False):
            if idx == steps-1:
                lastscan = True  # BUGFIX: was 'lastScan', which never reached _scanroutine
            # Move stage and monochromator concurrently to hide motion latency
            if axis == 'x':
                moveThread = threading.Thread(target = self.stage.moveto, args = (p, y0))
            else:
                moveThread = threading.Thread(target = self.stage.moveto, args = (x0, p))
            moveThread.start()
            wlThread = threading.Thread(target = self._goToWavelength, args = (wavelengths[0],))
            wlThread.start()
            moveThread.join()
            wlThread.join()
            signal[idx, :], reference[idx, :] = self._scanroutine(wavelengths = wavelengths, firstscan = firstscan, lastscan = lastscan)
            reflectance[idx, :] = self._baselinecorrectionroutine(wavelengths, signal[idx, :], reference[idx, :])
            delay[idx] = time.time() - startTime #time in seconds since scan began
            firstscan = False
        # NOTE(review): rebinds the local name; does not write into the h5
        # 'completed' object returned by _save_generic -- confirm intent.
        completed = 1
    self.stage.moveto(x = x0, y = y0) #go back to map center position
    self._lightOff()
def findarea(self, wavelength, xsize = 30, ysize = 30, xsteps = 40, ysteps = 40, plot = True, export = False):
    ### method to find sample edges. does two line scans in a cross over the sample at a single wavelength.
    # NOTE(review): 'export' is currently unused in this method -- confirm whether export was ever implemented.
    # NOTE(review): when plot=False, center and size are returned as [None, None] -- callers should expect that.
    # clean up wavelengths input
    self.__flushinterval = np.inf  # nothing is saved during findarea; disable flushing
    wavelength = self._cleanwavelengthinput(wavelength)
    if wavelength.shape[0] > 1:
        print('Please use a single wavelength for findarea - aborting')
        return False
    # self.stage.gotocenter() #go to center position, where sample is centered on integrating sphere port. Might need to remove this line later if inconvenient
    x0, y0 = self.stage.position
    # Build the cross: an x sweep at fixed y0 and a y sweep at fixed x0, centered on the current position
    allx = np.linspace(x0 - xsize/2, x0 + xsize/2, xsteps)
    ally = np.linspace(y0 - ysize/2, y0 + ysize/2, ysteps)
    self._goToWavelength(wavelength[0])
    self._lightOn()
    self.stage.moveto(x = allx[0], y = y0)
    xdata = np.zeros((xsteps,))
    for idx, x in tqdm(enumerate(allx), desc = 'Scanning X', total = allx.shape[0], leave = False):
        self.stage.moveto(x = x)
        out = self.daq.read()
        signal = [out['IntSphere']['Mean']]
        ref = [out['Reference']['Mean']]
        xdata[idx] = self._baselinecorrectionroutine(wavelengths = wavelength, signal = signal, reference = ref)
    self._lightOff()
    self.stage.moveto(x = x0, y = ally[0])
    self._lightOn()
    ydata = np.zeros((ysteps,))
    for idx, y in tqdm(enumerate(ally), desc = 'Scanning Y', total = ally.shape[0], leave = False):
        self.stage.moveto(y = y)
        out = self.daq.read()
        signal = [out['IntSphere']['Mean']]
        ref = [out['Reference']['Mean']]
        ydata[idx]= self._baselinecorrectionroutine(wavelengths = wavelength, signal = signal, reference = ref)
    self._lightOff()
    self.stage.moveto(x = x0, y = y0) #return to original position
    center = [None, None]
    size = [None, None]
    if plot:
        fig, ax = plt.subplots(2,1)
        # ax[0].plot(allx, xdata)
        center[0], size[0] = self._findedges(allx, xdata, ax = ax[0])
        ax[0].set_xlabel('X Position (mm)')
        ax[0].set_ylabel('Reflectance at {0} nm'.format(wavelength[0]))
        ax[0].set_title('X Scan')
        # ax[1].plot(ally, ydata)
        center[1], size[1] = self._findedges(ally, ydata, ax = ax[1])
        ax[1].set_xlabel('Y Position (mm)')
        ax[1].set_ylabel('Reflectance at {0} nm'.format(wavelength[0]))
        ax[1].set_title('Y Scan')
        plt.tight_layout()
        plt.show()
    # print + return the centroid, width/bounds if found (currently no sanity checking to see if bounds are realistic, rely on user to judge the plots for themselves)
    print('Suggested scanarea parameters:\n\tx0 = {0}\n\ty0 = {1}\n\txsize = {2}\n\tysize = {3}\n'.format(center[0], center[1], size[0], size[1]))
    self.__flushinterval = DEFAULT_FLUSH_INTERVAL
    return center, size
def scanarea(self, label, xmin, xmax, xsteps, ymin, ymax, ysteps, wavelengths = None, x0 = None, y0 = None, export = True):
    """Raster-scan reflectance spectra over a rectangular area (snaking in x),
    streaming results to an .h5 file as the scan proceeds.

    xmin..ymax are offsets (mm) relative to the center (x0, y0), which
    defaults to the current stage position. 'export' is currently unused.

    Fix: the last-scan flag was previously assigned to a new name 'lastScan'
    (capital S), so _scanroutine never saw lastscan=True.
    """
    def preparefile(f):
        # Metadata + NaN-filled datasets filled in place; SWMR for live reads.
        dummy2d = np.full((ysteps, xsteps), np.nan)
        dummy3d = np.full((ysteps, xsteps, len(wavelengths)), np.nan)
        info, settings, baseline, completed = self._save_generic(f, label = label, scantype = 'scanarea')
        ## add scan parameters to settings
        temp = settings.create_dataset('numx', data = np.array(allx.shape[0]))
        temp.attrs['description'] = 'Number of points scanned in x'
        temp = settings.create_dataset('numy', data = np.array(ally.shape[0]))
        temp.attrs['description'] = 'Number of points scanned in y'
        temp = settings.create_dataset('rangex', data = np.array(np.abs(allx[-1] - allx[0])))
        temp.attrs['description'] = 'Range scanned in x (mm)'
        temp = settings.create_dataset('rangey', data = np.array(np.abs(ally[-1] - ally[0])))
        temp.attrs['description'] = 'Range scanned in y (mm)'
        # Average step size over whichever axes have >1 point; stays 0 if neither does.
        countedaxes = 0
        stepsize = 0
        if allx.shape[0] > 1:
            stepsize = stepsize + np.abs(allx[1] - allx[0])
            countedaxes = countedaxes + 1
        if ally.shape[0] > 1:
            stepsize = stepsize + np.abs(ally[1] - ally[0])
            countedaxes = countedaxes + 1
        if countedaxes:
            stepsize = stepsize / countedaxes
        temp = settings.create_dataset('stepsize', data = np.array(stepsize))
        temp.attrs['description'] = 'Average step size (mm) in x and y. If either axis has length 1 (ie line scan), only consider step size in the other axis. If both axes have length 0 (point scan, although not a realistic outcome for .scanarea()), leave stepsize as 0 '
        ## measured data
        rawdata = f.create_group('/data')
        rawdata.attrs['description'] = 'Data acquired during area scan.'
        temp = rawdata.create_dataset('x', data = np.array(allx))
        temp.attrs['description'] = 'Absolute X coordinate (mm) per point'
        temp = rawdata.create_dataset('y', data = np.array(ally))
        temp.attrs['description'] = 'Absolute Y coordinate (mm) per point'
        temp = rawdata.create_dataset('relx', data = np.array(allx - np.min(allx)))
        temp.attrs['description'] = 'Relative X coordinate (mm) per point'
        temp = rawdata.create_dataset('rely', data = np.array(ally - np.min(ally)))
        temp.attrs['description'] = 'Relative Y coordinate (mm) per point'
        temp = rawdata.create_dataset('wavelengths', data = np.array(wavelengths))
        temp.attrs['description'] = 'Wavelengths (nm) scanned per point.'
        reflectance = rawdata.create_dataset('reflectance', data = dummy3d)
        reflectance.attrs['description'] = 'Baseline-corrected reflectance measured. Stored as [y, x, wl]. Stored as fraction (0-1), not percent!'
        signal = rawdata.create_dataset('signalRaw', data = dummy3d)
        signal.attrs['description'] = 'Raw signal for integrating sphere detector. (V)'
        reference = rawdata.create_dataset('referenceRaw', data = dummy3d)
        reference.attrs['description'] = 'Raw signal for reference detector. (V)'
        delay = rawdata.create_dataset('delay', data = dummy2d)
        delay.attrs['description'] = 'Time (seconds) that each scan was acquired at. Measured as seconds since first scan point.'
        f.swmr_mode = True # Single Writer Multiple Reader, allows h5 file to be read during scan.
        return reflectance, signal, reference, delay, completed

    # clean up wavelengths input
    wavelengths = self._cleanwavelengthinput(wavelengths)
    currentx, currenty = self.stage.position # unless otherwise specified, assume center coordinates x0,y0 are current position
    if x0 is None:
        x0 = currentx
    if y0 is None:
        y0 = currenty
    allx = np.linspace(x0 + xmin, x0 + xmax, xsteps)
    ally = np.linspace(y0 + ymin, y0 + ymax, ysteps)
    fpath = self._getsavepath(label = label) #generate filepath for saving data
    self.__flushcounter = 0
    with h5py.File(fpath, 'w', swmr = True, libver = 'latest') as self.f:
        reflectance, signal, reference, delay, completed = preparefile(self.f)
        firstscan = True
        lastscan = False
        reverse = -1 # sign flips each row: snake in x instead of returning to the first x point
        startTime = time.time()
        for yidx, y in tqdm(enumerate(ally), desc = 'Scanning Y', total = ally.shape[0], leave = False):
            reverse = reverse*(-1)
            for xidx, x in tqdm(enumerate(allx), desc = 'Scanning X', total = allx.shape[0], leave = False):
                if xidx == xsteps-1 and yidx == ysteps-1:
                    lastscan = True  # BUGFIX: was 'lastScan', which never reached _scanroutine
                # Move wavelength and stage concurrently; snake in x
                wlThread = threading.Thread(target = self._goToWavelength, args = (wavelengths[0],))
                wlThread.start()
                if reverse > 0: #snaking in x dimension
                    xidx_ = xidx
                    x_ = x
                else:
                    xidx_ = xsteps-1-xidx
                    x_ = allx[xidx_]
                moveThread = threading.Thread(target = self.stage.moveto, args = (x_, y))
                moveThread.start()
                wlThread.join()
                moveThread.join()
                signal[yidx, xidx_, :], reference[yidx, xidx_, :] = self._scanroutine(wavelengths = wavelengths, firstscan = firstscan, lastscan = lastscan)
                reflectance[yidx, xidx_, :] = self._baselinecorrectionroutine(wavelengths, signal[yidx, xidx_, :], reference[yidx, xidx_, :])
                delay[yidx, xidx_] = time.time() - startTime #time in seconds since scan began
                firstscan = False
        # NOTE(review): rebinds the local name; does not write into the h5
        # 'completed' object returned by _save_generic -- confirm intent.
        completed = 1
    self.stage.moveto(x = x0, y = y0) #go back to map center position
    self._lightOff()
def flyscanarea(self, label, xsize, ysize, wavelengths = None, xsteps = 21, ysteps = 21, x0 = None, y0 = None, export = True):
    """Continuous ('fly') area scan: for each wavelength, raster lines in x
    (snaking) across a (xsize x ysize) mm area centered on (x0, y0).

    Fixes: the delay array was sized with the undefined name 'yidx'
    (NameError at runtime); the last-scan condition compared enumerate
    indices to shape[0] (never true); the reverse-direction line scanned
    forward and then flipped the data, storing it mirrored.
    """
    # clean up wavelengths input
    wavelengths = self._cleanwavelengthinput(wavelengths)
    currentx, currenty = self.stage.position
    if x0 is None:
        x0 = currentx
    if y0 is None:
        y0 = currenty
    allx = np.linspace(x0 - xsize/2, x0 + xsize/2, xsteps)
    ally = np.linspace(y0 - ysize/2, y0 + ysize/2, ysteps)
    data = np.zeros((ysteps, xsteps, len(wavelengths)))
    # BUGFIX: was np.zeros((len(wavelengths), yidx)) -- 'yidx' is undefined
    # here; the array is indexed [wlidx, yidx] below, so the second axis is ysteps.
    delay = np.zeros((len(wavelengths), ysteps))
    firstscan = True
    lastscan = False
    reverse = -1 # sign flips each line to snake in x
    startTime = time.time()
    for wlidx, wl in tqdm(enumerate(wavelengths), desc = 'Wavelength', total = wavelengths.shape[0], leave = False):
        self.stage.moveto(x = allx[0], y = ally[0]) #move to starting position for each subsequent wavelength map
        self._goToWavelength(wl) #set the correct wavelength
        for yidx, y in tqdm(enumerate(ally), desc = 'Flyscan Lines', total = ally.shape[0], leave = False):
            self.stage.moveto(y = y) #move to next line
            # BUGFIX: enumerate indices top out at shape[0]-1, so the old
            # '== shape[0]' comparison could never flag the final line.
            if (yidx == ally.shape[0] - 1) and (wlidx == wavelengths.shape[0] - 1):
                lastscan = True
            reverse = reverse*(-1)
            if reverse > 0: #go in the forward direction
                linedata, linetime, linesignal, linereference = self._flyscanroutine(wavelength = wl,
                                                                                    x0 = allx[0],
                                                                                    x1 = allx[-1],
                                                                                    numpts = allx.shape[0],
                                                                                    firstscan = firstscan,
                                                                                    lastscan = lastscan
                                                                                    )
            else: # go in the reverse direction
                # BUGFIX: previously scanned forward here too and then flipped
                # the data, storing it mirrored. Sweep x from max to min and
                # flip so stored data is always in ascending-x order.
                # (assumes _flyscanroutine sweeps from x0 to x1 -- TODO confirm)
                linedata, linetime, linesignal, linereference = self._flyscanroutine(wavelength = wl,
                                                                                    x0 = allx[-1],
                                                                                    x1 = allx[0],
                                                                                    numpts = allx.shape[0],
                                                                                    firstscan = firstscan,
                                                                                    lastscan = lastscan
                                                                                    )
                linedata = np.flipud(linedata)
            data[yidx,:,wlidx] = linedata
            delay[wlidx, yidx] = time.time() - startTime #time in seconds since scan began
            firstscan = False
    self.stage.moveto(x = x0, y = y0) #go back to map center position
    if export:
        # export as a hfile
        self._save_flyscanarea(label = label,
                               x = allx,
                               y = ally,
                               delay = delay,
                               wavelengths = wavelengths,
                               reflectance = data
                               )
def timeseries(self, label, duration, interval, wavelengths = None, logtemperature = False, export = True):
### records a reflectance spectrum for a given duration (seconds) at set intervals (seconds)
# TODO: I don't think this will work for single wavelength inputs
# clean up wavelengths input
def preparefile(f):
dummy1d = np.full((0,), np.nan)
dummy2d = np.full((0, len(wavelengths)), np.nan)
info, settings, baseline, completed = self._save_generic(f, label = label, scantype = 'timeseries')
## add scan parameters to settings
temp = settings.create_dataset('duration', data = np.array(duration))
temp.attrs['description'] = 'Total time (s) desired for time series.'
temp = settings.create_dataset('interval', data = np.array(interval))
temp.attrs['description'] = 'Time (s) desired between subsequent scans.'
if logtemperature:
temp = settings.create_dataset('temperatureLogged', data = 1)
else:
temp = settings.create_dataset('temperatureLogged', data = 0)
temp.attrs['description'] = 'Temperature logged during measurements. Note that this is the temperature of the heating pad in slot 2, actual sample temperature may vary.'
## measured data
rawdata = f.create_group('/data')
rawdata.attrs['description'] = 'Data acquired during area scan.'
temp = rawdata.create_dataset('wavelengths', data = np.array(wavelengths))
temp.attrs['description'] = 'Wavelengths (nm) scanned per point.'
reflectance = rawdata.create_dataset('reflectance', data = dummy2d, maxshape = (None,len(wavelengths)))
reflectance.attrs['description'] = 'Baseline-corrected reflectance measured. Stored as [timestep, wl]. Stored as fraction (0-1), not percent!'
signal = rawdata.create_dataset('signalRaw', data = dummy2d, maxshape = (None, len(wavelengths)))
signal.attrs['description'] = 'Raw signal for integrating sphere detector. (V)'
reference = rawdata.create_dataset('referenceRaw', data = dummy2d, maxshape = (None, len(wavelengths)))
reference.attrs['description'] = 'Raw signal for reference detector. (V)'
delay = rawdata.create_dataset('delay', data = dummy1d, maxshape = (None, ))
delay.attrs['description'] = 'Time (seconds) that each scan was acquired at. Measured as seconds since first scan point.'
temperature = rawdata.create_dataset('temperature', data = dummy1d, maxshape = (None, ))
temperature.attrs['description'] = 'Temperature (C) of heating pad in slot 2 at the time that each scan was acquired at.'
f.swmr_mode = True # Single Writer Multiple Reader, allows h5 file to be read during scan.
f.flush()
return reflectance, signal, reference, delay, temperature, completed
def h5_extend_append(dset, data):
'''
timeseries has an undetermined number of datapoints. this function
appends data to the h5 datasets.
'''
rows = dset.shape[0]
dset.resize(rows + 1, axis = 0)
dset[-1] = data
if logtemperature:
print('Note: Temperature control/logging is only valid on slot 2!')
wavelengths = self._cleanwavelengthinput(wavelengths)
startTime = time.time()
pbarPercent = 0
fpath = self._getsavepath(label = label) #generate filepath for saving data
self.__flushcounter = 0
with h5py.File(fpath, 'w', swmr = True, libver = 'latest') as self.f:
reflectance, signal, reference, delay, temperature, completed = preparefile(self.f)
with tqdm(total = 100, desc = 'Scanning every {0} s for {1} s'.format(interval, duration), leave = False) as pbar:
while (time.time() - startTime) <= duration:
if (time.time()-startTime) >= (interval*len(delay)): #time for the next scan
h5_extend_append(delay, time.time() - startTime)
if logtemperature:
h5_extend_append(temperature, self.heater.getTemperature())
sig, ref = self._scanroutine(wavelengths, lastscan = False)
h5_extend_append(reflectance, self._baselinecorrectionroutine(wavelengths, sig, ref))
h5_extend_append(signal, sig)
h5_extend_append(reference, ref)
self.f.flush() #flush after every scan
else: #if we have some time to wait between scans, close the shutter and go to the starting wavelength
self._lightOff()
self._goToWavelength(wavelength = wavelengths[0])
currentPercent = round(100*(time.time()-startTime)/duration)
if currentPercent > 100:
currentPercent = 100
if currentPercent > pbarPercent:
pbar.update(currentPercent - pbarPercent)
pbarPercent = currentPercent
time.sleep(interval/100) #we can hit our interval within 1% accuracy, but give the cpu some rest
completed = 1
self._lightOff()
def scanareaWaRD(self, label, wavelengths, wavelengths_full = None, xsize = 52, ysize = 52, xsteps = 53, ysteps = 53, x0 = None, y0 = None, position = None, export = True):
x0s = [33, 106, 106, 33] ## UPDATE PROPER LOCATIONS
y0s = [117,117,57.5,57.5]
fullScanCoordinates = [ #spiral pattern to sample pts at varying distance from map center
[2,28],
[15,2],
[19,14],
[19,28],
[23,43],
[26,26],
[28,23],
[36,30],
[41,12],
[49,31]
]
if position is not None:
if position < 1 or position > 4:
print('Error: Position must hold a value from 1-4. User provided {0}. Scanning centered at current stage position'.format(position))
else:
x0 = x0s[position-1]
y0 = y0s[position-1]
currentx, currenty = self.stage.position # return position
if x0 is None:
x0 = currentx
if y0 is None:
y0 = currenty
wavelengths = self._cleanwavelengthinput(wavelengths)
if wavelengths_full is not None:
wavelengths_full = self._cleanwavelengthinput(wavelengths_full)
else:
wavelengths_full = np.linspace(1700, 2000, 151).astype(int)
allx = np.linspace(x0 - xsize/2, x0 + xsize/2, xsteps)
ally = np.linspace(y0 - ysize/2, y0 + ysize/2, ysteps)
data = np.zeros((ysteps, xsteps, len(wavelengths)))
signal = np.zeros((ysteps, xsteps, len(wavelengths)))
reference = np.zeros((ysteps, xsteps, len(wavelengths)))
delay = np.zeros((ysteps, xsteps))
data_full = np.zeros((len(fullScanCoordinates), len(wavelengths_full)))
signal_full = np.zeros((len(fullScanCoordinates), len(wavelengths_full)))
reference_full = np.zeros((len(fullScanCoordinates), len(wavelengths_full)))
delay_full = np.zeros((len(fullScanCoordinates),))
x_full = np.zeros((len(fullScanCoordinates),))
y_full = np.zeros((len(fullScanCoordinates),))
fullScanIdx = 0
firstscan = True
lastscan = False
reverse= -1 # for snaking
startTime = time.time()
for xidx, x in tqdm(enumerate(allx), desc = 'Scanning X', total = allx.shape[0], leave = False):
reverse=reverse*(-1)
for yidx, y in tqdm(enumerate(ally), desc = 'Scanning Y', total = ally.shape[0], leave = False):
if xidx == xsteps-1 and yidx == ysteps-1:
lastScan = True
# Condition to map in a snake pattern rather than coming back to first x point
wlThread = threading.Thread(target = self._goToWavelength, args = (wavelengths[0],))
wlThread.start()
if reverse > 0: #go in the forward direction
yyidx = yidx
else: # go in reverse direction
yyidx = ysteps-1-yidx
moveThread = threading.Thread(target = self.stage.moveto, args = (x, ally[yyidx]))
moveThread.start()
wlThread.join()
moveThread.join()
signal[yyidx, xidx, :], reference[yyidx, xidx, :], _ = self._scanroutine(wavelengths = wavelengths, firstscan = firstscan, lastscan = lastscan)
data[yyidx, xidx, :] = self._baselinecorrectionroutine(wavelengths, signal[yyidx, xidx, :], reference[yyidx, xidx, :])
delay[yyidx, xidx] = time.time() - startTime #time in seconds since scan began
firstscan = False
if [yyidx, xidx] in fullScanCoordinates: #we've reached a coordinate to perform a full spectrum WaRD scan
signal_full[fullScanIdx, :], reference_full[fullScanIdx, :], _ = self._scanroutine(wavelengths = wavelengths_full, firstscan = firstscan, lastscan = lastscan)
data_full[fullScanIdx, :] = self._baselinecorrectionroutine(wavelengths_full, signal_full[fullScanIdx, :], reference_full[fullScanIdx, :])
delay_full[fullScanIdx] = time.time() - startTime
x_full[fullScanIdx] = x
y_full[fullScanIdx] = ally[yyidx]
fullScanIdx = fullScanIdx + 1
self.stage.moveto(x = x0, y = y0) #go back to map center position
self._lightOff()
if export:
# export as a hfile
self._save_scanareaWaRD(
label = label,
x = allx,
y = ally,
delay = delay,
wavelengths = wavelengths,
reflectance = data,
signal = signal,
reference = reference,
x_full = x_full,
y_full = y_full,
delay_full = delay_full,
wavelengths_full = wavelengths_full,
reflectance_full = data_full,
signal_full = signal_full,
reference_full = reference_full
)
def scanareaWaRD_2(self, label, wavelengths, wavelengths_full = None, xsize = 52, ysize = 52, xsteps = 27, ysteps = 27, x0 = None, y0 = None, position = None, export = True):
x0s = [113.8, 41.4, 41.4, 113.8] ## UPDATE PROPER LOCATIONS - topright, topleft, bottomleft, bottomright
y0s = [119.6, 119.6, 56.7, 56.7]
# fullScanCoordinates = [ #spiral pattern to sample pts at varying distance from map center
# [2,28],
# [15,2],
# [19,14],
# [19,28],
# [23,43],
# [26,26],
# [28,23],
# [36,30],
# [41,12],
# [49,31]
# ]
fullScanCoordinates = []
for m in range(5):
for n in range(5):
fullScanCoordinates.append([m*4 + 4, n*4 + 4]) #take full scans on a 5x5 grid, every 8 mm apart. Nearly centered, 8mm offset map corner
if position is not None:
if position < 1 or position > 4:
print('Error: Position must hold a value from 1-4. User provided {0}. Scanning centered at current stage position'.format(position))
else:
x0 = x0s[position-1]
y0 = y0s[position-1]
currentx, currenty = self.stage.position # return position
if x0 is None:
x0 = currentx
if y0 is None:
y0 = currenty
wavelengths = self._cleanwavelengthinput(wavelengths)
if wavelengths_full is not None:
wavelengths_full = self._cleanwavelengthinput(wavelengths_full)
else:
wavelengths_full = np.linspace(1700, 2000, 151).astype(int)
allx = np.linspace(x0 - xsize/2, x0 + xsize/2, xsteps)
ally = np.linspace(y0 - ysize/2, y0 + ysize/2, ysteps)
data = np.zeros((ysteps, xsteps, len(wavelengths)))
signal = np.zeros((ysteps, xsteps, len(wavelengths)))
reference = np.zeros((ysteps, xsteps, len(wavelengths)))
delay = np.zeros((ysteps, xsteps))
data_full = np.zeros((len(fullScanCoordinates), len(wavelengths_full)))
signal_full = np.zeros((len(fullScanCoordinates), len(wavelengths_full)))
reference_full = np.zeros((len(fullScanCoordinates), len(wavelengths_full)))
delay_full = np.zeros((len(fullScanCoordinates),))
x_full = np.zeros((len(fullScanCoordinates),))
y_full = np.zeros((len(fullScanCoordinates),))
fullScanIdx = 0
firstscan = True
lastscan = False
reverse= -1 # for snaking
startTime = time.time()
for xidx, x in tqdm(enumerate(allx), desc = 'Scanning X', total = allx.shape[0], leave = False):
reverse=reverse*(-1)
for yidx, y in tqdm(enumerate(ally), desc = 'Scanning Y', total = ally.shape[0], leave = False):
if xidx == xsteps-1 and yidx == ysteps-1:
lastScan = True
# Condition to map in a snake pattern rather than coming back to first x point
wlThread = threading.Thread(target = self._goToWavelength, args = (wavelengths[0],))
wlThread.start()
if reverse > 0: #go in the forward direction
yyidx = yidx
else: # go in reverse direction
yyidx = ysteps-1-yidx
moveThread = threading.Thread(target = self.stage.moveto, args = (x, ally[yyidx]))
moveThread.start()
wlThread.join()
moveThread.join()
signal[yyidx, xidx, :], reference[yyidx, xidx, :] = self._scanroutine(wavelengths = wavelengths, firstscan = firstscan, lastscan = lastscan, flush = False)
data[yyidx, xidx, :] = self._baselinecorrectionroutine(wavelengths, signal[yyidx, xidx, :], reference[yyidx, xidx, :])
delay[yyidx, xidx] = time.time() - startTime #time in seconds since scan began
firstscan = False
if [yyidx, xidx] in fullScanCoordinates: #we've reached a coordinate to perform a full spectrum WaRD scan
signal_full[fullScanIdx, :], reference_full[fullScanIdx, :] = self._scanroutine(wavelengths = wavelengths_full, firstscan = firstscan, lastscan = lastscan, flush = False)
data_full[fullScanIdx, :] = self._baselinecorrectionroutine(wavelengths_full, signal_full[fullScanIdx, :], reference_full[fullScanIdx, :])
delay_full[fullScanIdx] = time.time() - startTime
x_full[fullScanIdx] = x
y_full[fullScanIdx] = ally[yyidx]
fullScanIdx = fullScanIdx + 1
self.stage.moveto(x = x0, y = y0) #go back to map center position
self._lightOff()
if export:
# export as a hfile
self._save_scanareaWaRD(
label = label,
x = allx,
y = ally,
delay = delay,
wavelengths = wavelengths,
reflectance = data,
signal = signal,
reference = reference,
x_full = x_full,
y_full = y_full,
delay_full = delay_full,
wavelengths_full = wavelengths_full,
reflectance_full = data_full,
signal_full = signal_full,
reference_full = reference_full
)
def scanLBIC(self, label, wavelengths, xsize, ysize, xsteps, ysteps, x0 = None, y0 = None, export = True):
# clean up wavelengths input
wavelengths = self._cleanwavelengthinput(wavelengths)
currentx, currenty = self.stage.position # return position
if x0 is None:
x0 = currentx
if y0 is None:
y0 = currenty
allx = np.linspace(x0 - xsize/2, x0 + xsize/2, xsteps)
ally = np.linspace(y0 - ysize/2, y0 + ysize/2, ysteps)
signal = np.zeros((ysteps, xsteps, len(wavelengths)))
reference = np.zeros((ysteps, xsteps, len(wavelengths)))
delay = np.zeros((ysteps, xsteps))
firstscan = True
lastscan = False
reverse= -1 # for snaking
startTime = time.time()
for xidx, x in tqdm(enumerate(allx), desc = 'Scanning X', total = allx.shape[0], leave = False):
reverse=reverse*(-1)
for yidx, y in tqdm(enumerate(ally), desc = 'Scanning Y', total = ally.shape[0], leave = False):
if xidx == xsteps-1 and yidx == ysteps-1:
lastScan = True
# Condition to map in a snake pattern rather than coming back to first x point
wlThread = threading.Thread(target = self._goToWavelength, args = (wavelengths[0],))
wlThread.start()
if reverse > 0: #go in the forward direction
moveThread = threading.Thread(target = self.stage.moveto, args = (x, y))
moveThread.start()
wlThread.join()
moveThread.join()
signal[yidx, xidx, :], reference[yidx, xidx, :], _ = self._scanroutine(wavelengths = wavelengths, firstscan = firstscan, lastscan = lastscan)
delay[yidx, xidx] = time.time() - startTime #time in seconds since scan began
else: # go in the reverse direction
moveThread = threading.Thread(target = self.stage.moveto, args = (x, ally[ysteps-1-yidx]))
moveThread.start()
wlThread.join()
moveThread.join()
signal[ysteps-1-yidx, xidx, :], reference[ysteps-1-yidx, xidx, :], _ = self._scanroutine(wavelengths = wavelengths, firstscan = firstscan, lastscan = lastscan)
delay[ysteps-1-yidx, xidx] = time.time() - startTime #time in seconds since scan began
firstscan = False
self.stage.moveto(x = x0, y = y0) #go back to map center position
self._lightOff()
if export:
# export as a hfile
self._save_scanLBIC(
label = label,
x = allx,
y = ally,
delay = delay,
wavelengths = wavelengths,
signal = signal,
reference = reference
)
# internal methods
def _scanroutine(self, wavelengths, firstscan = True, lastscan = True, flush = True):
self._goToWavelength(wavelengths[0])
if firstscan:
self._lightOn()
signal = np.zeros(wavelengths.shape)
ref = np.zeros(wavelengths.shape)
ratio = np.zeros(wavelengths.shape)
for idx, wl in tqdm(enumerate(wavelengths), total = wavelengths.shape[0], desc = 'Scanning {0:.1f}-{1:.1f} nm'.format(wavelengths[0], wavelengths[-1]), leave = False):
self.__flushcounter += 1
if self.__flushcounter >= self.__flushinterval and flush:
self.f.flush()
self.__flushcounter = 0
self._goToWavelength(wl)
out = self.daq.read()
signal[idx] = out['IntSphere']['Mean']
ref[idx] = out['Reference']['Mean']
if lastscan:
self._lightOff()
return signal, ref
def _flyscanroutine(self, wavelength, x0, x1, numpts, firstscan = True, lastscan = True):
def clipTime(timeraw, data, rampTime):
tmax = max(timeraw) - rampTime
tmin = rampTime
return data[(timeraw>tmin) & (timeraw<tmax)]
rampDistance = 2 #distance traveled before the stage reaches constant speed
rampTime = 0.5 #time elapsed before the stage reaches constant speed
useFraction = 1 #fraction of each time step to use. Valid values = 0.1 - 1, although very small values will likely hurt data quality. 1 = use entire time step, 0.5 = only use center 50% of timestep, etc.
# Spread out the line scan endpoints to account for acceleration runway
if x0 > x1:
x0 = x0 + rampDistance
x1 = x1 - rampDistance
else:
x0 = x0 - rampDistance
x1 = x1 + rampDistance
self.stage.moveto(x = x0) #move to flyscan start position
if firstscan:
self._lightOn()
self.daq.startBG() #start background acquisition
self.stage.moveto(x = x1) #move to flyscan end position
timeraw, detectorData = self.daq.stopBG() #stop and read data from background acquisition
signalraw = detectorData[:,0]
referenceraw = detectorData[:,1]
signal = clipTime(timeraw, signalraw, rampTime)
reference = clipTime(timeraw, referenceraw, rampTime)
time = clipTime(timeraw, timeraw, rampTime)
data = self._baselinecorrectionroutine(wavelength, signal, reference)
time = time - time.min() #force time to start at 0
endtime = time.max()
timestep = endtime / numpts
dropTime = timestep * 0.5 * (1-useFraction) #amount of time to drop from beginning/end of each timestep.
reflectance = np.zeros((numpts,))
for i in range(numpts):
tmin = timestep*i + dropTime
tmax = timestep*(i+1) - dropTime
ptData = data[(time > tmin)&(time < tmax)]
reflectance[i] = ptData.mean()
if lastscan:
self._lightOff()
return reflectance, timeraw, signalraw, referenceraw
def _baselinecorrectionroutine(self, wavelengths, signal, reference, ratio = None):
if self.__baselineTaken == False:
raise ValueError("Take baseline first")
# corrected = np.zeros(wavelengths.shape)
if self.processPulseTrain:
# corrected = np.divide(ratio, self.__baseline['Ratio'])
numerator = np.zeros(wavelengths.shape)
denominator = np.zeros(wavelengths.shape)
for idx, wl in enumerate(wavelengths):
# meas = signal[idx]/reference[idx]
bl_idx = np.where(self.__baseline['Wavelengths'] == wl)[0]
numerator[idx] = (signal[idx]) / (self.__baseline['LightRaw'][bl_idx])
denominator[idx] = (reference[idx]) / (self.__baseline['LightRefRaw'][bl_idx])
# denominator[idx] = 1
# corrected[idx] = (meas-self.__baseline['Dark']) / (self.__baseline['Light'][bl_idx]-self.__baseline['Dark'])
corrected = numerator/denominator
else:
numerator = np.zeros(wavelengths.shape)
denominator = np.zeros(wavelengths.shape)
for idx, wl in enumerate(wavelengths):
# meas = signal[idx]/reference[idx]
bl_idx = np.where(self.__baseline['Wavelengths'] == wl)[0]
numerator[idx] = (signal[idx]-self.__baseline['DarkRaw']) / (self.__baseline['LightRaw'][bl_idx]-self.__baseline['DarkRaw'])
denominator[idx] = (reference[idx]-self.__baseline['DarkRefRaw']) / (self.__baseline['LightRefRaw'][bl_idx]-self.__baseline['DarkRefRaw'])
# denominator[idx] = 1
# corrected[idx] = (meas-self.__baseline['Dark']) / (self.__baseline['Light'][bl_idx]-self.__baseline['Dark'])
corrected = numerator/denominator
return corrected
def _findedges(self, x, r, ax = None):
### Given stage positions x and reflectance values r from a line scan at a single wavelength, compute the edges and center of
# the sample area using the first derivative. If given an axis handle, plots the line scan + suggested positions to this axis.
r1 = np.gradient(r)
# r2 = np.gradient(r1)
x1 = x[np.where(r1==r1.max())]
x2 = x[np.where(r1==r1.min())]
if x1 > x2: #force x2 > x1 - initial order depends on whether reflectance is higher or lower on target area vs background
temp = x1
x1 = x2
x2 = temp
center = np.mean([x1,x2])
rng = x2[0]-x1[0]
if ax is not None:
ax.plot(x,r)
ylim0 = [x for x in plt.ylim()]
ax.plot([x1,x1], ylim0, color = 'r', linestyle = '--')
ax.plot([x2,x2], ylim0, color = 'r', linestyle = '--')
ax.plot([center, center], ylim0, color = 'r', linestyle = ':')
ylim0[1] += 0.15 * (ylim0[1]-ylim0[0])
plt.ylim(ylim0)
ax.text(0.5, 0.98,
'Center: {0:.3f}, Range: {1:.3f}'.format(center, rng),
verticalalignment = 'top',
horizontalalignment = 'center',
transform = ax.transAxes,
fontsize = 16,
# color = 'g'
)
return center, rng
def _cleanwavelengthinput(self, wavelength):
if wavelength is None:
wavelength = self.__baseline['Wavelengths'] #if no wavelengths specified, assume scanning over baseline wavelengths.
# clean up wavelengths input
if type(wavelength) is np.ndarray:
if wavelength.shape == ():
wavelength = np.array([wavelength]) #cast to (1,)
else:
pass #should already be good
elif type(wavelength) is list:
wavelength = np.array(wavelength)
else:
wavelength = np.array([wavelength]) #assume we have a single int/float value here. if its a string we'll throw a normal error downstream
return wavelength
### Save methods to dump measurements to hdf5 file. Currently copied from PL code, need to fit this to the mapping data.
def _getsavepath(self, label):
todaysDate = datetime.datetime.now().strftime('%Y%m%d')
self.outputdir = os.path.join(root, datafolder, todaysDate) #set outputdir folder so the scan saves on correct date (date of scan completion)
### figure out the sample directory, name, total filepath
if not os.path.exists(self.outputdir):
os.mkdir(self.outputdir)
fids = os.listdir(self.outputdir)
fileNumber = 1
for fid in fids:
if 'frgmapper' in fid:
fileNumber = fileNumber + 1
if label is not None:
fname = 'frgmapper_{0:04d}_{1}.h5'.format(fileNumber, label)
else:
fname = 'frgmapper_{0:04d}.h5'.format(fileNumber)
fpath = os.path.join(self.outputdir, fname)
return fpath
def _save_generic(self, f, label, scantype):
### General information that will be saved regardless of which method (point scan, area scan, etc.) was used. Should be called
# at the beginning of any scan.
# sample info
info = f.create_group('/info')
info.attrs['description'] = 'Metadata describing sample, datetime, etc.'
temp = info.create_dataset('name', data = label.encode('utf-8'))
temp.attrs['description'] = 'Sample name.'
date = info.create_dataset('date', data = datetime.datetime.now().strftime('%Y-%m-%d').encode('utf-8'))
temp.attrs['description'] = 'Measurement start date.'
temp = info.create_dataset('time', data = datetime.datetime.now().strftime('%H:%M:%S').encode('utf-8'))
temp.attrs['description'] = 'Measurement start time of day.'
temp = info.create_dataset('completed', data = 0)
temp.attrs['description'] = 'Boolean flag, true if measurement was successfully completed. If false, portion of data is likely missing, and should be reported as NaNs'
temp = info.create_dataset('type', data = scantype.encode('utf-8'))
temp.attrs['description'] = 'Type of measurement held in this file.'
# measurement settings
settings = f.create_group('/settings')
settings.attrs['description'] = 'Settings used for measurements.'
temp = settings.create_dataset('hardware', data = self.__hardwareSetup.encode('utf-8'))
temp.attrs['description'] = 'Light source/ardware used for this scan - either the newport lamp + monochromator, or nkt compact + select'
temp = settings.create_dataset('dwelltime', data = self.__dwelltime)
temp.attrs['description'] = 'Time spent collecting signal at each wavelength.'
temp = settings.create_dataset('count_rate', data = self.daq.rate)
temp.attrs['description'] = 'Acquisition rate (Hz) of data acquisition unit when reading detector voltages.'
temp = settings.create_dataset('num_counts', data = self.daq.counts)
temp.attrs['description'] = 'Voltage counts per detector used to quantify reflectance values.'
temp = settings.create_dataset('position', data = np.array(self.stage.position))
temp.attrs['description'] = 'Stage position (x,y) during scan.'
completed = settings.create_dataset('scan_completed', data = 0)
completed.attrs['description'] = 'Flag indicating whether scan was run to completion. 0 if cancelled/interrupted.'
# baseline measurement
baseline = f.create_group('/settings/baseline')
temp = baseline.create_dataset('wavelengths', data = np.array(self.__baseline['Wavelengths']))
temp.attrs['description'] = 'Wavelengths (nm) scanned for baseline measurement'
temp = baseline.create_dataset('sphere_ill', data = np.array(self.__baseline['LightRaw']))
temp.attrs['description'] = 'Raw counts for integrating sphere detector during illuminated baseline measurement'
temp = baseline.create_dataset('ref_ill', data = np.array(self.__baseline['LightRefRaw']))
temp.attrs['description'] = 'Raw counts for reference detector during illuminated baseline measurement'
temp = baseline.create_dataset('ratio_ill', data = np.array(self.__baseline['Light']))
temp.attrs['description'] = 'Ratio of sphere/reference counts during illuminated baseline measurement. This number is considered 100\% reflectance'
temp = baseline.create_dataset('sphere_dark', data = np.array(self.__baseline['DarkRaw']))
temp.attrs['description'] = 'Raw counts for integrating sphere detector during dark baseline measurement. Single point, independent of wavelength.'
temp = baseline.create_dataset('ref_dark', data = np.array(self.__baseline['DarkRefRaw']))
temp.attrs['description'] = 'Raw counts for reference detector during dark baseline measurement. Single point, independent of wavelength.'
temp = baseline.create_dataset('ratio_dark', data = np.array(self.__baseline['Dark']))
temp.attrs['description'] = 'Ratio of sphere/reference counts during illuminated baseline measurement. This number is considered 0\% reflectance. Single point, independent of wavelength.'
return info, settings, baseline, completed
def _save_scanareaWaRD(self, label, x, y, delay, wavelengths, reflectance, signal, reference, x_full, y_full, delay_full, wavelengths_full, reflectance_full, signal_full, reference_full):
fpath = self._getsavepath(label = label) #generate filepath for saving data
with h5py.File(fpath, 'w', swmr = True, libver = 'latest') as self.f:
info, settings, baseline = self._save_generic(f = self.f, label = label, scantype = 'scanareaWaRD')
## add scan parameters to settings
temp = settings.create_dataset('numx', data = np.array(x.shape[0]))
temp.attrs['description'] = 'Number of points scanned in x'
temp = settings.create_dataset('numy', data = np.array(y.shape[0]))
temp.attrs['description'] = 'Number of points scanned in y'
temp = settings.create_dataset('numfull', data = np.array(len(x_full)))
temp.attrs['description'] = 'Number of points at which a full WaRD spectrum was acquired'
temp = settings.create_dataset('rangex', data = np.array(np.abs(x[-1] - x[0])))
temp.attrs['description'] = 'Range scanned in x (mm)'
temp = settings.create_dataset('rangey', data = np.array(np.abs(y[-1] - y[0])))
temp.attrs['description'] = 'Range scanned in y (mm)'
# calculate step size. Calculates the average step size in x and y. If either axis has length 1 (ie line scan), only consider step size
# in the other axis. If both axes have length 0 (point scan, although not a realistic outcome for .scanarea()), leave stepsize as 0
countedaxes = 0
stepsize = 0
if x.shape[0] > 1:
stepsize = stepsize + np.abs(x[1] - x[0])
countedaxes = countedaxes + 1
if y.shape[0] > 1:
stepsize = stepsize + np.abs(y[1] - y[0])
countedaxes = countedaxes + 1
if countedaxes:
stepsize = stepsize / countedaxes
temp = settings.create_dataset('stepsize', data = np.array(stepsize))
temp.attrs['description'] = 'Average step size (mm) in x and y. If either axis has length 1 (ie line scan), only consider step size in the other axis. If both axes have length 0 (point scan, although not a realistic outcome for .scanarea()), leave stepsize as 0 '
## measured data
rawdata = self.f.create_group('/data')
rawdata.attrs['description'] = 'Data acquired during area scan.'
temp = rawdata.create_dataset('x', data = np.array(x))
temp.attrs['description'] = 'Absolute X coordinate (mm) per point'
temp = rawdata.create_dataset('y', data = np.array(y))
temp.attrs['description'] = 'Absolute Y coordinate (mm) per point'
temp = rawdata.create_dataset('relx', data = np.array(x - np.min(x)))
temp.attrs['description'] = 'Relative X coordinate (mm) per point'
temp = rawdata.create_dataset('rely', data = np.array(y - np.min(y)))
temp.attrs['description'] = 'Relative Y coordinate (mm) per point'
temp = rawdata.create_dataset('wavelengths', data = np.array(wavelengths))
temp.attrs['description'] = 'Wavelengths (nm) scanned per point.'
temp = rawdata.create_dataset('reflectance', data = np.array(reflectance))
temp.attrs['description'] = 'Baseline-corrected reflectance measured. Stored as [y, x, wl]. Stored as fraction (0-1), not percent!'
temp = rawdata.create_dataset('signalRaw', data = np.array(signal))
temp.attrs['description'] = 'Raw signal for integrating sphere detector. (V)'
temp = rawdata.create_dataset('referenceRaw', data = np.array(reference))
temp.attrs['description'] = 'Raw signal for reference detector. (V)'
temp = rawdata.create_dataset('delay', data = np.array(delay))
temp.attrs['description'] = 'Time (seconds) that each scan was acquired at. Measured as seconds since first scan point.'
## measured data, full WaRD spectra
temp = rawdata.create_dataset('x_full', data = np.array(x_full))
temp.attrs['description'] = 'Absolute X coordinate (mm) per point'
temp = rawdata.create_dataset('y_full', data = np.array(y_full))
temp.attrs['description'] = 'Absolute Y coordinate (mm) per point'
temp = rawdata.create_dataset('relx_full', data = np.array(x_full - np.min(x)))
temp.attrs['description'] = 'Relative X coordinate (mm) per point'
temp = rawdata.create_dataset('rely_full', data = np.array(y_full - np.min(y)))
temp.attrs['description'] = 'Relative Y coordinate (mm) per point'
temp = rawdata.create_dataset('wavelengths_full', data = np.array(wavelengths_full))
temp.attrs['description'] = 'Wavelengths (nm) scanned per point.'
temp = rawdata.create_dataset('reflectance_full', data = np.array(reflectance_full))
temp.attrs['description'] = 'Baseline-corrected reflectance measured. Stored as [y, x, wl]. Stored as fraction (0-1), not percent!'
temp = rawdata.create_dataset('signalRaw_full', data = np.array(signal_full))
temp.attrs['description'] = 'Raw signal for integrating sphere detector. (V)'
temp = rawdata.create_dataset('referenceRaw_full', data = np.array(reference_full))
temp.attrs['description'] = 'Raw signal for reference detector. (V)'
temp = rawdata.create_dataset('delay_full', data = np.array(delay_full))
temp.attrs['description'] = 'Time (seconds) that each scan was acquired at. Measured as seconds since first scan point.'
print('Data saved to {0}'.format(fpath))
def _save_flyscanarea(self, label, x, y, delay, wavelengths, reflectance):
fpath = self._getsavepath(label = label) #generate filepath for saving data
with h5py.File(fpath, 'w', swmr = True, libver = 'latest') as f:
info, settings, baseline = self._save_generic(f, label = label)
## add scan type to info
temp = info.create_dataset('type', data = 'flyscanarea'.encode('utf-8'))
temp.attrs['description'] = 'Type of measurement held in this file.'
## add scan parameters to settings
temp = settings.create_dataset('numx', data = np.array(x.shape[0]))
temp.attrs['description'] = 'Number of points scanned in x'
temp = settings.create_dataset('numy', data = np.array(y.shape[0]))
temp.attrs['description'] = 'Number of points scanned in y'
temp = settings.create_dataset('rangex', data = np.array(np.abs(x[-1] - x[0])))
temp.attrs['description'] = 'Range scanned in x (mm)'
temp = settings.create_dataset('rangey', data = np.array(np.abs(y[-1] - y[0])))
temp.attrs['description'] = 'Range scanned in y (mm)'
# calculate step size. Calculates the average step size in x and y. If either axis has length 1 (ie line scan), only consider step size
# in the other axis. If both axes have length 0 (point scan, although not a realistic outcome for .scanarea()), leave stepsize as 0
countedaxes = 0
stepsize = 0
if x.shape[0] > 1:
stepsize = stepsize + np.abs(x[1] - x[0])
countedaxes = countedaxes + 1
if y.shape[0] > 1:
stepsize = stepsize + np.abs(y[1] - y[0])
countedaxes = countedaxes + 1
if countedaxes:
stepsize = stepsize / countedaxes
temp = settings.create_dataset('stepsize', data = np.array(stepsize))
temp.attrs['description'] = 'Average step size (mm) in x and y. If either axis has length 1 (ie line scan), only consider step size in the other axis. If both axes have length 0 (point scan, although not a realistic outcome for .scanarea()), leave stepsize as 0. Note that steps in x are divided out of a continuous line scan.'
## measured data
rawdata = f.create_group('/data')
rawdata.attrs['description'] = 'Data acquired during area scan.'
temp = rawdata.create_dataset('x', data = np.array(x))
temp.attrs['description'] = 'Absolute X coordinate (mm) per point. Note that points in x are divided out of a continous line scan.'
temp = rawdata.create_dataset('y', data = np.array(y))
temp.attrs['description'] = 'Absolute Y coordinate (mm) per point'
temp = rawdata.create_dataset('relx', data = np.array(x - np.min(x)))
temp.attrs['description'] = 'Relative X coordinate (mm) per point. Note that points in x are divided out of a continous line scan.'
temp = rawdata.create_dataset('rely', data = np.array(y - np.min(y)))
temp.attrs['description'] = 'Relative Y coordinate (mm) per point'
temp = rawdata.create_dataset('wavelengths', data = np.array(wavelengths))
temp.attrs['description'] = 'Wavelengths (nm) at which sequential flyscan maps were performed.'
temp = rawdata.create_dataset('reflectance', data = np.array(reflectance))
temp.attrs['description'] = 'Baseline-corrected reflectance measured. Stored as [y, x, wl]. Stored as fraction (0-1), not percent!'
temp = rawdata.create_dataset('delay', data = np.array(delay))
temp.attrs['description'] = 'Time (seconds) that each line scan was acquired at. [wl, y]. Measured as seconds since first line scan.'
print('Data saved to {0}'.format(fpath))
def _save_timeseries(self, label, wavelengths, reflectance, delay, duration, interval, signal, reference, logtemperature, temperature):
    """Write a time-series measurement to an HDF5 file.

    Stores the shared header groups (info/settings/baseline) via
    _save_generic, then the time-series specific settings and the measured
    data arrays, and prints the destination path when done.
    """
    fpath = self._getsavepath(label = label)  # filepath the data will be written to

    def describe(dataset, text):
        # attach the human-readable description attribute used throughout our files
        dataset.attrs['description'] = text

    with h5py.File(fpath, 'w', swmr = True, libver = 'latest') as f:
        info, settings, baseline = self._save_generic(f, label = label)

        # record the measurement type in the info group
        describe(info.create_dataset('type', data = 'timeseries'.encode('utf-8')),
                 'Type of measurement held in this file.')

        # scan parameters
        describe(settings.create_dataset('duration', data = np.array(duration)),
                 'Total time (s) desired for time series.')
        describe(settings.create_dataset('interval', data = np.array(interval)),
                 'Time (s) desired between subsequent scans.')
        describe(settings.create_dataset('temperatureLogged', data = 1 if logtemperature else 0),
                 'Temperature logged during measurements. Note that this is the temperature of the heating pad in slot 2, actual sample temperature may vary.')

        # measured data
        rawdata = f.create_group('/data')
        rawdata.attrs['description'] = 'Data acquired during area scan.'
        describe(rawdata.create_dataset('wavelengths', data = np.array(wavelengths)),
                 'Wavelengths (nm) scanned per point.')
        describe(rawdata.create_dataset('reflectance', data = np.array(reflectance)),
                 'Baseline-corrected reflectance measured. Stored as [y, x, wl]. Stored as fraction (0-1), not percent!')
        describe(rawdata.create_dataset('signalRaw', data = np.array(signal)),
                 'Raw signal for integrating sphere detector. (V)')
        describe(rawdata.create_dataset('referenceRaw', data = np.array(reference)),
                 'Raw signal for reference detector. (V)')
        describe(rawdata.create_dataset('delay', data = np.array(delay)),
                 'Time (seconds) that each scan was acquired at. Measured as seconds since first scan point.')
        describe(rawdata.create_dataset('temperature', data = np.array(temperature)),
                 'Temperature (C) of heating pad in slot 2 at the time that each scan was acquired at.')
    print('Data saved to {0}'.format(fpath))
def _save_scanLBIC(self, label, x, y, delay, wavelengths, signal, reference):
    """Write an LBIC area scan to an HDF5 file.

    No baseline group is stored for LBIC (include_baseline = False), so
    _save_generic returns only the info and settings groups.
    """
    fpath = self._getsavepath(label = label)  # filepath the data will be written to

    def describe(dataset, text):
        # attach the human-readable description attribute used throughout our files
        dataset.attrs['description'] = text

    with h5py.File(fpath, 'w', swmr = True, libver = 'latest') as f:
        info, settings = self._save_generic(f, label = label, include_baseline = False)

        # record the measurement type in the info group
        describe(info.create_dataset('type', data = 'scanLBIC'.encode('utf-8')),
                 'Type of measurement held in this file.')

        # scan parameters
        describe(settings.create_dataset('numx', data = np.array(x.shape[0])),
                 'Number of points scanned in x')
        describe(settings.create_dataset('numy', data = np.array(y.shape[0])),
                 'Number of points scanned in y')
        describe(settings.create_dataset('rangex', data = np.array(np.abs(x[-1] - x[0]))),
                 'Range scanned in x (mm)')
        describe(settings.create_dataset('rangey', data = np.array(np.abs(y[-1] - y[0]))),
                 'Range scanned in y (mm)')
        # average step size over the axes that have more than one point;
        # stays 0 for a single-point scan
        steps = [np.abs(axis[1] - axis[0]) for axis in (x, y) if axis.shape[0] > 1]
        stepsize = sum(steps) / len(steps) if steps else 0
        describe(settings.create_dataset('stepsize', data = np.array(stepsize)),
                 'Average step size (mm) in x and y. If either axis has length 1 (ie line scan), only consider step size in the other axis. If both axes have length 0 (point scan, although not a realistic outcome for .scanarea()), leave stepsize as 0 ')

        # measured data
        rawdata = f.create_group('/data')
        rawdata.attrs['description'] = 'Data acquired during area scan.'
        describe(rawdata.create_dataset('x', data = np.array(x)),
                 'Absolute X coordinate (mm) per point')
        describe(rawdata.create_dataset('y', data = np.array(y)),
                 'Absolute Y coordinate (mm) per point')
        describe(rawdata.create_dataset('relx', data = np.array(x - np.min(x))),
                 'Relative X coordinate (mm) per point')
        describe(rawdata.create_dataset('rely', data = np.array(y - np.min(y))),
                 'Relative Y coordinate (mm) per point')
        describe(rawdata.create_dataset('wavelengths', data = np.array(wavelengths)),
                 'Wavelengths (nm) scanned per point.')
        describe(rawdata.create_dataset('signalRaw', data = np.array(signal)),
                 'Raw signal for calculating LBIC current. (V across 50 ohm terminating resistor)')
        describe(rawdata.create_dataset('referenceRaw', data = np.array(reference)),
                 'Raw signal for reference detector. (V)')
        describe(rawdata.create_dataset('delay', data = np.array(delay)),
                 'Time (seconds) that each scan was acquired at. Measured as seconds since first scan point.')
    print('Data saved to {0}'.format(fpath))
# def _save_findArea(self, label, wavelength, reflectance):
def _save_scanArea(self, label, x, y, delay, wavelengths, reflectance, signal, reference):
# Write a reflectance area scan to an HDF5 file: shared header groups, the
# scan-grid settings (size / range / step), then the raw measured arrays.
# NOTE(review): this method calls self._getSavePath/_saveGeneralInformation
# while sibling methods above call self._getsavepath/_save_generic —
# presumably two revisions of the same API; confirm which helpers exist.
fpath = self._getSavePath(label = label) #generate filepath for saving data
with h5py.File(fpath, 'w', swmr = True, libver = 'latest') as f:
info, settings, baseline = self._saveGeneralInformation(f, label = label)
## add scan type to info
temp = info.create_dataset('type', data = 'scanArea'.encode('utf-8'))
temp.attrs['description'] = 'Type of measurement held in this file.'
## add scan parameters to settings
temp = settings.create_dataset('numx', data = np.array(x.shape[0]))
temp.attrs['description'] = 'Number of points scanned in x'
temp = settings.create_dataset('numy', data = np.array(y.shape[0]))
temp.attrs['description'] = 'Number of points scanned in y'
temp = settings.create_dataset('rangex', data = np.array(np.abs(x[-1] - x[0])))
temp.attrs['description'] = 'Range scanned in x (mm)'
temp = settings.create_dataset('rangey', data = np.array(np.abs(y[-1] - y[0])))
temp.attrs['description'] = 'Range scanned in y (mm)'
# calculate step size. Calculates the average step size in x and y. If either axis has length 1 (ie line scan), only consider step size
# in the other axis. If both axes have length 0 (point scan, although not a realistic outcome for .scanArea()), leave stepsize as 0
countedaxes = 0
stepsize = 0
if x.shape[0] > 1:
stepsize = stepsize + np.abs(x[1] - x[0])
countedaxes = countedaxes + 1
if y.shape[0] > 1:
stepsize = stepsize + np.abs(y[1] - y[0])
countedaxes = countedaxes + 1
if countedaxes:
stepsize = stepsize / countedaxes
temp = settings.create_dataset('stepsize', data = np.array(stepsize))
temp.attrs['description'] = 'Average step size (mm) in x and y. If either axis has length 1 (ie line scan), only consider step size in the other axis. If both axes have length 0 (point scan, although not a realistic outcome for .scanArea()), leave stepsize as 0 '
## measured data
rawdata = f.create_group('/data')
rawdata.attrs['description'] = 'Data acquired during area scan.'
temp = rawdata.create_dataset('x', data = np.array(x))
temp.attrs['description'] = 'Absolute X coordinate (mm) per point'
temp = rawdata.create_dataset('y', data = np.array(y))
temp.attrs['description'] = 'Absolute Y coordinate (mm) per point'
# relative coordinates share the minimum of each axis as the origin
temp = rawdata.create_dataset('relx', data = np.array(x - np.min(x)))
temp.attrs['description'] = 'Relative X coordinate (mm) per point'
temp = rawdata.create_dataset('rely', data = np.array(y - np.min(y)))
temp.attrs['description'] = 'Relative Y coordinate (mm) per point'
temp = rawdata.create_dataset('wavelengths', data = np.array(wavelengths))
temp.attrs['description'] = 'Wavelengths (nm) scanned per point.'
temp = rawdata.create_dataset('reflectance', data = np.array(reflectance))
temp.attrs['description'] = 'Baseline-corrected reflectance measured. Stored as [y, x, wl]. Stored as fraction (0-1), not percent!'
temp = rawdata.create_dataset('signalRaw', data = np.array(signal))
temp.attrs['description'] = 'Raw signal for integrating sphere detector. (V)'
temp = rawdata.create_dataset('referenceRaw', data = np.array(reference))
temp.attrs['description'] = 'Raw signal for reference detector. (V)'
temp = rawdata.create_dataset('delay', data = np.array(delay))
temp.attrs['description'] = 'Time (seconds) that each scan was acquired at. Measured as seconds since first scan point.'
print('Data saved to {0}'.format(fpath))
def _save_scanAreaWaRD(self, label, x, y, delay, wavelengths, reflectance, signal, reference, x_full, y_full, delay_full, wavelengths_full, reflectance_full, signal_full, reference_full):
# Write a WaRD area scan to an HDF5 file. Two point sets are stored: the
# coarse scan grid (x/y/...) and the subset of points where a full WaRD
# spectrum was acquired (*_full arrays).
fpath = self._getsavepath(label = label) #generate filepath for saving data
with h5py.File(fpath, 'w', swmr = True, libver = 'latest') as f:
# scantype is passed straight to _save_generic here, unlike the sibling
# methods which create the 'type' dataset themselves
info, settings, baseline = self._save_generic(f, label = label, scantype = 'scanareaWaRD')
## add scan parameters to settings
temp = settings.create_dataset('numx', data = np.array(x.shape[0]))
temp.attrs['description'] = 'Number of points scanned in x'
temp = settings.create_dataset('numy', data = np.array(y.shape[0]))
temp.attrs['description'] = 'Number of points scanned in y'
temp = settings.create_dataset('numfull', data = np.array(len(x_full)))
temp.attrs['description'] = 'Number of points at which a full WaRD spectrum was acquired'
temp = settings.create_dataset('rangex', data = np.array(np.abs(x[-1] - x[0])))
temp.attrs['description'] = 'Range scanned in x (mm)'
temp = settings.create_dataset('rangey', data = np.array(np.abs(y[-1] - y[0])))
temp.attrs['description'] = 'Range scanned in y (mm)'
# calculate step size. Calculates the average step size in x and y. If either axis has length 1 (ie line scan), only consider step size
# in the other axis. If both axes have length 0 (point scan, although not a realistic outcome for .scanArea()), leave stepsize as 0
countedaxes = 0
stepsize = 0
if x.shape[0] > 1:
stepsize = stepsize + np.abs(x[1] - x[0])
countedaxes = countedaxes + 1
if y.shape[0] > 1:
stepsize = stepsize + np.abs(y[1] - y[0])
countedaxes = countedaxes + 1
if countedaxes:
stepsize = stepsize / countedaxes
temp = settings.create_dataset('stepsize', data = np.array(stepsize))
temp.attrs['description'] = 'Average step size (mm) in x and y. If either axis has length 1 (ie line scan), only consider step size in the other axis. If both axes have length 0 (point scan, although not a realistic outcome for .scanArea()), leave stepsize as 0 '
## measured data
rawdata = f.create_group('/data')
rawdata.attrs['description'] = 'Data acquired during area scan.'
temp = rawdata.create_dataset('x', data = np.array(x))
temp.attrs['description'] = 'Absolute X coordinate (mm) per point'
temp = rawdata.create_dataset('y', data = np.array(y))
temp.attrs['description'] = 'Absolute Y coordinate (mm) per point'
temp = rawdata.create_dataset('relx', data = np.array(x - np.min(x)))
temp.attrs['description'] = 'Relative X coordinate (mm) per point'
temp = rawdata.create_dataset('rely', data = np.array(y - np.min(y)))
temp.attrs['description'] = 'Relative Y coordinate (mm) per point'
temp = rawdata.create_dataset('wavelengths', data = np.array(wavelengths))
temp.attrs['description'] = 'Wavelengths (nm) scanned per point.'
temp = rawdata.create_dataset('reflectance', data = np.array(reflectance))
temp.attrs['description'] = 'Baseline-corrected reflectance measured. Stored as [y, x, wl]. Stored as fraction (0-1), not percent!'
temp = rawdata.create_dataset('signalRaw', data = np.array(signal))
temp.attrs['description'] = 'Raw signal for integrating sphere detector. (V)'
temp = rawdata.create_dataset('referenceRaw', data = np.array(reference))
temp.attrs['description'] = 'Raw signal for reference detector. (V)'
temp = rawdata.create_dataset('delay', data = np.array(delay))
temp.attrs['description'] = 'Time (seconds) that each scan was acquired at. Measured as seconds since first scan point.'
## measured data, full WaRD spectra
temp = rawdata.create_dataset('x_full', data = np.array(x_full))
temp.attrs['description'] = 'Absolute X coordinate (mm) per point'
temp = rawdata.create_dataset('y_full', data = np.array(y_full))
temp.attrs['description'] = 'Absolute Y coordinate (mm) per point'
# NOTE(review): relx_full/rely_full are offset by the minimum of the COARSE
# grid (np.min(x), np.min(y)), not of x_full/y_full — presumably so both
# point sets share one origin; confirm this is intended.
temp = rawdata.create_dataset('relx_full', data = np.array(x_full - np.min(x)))
temp.attrs['description'] = 'Relative X coordinate (mm) per point'
temp = rawdata.create_dataset('rely_full', data = np.array(y_full - np.min(y)))
temp.attrs['description'] = 'Relative Y coordinate (mm) per point'
temp = rawdata.create_dataset('wavelengths_full', data = np.array(wavelengths_full))
temp.attrs['description'] = 'Wavelengths (nm) scanned per point.'
temp = rawdata.create_dataset('reflectance_full', data = np.array(reflectance_full))
temp.attrs['description'] = 'Baseline-corrected reflectance measured. Stored as [y, x, wl]. Stored as fraction (0-1), not percent!'
temp = rawdata.create_dataset('signalRaw_full', data = np.array(signal_full))
temp.attrs['description'] = 'Raw signal for integrating sphere detector. (V)'
temp = rawdata.create_dataset('referenceRaw_full', data = np.array(reference_full))
temp.attrs['description'] = 'Raw signal for reference detector. (V)'
temp = rawdata.create_dataset('delay_full', data = np.array(delay_full))
temp.attrs['description'] = 'Time (seconds) that each scan was acquired at. Measured as seconds since first scan point.'
print('Data saved to {0}'.format(fpath))
def _save_flyscanArea(self, label, x, y, delay, wavelengths, reflectance):
# Write a flyscan area map to an HDF5 file. Per the stored descriptions,
# x points are divided out of a continuous line scan and delay is recorded
# per line scan ([wl, y]) rather than per point.
fpath = self._getSavePath(label = label) #generate filepath for saving data
with h5py.File(fpath, 'w', swmr = True, libver = 'latest') as f:
info, settings, baseline = self._saveGeneralInformation(f, label = label)
## add scan type to info
temp = info.create_dataset('type', data = 'flyscanArea'.encode('utf-8'))
temp.attrs['description'] = 'Type of measurement held in this file.'
## add scan parameters to settings
temp = settings.create_dataset('numx', data = np.array(x.shape[0]))
temp.attrs['description'] = 'Number of points scanned in x'
temp = settings.create_dataset('numy', data = np.array(y.shape[0]))
temp.attrs['description'] = 'Number of points scanned in y'
temp = settings.create_dataset('rangex', data = np.array(np.abs(x[-1] - x[0])))
temp.attrs['description'] = 'Range scanned in x (mm)'
temp = settings.create_dataset('rangey', data = np.array(np.abs(y[-1] - y[0])))
temp.attrs['description'] = 'Range scanned in y (mm)'
# calculate step size. Calculates the average step size in x and y. If either axis has length 1 (ie line scan), only consider step size
# in the other axis. If both axes have length 0 (point scan, although not a realistic outcome for .scanArea()), leave stepsize as 0
countedaxes = 0
stepsize = 0
if x.shape[0] > 1:
stepsize = stepsize + np.abs(x[1] - x[0])
countedaxes = countedaxes + 1
if y.shape[0] > 1:
stepsize = stepsize + np.abs(y[1] - y[0])
countedaxes = countedaxes + 1
if countedaxes:
stepsize = stepsize / countedaxes
temp = settings.create_dataset('stepsize', data = np.array(stepsize))
temp.attrs['description'] = 'Average step size (mm) in x and y. If either axis has length 1 (ie line scan), only consider step size in the other axis. If both axes have length 0 (point scan, although not a realistic outcome for .scanArea()), leave stepsize as 0. Note that steps in x are divided out of a continuous line scan.'
## measured data
rawdata = f.create_group('/data')
rawdata.attrs['description'] = 'Data acquired during area scan.'
temp = rawdata.create_dataset('x', data = np.array(x))
temp.attrs['description'] = 'Absolute X coordinate (mm) per point. Note that points in x are divided out of a continous line scan.'
temp = rawdata.create_dataset('y', data = np.array(y))
temp.attrs['description'] = 'Absolute Y coordinate (mm) per point'
temp = rawdata.create_dataset('relx', data = np.array(x - np.min(x)))
temp.attrs['description'] = 'Relative X coordinate (mm) per point. Note that points in x are divided out of a continous line scan.'
temp = rawdata.create_dataset('rely', data = np.array(y - np.min(y)))
temp.attrs['description'] = 'Relative Y coordinate (mm) per point'
temp = rawdata.create_dataset('wavelengths', data = np.array(wavelengths))
temp.attrs['description'] = 'Wavelengths (nm) at which sequential flyscan maps were performed.'
temp = rawdata.create_dataset('reflectance', data = np.array(reflectance))
temp.attrs['description'] = 'Baseline-corrected reflectance measured. Stored as [y, x, wl]. Stored as fraction (0-1), not percent!'
temp = rawdata.create_dataset('delay', data = np.array(delay))
temp.attrs['description'] = 'Time (seconds) that each line scan was acquired at. [wl, y]. Measured as seconds since first line scan.'
print('Data saved to {0}'.format(fpath))
def _save_timeSeries(self, label, wavelengths, reflectance, delay, duration, interval, signal, reference, logtemperature, temperature):
    """Write a time-series measurement to an HDF5 file.

    Stores the shared header groups (info/settings/baseline) via
    _saveGeneralInformation, then the time-series specific settings and the
    measured data arrays, and prints the destination path when done.
    """
    fpath = self._getSavePath(label = label)  # filepath the data will be written to

    def describe(dataset, text):
        # attach the human-readable description attribute used throughout our files
        dataset.attrs['description'] = text

    with h5py.File(fpath, 'w', swmr = True, libver = 'latest') as f:
        info, settings, baseline = self._saveGeneralInformation(f, label = label)

        # record the measurement type in the info group
        describe(info.create_dataset('type', data = 'timeSeries'.encode('utf-8')),
                 'Type of measurement held in this file.')

        # scan parameters
        describe(settings.create_dataset('duration', data = np.array(duration)),
                 'Total time (s) desired for time series.')
        describe(settings.create_dataset('interval', data = np.array(interval)),
                 'Time (s) desired between subsequent scans.')
        describe(settings.create_dataset('temperatureLogged', data = 1 if logtemperature else 0),
                 'Temperature logged during measurements. Note that this is the temperature of the heating pad in slot 2, actual sample temperature may vary.')

        # measured data
        rawdata = f.create_group('/data')
        rawdata.attrs['description'] = 'Data acquired during area scan.'
        describe(rawdata.create_dataset('wavelengths', data = np.array(wavelengths)),
                 'Wavelengths (nm) scanned per point.')
        describe(rawdata.create_dataset('reflectance', data = np.array(reflectance)),
                 'Baseline-corrected reflectance measured. Stored as [y, x, wl]. Stored as fraction (0-1), not percent!')
        describe(rawdata.create_dataset('signalRaw', data = np.array(signal)),
                 'Raw signal for integrating sphere detector. (V)')
        describe(rawdata.create_dataset('referenceRaw', data = np.array(reference)),
                 'Raw signal for reference detector. (V)')
        describe(rawdata.create_dataset('delay', data = np.array(delay)),
                 'Time (seconds) that each scan was acquired at. Measured as seconds since first scan point.')
        describe(rawdata.create_dataset('temperature', data = np.array(temperature)),
                 'Temperature (C) of heating pad in slot 2 at the time that each scan was acquired at.')
    print('Data saved to {0}'.format(fpath))
def _save_scanLBIC(self, label, x, y, delay, wavelengths, signal, reference):
    """Write an LBIC area scan to an HDF5 file.

    No baseline group is stored for LBIC (include_baseline = False), so
    _saveGeneralInformation returns only the info and settings groups.
    """
    fpath = self._getSavePath(label = label)  # filepath the data will be written to

    def describe(dataset, text):
        # attach the human-readable description attribute used throughout our files
        dataset.attrs['description'] = text

    with h5py.File(fpath, 'w', swmr = True, libver = 'latest') as f:
        info, settings = self._saveGeneralInformation(f, label = label, include_baseline = False)

        # record the measurement type in the info group
        describe(info.create_dataset('type', data = 'scanLBIC'.encode('utf-8')),
                 'Type of measurement held in this file.')

        # scan parameters
        describe(settings.create_dataset('numx', data = np.array(x.shape[0])),
                 'Number of points scanned in x')
        describe(settings.create_dataset('numy', data = np.array(y.shape[0])),
                 'Number of points scanned in y')
        describe(settings.create_dataset('rangex', data = np.array(np.abs(x[-1] - x[0]))),
                 'Range scanned in x (mm)')
        describe(settings.create_dataset('rangey', data = np.array(np.abs(y[-1] - y[0]))),
                 'Range scanned in y (mm)')
        # average step size over the axes that have more than one point;
        # stays 0 for a single-point scan
        steps = [np.abs(axis[1] - axis[0]) for axis in (x, y) if axis.shape[0] > 1]
        stepsize = sum(steps) / len(steps) if steps else 0
        describe(settings.create_dataset('stepsize', data = np.array(stepsize)),
                 'Average step size (mm) in x and y. If either axis has length 1 (ie line scan), only consider step size in the other axis. If both axes have length 0 (point scan, although not a realistic outcome for .scanArea()), leave stepsize as 0 ')

        # measured data
        rawdata = f.create_group('/data')
        rawdata.attrs['description'] = 'Data acquired during area scan.'
        describe(rawdata.create_dataset('x', data = np.array(x)),
                 'Absolute X coordinate (mm) per point')
        describe(rawdata.create_dataset('y', data = np.array(y)),
                 'Absolute Y coordinate (mm) per point')
        describe(rawdata.create_dataset('relx', data = np.array(x - np.min(x))),
                 'Relative X coordinate (mm) per point')
        describe(rawdata.create_dataset('rely', data = np.array(y - np.min(y))),
                 'Relative Y coordinate (mm) per point')
        describe(rawdata.create_dataset('wavelengths', data = np.array(wavelengths)),
                 'Wavelengths (nm) scanned per point.')
        describe(rawdata.create_dataset('signalRaw', data = np.array(signal)),
                 'Raw signal for calculating LBIC current. (V across 50 ohm terminating resistor)')
        describe(rawdata.create_dataset('referenceRaw', data = np.array(reference)),
                 'Raw signal for reference detector. (V)')
        describe(rawdata.create_dataset('delay', data = np.array(delay)),
                 'Time (seconds) that each scan was acquired at. Measured as seconds since first scan point.')
    print('Data saved to {0}'.format(fpath))
class controlMono(controlGeneric):
    """Measurement control for the monochromator ('mono') hardware setup.

    Connects the monochromator, DAQ, and XY stage on construction and
    implements the light/wavelength primitives used by the generic layer.
    """

    def __init__(self, dwelltime = 0.25):
        super().__init__(dwelltime = dwelltime)
        self.__hardwareSetup = 'mono'  # tags saved data as coming from the mono (vs nkt) setup
        # hardware handles, populated by connect()
        self.mono = None
        self.daq = None
        self.stage = None
        self.connect()
        self.daq.useExtClock = False  # dont use external trigger to drive daq
        self.processPulseTrain = False
        plt.ion()  # make plots of results non-blocking

    def connect(self):
        """Connect to the monochromator, DAQ, and XY stage."""
        self.mono = Mono()
        print("mono connected")
        self.daq = DAQ(dwelltime = self.dwelltime)
        print("daq connected")
        self.stage = Thorlabs_LTS150_xy()
        print("stage connected")

    def disconnect(self):
        """Release every hardware handle acquired in connect()."""
        self.mono.disconnect()
        self.daq.disconnect()
        self.stage.disable()

    ### internal methods specific to mono hardware setup
    def _goToWavelength(self, wavelength):
        # wavelength selection is done directly by the monochromator
        self.mono.wavelength = wavelength

    def _lightOn(self):
        # open the shutter only when it is not already open
        if not self.mono.shutterOpenStatus:
            self.mono.open_shutter()
        return True

    def _lightOff(self):
        # close the shutter only when it is currently open
        if self.mono.shutterOpenStatus:
            self.mono.close_shutter()
        return True
class controlNKT(controlGeneric):
    """Measurement control for the NKT laser ('nkt') hardware setup.

    Connects the SR830 lock-in amplifier, NKT Compact laser + Select AOTF,
    DAQ, XY stage, and heater, and implements the light/wavelength
    primitives used by the generic layer.
    """

    def __init__(self, dwelltime = 0.2):
        super().__init__(dwelltime = dwelltime)
        self.__hardwareSetup = 'nkt'  # tags saved data as coming from the nkt (vs mono) setup
        # hardware handles, populated by connect()
        self.stage = None
        self.select = None
        self.compact = None
        self.daq = None
        self.heater = None
        self.lia = None  # SR830 lock-in amplifier
        self.connect()
        # NOTE(review): the original comment here said "use external Compact
        # trigger to drive daq", but the flag is set False — confirm intent.
        self.daq.useExtClock = False
        self.processPulseTrain = False
        plt.ion()  # make plots of results non-blocking

    def connect(self):
        """Connect to every instrument of the nkt setup, in bring-up order."""
        self.lia = SR830('GPIB0::8::INSTR')
        print("SR830 LIA connected")
        self.__set_up_lia()
        self.compact = Compact(
            port = 'COM16',
            pulseFrequency = 21505
        )
        print("compact connected")
        self.select = Select(
            port = 'COM16'
        )
        self.select.setAOTF(1700, 0.6)
        print("select+rf driver connected")
        self.daq = DAQ(
            dwelltime = self.dwelltime,
            rate = 50000,
            countsPerTrigger = 3,
            countsPulseDuration = 20
        )
        print("daq connected")
        self.stage = Thorlabs_LTS150_xy()
        print("stage connected")
        self.heater = Omega(
            port = 'COM15'
        )
        print("heater connected")

    def disconnect(self):
        """Release every hardware handle acquired in connect()."""
        self.compact.disconnect()
        self.select.disconnect()
        self.daq.disconnect()
        self.stage.disable()
        self.heater.disconnect()

    def __set_up_lia(self):
        # one-time SR830 configuration; keep the write order unchanged — it
        # mirrors the documented bring-up procedure for this instrument
        input('Ensure that orange taped "mapper" cables are attached to the appropriate inputs on the SR830 Lock-In Amplifier under the optical table - press Enter to confirm')
        self.lia.channel1 = 'R'
        self.lia.filter_slope = 24
        self.lia.input_config = 'A'
        self.lia.input_coupling = 'DC'
        self.lia.input_grounding = 'Float'
        self.lia.input_notch_config = 'Both'
        self.lia.sensitivity = 0.02  # 2x10 mV
        self.lia.set_scaling('R', 0, 1.0)
        self.lia.time_constant = .001  # 1x1 ms
        self.lia.reference_source = 'External'
        return True

    ### internal methods specific to nkt hardware setup
    def _goToWavelength(self, wavelength):
        if not self.select.rfOn:  # the rf driver must be running before tuning
            self.select.on()
        self.select.setSingleAOTF(wavelength)
        return True

    def _lightOn(self):
        # checks shutters and interlock, gives the user a chance to remedy
        # problems before continuing
        self.preCheck()
        self.compact.on()
        return True

    def _lightOff(self):
        self.compact.off()
        return True

    def preCheck(self):
        """Block until the Select shutter and Compact interlock both pass."""
        # "presses the reset button on the front panel"; the interlock stays
        # unsatisfied while the door is open
        self.compact.resetInterlock()
        while not (self.select.checkShutter() and self.compact.checkInterlock()):
            input('Press Enter when issues have been resolved')
        return True
mapplot.py |
# FIX: the backend must be selected BEFORE matplotlib.pyplot (or Basemap,
# which imports pyplot) is loaded; in the original order matplotlib.use('Agg')
# came last, where it has no effect and the import can fail on headless hosts.
import string
from cStringIO import StringIO
from multiprocessing import Process, Manager

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.basemap import Basemap

import BaseDomsHandler
import ResultsStorage
# maps the short request parameter name to the field name looked up in the
# matchup records (used by createMapPlot); "sst" serves as the fallback
PARAMETER_TO_FIELD = {
"sst": "sea_water_temperature",
"sss": "sea_water_salinity"
}
# colorbar unit labels, keyed like PARAMETER_TO_FIELD
# (the "sst" entry uses LaTeX math text for the degree symbol)
PARAMETER_TO_UNITS = {
"sst": "($^\circ$ C)",
"sss": "(g/L)"
}
def __square(minLon, maxLon, minLat, maxLat):
    """Symmetrically pad the shorter side of a lon/lat box so it is square.

    Returns the (possibly expanded) minLon, maxLon, minLat, maxLat tuple.
    """
    lonSpan = maxLon - minLon
    latSpan = maxLat - minLat
    if latSpan > lonSpan:
        pad = (latSpan - lonSpan) / 2.0
        minLon -= pad
        maxLon += pad
    elif lonSpan > latSpan:
        pad = (lonSpan - latSpan) / 2.0
        minLat -= pad
        maxLat += pad
    return minLon, maxLon, minLat, maxLat
def render(d, lats, lons, z, primary, secondary, parameter):
    """Render the matchup difference map to a PNG and return its bytes.

    d: optional multiprocessing dict; when given, the PNG bytes are also
       stored under d['plot'] so renderAsync can collect the result.
    lats, lons, z: matchup coordinates and per-point difference values.
    primary, secondary: dataset names used for the plot title.
    parameter: "sst"/"sss"; selects the colorbar units ("sst" fallback).
    """
    fig = plt.figure()
    ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
    ax.set_title(string.upper("%s vs. %s" % (primary, secondary)))
    # pad the data bounds by 10% per side, then square the box so the
    # projection is not badly stretched
    minLatA = np.min(lats)
    maxLatA = np.max(lats)
    minLonA = np.min(lons)
    maxLonA = np.max(lons)
    minLat = minLatA - (abs(maxLatA - minLatA) * 0.1)
    maxLat = maxLatA + (abs(maxLatA - minLatA) * 0.1)
    minLon = minLonA - (abs(maxLonA - minLonA) * 0.1)
    maxLon = maxLonA + (abs(maxLonA - minLonA) * 0.1)
    minLon, maxLon, minLat, maxLat = __square(minLon, maxLon, minLat, maxLat)
    m = Basemap(projection='mill', llcrnrlon=minLon, llcrnrlat=minLat, urcrnrlon=maxLon, urcrnrlat=maxLat,
                resolution='l')
    m.drawparallels(np.arange(minLat, maxLat, (maxLat - minLat) / 5.0), labels=[1, 0, 0, 0], fontsize=10)
    m.drawmeridians(np.arange(minLon, maxLon, (maxLon - minLon) / 5.0), labels=[0, 0, 0, 1], fontsize=10)
    m.drawcoastlines()
    m.drawmapboundary(fill_color='#99ffff')
    m.fillcontinents(color='#cc9966', lake_color='#99ffff')
    # mask NaNs so they are excluded from min/max and not drawn
    masked_array = np.ma.array(z, mask=np.isnan(z))
    z = masked_array
    # marker sizes scaled linearly into [10, 30].
    # FIX: np.min/np.max were recomputed on every loop iteration (O(n^2))
    # and the scaling divided by zero when all values were identical.
    zmin = np.min(z)
    zspan = np.max(z) - zmin
    values = np.zeros(len(z))
    if zspan == 0:
        values += 20.0  # all values identical: mid-range marker size
    else:
        for i in range(0, len(z)):
            values[i] = ((z[i] - zmin) / zspan * 20.0) + 10
    x, y = m(lons, lats)
    im1 = m.scatter(x, y, values)
    im1.set_array(z)
    cb = m.colorbar(im1)
    units = PARAMETER_TO_UNITS[parameter] if parameter in PARAMETER_TO_UNITS else PARAMETER_TO_UNITS["sst"]
    cb.set_label("Difference %s" % units)
    sio = StringIO()
    plt.savefig(sio, format='png')
    plot = sio.getvalue()
    if d is not None:
        d['plot'] = plot
    return plot
class DomsMapPlotQueryResults(BaseDomsHandler.DomsQueryResults):
    """Query-results container for a DOMS map plot.

    Wraps the matchup point lists plus the rendered PNG so the standard
    results machinery can serve it; toImage() returns the PNG bytes.
    """

    def __init__(self, lats, lons, z, parameter, primary, secondary, args=None, bounds=None, count=None, details=None, computeOptions=None, executionId=None, plot=None):
        BaseDomsHandler.DomsQueryResults.__init__(self, results={"lats": lats, "lons": lons, "values": z}, args=args, details=details, bounds=bounds, count=count, computeOptions=computeOptions, executionId=executionId)
        # keep the raw inputs for later use; z is normalized to an ndarray
        self.__plot = plot
        self.__parameter = parameter
        self.__primary = primary
        self.__secondary = secondary
        self.__lats = lats
        self.__lons = lons
        self.__z = np.array(z)

    def toImage(self):
        # PNG bytes produced by render()
        return self.__plot
def renderAsync(x, y, z, primary, secondary, parameter):
    """Run render() in a child process and return the PNG bytes.

    Isolating matplotlib/Basemap in a subprocess keeps their global state
    out of the serving process; the result comes back via a managed dict.
    """
    shared = Manager().dict()
    worker = Process(target=render, args=(shared, x, y, z, primary, secondary, parameter))
    worker.start()
    worker.join()
    return shared['plot']
def createMapPlot(id, parameter):
# Build the map plot for a stored matchup execution.
# id: execution id retrieved from ResultsStorage; parameter: "sst"/"sss".
# Returns a DomsMapPlotQueryResults holding the point lists + rendered PNG.
with ResultsStorage.ResultsRetrieval() as storage:
params, stats, data = storage.retrieveResults(id)
primary = params["primary"]
secondary = params["matchup"][0]
lats = []
lons = []
z = []
# fall back to the "sst" field when the parameter is unknown
field = PARAMETER_TO_FIELD[parameter] if parameter in PARAMETER_TO_FIELD else PARAMETER_TO_FIELD["sst"]
for entry in data:
for match in entry["matches"]:
if match["source"] == secondary:
if field in entry and field in match:
a = entry[field]
b = match[field]
# the difference is appended twice — once for the primary point and
# once for the matched point whose coordinates are appended below
z.append((a - b))
z.append((a - b))
else:
# value missing on either side: both points get a placeholder of 1.0
z.append(1.0)
z.append(1.0)
lats.append(entry["y"])
lons.append(entry["x"])
lats.append(match["y"])
lons.append(match["x"])
# render in a child process so plotting state stays out of this process
plot = renderAsync(lats, lons, z, primary, secondary, parameter)
r = DomsMapPlotQueryResults(lats=lats, lons=lons, z=z, parameter=parameter, primary=primary, secondary=secondary,
args=params,
details=stats, bounds=None, count=None, computeOptions=None, executionId=id, plot=plot)
return r
|
operation_task.py | import datetime
import json
import re
import os
import requests
import time
import threading
import pickle
from django.core.mail import send_mail
from django.db import connection
from django.http import JsonResponse
from django.shortcuts import render_to_response, render
from django.core.cache import cache
from ApiManager.utils import schedule
from ApiManager.utils.case_utils import run_case_by_id
from ApiManager.utils.forms import TaskModelForm
from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, TaskFailedRecord
from frame.utils.common import get_ajax_msg, dataToJson
from ApiManager.utils.forms import get_validate_form_msg
from ApiManager.utils.utils import pagination_for_objects
from Joy_QA_Platform.settings import EMAIL_FROM
from Joy_QA_Platform.configs import AUTH_ADD_TASK, AUTH_DELETE, AUTH_UPDATE, AUTH_VIEW, EMAIL_SUFFIX
# module-level scheduler state shared across requests
is_timer_start = False  # whether the background timer has been started
run_task_list = []  # presumably ids of tasks currently scheduled/running — confirm against the task handlers
run_job_dict = {}  # presumably task id -> scheduler job handle — confirm against the task handlers
def task_list(request):
    """Render the task-list page (GET) or return one JSON page of tasks (POST)."""
    if request.method == "GET":
        return render(request, 'api/task_list.html')
    elif request.method == "POST":
        index = int(request.POST.get('index'))
        env_name_dic = {}
        project_name_dic = {}
        module_name_dic = {}
        # Only show tasks whose project grants the user view permission.
        results = filter_tasks_for_user(request.user, TaskInfo.objects.filter().order_by('-id'), AUTH_VIEW)
        tasks = pagination_for_objects(results, index)
        # BUG FIX: count/task_info_list were only assigned in the non-empty
        # branch, so an empty task list raised UnboundLocalError below.
        count = 0
        task_info_list = []
        if tasks is not None and len(tasks) > 0:
            for task in tasks:
                append_env_dict(task, env_name_dic)
                append_project_dict(task, project_name_dic)
                append_module_dict(task, module_name_dic)
            count = len(results)
            for task in tasks:
                task_info_list.append(task2Dict(task))
        data = dataToJson(task_info_list)
        return JsonResponse(get_ajax_msg(1, 1, '获取任务列表成功',
                                         {'tasks': data, 'count': count, 'currPage': index,
                                          'envInfo': env_name_dic,
                                          'proInfo': project_name_dic,
                                          'moduleInfo': module_name_dic}))
def task_create(request):
    """Create a scheduled task: GET renders the form, POST validates and saves."""
    if request.method == 'GET':
        return render(request, 'api/task_new.html')
    elif request.user.has_perm(AUTH_ADD_TASK):
        if request.method == 'POST':
            model_form = TaskModelForm(request.POST)
            if model_form.is_valid():
                task_name = request.POST.get('task_name')
                env_id = request.POST.get('belong_env')
                project_id = request.POST.get('belong_project')
                module_id = request.POST.get('belong_module')
                emails = request.POST.get('receiver_email')
                # Client sends epoch milliseconds.
                start_time = datetime.datetime.fromtimestamp(int(request.POST.get('start_time')) / 1000)
                # BUG FIX: is_loop was left unbound when the flag was missing
                # or malformed; default to False so validation cannot crash.
                is_loop = request.POST.get('is_loop') == 'true'
                interval_minute = request.POST.get('interval_minute')
                error_msg = None
                if not EnvInfo.objects.filter(id=env_id).exists():
                    error_msg = '此环境不存在'
                elif not ProjectInfo.objects.filter(id=project_id).exists():
                    error_msg = '此项目不存在'
                elif not ModuleInfo.objects.filter(id=module_id).exists():
                    error_msg = '此模块不存在'
                elif TaskInfo.objects.filter(task_name=task_name, belong_module_id=module_id).exists():
                    error_msg = '已存在此任务'
                elif start_time <= datetime.datetime.now():
                    error_msg = '任务开始时间早于当前时间'
                elif is_loop and int(interval_minute) < 1:
                    error_msg = '任务开始循环间隔时间不能小于1分钟'
                elif not validate_emails(emails.split(';')):
                    error_msg = '邮箱格式错误'
                if error_msg is not None:
                    return JsonResponse(get_ajax_msg(0, 0, error_msg, {}))
                model_form.instance.belong_env_id = env_id
                model_form.instance.belong_project_id = project_id
                model_form.instance.belong_module_id = module_id
                model_form.instance.start_time = start_time
                model_form.instance.receiver_email = deal_emails(emails.split(';'))
                model_form.save()
                # Attach the selected cases; the task lookup is hoisted out of
                # the loop (it was re-queried once per case).
                task = TaskInfo.objects.get(task_name=task_name)
                for case_id in request.POST.get('case_list').split(','):
                    task.cases.add(TestCaseInfo.objects.get(id=case_id))
                return JsonResponse(get_ajax_msg(1, 1, '添加任务成功', {}))
            else:
                msg = get_validate_form_msg(model_form)
                return JsonResponse(get_ajax_msg(0, 0, msg))
    else:
        return JsonResponse(get_ajax_msg(0, 0, '用户没有创建任务的权限'))
def task_search(request):
    """Search tasks by task/project/module name and return one result page."""
    if request.method == 'POST':
        index = int(request.POST.get('index'))
        task_name = request.POST.get('task_name')
        project_name = request.POST.get('project_name')
        module_name = request.POST.get('module_name')
        env_name_dic = {}
        project_name_dic = {}
        module_name_dic = {}
        count = 0
        if len(task_name) == 0 and len(project_name) == 0 and len(module_name) == 0:
            return JsonResponse(get_ajax_msg(0, 0, '搜索条件无效'))
        tasks = TaskInfo.objects.all()
        # The placeholder texts ('模块名称' / '项目名称') mean "not filtered".
        if len(module_name) != 0 and module_name != '模块名称':
            tasks = tasks.filter(belong_module__module_name__contains=module_name)
        if len(project_name) != 0 and project_name != '项目名称':
            tasks = tasks.filter(belong_project__project_name__contains=project_name)
        if len(task_name) != 0:
            tasks = tasks.filter(task_name__contains=task_name)
        # Idiom fix: `== None` / `!= None` replaced with identity checks.
        if tasks is None:
            return JsonResponse(get_ajax_msg(0, 0, '查询出错'))
        if len(tasks) > 0:
            tasks = filter_tasks_for_user(request.user, tasks.order_by('-id'), AUTH_VIEW)  # filter by user permission
            for task in tasks:
                append_env_dict(task, env_name_dic)
                append_project_dict(task, project_name_dic)
                append_module_dict(task, module_name_dic)
            count = len(tasks)
            tasks = pagination_for_objects(tasks, index)
        task_info_list = []
        for task in tasks:
            task_info_list.append(task2Dict(task))
        data = dataToJson(task_info_list)
        return JsonResponse(get_ajax_msg(1, 1, '搜索成功',
                                         {'tasks': data, 'count': count, 'currPage': index,
                                          'envInfo': env_name_dic, 'proInfo': project_name_dic,
                                          'moduleInfo': module_name_dic}))
def task_delete(request):
    """Delete one task by id, subject to the user's delete permission."""
    if request.method == 'POST':
        matched = TaskInfo.objects.filter(id=request.POST.get('id'))
        if len(matched) == 0:
            return JsonResponse(get_ajax_msg(0, 0, '没有这条数据', {}))
        target = matched[0]
        if not check_perm(request.user, target, AUTH_DELETE):
            return JsonResponse(get_ajax_msg(0, 0, '用户没有删除该任务的权限'))
        target.delete()
        return JsonResponse(get_ajax_msg(1, 1, '删除成功', {}))
def task_query(request):
    """Return JSON details for a single task id (POST only)."""
    if request.method == 'POST':
        matched = TaskInfo.objects.filter(id=request.POST.get('id'))
        if len(matched) == 0:
            return JsonResponse(get_ajax_msg(0, 0, '没有这条数据', {}))
        visible = filter_tasks_for_user(request.user, matched, AUTH_VIEW)
        info_list = [task2Dict(t) for t in visible]
        return JsonResponse(get_ajax_msg(1, 1, '获取任务成功', {'tasks': dataToJson(info_list)}))
def task_update(request):
    """Update an existing (stopped) task from the submitted form data."""
    if request.method == 'POST':
        task_form = TaskModelForm(request.POST)
        if not task_form.is_valid():
            msg = get_validate_form_msg(task_form)
            return JsonResponse(get_ajax_msg(0, 1, msg))
        task_id = request.POST.get('id')
        task_name = request.POST.get('task_name')
        env_name = request.POST.get('env_name')
        project_name = request.POST.get('project_name')
        module_name = request.POST.get('module_name')
        receiver_email = request.POST.get('receiver_email')
        case_list = request.POST.get('case_list').split(',')
        # Client sends epoch milliseconds.
        start_time = datetime.datetime.fromtimestamp(int(request.POST.get('start_time')) / 1000)
        interval_minute = request.POST.get('interval_minute')
        # BUG FIX: is_loop was unbound when the flag was missing; default False.
        is_loop = request.POST.get('is_loop') == 'true'
        if is_loop and int(interval_minute) < 1:
            return JsonResponse(get_ajax_msg(0, 0, '循环间隔时间不能小于1分钟', {}))
        if start_time <= datetime.datetime.now():
            # Past start times are silently bumped to "now".
            start_time = datetime.datetime.now()
        if not validate_emails(receiver_email.split(';')):
            return JsonResponse(get_ajax_msg(0, 0, '邮箱格式错误'))
        try:
            task = TaskInfo.objects.get(id=task_id)
            if TaskInfo.objects.filter(task_name=task_name, belong_module_id=module_name).exclude(id=task_id).exists():
                return JsonResponse(get_ajax_msg(0, 0, '已存在此任务名称', {}))
            if task.is_run:
                return JsonResponse(get_ajax_msg(0, 0, '请先停止任务', {}))
            if not check_perm(request.user, task, AUTH_UPDATE):
                return JsonResponse(get_ajax_msg(0, 0, '用户没有修改该任务的权限'))
            if TaskInfo.objects.update_task(task_id, task_name=task_name, env_name=env_name,
                                            project_name=project_name, module_name=module_name,
                                            receiver_email=deal_emails(receiver_email.split(';')),
                                            case_list=case_list, start_time=start_time,
                                            is_loop=is_loop, interval_minute=interval_minute):
                return JsonResponse(get_ajax_msg(1, 1, '修改任务成功', {}))
            return JsonResponse(get_ajax_msg(0, 0, '修改任务失败', {}))
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
            # propagate. Any DB/update error is still reported as "not found".
            return JsonResponse(get_ajax_msg(0, 0, '该任务不存在', {}))
def task_run(request):
    """Mark a task as running; the polling timer thread will pick it up."""
    global is_timer_start
    global run_task_list
    global run_job_dict
    if request.method == 'POST':
        matched = TaskInfo.objects.filter(id=request.POST.get('id'))
        if len(matched) == 0:
            return JsonResponse(get_ajax_msg(0, 0, '没有这条数据', {}))
        task = matched[0]
        if task.is_run:
            connection.close()
            return JsonResponse(get_ajax_msg(0, 0, '该任务正在运行'))
        # A start time not in the future is pushed 10s ahead so the polling
        # timer can still catch it inside its 5-second window.
        if not task.start_time > datetime.datetime.now():
            task.start_time = datetime.datetime.now() + datetime.timedelta(seconds=10)
        run_task_list.append(task)
        task.is_run = True
        task.save()
        connection.close()
        return JsonResponse(get_ajax_msg(1, 1, '该任务成功运行'))
def task_stop(request):
    """Stop a running task and cancel its scheduled loop job, if any."""
    global run_task_list
    global run_job_dict
    if request.method == 'POST':
        matched = TaskInfo.objects.filter(id=request.POST.get('id'))
        if len(matched) == 0:
            return JsonResponse(get_ajax_msg(0, 0, '没有这条数据', {}))
        task = matched[0]
        if not task.is_run:
            return JsonResponse(get_ajax_msg(0, 0, '该任务没有运行'))
        task.is_run = False
        task.fail_times = 0
        task.save()
        try:
            schedule.cancel_job(task.id)
        except KeyError:
            # One-shot tasks have no scheduled job to cancel.
            print('非循环任务')
        return JsonResponse(get_ajax_msg(1, 1, '该任务成功停止'))
def task_monitor(request):
    """Show the monitor page (GET) or return one page of Flower task results (POST).

    POST queries the local Flower API, keeps only scheduled-task ('定时任务')
    entries, optionally filters by task name, sorts newest-first and returns
    a 10-item page.
    """
    if request.method == 'GET':
        return render(request, 'api/task_monitor.html')
    if request.method == 'POST':
        index = int(request.POST.get('index'))
        search_task_name = request.POST.get('task_name')
        start = (index - 1) * 10
        # Cap at 1000 records so the Flower query stays fast.
        res = requests.get('http://127.0.0.1:5555/api/tasks?limit=1000')
        results = json.loads(res.content)
        monitor_result_list = []
        for result in results.values():
            try:
                args = result['args'].split(',')
                # Task metadata is packed as '定时任务-<task>-<case>'.
                infos = args[1].split('-')
                if '定时任务' in infos[0]:
                    task_dict = {
                        'task_name': infos[1],
                        'case_name': infos[2],
                        'state': result['state'],
                        'result': result['result'],
                        'received': result['received'],
                        'started': result['started'],
                        'runtime': result['runtime'],
                        'report_uuid': args[4].split("'")[1],
                    }
                    if search_task_name is None or search_task_name in task_dict['task_name']:
                        monitor_result_list.append(task_dict)
            except Exception as e:
                # BUG FIX: `'...' + e` raised TypeError (str + Exception);
                # format the exception explicitly.
                print('数据解析异常:' + str(e))
        # Newest first; replaces the original hand-written O(n^2) bubble sort.
        monitor_result_list.sort(key=lambda d: d['received'], reverse=True)
        data = dataToJson(monitor_result_list[start: start + 10])
        return JsonResponse(get_ajax_msg(1, 1, '获取监控任务列表成功',
                                         {'monitors': data,
                                          'count': len(monitor_result_list),
                                          'currPage': index}))
def thread_run_case(**kwargs):
    """Fire-and-forget: run one case of a task on a fresh worker thread."""
    run_args = (kwargs['base_url'], kwargs['case_id'],
                kwargs['task_name'], kwargs['task_id'])
    threading.Thread(target=run_case, args=run_args).start()
def run_case(base_url, case_id, task_name, task_id):
    """Run one test case for a scheduled task and track consecutive failures.

    Executes the case, waits for the report row to land in the DB, then
    updates the task's failure streak: resets it on success (or when no
    report exists), increments and records a TaskFailedRecord on failure,
    and mails the receivers on every second consecutive failure.
    """
    report_id = run_case_by_id(base_url, case_id, task_name, "定时任务", isTask=True)
    time.sleep(5)  # give the report writer time to commit
    reports = ReportInfo.objects.all().filter(report_id=report_id)
    tasks = TaskInfo.objects.filter(id=task_id)
    # BUG FIX: the report handling is now guarded by the task lookup, so a
    # missing task row can no longer hit an unbound `task` variable.
    if len(tasks) > 0:
        task = tasks[0]
        if len(reports) == 0:
            # No report row: treat the run as a success and reset the streak.
            task.fail_times = 0
            task.save()
        elif not get_response_result(report_id):
            task.fail_times += 1
            task.save()
            # Persist a failure record pointing at this report.
            failRecord = TaskFailedRecord(task_id=task, report_id=reports[0].id,
                                          time=datetime.datetime.fromtimestamp(reports[0].test_time))
            failRecord.save()
            # Warn by mail on every 2nd consecutive failure.
            if task.fail_times % 2 == 0 and task.fail_times != 0:
                for receiver in task.receiver_email.split(';'):
                    send_warn_mail(task_name, receiver, reports[0].id)
    connection.close()  # avoid piling up MySQL connections
def get_response_result(report_id):
    """Return False when the stored report shows failed tests, else True.

    Best-effort check: a missing report or any parsing error counts as
    success, matching the caller's "only react to confirmed failures" policy.
    """
    outcome = True
    try:
        matched = ReportInfo.objects.all().filter(report_id=report_id)
        if len(matched) > 0:
            stat = json.loads(matched[0].result_data)['stat']
            if stat['successes'] != stat['testsRun']:
                outcome = False
    except Exception as e:
        print('get_response_code e=====>', e)
    return outcome
def send_warn_mail(task_name, receiver, report_id):
    """Email one receiver that a monitored interface raised an exception."""
    report_url = 'http://qa.15166.com/api/get_report/?id=' + str(report_id)
    tips = task_name + ':监控到接口发生异常!查看报告地址:' + report_url
    try:
        # Django send_mail: subject, body, sender, recipient list.
        send_status = send_mail("Joy_QA_Platform 定时任务监控接口", tips, EMAIL_FROM, [receiver])
    except Exception as e:
        print(e)
def task2Dict(task):
    """Serialize a task row (plus its case ids/names) into a plain dict."""
    task_dict = {
        "id": task.id,
        "task_name": task.task_name,
        "belong_env": task.belong_env_id,
        "belong_project": task.belong_project_id,
        "belong_module": task.belong_module_id,
        "receiver_email": task.receiver_email,
        "case_id_list": [],
        "case_name_list": [],
        "start_time": task.start_time,
        "is_loop": task.is_loop,
        "interval_minute": task.interval_minute,
        "is_run": task.is_run,
        "fail_times": task.fail_times,
    }
    # Removed the unused local `id = case.id` that shadowed the builtin.
    for case in task.cases.all():
        task_dict["case_id_list"].append(case.id)
        task_dict["case_name_list"].append(case.name)
    return task_dict
def append_env_dict(task, env_dict):
    """Record the task's environment name under its stringified id."""
    env_dict[str(task.belong_env_id)] = task.belong_env.env_name
def append_project_dict(task, project_dict):
    """Record the task's project name under its stringified id."""
    project_dict[str(task.belong_project_id)] = task.belong_project.project_name
def append_module_dict(task, module_dict):
    """Record the task's module name under its stringified id."""
    module_dict[str(task.belong_module_id)] = task.belong_module.module_name
def get_url_from_task(task):
    """Return the host:port of the first environment row matching the task."""
    env_rows = EnvInfo.objects.filter(id=task.belong_env_id)
    return env_rows[0].host_port
class StartTaskTimer(threading.Thread):
    """Polling thread: every 5 seconds, launch tasks that have become due.

    Scans running loop tasks (via get_running_tasks) and, for each task whose
    start_time falls inside the current 5-second window, either schedules it
    as a recurring job (loop task) or runs it once and flips it back to
    stopped (one-shot task).
    """

    def __init__(self, run_task_list, run_job_dict):
        threading.Thread.__init__(self)
        self.run_task_list = run_task_list  # NOTE(review): appears unused by run(); verify
        self.run_job_dict = run_job_dict    # task id -> scheduled jobs for loop tasks

    def run(self):
        while True:
            # lst = self.run_task_list[::]
            tasks = get_running_tasks()
            for task in tasks:
                now = datetime.datetime.now()
                # Only fire tasks whose start time is within the last 5 seconds.
                if task.start_time <= now <= (task.start_time + datetime.timedelta(seconds=5)):
                    if task.is_loop:
                        self.run_job_dict[task.id] = start_loop_task(task, thread_run_case)
                    else:
                        # One-shot task: run it once and mark it stopped again.
                        start_task(task, thread_run_case)
                        task.is_run = False
                        task.fail_times = 0
                        task.save()
                        # self.run_task_list.remove(task)
                else:
                    pass
            time.sleep(5)
# Guards the DB scan below so concurrent pollers cannot pick the same task twice.
mutex = threading.Lock()


def get_running_tasks():
    """Return running loop tasks due in the current 5-second window.

    Also stamps last_run_time on each returned task so the same task is not
    picked up twice within one window.
    """
    global mutex
    with mutex:
        result = []
        tasks = TaskInfo.objects.filter(is_run=True, is_loop=True)
        now = datetime.datetime.now()
        for task in tasks:
            # Exclude possible duplicate execution within the same window.
            if task.start_time <= now <= (task.start_time + datetime.timedelta(seconds=5)) and (now - task.last_run_time > datetime.timedelta(seconds=5)):
                result.append(task)
                task.last_run_time = now
                task.save()
            # if datetime.datetime.now() - task.last_run_time > datetime.timedelta(seconds=task.interval_minute * 60 - 5):
            #     result.append(task)
        connection.close()
        if len(result) > 0:
            for i in result:
                print("获取到任务:", i.task_name)
        return result
def start_loop_task(task, func):
    """Run every case of *task* once now, then schedule them to repeat.

    Returns the list of scheduled jobs (one per case).
    """
    base_url = get_url_from_task(task)
    jobs = []
    cases = task.cases.all()
    for case in cases:
        task_name = get_task_name(task, case)
        # Kick off an immediate run before the recurring schedule starts.
        func(case_id=case.id, base_url=base_url, task_name=task_name, task_id=task.id)
        job = schedule.every(task.interval_minute).minutes.do(
            thread_run_case, case_id=case.id, base_url=base_url,
            task_name=task_name, task_id=task.id)
        # BUG FIX: the job was created but never collected, so callers always
        # received an empty list even for scheduled loop tasks.
        jobs.append(job)
        # Persist the job so it can be cleaned up after a process restart.
        cache.set("qa_paltform_loop_jobs_" + str(datetime.datetime.now()), pickle.dumps(job), timeout=None)
    flag = cache.get("qa_test_platform_running_flag")
    if flag != 1:
        schedule.run_continuously()
        # Must set a non-expiring flag, otherwise a new runner thread would be
        # started once the value expired (django-redis default TTL differs
        # from raw redis).
        cache.set("qa_test_platform_running_flag", 1, timeout=None)
    return jobs
def start_task(task, func):
    """Execute every case attached to *task* exactly once via *func*."""
    base_url = get_url_from_task(task)
    for case in task.cases.all():
        func(case_id=case.id, base_url=base_url,
             task_name=get_task_name(task, case), task_id=task.id)
def get_task_name(task, case):
    """Compose the scheduler display name: '定时任务-<task>-<case>'."""
    return '-'.join(['定时任务', task.task_name, case.name])
def filter_tasks_for_user(user, tasks, perm):
    """Keep only the tasks whose owning project grants *perm* to *user*."""
    return [task for task in tasks if user.has_perm(perm, task.belong_project)]
def check_perm(user, task, perm):
    """True when *user* holds *perm* on the task's owning project."""
    return user.has_perm(perm, task.belong_project)
def restart_running_task():
    """Recover loop tasks after a server restart.

    Clears stale job caches, the distributed lock and the runner-thread flag,
    starts a fresh StartTaskTimer, then staggers the start times of all
    running loop tasks 10 seconds apart so they are re-picked-up.
    """
    # Drop cached jobs left over from the previous process.
    cache.delete_pattern("qa_paltform_loop_jobs_*")
    # Drop the distributed lock; a stale lock would block in run_pending.
    cache.delete_pattern('*qa_test_platform_get')
    # Drop the runner-started flag so exactly one runner thread is created.
    cache.delete_pattern('qa_test_platform_running_flag')
    print("清除任务缓存、清除锁、清除线程启动标记")
    start_task_timer = StartTaskTimer(run_task_list, run_job_dict)
    start_task_timer.start()
    for offset, task in enumerate(TaskInfo.objects.filter(is_run=True, is_loop=True), start=1):
        task.start_time = datetime.datetime.now() + datetime.timedelta(seconds=10 * offset)
        task.save()
    connection.close()  # avoid piling up MySQL connections
def validate_emails(emails):
    """True when every non-empty entry matches the allowed mailbox pattern."""
    pattern = "^[A-Z0-9a-z._%+-]+" + EMAIL_SUFFIX
    return all(re.match(pattern, email) is not None
               for email in emails if len(email) != 0)
def deal_emails(emails):
    """Join *emails* with ';', dropping duplicates but keeping first-seen order."""
    # dict.fromkeys preserves insertion order and dedupes in O(n), replacing
    # the original O(n^2) membership scan plus quadratic string rebuild.
    return ";".join(dict.fromkeys(emails))
|
stage_op_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
TIMEOUT = 1  # seconds to wait on the helper queue before treating a put() as blocked
class StageTest(test.TestCase):
  """Tests for data_flow_ops.StagingArea: put/get/peek, dict mode, device
  colocation, size/clear, and capacity / memory_limit backpressure."""

  @test_util.run_deprecated_v1
  def testSimple(self):
    # Single-tensor staging: CPU produces, GPU stages and consumes.
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.float32)
        v = 2. * (array_ops.zeros([128, 128]) + x)
      with ops.device(test.gpu_device_name()):
        stager = data_flow_ops.StagingArea([dtypes.float32])
        stage = stager.put([v])
        y = stager.get()
        y = math_ops.reduce_max(math_ops.matmul(y, y))
      G.finalize()
    with self.session(graph=G) as sess:
      sess.run(stage, feed_dict={x: -1})
      for i in range(10):
        _, yval = sess.run([stage, y], feed_dict={x: i})
        # get() yields the value staged on the previous step, hence (i - 1).
        self.assertAllClose(4 * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)

  @test_util.run_deprecated_v1
  def testMultiple(self):
    # Two tensors staged and retrieved together as a list.
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.float32)
        v = 2. * (array_ops.zeros([128, 128]) + x)
      with ops.device(test.gpu_device_name()):
        stager = data_flow_ops.StagingArea([dtypes.float32, dtypes.float32])
        stage = stager.put([x, v])
        z, y = stager.get()
        y = math_ops.reduce_max(z * math_ops.matmul(y, y))
      G.finalize()
    with self.session(graph=G) as sess:
      sess.run(stage, feed_dict={x: -1})
      for i in range(10):
        _, yval = sess.run([stage, y], feed_dict={x: i})
        self.assertAllClose(
            4 * (i - 1) * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)

  @test_util.run_deprecated_v1
  def testDictionary(self):
    # Named staging: put/get with a dict keyed by 'x' and 'v'.
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.float32)
        v = 2. * (array_ops.zeros([128, 128]) + x)
      with ops.device(test.gpu_device_name()):
        stager = data_flow_ops.StagingArea(
            [dtypes.float32, dtypes.float32],
            shapes=[[], [128, 128]],
            names=['x', 'v'])
        stage = stager.put({'x': x, 'v': v})
        ret = stager.get()
        z = ret['x']
        y = ret['v']
        y = math_ops.reduce_max(z * math_ops.matmul(y, y))
      G.finalize()
    with self.session(graph=G) as sess:
      sess.run(stage, feed_dict={x: -1})
      for i in range(10):
        _, yval = sess.run([stage, y], feed_dict={x: i})
        self.assertAllClose(
            4 * (i - 1) * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)

  def testColocation(self):
    # put() ops live on the stager's device; get() follows the caller's device.
    gpu_dev = test.gpu_device_name()
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.float32)
        v = 2. * (array_ops.zeros([128, 128]) + x)
      with ops.device(gpu_dev):
        stager = data_flow_ops.StagingArea([dtypes.float32])
        y = stager.put([v])
        expected_name = gpu_dev if 'gpu' not in gpu_dev else '/device:GPU:0'
        self.assertEqual(y.device, expected_name)
      with ops.device('/cpu:0'):
        x = stager.get()[0]
        self.assertEqual(x.device, '/device:CPU:0')
      G.finalize()

  @test_util.run_deprecated_v1
  def testPeek(self):
    # peek(i) reads element i without removing it from the staging area.
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.int32, name='x')
        p = array_ops.placeholder(dtypes.int32, name='p')
      with ops.device(test.gpu_device_name()):
        stager = data_flow_ops.StagingArea(
            [
                dtypes.int32,
            ], shapes=[[]])
        stage = stager.put([x])
        peek = stager.peek(p)
        ret = stager.get()
      G.finalize()
    with self.session(graph=G) as sess:
      for i in range(10):
        sess.run(stage, feed_dict={x: i})
      for i in range(10):
        self.assertTrue(sess.run(peek, feed_dict={p: i}) == [i])

  @test_util.run_deprecated_v1
  def testSizeAndClear(self):
    # size() counts staged elements; clear() empties the area.
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.float32, name='x')
        v = 2. * (array_ops.zeros([128, 128]) + x)
      with ops.device(test.gpu_device_name()):
        stager = data_flow_ops.StagingArea(
            [dtypes.float32, dtypes.float32],
            shapes=[[], [128, 128]],
            names=['x', 'v'])
        stage = stager.put({'x': x, 'v': v})
        ret = stager.get()
        size = stager.size()
        clear = stager.clear()
      G.finalize()
    with self.session(graph=G) as sess:
      sess.run(stage, feed_dict={x: -1})
      self.assertEqual(sess.run(size), 1)
      sess.run(stage, feed_dict={x: -1})
      self.assertEqual(sess.run(size), 2)
      sess.run(clear)
      self.assertEqual(sess.run(size), 0)

  @test_util.run_deprecated_v1
  def testCapacity(self):
    # put() must block once `capacity` elements are staged.
    self.skipTest('b/123423516 this test is flaky on gpu.')
    capacity = 3
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.int32, name='x')
      with ops.device(test.gpu_device_name()):
        stager = data_flow_ops.StagingArea(
            [
                dtypes.int32,
            ], capacity=capacity, shapes=[[]])
        stage = stager.put([x])
        ret = stager.get()
        size = stager.size()
      G.finalize()
    from six.moves import queue as Queue
    import threading
    queue = Queue.Queue()
    n = 8
    with self.session(graph=G) as sess:
      # Stage data in a separate thread which will block
      # when it hits the staging area's capacity and thus
      # not fill the queue with n tokens
      def thread_run():
        for i in range(n):
          sess.run(stage, feed_dict={x: i})
          queue.put(0)

      t = threading.Thread(target=thread_run)
      t.daemon = True
      t.start()
      # Get tokens from the queue until a timeout occurs
      try:
        for i in range(n):
          queue.get(timeout=TIMEOUT)
      except Queue.Empty:
        pass
      # Should've timed out on the iteration 'capacity'
      if not i == capacity:
        self.fail("Expected to timeout on iteration '{}' "
                  "but instead timed out on iteration '{}' "
                  "Staging Area size is '{}' and configured "
                  "capacity is '{}'.".format(capacity, i, sess.run(size),
                                             capacity))
      # Should have capacity elements in the staging area
      self.assertTrue(sess.run(size) == capacity)
      # Clear the staging area completely
      for i in range(n):
        self.assertTrue(sess.run(ret) == [i])
      # It should now be empty
      self.assertTrue(sess.run(size) == 0)

  @test_util.run_deprecated_v1
  def testMemoryLimit(self):
    # put() must block once staged bytes reach memory_limit.
    memory_limit = 512 * 1024  # 512K
    chunk = 200 * 1024  # 256K
    capacity = memory_limit // chunk
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.uint8, name='x')
      with ops.device(test.gpu_device_name()):
        stager = data_flow_ops.StagingArea(
            [
                dtypes.uint8,
            ], memory_limit=memory_limit, shapes=[[]])
        stage = stager.put([x])
        ret = stager.get()
        size = stager.size()
      G.finalize()
    from six.moves import queue as Queue
    import threading
    import numpy as np
    queue = Queue.Queue()
    n = 8
    with self.session(graph=G) as sess:
      # Stage data in a separate thread which will block
      # when it hits the staging area's capacity and thus
      # not fill the queue with n tokens
      def thread_run():
        for i in range(n):
          sess.run(stage, feed_dict={x: np.full(chunk, i, dtype=np.uint8)})
          queue.put(0)

      t = threading.Thread(target=thread_run)
      t.daemon = True
      t.start()
      # Get tokens from the queue until a timeout occurs
      try:
        for i in range(n):
          queue.get(timeout=TIMEOUT)
      except Queue.Empty:
        pass
      # Should've timed out on the iteration 'capacity'
      if not i == capacity:
        self.fail("Expected to timeout on iteration '{}' "
                  "but instead timed out on iteration '{}' "
                  "Staging Area size is '{}' and configured "
                  "capacity is '{}'.".format(capacity, i, sess.run(size),
                                             capacity))
      # Should have capacity elements in the staging area
      self.assertTrue(sess.run(size) == capacity)
      # Clear the staging area completely
      for i in range(n):
        self.assertTrue(np.all(sess.run(ret)[0] == i))
      self.assertTrue(sess.run(size) == 0)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
test_fx.py | import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import operator
import os
import pickle
import sys
import torch
import traceback
import typing
import types
import warnings
import unittest
from math import sqrt
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH
import torch._C._fx
from torch.fx.node import Target, Argument
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from collections import namedtuple
from torch.fx.proxy import TraceError
from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import AnnotationsTest # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import TypeCheckerTest # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Tuple, Union
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_WINDOWS,
TEST_WITH_ROCM,
find_library_location,
run_tests,
)
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
try:
from torchvision import models as torchvision_models
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
    """Tiny module used as a basic tracing target: relu(x + 3)."""

    def forward(self, x):
        shifted = x + 3.0
        return torch.relu(shifted)
def a_non_torch_leaf(a, b):
    """Plain-Python leaf used to verify tracing through non-torch calls."""
    total = a + b
    return total
# Used for test_autowrap_function. Autowrapped functions need to be global
def fx_int(x: float) -> int:
    """Truncate *x* to an int."""
    return int(x)
def fx_int_x2(x: float) -> int:
    """Truncate *x* to an int, then double it."""
    truncated = int(x)
    return truncated * 2
# used in test_pytree. It's all the way out here because pickling a GraphModule
# that uses Point errors out if Point is local to the function
Point = namedtuple('Point', ['x', 'y'])
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
    """Leaf lifted via wrap('a_lifted_leaf'): sums a[0], a[1] and b."""
    pair_sum = a[0] + a[1]
    return pair_sum + b
# Register a_lifted_leaf as an FX leaf by name.
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
    """Same contract as a_lifted_leaf; registered by passing the function object."""
    partial = a[0] + a[1]
    return partial + b
# Register by function object (vs. by name above), and wrap the builtin len.
wrap(a_lifted_leaf2)
wrap('len')
@wrap
def wrapped_via_decorator(a):
    """Increment-by-one leaf registered through the @wrap decorator form."""
    result = a + 1
    return result
# Registered by name; the function itself is defined just below.
wrap('wrapped_with_submodule')


def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
    # Applies the given BatchNorm1d module to x.
    return batchnorm1d(x)
# Keep references to the original functions so tests can assert that wrap()
# did not replace the module-level globals (see assertIs checks in TestFX).
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
# Registered by name before its definition below.
wrap('wrapper_fn')
def wrapper_fn(x):
    # NOTE(review): torch.foo is not an existing torch attribute — this leaf
    # presumably exists only to be traced, never executed; verify against the
    # tests that use it before calling it directly.
    return torch.foo(x)
class Pair(NamedTuple):
    # Simple two-tensor record used as a structured argument/return in tests.
    x : torch.Tensor
    y : torch.Tensor
# for testing pytrees
class Foo(object):  # noqa: B209
    """Minimal two-attribute container used by the pytree tests."""

    def __init__(self, a, b):
        self.a = a
        self.b = b
class TestFX(JitTestCase):
def setUp(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
return
lib_file_path = find_library_location('libtorchbind_test.so')
torch.ops.load_library(str(lib_file_path))
def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
"""Check that an nn.Module's results match the GraphModule version
for a given set of args/kwargs.
"""
kwargs = kwargs if kwargs else {}
ref_outs = m(*args, **kwargs)
gm = symbolic_trace(m)
gm.graph.lint()
test_outs = gm(*args, **kwargs)
self.assertEqual(ref_outs, test_outs)
def test_graph_module(self):
class MySub(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.w + x
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(4, 3)
self.sub_mod = MySub()
self.w = torch.nn.Parameter(torch.rand(3))
def forward(self, A, B, c):
t = torch.sigmoid(A) + self.lin(c)
return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
m = MyModule()
gm = symbolic_trace(m)
ms = torch.jit.script(gm)
class M2(torch.nn.Module):
def forward(self, A):
m, idx = torch.max(A, 0)
return m + 1, idx + 1
m2 = M2()
gm2 = symbolic_trace(m2)
class T(torch.nn.Module):
def forward(self, A, b=4, *args, c=5, **kwargs):
x = A + 1 + args[0] + kwargs['3']
return x
t = T()
symbolic_trace(t)
def test_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(torch.sin(x + y), gm(x, y))
def test_args_kwargs(self):
class T(torch.nn.Module):
def forward(self, *args, **kwargs):
x = args[0] + kwargs['foo']
return x
t = T()
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_args_kwargs_no_self(self):
class T(torch.nn.Module):
def forward(*args, **kwargs): # noqa: B902
self = args[0]
return torch.relu(args[1])
t = T()
with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_fx_shifts(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x << 3, x >> 3
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_dict(self):
class MyDictMod(torch.nn.Module):
def forward(self, d):
return d['3'].relu(), {'4' : d['3'].neg()}
input_dict = {'3': torch.rand(3, 4)}
m = MyDictMod()
self.checkGraphModule(m, (input_dict,))
    def test_disallow_override(self):
        """A custom Tracer can veto node creation by raising in `create_node`.

        The tracer below rejects any target whose name ends in '_' (the torch
        naming convention for in-place ops) and the test verifies the rejection
        fires for method calls, free functions, and in-place ops whose argument
        is a symbolic value.
        """
        # Custom delegate to disallow in-place tensor operations
        class NoMutableCallTracer(Tracer):
            def create_node(self, kind : str, target : Union[str, Callable],
                            args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
                            type_expr : Optional[Any] = None) -> Node:
                # A trailing underscore marks in-place variants in torch (add_, log_, ...).
                name = target if isinstance(target, str) else torch.typename(target)
                if name[-1] == '_':
                    raise RuntimeError('In-place operations are not supported')
                return super().create_node(kind, target, args, kwargs, name)

        # Test method
        class MyInplaceMod(torch.nn.Module):
            def forward(self, x):
                x.add_(3.0)
                return x
        m = MyInplaceMod()
        with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
            NoMutableCallTracer().trace(m)

        # Test free function
        class MyInplaceMod2(torch.nn.Module):
            def forward(self, x):
                torch.log_(x)
                return x
        m2 = MyInplaceMod2()
        with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
            NoMutableCallTracer().trace(m2)

        # Test symbolic node as an arg
        class MyInplaceMod3(torch.nn.Module):
            def forward(self, x):
                y = torch.ones(3, 4)
                y.add_(x)
                return x
        m3 = MyInplaceMod3()
        with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
            NoMutableCallTracer().trace(m3)
def test_leaf_module(self):
# Custom delegate to make it so that there are no leaf modules, everything
# should get traced through
class NoLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return False
class MyReluMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
mrm = MyReluMod()
sym = NoLeafModulesTracer().trace(mrm)
for node in sym.nodes:
self.assertNotEqual(node.op, 'call_module')
sym.lint()
def test_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf2', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_wrapped_via_decorator(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(m).transform()
self.assertIn('wrapped_via_decorator', transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
m = symbolic_trace(M())
self.assertIn("wrapped_with_submodule", m.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), m(input))
def test_wrapped_retrace(self):
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
retraced = symbolic_trace(m)
self.assertIn('wrapped_via_decorator', retraced.code)
self.assertEqual(retraced(0), 1)
def test_graph_edit_with_proxy(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
gm.graph.lint()
self.assertEqual(gm(3, 4), 14)
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
seen_names : Set[str] = set()
for node in gm.graph.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_stack_traces(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(M())
for node in graph.nodes:
if node.op == 'output':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_fx.py' in node.stack_trace
def test_graph_unique_names_manual(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
graph2 = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
graph2.graph_copy(graph, val_map)
seen_names : Set[str] = set()
for node in graph2.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_unpack(self):
class M(torch.nn.Module):
def forward(self, a, b):
c, d = a
return c + d + b
a = (torch.rand(1), torch.rand(1))
b = torch.rand(1)
m = M()
self.checkGraphModule(m, (a, b))
    def test_native_callable(self):
        """End-to-end lowering example: translate FX IR into a TorchBind
        `_ElementwiseInterpreter`, wrap it in a GraphModule, and verify numeric
        parity plus TorchScript scripting and serialization round-trips."""
        if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
            raise unittest.SkipTest("non-portable load_library call used in test")
        # This test exercises the case where we use FX to translate from Python
        # code to some native callable object
        #
        # For the purposes of testing, we use ElementwiseInterpreter defined
        # in test_custom_class.cpp.
        #
        # We test that we can
        # 1) Construct a native callable from FX IR
        # 2) Construct a drop-in replacement module that delegates to the
        #    native callable rather than the original code
        # 3) Run both the original code and native callable wrapper with
        #    equivalent results
        # 4) TorchScript compile the native callable wrapper and confirm
        #    equivalent results with the reference
        # 5) TorchScript serialize and deserialize the native callable
        #    and confirm equivalent results with the reference

        # We use this simple Module as a reference computation
        class MySimpleMod(torch.nn.Module):
            def forward(self, x):
                return 3.0 * x + x

        msm = MySimpleMod()

        # This is what a lowering pass might look like: a function that takes
        # a valid nn.Module, symbolically traces it, lowers the Module to some
        # representation, and wraps that representation up into another
        # nn.Module instance that handles dispatch to the compiled/lowered code.
        def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
            # ===== Stage 1: Symbolic trace the module =====
            mod = symbolic_trace(orig_mod)

            # ===== Stage 2: Lower GraphModule representation to the C++
            #       interpreter's instruction format ======
            instructions = []
            constant_idx = 0
            constants = {}
            fn_input_names = []

            # Only elementwise add/mul are supported by the C++ interpreter.
            target_to_name = {
                operator.add : "add",
                operator.mul : "mul"
            }

            output_node : Optional[Node] = None
            # For each instruction, create a triple
            # (instruction_name : str, inputs : List[str], output : str)
            # to feed into the C++ interpreter
            for n in mod.graph.nodes:
                target, args, out_name = n.target, n.args, n.name
                assert len(n.kwargs) == 0, "kwargs currently not supported"

                if n.op == 'placeholder':
                    # Placeholders specify function argument names. Save these
                    # for later when we generate the wrapper GraphModule
                    fn_input_names.append(target)
                elif n.op == 'call_function':
                    assert target in target_to_name, "Unsupported call target " + target
                    arg_names = []
                    for arg in args:
                        if not isinstance(arg, Node):
                            # Pull out constants. These constants will later be
                            # fed to the interpreter C++ object via add_constant()
                            arg_name = f'constant_{constant_idx}'
                            constants[arg_name] = torch.tensor(
                                [arg] if isinstance(arg, numbers.Number) else arg)
                            arg_names.append(arg_name)
                            constant_idx += 1
                        else:
                            arg_names.append(arg.name)
                    instructions.append((target_to_name[target], arg_names, out_name))
                elif n.op == 'output':
                    if output_node is not None:
                        raise RuntimeError('Multiple output nodes!')
                    output_node = n
                else:
                    raise RuntimeError('Unsupported opcode ' + n.op)

            interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
            # Load constants
            for k, v in constants.items():
                interpreter.add_constant(k, v)
            # Specify names for positional input arguments
            interpreter.set_input_names(fn_input_names)
            # Load instructions
            interpreter.set_instructions(instructions)
            # Specify name for single output
            assert isinstance(output_node.args[0], torch.fx.Node)
            interpreter.set_output_name(output_node.args[0].name)

            # ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
            class WrapperModule(torch.nn.Module):
                def __init__(self, interpreter):
                    super().__init__()
                    self.interpreter = interpreter

            wrapper = WrapperModule(interpreter)

            # Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
            # 3) Returns the specified return value
            # FIXME: The following code could be greatly simplified by symbolic_trace'ing
            # the wrapper with a Tracer that considers the Wrapper instance a root
            # module, however, I can't get `__call__` exposed on TorchBind classes
            # without it messing up Python `hasattr` for some reason. More digging
            # into CPython's implementation of hasattr is probably in order...

            graph = torch.fx.Graph()

            # Add placeholders for fn inputs
            placeholder_nodes = []
            for name in fn_input_names:
                placeholder_nodes.append(graph.create_node('placeholder', name))

            # Get the interpreter object
            interpreter_node = graph.create_node('get_attr', 'interpreter')

            # Add a node to call the interpreter instance
            output_node = graph.create_node(
                op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))

            # Register output
            graph.output(output_node)

            graph.lint()

            # Return final GraphModule!!!
            return GraphModule(wrapper, graph)

        # Lower GraphModule to C++ interpreter
        lowered = lower_to_elementwise_interpreter(msm)

        # Compare correctness with original module
        x = torch.rand(3, 4)
        ref_out = msm(x)
        test_out = lowered(x)
        torch.testing.assert_close(test_out, ref_out)

        # Test TorchScript compilation
        scripted_lowered = torch.jit.script(lowered)
        script_out = scripted_lowered(x)
        torch.testing.assert_close(script_out, ref_out)

        # Test TorchScript ser/de
        import_copy = self.getExportImportCopy(scripted_lowered)
        imported_out = import_copy(x)
        torch.testing.assert_close(imported_out, ref_out)
def test_reserved_getattr(self):
"""Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
class M(torch.nn.Module):
def forward(self, a):
return a.foo.bar.baz
m = M()
m_g = symbolic_trace(m)
m_g.graph.lint()
for node in m_g.graph.nodes:
self.assertTrue(node.name != "getattr")
def test_node_tagging(self):
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
n = super().create_node(kind, target, args, kwargs, name)
n.tag = 'foo'
return n
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = TaggingTracer().trace(m)
g.lint()
for n in g.nodes:
self.assertTrue(hasattr(n, 'tag'))
self.assertEqual(n.tag, 'foo')
def test_tensor_attribute(self):
class TensorAttribute(torch.nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand(3, 4)
def forward(self, x):
return torch.nn.functional.linear(x, self.tensor)
ta = TensorAttribute()
traced = symbolic_trace(ta)
traced(torch.rand(4, 4))
class WrapperForQualname(torch.nn.Module):
def __init__(self):
super().__init__()
self.ta = TensorAttribute()
def forward(self, x):
return torch.nn.functional.linear(x, self.ta.tensor)
wfq = WrapperForQualname()
traced2 = symbolic_trace(wfq)
traced2.graph.lint()
traced2(torch.rand(4, 4))
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
seq = torch.nn.Sequential(
Simple(),
Simple(),
Simple()
)
traced = symbolic_trace(seq)
traced.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
def test_pickle_graphmodule(self):
class Nested(torch.nn.Module):
def __init__(self):
super().__init__()
self.st = torch.nn.Linear(4, 4)
def forward(self, x):
return self.st(x)
n = Nested()
traced = symbolic_trace(n)
traced.graph.lint()
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(loaded(x), traced(x))
def test_pickle_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(loaded(x, y), gm(x, y))
def test_all_input_nodes(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.placeholder('x')
b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
c : torch.fx.Node = graph.get_attr('y_attr')
d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
graph.output(e)
graph.lint()
self.assertEqual(b.all_input_nodes, [a])
self.assertEqual(c.all_input_nodes, [])
self.assertEqual(d.all_input_nodes, [b, c])
self.assertEqual(e.all_input_nodes, [d])
def test_deepcopy_graphmodule_with_transform(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
def transform(traced):
new_graph = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_value = new_graph.graph_copy(traced.graph, val_map)
relu_out = new_graph.create_node(
op='call_method', target='neg', args=(output_value,), kwargs={})
new_graph.output(relu_out)
return GraphModule(traced, new_graph)
transformed = transform(traced)
transformed.graph.lint()
copied = copy.deepcopy(transformed)
self.assertNotEqual(id(type(transformed)), id(type(copied)))
x = torch.randn(3, 4)
self.assertEqual(copied(x), transformed(x))
def test_deepcopy_with_submods_params(self):
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
def forward(self, x):
return torch.relu(x) + self.param
class Baz(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.bar = Bar()
def forward(self, x):
return self.bar(x) - self.param
baz = Baz()
traced = symbolic_trace(baz)
traced.graph.lint()
copied = copy.deepcopy(traced)
copied.graph.lint()
def test_deepcopy_graph_with_tracer_cls(self):
class TestTracer(Tracer):
def is_leaf_module(self, module, name):
return True
g = Graph(tracer_cls=TestTracer)
x = g.placeholder("x")
g.output(x)
h = copy.deepcopy(g)
self.assertIsNotNone(h._tracer_cls)
self.assertTrue(g._tracer_cls == h._tracer_cls)
def test_unpack_list_better_error(self):
class SomeArgs(torch.nn.Module):
def forward(self, a, b):
return torch.rand(3, 4)
class UnpacksList(torch.nn.Module):
def __init__(self):
super().__init__()
self.sa = SomeArgs()
def forward(self, x : list):
return self.sa(*x)
ul = UnpacksList()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ul)
def test_unpack_dict_better_error(self):
class SomeKwargs(torch.nn.Module):
def forward(self, x=3, y=4):
return torch.rand(3, 4)
class UnpacksDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.sk = SomeKwargs()
def forward(self, x : dict):
return self.sk(**x)
ud = UnpacksDict()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ud)
def test_pretty_print_targets(self):
# Test that Graph pretty-print prints friendly name for targets
# in `operator` and `builtins`
class SomeMod(torch.nn.Module):
def forward(self, x):
return torch.add(x.foo + x.bar, 3.0)
traced = symbolic_trace(SomeMod())
graph_str = str(traced.graph)
self.assertIn('builtins.getattr', graph_str)
self.assertIn('operator.add', graph_str)
self.assertIn('torch.add', graph_str)
    def test_pretty_print_node(self):
        """`Node.format_node()` output should show every (name, opcode) pair,
        in graph order, for a module that exercises all the common opcodes."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param: torch.nn.Parameter = torch.nn.Parameter(
                    torch.rand(3, 4))
                self.linear = torch.nn.Linear(4, 5)

            def forward(self, x: torch.Tensor, y: int = 2):
                return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)

        traced = symbolic_trace(M())

        all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])

        # FileCheck verifies each name/opcode pair appears in order in the dump.
        FileCheck().check("x").check("placeholder") \
            .check("y").check("placeholder") \
            .check("getitem").check("call_function") \
            .check("param").check("get_attr") \
            .check("add").check("call_function") \
            .check("linear").check("call_module") \
            .check("clamp").check("call_method") \
            .run(all_formatted)
def test_script_tensor_constant(self):
# TorchScript seems to ignore attributes that start with `__`.
# We used to call anonymous Tensor values `__tensor_constant*`, but
# they were getting ignored by script. Now they're called
# `_tensor_constant*`
class IHaveATensorConstant(torch.nn.Module):
def forward(self, x):
return x + torch.rand(3, 4)
traced = torch.fx.symbolic_trace(IHaveATensorConstant())
torch.jit.script(traced)
def test_autowrap_functions(self):
class AutowrapFnTest(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2)
class AutowrapFnTest2(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)
# Check function(s) are wrapped
# `int` would normally throw a TypeError as argument can't be `Proxy`
tracer = Tracer(autowrap_functions=(fx_int,))
graph = tracer.trace(AutowrapFnTest())
traced = GraphModule(tracer.root, graph, 'test')
tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))
tracer_2.trace(AutowrapFnTest2())
# Test scriptability
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(4)), 2)
def test_torch_fx_len(self):
class FXLenTest(torch.nn.Module):
def forward(self, x):
return len(x)
traced = symbolic_trace(FXLenTest())
self.assertEqual(traced(torch.rand(3, 4)), 3)
# Test scriptability
scripted = torch.jit.script(FXLenTest())
self.assertEqual(scripted(torch.rand(3)), 3)
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(3)), 3)
# Test non-proxy len
class FXLenTest2(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = [3, 4, 5]
def forward(self, x):
return x + len(self.l)
traced2 = symbolic_trace(FXLenTest2())
inp = torch.rand(3, 4)
self.assertEqual(traced2(inp), inp + 3.0)
self.assertIs(len, builtins.len)
def test_sqrt(self):
class Sqrt1(torch.nn.Module):
def forward(self, x):
return sqrt(x.size(0))
class Sqrt2(torch.nn.Module):
def forward(self, x):
return math.sqrt(x.size(0))
class Sqrt3(torch.nn.Module):
def forward(self, x):
return x + math.sqrt(2) + sqrt(2)
self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
self.assertIs(sqrt, _sqrt)
self.assertIs(math.sqrt, _sqrt)
def test_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
def test_pickle_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
def test_custom_proxy_type(self):
class TensorPair:
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair(x : TensorPair, y : TensorPair):
s = x.add(y)
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair(x, y)
traced = symbolic_trace(use_tensor_pair)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
    def test_custom_proxy_type_literal(self):
        """ProxyableClassMeta allows a literal construction of the custom type
        inside the traced function (the constructor call itself is traced)."""
        class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
            def __init__(self, left, right):
                self.left, self.right = left, right

            def add(self, other):
                l = self.left + other.left
                r = self.right + other.right
                return TensorPair(l, r)

            def mul(self, other):
                l = self.left * other.left
                r = self.right * other.right
                return TensorPair(l, r)

        def use_tensor_pair_literal(x : TensorPair):
            # TensorPair(...) appears as a literal in the traced function body.
            s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
            return s.mul(x)

        x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))

        ref_out = use_tensor_pair_literal(x)

        traced = symbolic_trace(use_tensor_pair_literal)

        traced_out = traced(x)
        self.assertEqual(traced_out.left, ref_out.left)
        self.assertEqual(traced_out.right, ref_out.right)
    def test_custom_proxy_dynamic_value(self):
        """ProxyableClassMeta also handles constructing the custom type from a
        dynamic (traced) tensor value rather than a constant."""
        class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
            def __init__(self, left, right):
                self.left, self.right = left, right

            def add(self, other):
                l = self.left + other.left
                r = self.right + other.right
                return TensorPair(l, r)

            def mul(self, other):
                l = self.left * other.left
                r = self.right * other.right
                return TensorPair(l, r)

        def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
            # `y` is a traced input here, so the ctor receives Proxy values.
            s = x.add(TensorPair(y, y))
            return s.mul(x)

        x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
        y = torch.randn(5, 3)
        ref_out = use_tensor_pair_ctor(x, y)

        traced = symbolic_trace(use_tensor_pair_ctor)

        traced_out = traced(x, y)
        self.assertEqual(traced_out.left, ref_out.left)
        self.assertEqual(traced_out.right, ref_out.right)
    def test_custom_proxy_input_dependent_control_flow(self):
        """A proxyable class whose __init__ branches on input values should still
        trace and reproduce reference results for the given inputs.

        NOTE(review): the branch in ZeroTensor.__init__ depends on the concrete
        value of `inp.sum()`; presumably the trace captures the behavior seen
        for the supplied inputs — confirm against ProxyableClassMeta semantics.
        """
        class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
            def __init__(self, inp):
                if inp.sum() == 0:
                    self.is_zero = True
                    self.tensor = torch.tensor([])
                else:
                    self.is_zero = False
                    self.tensor = inp

            def add(self, other):
                if self.is_zero:
                    return ZeroTensor(other.tensor)
                elif other.is_zero:
                    return self

        def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
            return ZeroTensor(x + y)

        x, y = torch.randn(5, 3), torch.randn(5, 3)

        ref_out = use_zero_tensor(x, y)

        traced = symbolic_trace(use_zero_tensor)

        traced_out = traced(x, y)

        self.assertEqual(traced_out.is_zero, ref_out.is_zero)
        self.assertEqual(traced_out.tensor, ref_out.tensor)
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
gm = GraphModule(mod, g)
gm.graph.lint()
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_remove_uses(self):
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu)
g.erase_node(neg)
self.assertTrue(neg not in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
traced = symbolic_trace(eb)
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return (x, x + x)
original = M()
traced = symbolic_trace(original)
self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
def test_construct_root_dict(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
add_param : torch.Tensor = torch.rand(3, 4)
gm : torch.fx.GraphModule = torch.fx.GraphModule(
{'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
gm.graph.lint()
assert 'self.foo.bar.baz' in gm.code
x : torch.Tensor = torch.rand(3, 3)
out : torch.Tensor = gm(x)
ref_out : torch.Tensor = linear_mod(x) + add_param
self.assertEqual(out, ref_out)
def test_symbolic_trace_assert(self):
class AssertsTensorShape(torch.nn.Module):
def forward(self, x):
torch._assert(x.shape[1] > 4, "assert_foobar")
return x
m = AssertsTensorShape()
# verify traceability
traced = symbolic_trace(m)
# verify assertion on traced model works correctly at runtime
traced(torch.rand(4, 5))
with self.assertRaisesRegex(AssertionError, "assert_foobar"):
traced(torch.rand(4, 3))
# verify the symbolically traced module is scriptable
ms = torch.jit.script(m)
with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
ms(torch.rand(4, 3))
    def test_fx_create_arg(self):
        """An object implementing `__fx_create_arg__` controls how it is encoded
        as a graph argument; here the constructor call is embedded in the graph
        so the generated code contains `CustomArgObject(`."""
        class CustomArgObject:
            def __init__(self, x, y):
                self.x = x
                self.y = y

            def __fx_create_arg__(self, tracer: torch.fx.Tracer):
                # Encode this object as a call_function node re-invoking the ctor.
                return tracer.create_node(
                    "call_function",
                    CustomArgObject,
                    args=(
                        tracer.create_arg(self.x),
                        tracer.create_arg(self.y),
                    ),
                    kwargs={},
                )

        class HasCustomArgObjectWhenLeaf(torch.nn.Module):
            def forward(self, o: CustomArgObject):
                # Not normally traceable; good reason to make
                # this module a leaf.
                for x in o.x:
                    o.y += x
                return o.y

        class Root(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.inner = HasCustomArgObjectWhenLeaf()

            def forward(self, x, y):
                o = CustomArgObject(x, y)
                return self.inner(o)

        class CreateArgTracer(torch.fx.Tracer):
            def is_leaf_module(self, m, module_qualified_name):
                return type(m) is HasCustomArgObjectWhenLeaf

        m = Root()
        graph = CreateArgTracer().trace(m)
        gm = torch.fx.GraphModule(m, graph)
        assert "CustomArgObject(" in gm.code
def test_trace_fn_constant(self):
some_constant = torch.rand(3, 4)
def add_const(x):
return some_constant + x
traced = symbolic_trace(add_const)
input = torch.rand(3, 4)
self.assertEqual(traced(input), add_const(input))
def test_copy_no_remap(self):
traced = symbolic_trace(SimpleTest())
g = traced.graph
copied = torch.fx.Graph()
for node in g.nodes:
copied.node_copy(node)
with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
copied.lint()
def test_wrong_topo(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
nodes = list(graph.nodes)
nodes[3].append(nodes[2])
with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
graph.lint()
def test_wrong_target_type(self):
graph : torch.fx.Graph = torch.fx.Graph()
with self.assertRaises(ValueError):
n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo',
args=(), kwargs={})
def test_example_shape_prop(self):
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
# Make sure we're testing all opcodes
opcodes = set()
output_shape : Optional[torch.Shape] = None
output_stride : Optional[Tuple[int]] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].meta['tensor_meta'].shape
output_stride = node.args[0].meta['tensor_meta'].stride
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
# Test shape propogation and make sure results match actual
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
    def test_shape_prop_layout(self):
        """ShapeProp records memory_format in tensor_meta; after converting the
        module and input to channels_last, the placeholder's recorded
        memory_format reflects that."""
        class ConvTest(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.conv_mod = torch.nn.Conv2d(5, 5, 3)

            def forward(self, x):
                return self.conv_mod(x)

        # contiguous layout
        test_mod = ConvTest()
        traced = symbolic_trace(test_mod)
        x = torch.randn(5, 5, 224, 224)
        shape_prop.ShapeProp(traced).propagate(x)

        assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
                   for node in traced.graph.nodes))

        x_channels_last = x.contiguous(memory_format=torch.channels_last)
        traced.to(memory_format=torch.channels_last)
        shape_prop.ShapeProp(traced).propagate(x_channels_last)
        for node in traced.graph.nodes:
            # NB: the implementation of conv may not preserve the memory format,
            # unfortunately. The best we can do is just check that the placeholder
            # node is channels-last
            if node.op in {'placeholder'}:
                self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
    def test_shape_prop_aggregate(self):
        """ShapeProp handles leaf modules returning aggregates: the call_module
        node's tensor_meta mirrors the (int, tensor-meta) tuple structure."""
        class ReturnTwo(torch.nn.Module):
            def forward(self, x):
                return (3, torch.sum(x))

        class UnderTest(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.rt = ReturnTwo()

            def forward(self, x):
                return self.rt(x)

        ut = UnderTest()

        class RTTracer(torch.fx.Tracer):
            def is_leaf_module(self, m, module_qualified_name):
                # Keep ReturnTwo opaque so its tuple return stays a single node.
                return type(m) is ReturnTwo

        graph = RTTracer().trace(ut)

        mod = torch.fx.GraphModule(ut, graph)

        shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))

        for node in mod.graph.nodes:
            if node.op == 'call_module':
                assert 'tensor_meta' in node.meta
                tensor_meta = node.meta['tensor_meta']
                assert tensor_meta[0] == 3
                assert tensor_meta[1].shape == torch.Size([])
    def test_shape_prop_layout_3d(self):
        """3-D analogue of test_shape_prop_layout using Conv3d/channels_last_3d."""
        class ConvTest3d(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.conv_mod = torch.nn.Conv3d(5, 5, 3)

            def forward(self, x):
                return self.conv_mod(x)

        test_mod_3d = ConvTest3d()
        traced_3d = symbolic_trace(test_mod_3d)
        x_3d = torch.randn(5, 5, 224, 224, 15)
        shape_prop.ShapeProp(traced_3d).propagate(x_3d)
        assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
                   for node in traced_3d.graph.nodes))

        x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
        traced_3d.to(memory_format=torch.channels_last_3d)
        shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
        for node in traced_3d.graph.nodes:
            # NB: the implementation of conv may not preserve the memory format,
            # unfortunately. The best we can do is just check that the placeholder
            # node is channels-last
            if node.op in {'placeholder'}:
                self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
def test_interpreter(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
interpreter = Interpreter(gm)
input = torch.randn(3, 4)
self.assertEqual(interpreter.run(input), gm(input))
self.assertEqual(interpreter.run(input), m(input))
    def test_interpreter_run_node_override(self):
        """Overriding Interpreter.run_node should see every node; here the
        override caches each node's computed value on the node itself."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.linear = torch.nn.Linear(4, 5)
            def forward(self, x):
                return self.linear(x + self.param).clamp(min=0.0, max=1.0)
        m = MyModule()
        gm = torch.fx.symbolic_trace(m)
        class RunNodeInterpreter(Interpreter):
            def __init__(self, module):
                super().__init__(module)
            def run_node(self, n : Node) -> Any:
                # Delegate to the default implementation, then stash the result.
                result = super().run_node(n)
                n.cached_value = result
                return result
        input = torch.randn(3, 4)
        RunNodeInterpreter(gm).run(input)
        # Every node (including placeholders and output) must have been visited.
        for node in gm.graph.nodes:
            assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(n)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(n)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
    def test_interpreter_partial_eval(self):
        """Interpreter.run with initial_env should use the pre-seeded value for
        a node instead of executing it (partial evaluation)."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.linear = torch.nn.Linear(4, 5)
            def forward(self, x):
                return self.linear(x + self.param).clamp(min=0.0, max=1.0)
        gm = torch.fx.symbolic_trace(MyModule())
        interp = Interpreter(gm)
        env = {}
        # Pre-seed the linear call_module node with a fixed tensor so the
        # linear layer (and everything before it) is never actually executed.
        for node in gm.graph.nodes:
            if node.op == 'call_module' and node.target == 'linear':
                env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
                break
        assert len(env) == 1
        x = torch.randn(3, 4)
        result = interp.run(x, initial_env=env)
        # Only the trailing clamp runs on the injected value.
        self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
def test_interpreter_star_args(self):
def with_star_args(x, *args):
return x + args[0]
gm = torch.fx.symbolic_trace(with_star_args)
interp = Interpreter(gm)
result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
self.assertEqual(result, torch.ones(3, 4) * 2.0)
    @skipIfNoTorchVision
    def test_interpreter_noop_resnet18(self):
        """A no-op Transformer pass over resnet18 must be numerically identical
        to the original model."""
        rn18 = torchvision_models.resnet18()
        transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
        inp = torch.randn(5, 3, 224, 224)
        self.assertEqual(transformed(inp), rn18(inp))
    @skipIfNoTorchVision
    def test_interpreter_gc_values(self):
        """After Interpreter.run completes, intermediate values should have been
        garbage-collected from the env; only the output node's value remains."""
        rn18 = torchvision_models.resnet18()
        interp = Interpreter(symbolic_trace(rn18))
        inp = torch.rand(5, 3, 224, 224)
        out = interp.run(inp)
        env_key_names = set(n.name for n in interp.env.keys())
        self.assertEqual(env_key_names, set(['output']))
    def test_transformer_noop(self):
        """A default (no-op) Transformer.transform must produce a GraphModule
        equivalent to the input."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.linear = torch.nn.Linear(4, 5)
            def forward(self, x):
                return self.linear(x + self.param).clamp(min=0.0, max=1.0)
        m = MyModule()
        gm = torch.fx.symbolic_trace(m)
        new_gm = Transformer(gm).transform()
        input = torch.randn(3, 4)
        self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(n)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(n)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
    def test_transformer_multi_outputs(self):
        """Transformer should correctly handle a module whose forward returns
        multiple outputs (a tuple)."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.linear = torch.nn.Linear(4, 5)
            def forward(self, x):
                x = x + self.param
                out = self.linear(x)
                return x, out
        m = MyModule()
        gm = torch.fx.symbolic_trace(m)
        new_gm = Transformer(gm).transform()
        input = torch.randn(3, 4)
        self.assertEqual(new_gm(input), gm(input))
    def test_fn_type_annotations(self):
        """A traced module should preserve forward() type annotations well
        enough that the result is still torch.jit.script-able and callable."""
        class Foo(torch.nn.Module):
            def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
                return {'a': p.x + p.y + z + i}
        foo_scripted = torch.jit.script(Foo())
        foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
        fxed = symbolic_trace(Foo())
        fxed_scripted = torch.jit.script(fxed)
        fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
def forward(a : List[torch.Tensor]):
return a[0]
torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
    def test_sequential(self):
        """Smoke test: a traced nn.Sequential must survive copy.deepcopy."""
        m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
        gm = torch.fx.symbolic_trace(m)
        gm_copy = copy.deepcopy(gm)
    def test_ctx_mgr(self):
        """Tracing must work when forward() is decorated with a context-manager
        decorator (contextmanager instances used as decorators)."""
        @contextlib.contextmanager
        def do_nothing():
            yield
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
            @do_nothing()
            def forward(self, x):
                return torch.relu(x)
        m = M()
        self.checkGraphModule(m, (torch.rand(3, 4),))
    def test_typename_print(self):
        """A node's type_expr annotation should be rendered (as its typing
        qualified name) in the Graph's string representation."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
                                              type_expr=List[float])
        output : torch.fx.Node = graph.output(b)
        self.assertTrue('typing.List[float]' in str(graph))
    def test_ellipsis(self):
        """Tracing must handle slicing with Ellipsis (`...`) in subscripts."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
            def forward(self, x, y):
                return x + y[:, 1:10, ...]
        traced = symbolic_trace(M())
        x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
        self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
    def test_inf_nan(self):
        """Codegen must round-trip non-finite float literals (inf/-inf/nan)."""
        class FooMod(torch.nn.Module):
            def forward(self, x):
                return x + float('inf'), x + float('-inf'), x + float('nan')
        fm = FooMod()
        self.checkGraphModule(fm, (torch.rand(3, 4),))
    def test_inf_nan_kwds(self):
        """Non-finite float constants must also survive when nodes are built
        manually via Graph.create_node (not just via tracing)."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
        c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
        graph.output((b, c))
        gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        x = torch.rand(3, 4)
        self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
    def test_deepcopy_recursion_depth(self):
        """copy.deepcopy of a Graph deeper than the recursion limit must not
        hit RecursionError, and must preserve the users relationships."""
        depth = sys.getrecursionlimit() + 20
        g = torch.fx.Graph()
        x = g.placeholder('x')
        # Build a linear chain longer than the interpreter's recursion limit.
        for i in range(depth):
            x = g.call_function(torch.relu, (x,))
        g.output(x)
        copied_graph = copy.deepcopy(g)
        val_map = {}
        for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
            val_map[orig_node] = new_node
        # Each copied node's users must mirror the original's users under the
        # original->copy node mapping.
        for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
            orig_users = set(orig_node.users.keys())
            orig_users_equiv = set(val_map[u] for u in orig_users)
            new_users = set(new_node.users.keys())
            self.assertEqual(orig_users_equiv, new_users)
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = torchvision_models.resnet18()
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
kwargs = node.kwargs.copy()
# Neg doesn't have in-place
kwargs.pop('inplace')
with rn18_traced.graph.inserting_before(node):
new_node = rn18_traced.graph.call_function(
the_function=torch.neg, args=node.args, kwargs=node.kwargs)
node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
    def test_replace_input(self):
        """Node.replace_input_with should redirect a single input edge; relu(x)
        becomes relu(y) and the result depends only on y."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        y : torch.fx.Node = graph.create_node('placeholder', 'y')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
        output : torch.fx.Node = graph.output(b)
        b.replace_input_with(x, y)
        gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        # Deliberately different shapes: only input_y may flow into the relu.
        input_x = torch.randn(33, 44)
        input_y = torch.randn(11, 22)
        self.assertEqual(gm(input_x, input_y), torch.relu(input_y))
    def test_insertion_point(self):
        """graph.inserting_before should place a new node ahead of an existing
        one so it can be wired in as that node's input."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
        output : torch.fx.Node = graph.output(b)
        with graph.inserting_before(b):
            neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
            # Rewire relu's first argument from x to the new neg node.
            _, *relu_args = b.args
            b.args = (neg, *relu_args)
        gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        input = torch.randn(33, 44)
        self.assertEqual(gm(input), torch.relu(torch.neg(input)))
    def test_update_args_api(self):
        """Node.update_arg should replace a positional argument in place and be
        reflected after recompiling into a new GraphModule."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        y : torch.fx.Node = graph.create_node('placeholder', 'y')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
        output : torch.fx.Node = graph.output(b)
        orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
        self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
        # Swap the relu's input from x to y.
        b.update_arg(0, y)
        new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
    def test_update_kwargs_api(self):
        """Node.update_kwarg should replace a keyword argument in place and be
        reflected after recompiling into a new GraphModule."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        y : torch.fx.Node = graph.create_node('placeholder', 'y')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
        output : torch.fx.Node = graph.output(b)
        orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
        self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
        # Swap the relu's 'input' kwarg from x to y.
        b.update_kwarg('input', y)
        new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
    def test_move_before(self):
        """Node.prepend should move an already-created node so it executes
        before its user (here: move neg before relu)."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
        output : torch.fx.Node = graph.output(b)
        # neg is appended after output; it must be repositioned before relu.
        neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
        _, *relu_args = b.args
        b.args = (neg, *relu_args)
        b.prepend(neg)
        gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        input = torch.randn(33, 44)
        self.assertEqual(gm(input), torch.relu(torch.neg(input)))
    def test_erase_node_error(self):
        """Graph.erase_node must raise when the node still has users."""
        st = SimpleTest()
        traced = symbolic_trace(st)
        for node in traced.graph.nodes:
            # Test deleting with uses both in another Node and at the output
            if node.target in [operator.add, torch.relu]:
                with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
                    traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
    def test_get_torch_func_signature(self):
        """Smoke test: get_signature_for_torch_op must not raise for any
        callable in the torch namespace (result itself is not inspected)."""
        for key in dir(torch):
            obj = getattr(torch, key)
            if callable(obj):
                schemas = get_signature_for_torch_op(obj)
    def test_find_uses(self):
        """Node.users should track every consumer of a node; x feeds relu, add,
        and neg, so it must have exactly three users."""
        graph = torch.fx.Graph()
        x = torch.fx.Proxy(graph.placeholder('x'))
        y = torch.relu(x)
        z = x + x
        u = torch.neg(x)
        graph.output((y + z + u).node)
        graph.lint()
        users_of_x = x.node.users
        # x + x counts once: users is keyed by node, not by edge.
        self.assertEqual(len(users_of_x), 3)
        expected_ops = set(['relu', 'add', 'neg'])
        for use in users_of_x:
            assert any(use.name.startswith(prefix) for prefix in expected_ops)
    def test_inline_graph(self):
        """Graph.graph_copy can splice one traced graph into another: the
        combined graph computes relu(x) then neg of that result."""
        class InlineInto(torch.nn.Module):
            def forward(self, x):
                return torch.relu(x)
        class ToInline(torch.nn.Module):
            def forward(self, x):
                return torch.neg(x)
        inline_into = symbolic_trace(InlineInto())
        to_inline = symbolic_trace(ToInline())
        combined_graph = torch.fx.Graph()
        # Copy the first graph; graph_copy returns its (would-be) output value.
        output_node = combined_graph.graph_copy(inline_into.graph, {})
        # Map the second graph's placeholder onto the first graph's output so
        # the graphs are chained instead of getting a second input.
        input_node = list(to_inline.graph.nodes)[0]
        assert input_node and input_node.op == 'placeholder'
        val_map = {input_node : output_node}
        output = combined_graph.graph_copy(to_inline.graph, val_map)
        combined_graph.output(output)
        combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
        input = torch.rand(3, 4)
        self.assertEqual(combined_module(input), input.relu().neg())
    def test_multi_insert_point(self):
        """Multiple nodes created under one inserting_before context should all
        land before the anchor node, in creation order."""
        graph = torch.fx.Graph()
        x = torch.fx.Proxy(graph.placeholder('x'))
        relu = torch.relu(x)
        with graph.inserting_before(relu.node):
            y = torch.neg(x)
            z = torch.tanh(y)
        graph.output((relu.node, z.node))
        graph.lint()
        # Expected final node order: placeholder, neg, tanh, then relu.
        expected_ops = ['x', 'neg', 'tanh', 'relu']
        for node, expected in zip(graph.nodes, expected_ops):
            assert expected in node.name
    def test_reassign_args_kwargs_uses(self):
        """Assigning Node.args must keep the users bookkeeping consistent:
        adding/removing an operand updates the operand node's users."""
        graph = torch.fx.Graph()
        x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
        z = x + y
        zed = z + z + z
        graph.output(zed.node)
        graph.lint()
        # zed = z + z + z -> zed = z + z + x
        zed.node.args = (zed.node.args[0], x.node)
        # NOTE(review): comparing a dict_keys view against a list relies on the
        # iterable-aware assertEqual of the enclosing TestCase (presumably
        # torch's TestCase, not plain unittest) — confirm against base class.
        self.assertEqual(x.node.users.keys(), [z.node, zed.node])
        # z = x + y -> z = y + y
        z.node.args = (y.node, y.node)
        self.assertEqual(x.node.users.keys(), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
    def test_trace_dict_int_keys(self):
        """Passing a dict with plain int keys to a leaf module is traceable
        (keys are constants, only values are Proxies)."""
        class ModWithDictArg(torch.nn.Module):
            def forward(self, d : Dict[int, torch.Tensor]):
                return d[42]
        class CallsModWithDict(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.m = ModWithDictArg()
            def forward(self, x):
                return self.m({42: x})
        class MyTracer(torch.fx.Tracer):
            def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
                return isinstance(m, ModWithDictArg)
        traced_graph = MyTracer().trace(CallsModWithDict())
    def test_trace_dict_proxy_keys(self):
        """Using a Proxy as a dict *key* must be rejected during tracing
        (dict keys must be concrete, hashable constants)."""
        class ModWithDictArg(torch.nn.Module):
            def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
                return d[42]
        class CallsModWithDict(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.m = ModWithDictArg()
            def forward(self, x):
                # x is a Proxy at trace time — illegal as a dict key.
                return self.m({x: x})
        class MyTracer(torch.fx.Tracer):
            def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
                return isinstance(m, ModWithDictArg)
        with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
            traced_graph = MyTracer().trace(CallsModWithDict())
    def test_module_deepcopy_edit_nodes(self):
        """Editing a deep-copied GraphModule's nodes must not affect the
        original (the copy's graph is fully independent)."""
        class Foo(torch.nn.Module):
            def forward(self, x):
                return torch.relu(x)
        traced1 = symbolic_trace(Foo())
        copied = copy.deepcopy(traced1)
        # Rewrite relu -> neg only in the copy.
        for node in copied.graph.nodes:
            if node.target == torch.relu:
                node.target = torch.neg
        copied.recompile()
        traced1.recompile()
        x = torch.randn(15, 15)
        torch.testing.assert_allclose(traced1(x), torch.relu(x))
        torch.testing.assert_allclose(copied(x), torch.neg(x))
    def test_direct_param_use(self):
        """Directly referencing a submodule's parameter (self.a.b) should trace
        to get_attr nodes, not to lifted tensor constants."""
        class TransposeTest(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.b = torch.nn.Parameter(torch.rand(4, 3))
            def forward(self, x):
                return self.b
        class Foo(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.a = TransposeTest()
            def forward(self, x):
                return self.a.b, self.a.b.t(), self.a.b.view(12)
        traced = torch.fx.symbolic_trace(Foo())
        # No node should be a '_tensor_constant*' attribute reference.
        assert(all('constant' not in node.target for node in traced.graph.nodes))
    def test_single_default_arg(self):
        """Tracing must handle a forward() with one defaulted parameter, both
        when the default is used and when it is overridden."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
            def forward(self, y=1):
                return y
        m = M()
        self.checkGraphModule(m, ())
        self.checkGraphModule(m, (3,))
    def test_multiple_default_args(self):
        """Tracing must handle multiple defaulted parameters with zero, one, or
        all defaults overridden."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
            def forward(self, y=1, z=2):
                return y + z
        m = M()
        self.checkGraphModule(m, ())
        self.checkGraphModule(m, (3,))
        self.checkGraphModule(m, (3, 4))
    def test_regular_and_default_args(self):
        """Tracing must handle a mix of required and defaulted parameters."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
            def forward(self, x, y=1):
                return x + y
        m = M()
        self.checkGraphModule(m, (2,))
        self.checkGraphModule(m, (2, 3))
    def test_string_literal_return(self):
        """A forward() returning a bare string constant must trace/codegen
        correctly."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
            def forward(self):
                return "foo"
        m = M()
        self.checkGraphModule(m, ())
    def test_namedtuple_return_qualname(self):
        """Returning a module-level NamedTuple (referenced by qualified name)
        must survive tracing and re-execution."""
        class NamedTupReturn(torch.nn.Module):
            def forward(self, x):
                return MyNamedTup(x, x)
        traced = symbolic_trace(NamedTupReturn())
        input = torch.rand(3, 4)
        self.assertEqual(traced(input), MyNamedTup(input, input))
    def test_update_args_kwargs_yells_at_you(self):
        """The name-mangled Node.__update_args_kwargs must not be callable from
        outside (it's private API; direct access raises AttributeError)."""
        symtraced = symbolic_trace(SimpleTest())
        node = next(iter(symtraced.graph.nodes))
        with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
            node.__update_args_kwargs((), {})
    def test_torchbind_class_attribute_in_fx(self):
        """A torchbind custom-class attribute must be traceable; skipped on
        platforms where the _TorchScriptTesting classes are not registered."""
        if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
            self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")
        class FooBar1234(torch.nn.Module):
            def __init__(self):
                super(FooBar1234, self).__init__()
                self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])
            def forward(self):
                return self.f.top()
        m = FooBar1234()
        self.checkGraphModule(m, ())
    def test_torchbind_class_attribute_in_fx_tensor_arg(self):
        """A torchbind class method taking a Tensor argument must trace to a
        call_method node and produce matching results."""
        if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
            self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")
        class FooBar2341(torch.nn.Module):
            def __init__(self):
                super(FooBar2341, self).__init__()
                self.f = torch.classes._TorchScriptTesting._ReLUClass()
            def forward(self, x):
                return self.f.run(x)
        m = FooBar2341()
        traced = symbolic_trace(m)
        input = torch.randn(3, 4)
        self.assertEqual(traced(input), m(input))
        self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
    def test_script_method_trace(self):
        """Calling a torch.jit.script-ed submodule during tracing should be
        recorded as a call_method node, and results must match eager."""
        class Scripted(torch.nn.Module):
            def forward(self, x):
                return torch.relu(x)
        class Holder(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.s = torch.jit.script(Scripted())
            def forward(self, x):
                return self.s(x)
        h = Holder()
        traced = symbolic_trace(h)
        input = torch.randn(3, 4)
        self.assertEqual(traced(input), h(input))
        self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
    def test_namedtuple_return_trace(self):
        """Returning a NamedTuple (Pair) from forward must round-trip through
        tracing and execution."""
        class NamedTupReturn(torch.nn.Module):
            def forward(self, x):
                return Pair(x, x)
        traced = symbolic_trace(NamedTupReturn())
        input = torch.rand(3, 4)
        self.assertEqual(traced(input), Pair(input, input))
    def test_return_type_exists(self):
        """Traced code should keep return-type annotations (mangled to
        typing_List in generated code) and remain scriptable with the real
        List[str] type."""
        class ReturnTypeModule(torch.nn.Module):
            def other(self, x: List[str]) -> List[str]:
                return x
            def forward(self, x: List[str]) -> List[str]:
                return self.other(x)
        traced = symbolic_trace(ReturnTypeModule())
        self.assertIn("-> typing_List[str]", traced._code)
        scripted = torch.jit.script(traced)
        self.assertIn("-> List[str]", scripted.code)
    def getitem_inner(self):
        """Shared body for the getitem tests: trace three flavors of buffer
        indexing (proxy slice, proxy index, constant index)."""
        class GetItemBase(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.register_buffer('pe', torch.randn(8, 8))
        class GetItem1(GetItemBase):
            def forward(self, x):
                return self.pe[:, :x.size(0)]
        class GetItem2(GetItemBase):
            def forward(self, x):
                return self.pe[x.size(0)]
        class GetItem3(GetItemBase):
            def forward(self, x):
                return self.pe[4]  # fx creates `self._tensor_constant0` here
        self.checkGraphModule(GetItem1(), [torch.zeros(4)])
        self.checkGraphModule(GetItem2(), [torch.zeros(4)])
        self.checkGraphModule(GetItem3(), [torch.zeros(4)])
    @unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
                         "Will be checked in test_getitem_subproc")
    def test_getitem(self):
        """Run getitem_inner directly; only enabled when FX_PATCH_GETITEM=1
        (otherwise covered by test_getitem_subproc)."""
        self.getitem_inner()
    def test_getitem_subproc(self):
        """Run the getitem checks in a child process and require a clean exit."""
        # need to run this test in a subproc to work around:
        # https://github.com/pytorch/pytorch/issues/50710
        proc = Process(target=run_getitem_target)
        proc.start()
        proc.join()
        self.assertEqual(proc.exitcode, 0)
    def test_user_friendly_call_provenance_with_function(self):
        """Scripting a traced function that calls an un-scriptable wrapped fn
        should produce an error naming the call site ('fn.forward')."""
        def fn(x):
            return wrapper_fn(x)
        traced = torch.fx.symbolic_trace(fn)
        with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
                                    "being compiled since it was called"
                                    " from 'fn.forward'"):
            scripted = torch.jit.script(traced)
    def test_user_friendly_call_provenance_with_module(self):
        """Same provenance check as above, but the call site is a module's
        forward ('M.forward')."""
        class M(torch.nn.Module):
            def forward(self, x):
                return wrapper_fn(x)
        traced = torch.fx.symbolic_trace(M())
        with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
                                    "being compiled since it was called"
                                    " from 'M.forward'"):
            scripted = torch.jit.script(traced)
    def test_snake_case(self):
        """Node *names* are normalized to snake_case while node *targets* keep
        the original ModuleDict key spelling."""
        class M(torch.nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.activations = torch.nn.ModuleDict([
                    ["snake_case", torch.nn.ReLU()],
                    ["PascalCase", torch.nn.LeakyReLU()],
                    ["ALL_CAPS", torch.nn.PReLU()]
                ])
            def forward(self, x):
                a = self.activations["snake_case"](x)
                b = self.activations["PascalCase"](x)
                c = self.activations["ALL_CAPS"](x)
                return a, b, c
        traced = symbolic_trace(M())
        # (expected node name, expected node target) per call_module node.
        check = [
            ("activations_snake_case", "activations.snake_case"),
            ("activations_pascal_case", "activations.PascalCase"),
            ("activations_all_caps", "activations.ALL_CAPS")
        ]
        i = 0
        for node in traced.graph.nodes:
            if node.op == "placeholder" or node.op == "output":
                continue
            name = check[i][0]
            target = check[i][1]
            self.assertEqual(name, node.name)
            self.assertEqual(target, node.target)
            i += 1
        # All three activation calls must have been visited.
        self.assertEqual(i, 3)
    def test_no_mutation(self):
        """immutable_list must reject item assignment (points users at the
        new_args API instead)."""
        from torch.fx.immutable_collections import immutable_list
        x = immutable_list([3, 4])
        with self.assertRaisesRegex(NotImplementedError, "new_args"):
            x[0] = 4
    def test_partial_trace(self):
        """concrete_args specializes a trace to a fixed argument value and
        inserts torch._assert guards checking that value at runtime; also
        covers specializing a function-valued argument."""
        class Foo(torch.nn.Module):
            def forward(self, x, y):
                if y:
                    return 2 * x
                else:
                    return x
        mod = Foo()
        mod_true = symbolic_trace(mod, concrete_args={'y': True})
        mod_false = symbolic_trace(mod, concrete_args={'y': False})
        self.assertEqual(mod_true(3, True), 6)
        print(mod_true.code)
        # The specialized module must guard on y's concrete value.
        assert(any([i.target == torch._assert for i in mod_true.graph.nodes]))
        with self.assertRaises(AssertionError):
            mod_true(3, False)
        self.assertEqual(mod_false(3, False), 3)
        with self.assertRaises(AssertionError):
            mod_false(3, True)
        # Higher-order case: specialize on a callable argument.
        def f_higher(a, f):
            return f(a)
        nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
        self.assertEqual(nf(3, lambda x: x * 2), 6)
    def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
        """When generated forward() code itself raises, the error output must
        include the FX-specific traceback banner."""
        class M(torch.nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.W = torch.nn.Parameter(torch.randn(5))
            def forward(self, x):
                return torch.dot(self.W, x)
        traced = torch.fx.symbolic_trace(M())
        # Insert a relu on the output value via graph surgery, then recompile.
        out = [n for n in traced.graph.nodes if n.op == "output"][-1]
        with traced.graph.inserting_before(out):
            relu_out = traced.graph.call_method(method_name='relu',
                                                args=(out.args[0],))
        out.args = (relu_out,)
        traced.recompile()
        with self.capture_stderr() as captured:
            with self.assertRaises(TypeError):
                # Passing an int where a Tensor is expected fails inside the
                # generated forward, triggering the custom traceback.
                traced(5)
        self.assertRegex(captured[0],
                         r"Call using an FX-traced Module, line .* of the "
                         r"traced Module's generated forward function:")
    def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
        """When the failure originates inside a submodule (not the generated
        code), the FX traceback banner must NOT appear."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.linear = torch.nn.Linear(3, 4)
            def forward(self, x):
                return self.linear(x)
        traced = torch.fx.symbolic_trace(M())
        # Do not change this to `capture_stderr` or another context
        # manager without ensuring that the output is as expected
        try:
            # Shape mismatch: linear expects last dim 3, gets 5.
            traced(torch.rand(5, 5))
        except RuntimeError:
            captured = traceback.format_exc()
        self.assertNotRegex(captured,
                            r"Call using an FX-traced Module, line .* of the "
                            r"traced Module's generated forward function:")
    def test_graph_module_replicate_for_dp(self):
        """A GraphModule replica from _replicate_for_data_parallel must compute
        the same results as the original."""
        class Foo(torch.nn.Module):
            def forward(self, x):
                return torch.relu(x)
        gm = torch.fx.symbolic_trace(Foo())
        x = torch.randn(5, 3)
        out = gm(x)
        replica = gm._replicate_for_data_parallel()
        out_replica = replica(x)
        torch.testing.assert_allclose(out_replica, out)
    def test_ast_rewriter_rewrites_assert(self):
        """RewritingTracer must handle a bare `assert` in forward (rewritten
        into traceable form); the resulting graph must pass lint."""
        class M(torch.nn.Module):
            def forward(self, x: torch.Tensor, y: int, z: int):
                assert y == z
                return torch.add(x, x)
        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(M())
        traced = GraphModule(ast_rewriter.root, graph, "gm")
        traced.graph.lint()
    def test_ast_rewriter_rewrites_assert_with_message(self):
        """Same as test_ast_rewriter_rewrites_assert, but the assert carries a
        message operand."""
        class M(torch.nn.Module):
            def forward(self, x: torch.Tensor, y: int, z: int):
                assert y == z, "msg"
                return torch.add(x, x)
        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(M())
        traced = GraphModule(ast_rewriter.root, graph, "gm")
        traced.graph.lint()
    def test_ast_rewriter_reassigns_submodules(self):
        """RewritingTracer must handle a module holding submodules (here an
        unused BatchNorm2d); the rewritten graph must pass lint."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.bn = torch.nn.BatchNorm2d(100)
            def forward(self, x: torch.Tensor):
                return torch.add(x, x)
        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(M())
        traced = GraphModule(ast_rewriter.root, graph, "gm")
        traced.graph.lint()
    def test_ast_rewriter_wrap(self):
        """A torch.fx.wrap-ed free function must stay a call in the rewritten
        graph (not be traced through), and the original function object must
        be left unpatched afterwards."""
        self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
        def to_trace(y):
            return (
                a_lifted_leaf((4, y), 3)
                + a_lifted_leaf((3, 4), 5)
                + a_lifted_leaf((y, y), y)
            )
        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(to_trace)
        traced = GraphModule(ast_rewriter.root, graph, "gm")
        self.assertIn("a_lifted_leaf", traced.code)
        self.assertEqual(27, traced(2))
        self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
    def test_ast_rewriter_wrap_fn_directly(self):
        """Same as test_ast_rewriter_wrap, but for a function wrapped by
        passing the function object directly to torch.fx.wrap."""
        self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
        def to_trace(y):
            return (
                a_lifted_leaf2((4, y), 3)
                + a_lifted_leaf2((3, 4), 5)
                + a_lifted_leaf2((y, y), y)
            )
        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(to_trace)
        traced = GraphModule(ast_rewriter.root, graph, "gm")
        self.assertIn("a_lifted_leaf2", traced.code)
        self.assertEqual(27, traced(2))
        self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
    def test_ast_rewriter_wrapped_via_decorator(self):
        """A decorator-wrapped function called from a module's forward must
        appear by name in rewritten code, execute correctly, and leave the
        function object unpatched (no __fx_already_patched flag)."""
        class F(torch.nn.Module):
            def forward(self, x):
                return wrapped_via_decorator(x)
        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(F())
        traced = GraphModule(ast_rewriter.root, graph, "gm")
        self.assertIn("wrapped_via_decorator", traced.code)
        self.assertEqual(traced(0), 1)
        self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
        self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
    def test_ast_rewriter_wrapped_via_decorator_and_transformed(self):
        """The wrapped-function call must survive not only the AST rewrite but
        also a subsequent Transformer pass."""
        self.assertEqual(wrapped_via_decorator(0), 1)
        def to_trace(y):
            return wrapped_via_decorator(y)
        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(to_trace)
        traced = GraphModule(ast_rewriter.root, graph, "gm")
        self.assertIn("wrapped_via_decorator", traced.code)
        self.assertEqual(traced(0), 1)
        self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
        self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
        # Round-trip through a no-op Transformer; the wrapped call must remain.
        transformed = torch.fx.Transformer(traced).transform()
        self.assertIn("wrapped_via_decorator", transformed.code)
        self.assertEqual(transformed(0), 1)
        self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
        self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
    def test_ast_rewriter_wrap_with_submodule(self):
        """A wrapped function that takes a submodule as an argument must stay a
        call in the rewritten graph and compute the same result as a reference
        module."""
        class M(torch.nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
            def forward(self, x: torch.Tensor):
                return wrapped_with_submodule(x, self.batchnorm1d)
        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(M())
        traced = GraphModule(ast_rewriter.root, graph, "gm")
        self.assertIn("wrapped_with_submodule", traced.code)
        input = torch.rand(3, 2)
        # affine=False => no learned parameters, so a fresh BatchNorm1d is an
        # exact reference.
        ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
        self.assertEqual(ref_batchnorm1d(input), traced(input))
    def test_submodule_manipulation_API(self):
        """End-to-end exercise of the GraphModule submodule-manipulation API:
        add_submodule / delete_submodule / get_submodule / get_parameter /
        get_buffer / delete_all_unused_submodules, plus the get_attr warning
        for dangling references."""
        class C(torch.nn.Module):
            def __init__(self):
                super(C, self).__init__()
                self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
                self.param = torch.nn.Parameter(torch.rand(2, 3))
            def forward(self, x):
                return self.conv(torch.cat([self.param, x]))
        class B(torch.nn.Module):
            def __init__(self):
                super(B, self).__init__()
                self.linear = torch.nn.Linear(100, 200)
                self.register_buffer("buf", torch.randn(2, 3))
                self.net_c = C()
            def forward(self, x):
                return self.linear(torch.cat([self.buf, self.net_c(x)]))
        class A(torch.nn.Module):
            def __init__(self):
                super(A, self).__init__()
                self.net_b = B()
                self.param = torch.nn.Parameter(torch.rand(2, 3))
            def forward(self, x):
                return self.net_b(x) + self.param
        a = symbolic_trace(A())
        # Add a new nested submodule, then replace the conv node with a call to it.
        a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))
        conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
        with a.graph.inserting_before(conv):
            with warnings.catch_warnings(record=True) as w:
                # call_module for an existing submodule must emit no warning.
                dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
                                              args=conv.args)
                self.assertEqual(len(w), 0)
        conv.replace_all_uses_with(dropout)
        a.graph.erase_node(conv)
        a.recompile()
        def module_exists(gm: GraphModule, path: str) -> bool:
            # True iff `path` names a module reachable from gm.
            return any(path == name for name, _ in gm.named_modules())
        def parameter_exists(gm: GraphModule, path: str) -> bool:
            # True iff `path` names a Parameter, both by iteration and in state_dict.
            return (any(path == name for name, _ in gm.named_parameters())
                    and any(path == name for name in gm.state_dict().keys()))
        def buffer_exists(gm: GraphModule, path: str) -> bool:
            # True iff `path` names a buffer, both by iteration and in state_dict.
            return (any(path == name for name, _ in gm.named_buffers())
                    and any(path == name for name in gm.state_dict().keys()))
        # Test that we added the "dropout" submodule
        self.assertTrue(module_exists(a, "net_b.net_c.dropout"))
        # Test `get_submodule` with an added submodule
        self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))
        # Test that the "conv" submodule is still there
        self.assertTrue(module_exists(a, "net_b.net_c.conv"))
        # Test `get_submodule` with an original module
        self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))
        # Test that the "conv" node is NOT still there
        conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
        self.assertEqual(conv, [])
        a.delete_submodule("net_b.net_c.conv")
        # Test that the "conv" submodule is now gone
        self.assertFalse(module_exists(a, "net_b.net_c.conv"))
        # Test `get_submodule` with a deleted submodule
        with self.assertRaisesRegex(AttributeError, "has no attribute "
                                    "`conv`"):
            self.assertIsNone(a.get_submodule("net_b.net_c.conv"))
        # Test `get_attr` warnings
        cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
        with a.graph.inserting_before(cat):
            with warnings.catch_warnings(record=True) as w:
                param = a.graph.get_attr(qualified_name="net_b.net_c.param")
                self.assertEqual(len(w), 0)
            # A get_attr for a nonexistent target must warn (then clean it up).
            with self.assertWarnsRegex(UserWarning, "Attempted to "
                                       "insert a get_attr Node with no "
                                       "underlying reference in the "
                                       "owning GraphModule"):
                bad_param = a.graph.get_attr(qualified_name="net_b.param")
                a.graph.erase_node(bad_param)
        cat.args = (*cat.args, param)
        a.recompile()
        a.graph.lint()
        # Test `get_parameter`
        a.get_parameter("net_b.net_c.param")
        with self.assertRaisesRegex(AttributeError, "is not an "
                                    "nn.Parameter"):
            a.get_parameter("net_b.buf")
        with self.assertRaisesRegex(AttributeError, "has no attribute "
                                    "`param`"):
            a.get_parameter("net_b.param")
        # Test `get_buffer`
        a.get_buffer("net_b.buf")
        with self.assertRaisesRegex(AttributeError, "is not a "
                                    "buffer"):
            a.get_buffer("net_b.net_c.param")
        with self.assertRaisesRegex(AttributeError, "has no attribute "
                                    "`buf`"):
            a.get_buffer("net_b.net_c.buf")
        # Test non-nested attributes
        a.get_submodule("")
        a.get_parameter("param")
        # Insert some unused submodules
        a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
        a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
        a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
        a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))
        # Garbage collection
        a.delete_all_unused_submodules()
        # Test that all the unused submodules are gone
        self.assertFalse(module_exists(a, "net_b.embedding"))
        self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
        self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
        self.assertFalse(module_exists(a, "batch_norm_2d"))
        # Test that we didn't delete any unused Parameters or buffers
        self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
        self.assertTrue(buffer_exists(a, "net_b.buf"))
        a.graph.lint()
def test_tracing_graphmodules_as_leaf_submodules(self):
    """Trace a module whose submodules are GraphModules under three different
    leaf-module policies, checking both graph structure and whether a custom
    ``__call__`` on a submodule is honored.
    """
    class A(torch.nn.Module):
        def forward(self, t):
            return t + t

    class B(torch.nn.Module):
        def __init__(self):
            # Fix: use zero-arg super() instead of super(type(self), self),
            # which recurses infinitely under subclassing.
            super().__init__()
            self.calling = False
            self.called = False

        def forward(self, t):
            # Subtract only when entered through our custom __call__.
            if self.calling:
                return t - t
            else:
                return t + t

        def __call__(self, *args):
            self.called = True
            self.calling = True
            try:
                return super().__call__(*args)
            finally:
                # Bugfix: this reset originally appeared *after* the return
                # statement and was unreachable; `finally` runs it as intended.
                self.calling = False

    class M(torch.nn.Module):
        def __init__(self, a, b):
            super().__init__()
            self.a = a
            self.b = b

        def forward(self, t):
            x = self.a(t)
            y = self.b(t)
            return x + y

    class LeafTracer(Tracer):
        def is_leaf_module(self, module, name):
            return True

    class LeafTracerNotB(Tracer):
        def is_leaf_module(self, module, name):
            return False if "b" in name else True

    # Recompile calls added "for fun", since they
    # chain __call__ wrappers.
    #
    # Test: B as a regular, non-leaf module
    #
    a = symbolic_trace(A())
    a.recompile()
    m = M(a, B())
    graph = LeafTracerNotB().trace(m)
    gm = GraphModule(m, graph)
    gm.recompile()
    # Test graphmodule/submodule a is not inlined.
    self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
    match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
    self.assertTrue(len(match) == 1)
    # Test submodule b is not treated as leaf.
    self.assertFalse(hasattr(gm, "b"))
    # Test assert custom __call__ on submodule b was honored.
    match = [
        n
        for n in gm.graph.nodes
        if n.op == "call_function" and n.target == operator.sub
    ]
    self.assertTrue(len(match) == 1)
    #
    # Test: B as a regular, leaf module
    # symbolic_trace should only patch torch.nn.Module.__call__,
    # which means B.__call__ should still execute
    #
    a = symbolic_trace(A())
    a.recompile()
    b = B()
    m = M(a, b)
    graph = LeafTracer().trace(m)
    gm = GraphModule(m, graph)
    gm.recompile()
    # Test graphmodule/submodule a is not inlined.
    self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
    match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
    self.assertTrue(len(match) == 1)
    # Test submodule b is leaf:
    self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
    match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
    self.assertTrue(len(match) == 1)
    # Test b.__call__ was run
    self.assertTrue(b.called)
    self.assertTrue(gm.get_submodule("b").called)
    #
    # Test: B as GraphModule leaf
    # __call__ not honored since symbolic_trace directly invokes forward()
    #
    a = symbolic_trace(A())
    a.recompile()
    b = symbolic_trace(B())
    b.recompile()
    m = M(a, b)
    graph = LeafTracer().trace(m)
    gm = GraphModule(m, graph)
    gm.recompile()
    self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
    match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
    self.assertTrue(len(match) == 1)
    self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
    match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
    self.assertTrue(len(match) == 1)
def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
    """Build a second GraphModule from either a dict of tensors or the root
    module, and verify the buffer and parameter are found, unchanged, and
    produce the same forward result."""
    class ModuleWithState(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.register_buffer("my_buff", torch.rand(3, 4))
            self.register_parameter(
                "my_param", torch.nn.Parameter(torch.rand(3, 4))
            )

        def forward(self, x):
            return x + self.my_buff + self.my_param

    module = ModuleWithState()
    traced = symbolic_trace(module)
    # Grab the state from the traced module before rebuilding.
    buff_before = traced.get_buffer("my_buff")
    param_before = traced.get_parameter("my_param")
    root = {"my_buff": buff_before, "my_param": param_before} if use_dict_init else module
    rebuilt = GraphModule(root, traced.graph)
    # Both attributes must be discoverable on the rebuilt module and equal.
    try:
        buff_after = rebuilt.get_buffer("my_buff")
    except Exception:
        self.fail("Did not find my_buff")
    self.assertEqual(buff_before, buff_after)
    try:
        param_after = rebuilt.get_parameter("my_param")
    except Exception:
        self.fail("Did not find my_param")
    self.assertEqual(param_before, param_after)
    # The rebuilt module must compute exactly what the traced one does.
    x = torch.rand(3, 4)
    self.assertEqual(traced(x), rebuilt(x))
def test_graph_module_init_buffer_param_copied_dict_init(self):
    """Dict-rooted GraphModule construction copies buffers and parameters."""
    self._test_graph_module_init_buffer_param_copied(True)
def test_graph_module_init_buffer_param_copied_mod_init(self):
    """Module-rooted GraphModule construction copies buffers and parameters."""
    self._test_graph_module_init_buffer_param_copied(False)
def test_annotations_with_no_forward_references(self):
    """Tracing must tolerate plain (non-string) annotations, including ones
    referencing a non-torch user class."""
    class Doubler:
        def __call__(self, x: torch.Tensor):
            return torch.add(x, x)

    class Wrapper(torch.nn.Module):
        def forward(self, x: torch.Tensor, a: Doubler) -> torch.Tensor:
            return a(x)

    self.checkGraphModule(Wrapper(), (torch.rand(2, 3), Doubler()), kwargs=None)
def test_annotations_with_forward_references(self):
    """String ("forward reference") annotations must be accepted by the tracer."""
    class A:
        def __call__(self, x: torch.Tensor):
            return torch.add(x, x)
    class M(torch.nn.Module):
        # NOTE: the annotations are deliberately string literals; the text
        # 'A' must continue to match the class name above.
        def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
            return a(x)
    self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
    """Parameterized container annotations (List[torch.Tensor]) with no string
    forward references must trace cleanly."""
    class Doubler:
        def __call__(self, x: torch.Tensor):
            return torch.add(x, x)

    class Wrapper(torch.nn.Module):
        def forward(self, x: List[torch.Tensor], a: Doubler) -> torch.Tensor:
            return a(x[0])

    self.checkGraphModule(Wrapper(), (torch.rand(2, 3), Doubler()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
    """Container annotations holding string forward references
    (List['torch.Tensor']) must be accepted by the tracer."""
    class A:
        def __call__(self, x: torch.Tensor):
            return torch.add(x, x)
    class M(torch.nn.Module):
        def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
            # a(x) doubles the input tensor; [0] then takes its first row.
            return a(x)[0]
    self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
@unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
                 "`annotations` is not defined in Python <3.7")
def test_annotation_with_future(self):
    """Importing a module that uses `from __future__ import annotations` must
    not break symbolic tracing (the tracing happens inside fx.test_future at
    import time)."""
    try:
        import fx.test_future    # noqa: F401
    finally:
        # Remove the cached module so later tests see a clean state.
        del sys.modules["__future__"]
def test_annotations_empty_tuple(self):
    """Empty-tuple annotations must survive FX codegen and TorchScript."""
    class EmptyTupleModule(torch.nn.Module):
        def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
            return "foo"

    traced = torch.fx.symbolic_trace(EmptyTupleModule())
    args = ((), ("bar", ()))
    traced(*args)
    # FX spells these annotations with its own aliases in the generated code.
    FileCheck().check("_Tuple[()]") \
        .check("typing_Tuple[str,typing_Tuple[()]]") \
        .run(traced.code)
    scripted = torch.jit.script(traced)
    scripted(*args)
    # TorchScript normalizes them back to plain Tuple syntax.
    FileCheck().check("Tuple[()]") \
        .check("Tuple[str, Tuple[()]]") \
        .run(scripted.code)
@skipIfNoTorchVision
def test_cpatcher(self):
    """Smoke-test torch._C._fx.patch_function: a sys profiler hook patches
    every C-level call observed while running a torchvision resnet18, and the
    patch wrapper must actually fire (cnt != 0)."""
    cnt = 0
    def patched_impl(to_patch, args, kwargs):
        # Count each patched invocation, then defer to the real function.
        nonlocal cnt
        cnt += 1
        return to_patch(*args, **kwargs)
    c_patch_enabled = True
    def patched_in(to_patch, args, kwargs):
        # Guard against re-entrant patching while inside a patched call.
        nonlocal c_patch_enabled
        try:
            c_patch_enabled = False
            r = patched_impl(to_patch, args, kwargs)
        finally:
            c_patch_enabled = True
        return r
    def trace_func(frame, action, arg):
        # Profiler callback: only intercept C-function call events.
        if action == 'c_call':
            if c_patch_enabled:
                torch._C._fx.patch_function(arg, patched_in)
    import torch  # NOTE(review): redundant re-import; torch is already in scope at module level.
    rn = torchvision_models.resnet18()
    try:
        sys.setprofile(trace_func)
        rn(torch.rand(1, 3, 224, 224))
        print("testing print patch")
    finally:
        # Always detach the profiler so later tests are unaffected.
        sys.setprofile(None)
    assert(cnt != 0)
def test_randn(self):
    """Calls to torch.randn appear in the traced graph only when C-level
    patching is enabled at trace time."""
    def make_noise():
        return torch.randn(3, 3)

    # Toggle cpatching on, off, and on again to confirm re-enabling works.
    for cpatch_enabled in (True, False, True):
        traced = symbolic_trace(make_noise, enable_cpatching=cpatch_enabled)
        saw_randn = any(node.target == torch.randn for node in traced.graph.nodes)
        assert saw_randn == cpatch_enabled
def test_pytree(self):
    """symbolic_trace with pytree-structured concrete_args: one placeholder is
    created per flattened leaf, retracing collapses back to one placeholder,
    and the traced module survives pickling."""
    def f_sum(x):
        return sum(x)
    def f_sum_dict(x):
        out = 0
        for k, v in x.items():
            out += v
        return out
    def f_dict_list_map(x):
        new_dict = {}
        for k, v in x.items():
            new_dict[k] = [i + 1 for i in v]
        return new_dict
    def f_dict_add(x):
        return x['a'] + sum(x['z'])
    def f_namedtuple_add(x):
        return x.x + x.y
    # Register Foo with both pytree implementations so it can be flattened
    # and (for FX) flattened against a spec.
    pytree._register_pytree_node(
        Foo,
        lambda x: ([x.a, x.b], None),
        lambda x, _: Foo(x[0], x[1]),
    )
    fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])
    def f_custom(x):
        return x.a + x.b
    def f_custom_dict(x):
        return f_sum_dict(x.a) + x.b
    def f_return_custom(x):
        return Foo(x.b, x.a)
    # (function, input-structure) pairs; PH marks a placeholder leaf.
    tests = [
        (f_sum, [PH, PH, PH]),
        (f_sum, []),
        (f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
        (f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
        (f_dict_list_map, {5: (PH, PH, PH)}),
        (f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
        (f_dict_add, {'a': PH, 'z': []}),
        (f_custom, Foo(PH, PH)),
        (f_custom, Foo(PH, 3)),
        (f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
        # (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees
        (f_namedtuple_add, Point(PH, PH)),
    ]
    def verify_pytree(f, inp):
        # Replace each PH leaf with a real tensor to get a concrete input.
        val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
        # Placeholder count is compared against the number of flattened leaves.
        num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
        orig_out = f(val)
        nf = symbolic_trace(f, concrete_args={'x': inp})
        self.assertEqual(nf(val), orig_out)
        assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
        assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
        # Retracing the specialized module collapses to a single structured placeholder.
        nf = symbolic_trace(nf)
        self.assertEqual(nf(val), orig_out)
        assert "tree_flatten_spec" not in nf.code
        assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)
        # Specializing again restores per-leaf placeholders.
        nf = symbolic_trace(nf, concrete_args={'x': inp})
        self.assertEqual(nf(val), orig_out)
        assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
        assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
        # Traced modules with pytree specs must survive a pickle round-trip.
        pickled = pickle.dumps(nf)
        nf = pickle.loads(pickled)
        self.assertEqual(nf(val), orig_out)
    for f, inp in tests:
        verify_pytree(f, inp)
def test_pytree_concrete(self):
    """concrete_args with a nested-dict input specializes the traced function,
    and retracing the result still matches the original."""
    def f(b, a):
        # `b` is concrete at trace time, so only one branch is traced.
        return a['a'] if b else a['z']

    inp = {'a': {'a': PH, 'z': PH}, 'b': True}
    nf = symbolic_trace(f, concrete_args=inp)
    # Materialize the PH placeholders with real tensors.
    val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
    self.assertEqual(nf(**val), f(**val))
    # Retracing the specialized module must preserve behavior.
    nf = symbolic_trace(nf)
    self.assertEqual(nf(**val), f(**val))
def run_getitem_target():
    """Temporarily add Tensor.__getitem__ to FX's wrapped-methods list and run
    the getitem test under that patching, restoring the list afterwards."""
    from torch.fx._symbolic_trace import _wrapped_methods_to_patch
    entry = (torch.Tensor, "__getitem__")
    _wrapped_methods_to_patch.append(entry)
    try:
        TestFX().getitem_inner()
    finally:
        # Our entry was appended last, so pop() removes exactly it.
        _wrapped_methods_to_patch.pop()
class TestOperatorSignatures(JitTestCase):
    """Checks that FX can retrieve schemas for torch ops and that every OpInfo
    sample input binds to one of the returned overload signatures."""
    @onlyCPU
    @ops(op_db, allowed_dtypes=(torch.float,))
    def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
        """Every sample input must bind to some overload schema; ops for which
        that is impossible must be listed in `known_no_schema`."""
        # Sorted and one entry on each line to minimize merge conflicts.
        known_no_schema = {'cdist',
                           'contiguous',
                           'dstack',
                           'einsum',
                           'expand',
                           'expand_as',
                           'fill_',
                           'hstack',
                           'linalg.multi_dot',
                           'lu',
                           'norm',
                           'polygamma',
                           'special.polygamma',
                           'repeat',
                           'reshape_as',
                           'resize_',
                           'resize_as_',
                           'special.zeta',
                           'stack',
                           'to_sparse',
                           'view',
                           'view_as',
                           'nn.functional.hardshrink',
                           'vstack',
                           'where',
                           'zero_',
                           '__getitem__',
                           '__radd__',
                           '__rsub__',
                           '__rmul__',
                           '__rdiv__',
                           '__rmod__',
                           '__rpow__',
                           '__rand__',
                           '__ror__',
                           '__rxor__',
                           '__rmatmul__'}
        try:
            sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
            schemas = get_signature_for_torch_op(op.op)
            if not schemas:
                raise RuntimeError('No Schemas Returned')
            for sample_input in sample_inputs_itr:
                # Iterate through overloads until we hit a match. If we exit this
                # loop via `else`, we haven't found a match
                for schema in schemas:
                    try:
                        bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
                        bound_args.apply_defaults()
                        op(*bound_args.args, **bound_args.kwargs)
                        break
                    except TypeError:
                        # Fix: the exception was previously bound to an unused
                        # name; a bind failure just means "try the next overload".
                        pass
                else:
                    raise RuntimeError(f'Did not match any schemas for op {op.name}!')
        except Exception:
            # Fix: unused `as e` binding removed. Any failure is acceptable
            # only for ops known to lack usable schemas.
            assert op.name in known_no_schema or "nn.functional" in op.name
class TestFXAPIBackwardCompatibility(JitTestCase):
    """Guards the backward-compatibility surface of torch.fx by serializing
    function signatures and class member lists and comparing them against
    checked-in expect files."""
    def setUp(self):
        # Show full diffs when an expect-file comparison fails.
        self.maxDiff = None

    def _fn_to_stable_annotation_str(self, obj):
        """
        Unfortunately we have to serialize function signatures manually since
        serialization for `inspect.Signature` objects is not stable across
        python versions
        """
        fn_name = torch.typename(obj)
        signature = inspect.signature(obj)
        sig_str = f'{fn_name}{signature}'
        arg_strs = []
        for k, v in signature.parameters.items():
            maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\
                if v.annotation is not inspect.Signature.empty else ''

            def default_val_str(val):
                # Recursively render container defaults with stable element reprs.
                if isinstance(val, (tuple, list)):
                    str_pieces = ['(' if isinstance(val, tuple) else '[']
                    str_pieces.append(', '.join(default_val_str(v) for v in val))
                    if isinstance(val, tuple) and len(str_pieces) == 2:
                        # Single-element tuples need a trailing comma.
                        str_pieces.append(',')
                    str_pieces.append(')' if isinstance(val, tuple) else ']')
                    return ''.join(str_pieces)
                # Need to fix up some default value strings.
                # First case: modules. Default module `repr` contains the FS path of the module.
                # Don't leak that
                if isinstance(val, types.ModuleType):
                    return f'<module {val.__name__}>'
                # Second case: callables. Callables (such as lambdas) encode their address in
                # their string repr. Don't do that
                if callable(val):
                    return f'<function {val.__name__}>'
                return str(val)

            if v.default is not inspect.Signature.empty:
                # NOTE: rebinding `default_val_str` to the rendered string here
                # shadows the helper for the rest of this iteration.
                default_val_str = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'"
                maybe_default = f' = {default_val_str}'
            else:
                maybe_default = ''
            maybe_stars = ''
            if v.kind == inspect.Parameter.VAR_POSITIONAL:
                maybe_stars = '*'
            elif v.kind == inspect.Parameter.VAR_KEYWORD:
                maybe_stars = '**'
            arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}')
        return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\
            if signature.return_annotation is not inspect.Signature.empty else ''
        return f'{fn_name}({", ".join(arg_strs)}){return_annot}'

    def _annotation_type_to_stable_str(self, t, sig_str):
        """Render the annotation `t` as a version-stable string; `sig_str` is
        used only in the error message for unrecognized types."""
        if t is inspect.Signature.empty:
            return ''
        # Forward ref
        if isinstance(t, str):
            return f"'{t}'"
        if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef):
            return t.__forward_arg__
        if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef):
            return t.__forward_arg__
        # Plain types with a fixed, well-known stable spelling.
        trivial_mappings = {
            str : 'str',
            int : 'int',
            float: 'float',
            bool: 'bool',
            torch.dtype: 'torch.dtype',
            torch.Tensor: 'torch.Tensor',
            torch.device: 'torch.device',
            torch.memory_format: 'torch.memory_format',
            slice: 'slice',
            torch.nn.Module: 'torch.nn.modules.module.Module',
            torch.fx.Graph : 'torch.fx.graph.Graph',
            torch.fx.Node : 'torch.fx.node.Node',
            torch.fx.Proxy : 'torch.fx.proxy.Proxy',
            torch.fx.node.Target : 'torch.fx.node.Target',
            torch.fx.node.Argument : 'torch.fx.node.Argument',
            torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode',
            torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule',
            torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match',
            Ellipsis : '...',
            typing.Any: 'Any',
            type(None): 'NoneType',
            None: 'None',
            typing.Iterator: 'Iterator',
        }
        mapping = trivial_mappings.get(t, None)
        if mapping:
            return mapping
        # Handle types with contained types
        contained = getattr(t, '__args__', None) or []
        # Callables contain a bare List for arguments
        contained = t if isinstance(t, list) else contained
        # Python 3.8 puts type vars into __args__ for unbound types such as Dict
        if all(isinstance(ct, typing.TypeVar) for ct in contained):
            contained = []
        contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained]
        contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else ''
        origin = getattr(t, '__origin__', None)
        if origin is None:
            # Unbound types don't have `__origin__` in some Python versions, so fix that up here.
            origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin
        if origin in {tuple, typing.Tuple}:
            return f'Tuple{contained_type_str}'
        if origin in {typing.Union}:
            # Annoying hack to detect Optional
            if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)):
                not_none_param = contained[0] if contained[0] is not type(None) else contained[1]
                return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]'
            return f'Union{contained_type_str}'
        if origin in {dict, typing.Dict}:
            return f'Dict{contained_type_str}'
        if origin in {list, typing.List}:
            return f'List{contained_type_str}'
        if origin in {type, typing.Type}:
            return f'Type{contained_type_str}'
        if isinstance(t, typing.Callable):
            if len(contained) > 0 and contained[0] is not Ellipsis:
                return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]'
            else:
                return f'Callable{contained_type_str}'
        raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}.'
                           f'Please add support for this type and confirm with the '
                           f'FX team that your signature change is valid.')

    def test_function_back_compat(self):
        """
        Test backward compatibility for function signatures with
        @compatibility(is_backward_compatible=True). Currently this checks for
        exact signature matches, which may lead to false positives. If this
        becomes too annoying, we can refine this check to actually parse out
        the saved schema strings and check if the change is truly backward-
        incompatible.
        """
        signature_strs = []
        for obj in _BACK_COMPAT_OBJECTS:
            # Classes are checked separately in test_class_member_back_compat.
            if not isinstance(obj, type):
                signature_strs.append(self._fn_to_stable_annotation_str(obj))
        signature_strs.sort()
        try:
            self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures')
        except AssertionError as e:
            msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \
                  f"as backwards-compatible has experienced a signature change. See the " \
                  f"above exception context for more information. If this change was " \
                  f"unintended, please revert it. If it was intended, check with the FX " \
                  f"team to ensure that the proper deprecation protocols have been followed " \
                  f"and subsequently --accept the change."
            raise AssertionError(msg)

    def test_class_member_back_compat(self):
        """
        Test backward compatibility for members of classes with
        @compatibility(is_backward_compatible=True). Currently this checks for
        exact matches on the publicly visible members of the class.
        """
        class_method_strs = []
        for obj in _BACK_COMPAT_OBJECTS:
            if isinstance(obj, type):
                public_members = [name for name in obj.__dict__ if not name.startswith('_')]
                class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}')
        class_method_strs.sort()
        try:
            self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members')
        except AssertionError as e:
            msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \
                  f"as backwards-compatible has experienced change in its public members. See the " \
                  f"above exception context for more information. If this change was " \
                  f"unintended, please revert it. If it was intended, check with the FX " \
                  f"team to ensure that the proper deprecation protocols have been followed " \
                  f"and subsequently --accept the change."
            raise AssertionError(msg)

    def test_public_api_surface(self):
        """Every public symbol under torch.fx (excluding experimental) must
        carry a backwards-compatibility classification."""
        mod = torch.fx
        non_back_compat_objects = {}

        def check_symbols_have_bc_designation(m, prefix):
            if not m.__name__.startswith('torch.fx'):
                return
            if m.__name__.startswith('torch.fx.experimental'):
                return
            for k, v in m.__dict__.items():
                # Skip self-references to avoid infinite recursion.
                if v is m:
                    continue
                if k.startswith('_'):
                    continue
                if isinstance(v, types.ModuleType):
                    check_symbols_have_bc_designation(v, prefix + [k])
                elif isinstance(v, type) or isinstance(v, types.FunctionType):
                    if v not in _MARKED_WITH_COMATIBLITY:
                        non_back_compat_objects.setdefault(v)

        check_symbols_have_bc_designation(mod, ['torch', 'fx'])
        non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()]
        # Only want objects in torch.fx
        non_back_compat_strs = [
            s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')]
        # Only want objects in public namespaces
        non_back_compat_strs = [
            s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))]
        non_back_compat_strs.sort()
        if len(non_back_compat_strs) != 0:
            raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a "
                                 f"backwards-compatibility classification! Please decorate these "
                                 f"API(s) with `@torch.fx._compatibility.compatibility` to specify "
                                 f"BC guarantees.")
class TestFunctionalTracing(JitTestCase):
    """Auto-generated tests: symbolically trace every torch.nn.functional
    entry point, with expected-error tables for the ones that cannot be
    traced. Tests are attached by generate_tests()."""
    # Supporting functions that should not get a generated test.
    IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
                    "has_torch_function_variadic", "handle_torch_function",
                    "boolean_dispatch")
    # Filled with the real functions in setUpClass and restored in tearDownClass.
    TO_PATCH = {"has_torch_function": None,
                "has_torch_function_unary": None,
                "has_torch_function_variadic": None}
    # (expected exception type, message regex) pairs used in the tables below.
    BUILT_IN_FUNC = (AssertionError, "")
    PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
    PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
    LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
    ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
    CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
    INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
    # Maps functional name -> the error symbolic_trace is expected to raise.
    UNTRACEABLE_FUNCTIONALS = {
        "adaptive_avg_pool1d": BUILT_IN_FUNC,
        "avg_pool1d": BUILT_IN_FUNC,
        "avg_pool2d": BUILT_IN_FUNC,
        "avg_pool3d": BUILT_IN_FUNC,
        "celu_": BUILT_IN_FUNC,
        "channel_shuffle": BUILT_IN_FUNC,
        "conv1d": BUILT_IN_FUNC,
        "conv2d": BUILT_IN_FUNC,
        "conv3d": BUILT_IN_FUNC,
        "conv_tbc": BUILT_IN_FUNC,
        "conv_transpose1d": BUILT_IN_FUNC,
        "conv_transpose2d": BUILT_IN_FUNC,
        "conv_transpose3d": BUILT_IN_FUNC,
        "cosine_similarity": BUILT_IN_FUNC,
        "elu_": BUILT_IN_FUNC,
        "hardtanh_": BUILT_IN_FUNC,
        "leaky_relu_": BUILT_IN_FUNC,
        "logsigmoid": BUILT_IN_FUNC,
        "one_hot": BUILT_IN_FUNC,
        "pdist": BUILT_IN_FUNC,
        "pixel_shuffle": BUILT_IN_FUNC,
        "pixel_unshuffle": BUILT_IN_FUNC,
        "relu_": BUILT_IN_FUNC,
        "rrelu_": BUILT_IN_FUNC,
        "selu_": BUILT_IN_FUNC,
        "softplus": BUILT_IN_FUNC,
        "softshrink": BUILT_IN_FUNC,
        "threshold_": BUILT_IN_FUNC,
        "adaptive_avg_pool2d": LEN_ERROR,
        "adaptive_avg_pool3d": LEN_ERROR,
        "adaptive_max_pool2d_with_indices": LEN_ERROR,
        "adaptive_max_pool3d_with_indices": LEN_ERROR,
        "instance_norm": CONTROL_FLOW,
        "pad": LEN_ERROR,
        "adaptive_max_pool1d": PROXY_ITERABLE,
        "adaptive_max_pool2d": PROXY_ITERABLE,
        "adaptive_max_pool3d": PROXY_ITERABLE,
        "fractional_max_pool2d": PROXY_ITERABLE,
        "fractional_max_pool3d": PROXY_ITERABLE,
        "max_pool1d": PROXY_ITERABLE,
        "max_pool2d": PROXY_ITERABLE,
        "max_pool3d": PROXY_ITERABLE,
        "group_norm": PROXY_ITERATED,
        "lp_pool2d": PROXY_ITERATED,
        "max_unpool1d": PROXY_ITERATED,
        "max_unpool2d": PROXY_ITERATED,
        "max_unpool3d": PROXY_ITERATED,
        "adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
        "fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
        "fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
        "hardshrink": ARG_TYPE_MISMATCH,
        "layer_norm": ARG_TYPE_MISMATCH,
        "lp_pool1d": ARG_TYPE_MISMATCH,
        "max_pool1d_with_indices": ARG_TYPE_MISMATCH,
        "max_pool2d_with_indices": ARG_TYPE_MISMATCH,
        "max_pool3d_with_indices": ARG_TYPE_MISMATCH,
        "pairwise_distance": ARG_TYPE_MISMATCH,
        "affine_grid": CONTROL_FLOW,
        "alpha_dropout": CONTROL_FLOW,
        "batch_norm": CONTROL_FLOW,
        "binary_cross_entropy": CONTROL_FLOW,
        "binary_cross_entropy_with_logits": CONTROL_FLOW,
        "celu": CONTROL_FLOW,
        "cosine_embedding_loss": CONTROL_FLOW,
        "cross_entropy": CONTROL_FLOW,
        "ctc_loss": CONTROL_FLOW,
        "dropout": CONTROL_FLOW,
        "dropout2d": CONTROL_FLOW,
        "dropout3d": CONTROL_FLOW,
        "elu": CONTROL_FLOW,
        "embedding": CONTROL_FLOW,
        "embedding_bag": CONTROL_FLOW,
        "feature_alpha_dropout": CONTROL_FLOW,
        "fold": CONTROL_FLOW,
        "gaussian_nll_loss": CONTROL_FLOW,
        "glu": CONTROL_FLOW,
        "grid_sample": CONTROL_FLOW,
        "gumbel_softmax": CONTROL_FLOW,
        "hardsigmoid": CONTROL_FLOW,
        "hardswish": CONTROL_FLOW,
        "hardtanh": CONTROL_FLOW,
        "hinge_embedding_loss": CONTROL_FLOW,
        "huber_loss": CONTROL_FLOW,
        "interpolate": CONTROL_FLOW,
        "kl_div": CONTROL_FLOW,
        "l1_loss": CONTROL_FLOW,
        "leaky_relu": CONTROL_FLOW,
        "local_response_norm": CONTROL_FLOW,
        "margin_ranking_loss": CONTROL_FLOW,
        "mse_loss": CONTROL_FLOW,
        "multi_head_attention_forward": CONTROL_FLOW,
        "multi_margin_loss": CONTROL_FLOW,
        "multilabel_margin_loss": CONTROL_FLOW,
        "multilabel_soft_margin_loss": CONTROL_FLOW,
        "nll_loss": CONTROL_FLOW,
        "poisson_nll_loss": CONTROL_FLOW,
        "relu": CONTROL_FLOW,
        "relu6": CONTROL_FLOW,
        "rrelu": CONTROL_FLOW,
        "selu": CONTROL_FLOW,
        "silu": CONTROL_FLOW,
        "mish": CONTROL_FLOW,
        "smooth_l1_loss": CONTROL_FLOW,
        "soft_margin_loss": CONTROL_FLOW,
        "threshold": CONTROL_FLOW,
        "triplet_margin_loss": CONTROL_FLOW,
        "triplet_margin_with_distance_loss": CONTROL_FLOW,
        "unfold": CONTROL_FLOW,
        "upsample": CONTROL_FLOW,
        "upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
        "upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
    }
    # List of nn.functionals with Tensor inputs but not with type annotation
    FUNCTIONALS_WITHOUT_ANNOTATION = (
        "adaptive_max_pool1d",
        "adaptive_max_pool2d",
        "adaptive_max_pool3d",
        "fractional_max_pool2d",
        "fractional_max_pool3d",
        "max_pool1d",
        "max_pool2d",
        "max_pool3d",
        "gaussian_nll_loss",
        "upsample",
        "upsample_bilinear",
        "upsample_nearest",
    )
    # Inconsistent behavior between Python 3.8 and other Python versions:
    # - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
    # - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
    #                 internal exception above
    # Use the following map to override the expected exception for Python 3.8
    UNTRACEABLE_FUNCTIONALS_PY38 = {
        "adaptive_max_pool1d": PROXY_ITERATED,
        "adaptive_max_pool2d": PROXY_ITERATED,
        "adaptive_max_pool3d": PROXY_ITERATED,
        "fractional_max_pool2d": PROXY_ITERATED,
        "fractional_max_pool3d": PROXY_ITERATED,
        "max_pool1d": PROXY_ITERATED,
        "max_pool2d": PROXY_ITERATED,
        "max_pool3d": PROXY_ITERATED,
        "group_norm": LEN_ERROR
    }

    @classmethod
    def _get_functional(cls):
        """Collect (name, fn) pairs from torch.nn.functional worth testing."""
        functional_list = []
        for f in dir(torch.nn.functional):
            if not f.islower():
                continue
            # Ignore internal functions
            if f.startswith('_'):
                continue
            # Ignore supporting functions
            if f in cls.IGNORE_FUNCS:
                continue
            fn = getattr(torch.nn.functional, f)
            # Ignore non-callable object like modules
            if not isinstance(fn, Callable):
                continue
            if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
                # Keep only functions that take at least one annotated Tensor arg.
                try:
                    sig = inspect.signature(fn)
                    has_tensor_arg = False
                    for arg, param in sig.parameters.items():
                        if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
                            has_tensor_arg = True
                    if not has_tensor_arg:
                        continue
                # No signature or Object is not supported
                except ValueError:
                    pass
            functional_list.append((f, fn))
        return functional_list

    @classmethod
    def generate_test_func(cls, func_name, fn):
        """Build one test function for `fn`, expecting either a clean trace or
        the error recorded in the tables above."""
        def functional_test(self):
            if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
                    sys.version_info >= (3, 8) and sys.version_info < (3, 10):
                exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
                with self.assertRaisesRegex(exc, err):
                    symbolic_trace(fn)
            elif func_name in self.UNTRACEABLE_FUNCTIONALS:
                exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
                with self.assertRaisesRegex(exc, err):
                    symbolic_trace(fn)
            else:
                symbolic_trace(fn)
        return functional_test

    @classmethod
    def generate_tests(cls):
        """Attach one generated test method per collected functional."""
        functional_list = cls._get_functional()
        for func_name, fn in functional_list:
            test_name = "test_nn_functional_" + func_name
            functional_test = cls.generate_test_func(func_name, fn)
            setattr(cls, test_name, functional_test)

    @classmethod
    def setUpClass(cls):
        # Stub out the has_torch_function* helpers so tracing does not take
        # the torch-function dispatch path; originals are saved in TO_PATCH.
        def no(*args, **kwargs):
            return False
        for name in cls.TO_PATCH.keys():
            cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
            setattr(torch.nn.functional, name, no)

    @classmethod
    def tearDownClass(cls):
        # Restore the patched helpers.
        for name in cls.TO_PATCH.keys():
            setattr(torch.nn.functional, name, cls.TO_PATCH[name])
# Attach one generated test method per torch.nn.functional entry.
TestFunctionalTracing.generate_tests()
# Instantiate per-device variants of the operator-signature tests.
instantiate_device_type_tests(TestOperatorSignatures, globals())
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
    """Symbolically trace and script torchvision models, with expected-failure
    tables for models that cannot be traced or scripted. Tests are generated
    by generate_tests()."""
    # (expected exception type, message regex) pairs.
    PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
    INCONSISTENT_TYPE = (
        RuntimeError,
        r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
    )
    # Detection models iterate over Proxy objects during tracing.
    UNTRACEABLE_MODELS = {
        "fasterrcnn_resnet50_fpn": PROXY_ITERATED,
        "fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
        "fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
        "maskrcnn_resnet50_fpn": PROXY_ITERATED,
        "keypointrcnn_resnet50_fpn": PROXY_ITERATED,
        "retinanet_resnet50_fpn": PROXY_ITERATED,
    }
    UNSCRIPTABLE_MODELS = {
        "googlenet": INCONSISTENT_TYPE,
        "inception_v3": INCONSISTENT_TYPE,
    }
    # Maps model name -> callable extracting the comparable tensor output.
    output_transform = {
        "fcn_resnet50": lambda x: x["out"],
        "fcn_resnet101": lambda x: x["out"],
        "deeplabv3_resnet50": lambda x: x["out"],
        "deeplabv3_resnet101": lambda x: x["out"],
        "deeplabv3_mobilenet_v3_large": lambda x: x["out"],
        "lraspp_mobilenet_v3_large": lambda x: x["out"],
        "fasterrcnn_resnet50_fpn": lambda x: x[1],
        "fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
        "fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
        "maskrcnn_resnet50_fpn": lambda x: x[1],
        "keypointrcnn_resnet50_fpn": lambda x: x[1],
        "retinanet_resnet50_fpn": lambda x: x[1],
    }

    @classmethod
    def generate_test_fn(cls, name, model_fn, x, kwargs):
        """Build one test: trace (and script) `model_fn(**kwargs)` on input `x`,
        comparing outputs, or assert the expected failure."""
        def run_test(self):
            model = model_fn(**kwargs)
            model = model.eval()
            if name in self.UNTRACEABLE_MODELS:
                # Fix: the table holds (exception_type, regex); the original
                # unpacked it under swapped names (err, exc).
                exc_type, err_regex = self.UNTRACEABLE_MODELS[name]
                with self.assertRaisesRegex(exc_type, err_regex):
                    graph = symbolic_trace(model)
            else:
                out_transform = self.output_transform.get(name, lambda x: x)
                graph : torch.fx.GraphModule = symbolic_trace(model)
                # Traced model must agree with the eager model.
                a = out_transform(model(x))
                b = out_transform(graph(x))
                self.assertEqual(a, b)
                if name in self.UNSCRIPTABLE_MODELS:
                    exc_type, err_regex = self.UNSCRIPTABLE_MODELS[name]
                    with self.assertRaisesRegex(exc_type, err_regex):
                        script = torch.jit.script(graph)
                else:
                    # Scripted traced model must also agree.
                    script = torch.jit.script(graph)
                    c = out_transform(script(x))
                    self.assertEqual(a, c)
        return run_test

    @classmethod
    def generate_classification_tests(cls):
        for k, v in torchvision_models.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_' + k
                # inception_v3 requires a larger minimum input size.
                x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
                kwargs = dict(num_classes=50)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)

    @classmethod
    def generate_segmentation_tests(cls):
        for k, v in torchvision_models.segmentation.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_segmentation_' + k
                x = torch.rand(1, 3, 32, 32)
                kwargs = dict(num_classes=10, pretrained_backbone=False)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)

    @classmethod
    def generate_detection_tests(cls):
        for k, v in torchvision_models.detection.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_detection_' + k
                x = [torch.rand(3, 300, 300)]
                kwargs = dict(num_classes=10, pretrained_backbone=False)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)

    @classmethod
    def generate_video_tests(cls):
        for k, v in torchvision_models.video.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_video_' + k
                x = torch.rand(1, 3, 4, 112, 112)
                kwargs = dict(num_classes=50)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)

    @classmethod
    def generate_tests(cls):
        cls.generate_classification_tests()
        cls.generate_detection_tests()
        cls.generate_segmentation_tests()
        cls.generate_video_tests()
# Vision-model tests only exist when torchvision is installed.
if HAS_TORCHVISION:
    TestVisionTracing.generate_tests()
# Standard PyTorch test-suite entry point.
if __name__ == '__main__':
    run_tests()
|
braket_container.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import errno
import importlib
import os
import json
import shutil
import subprocess
import sys
import multiprocessing
from pathlib import Path
from urllib.parse import urlparse
from typing import Tuple
import boto3
# SageMaker-style mount point where the backend places job inputs/outputs.
OPT_ML = os.path.join("/opt", "ml")
# Braket-facing alias for OPT_ML (created as a symlink at startup).
OPT_BRAKET = os.path.join("/opt", "braket")
# Where the customer's code lives: the original download and its extraction.
CUSTOMER_CODE_PATH = os.path.join(OPT_BRAKET, "code", "customer_code")
ORIGINAL_CUSTOMER_CODE_PATH = os.path.join(CUSTOMER_CODE_PATH, "original")
EXTRACTED_CUSTOMER_CODE_PATH = os.path.join(CUSTOMER_CODE_PATH, "extracted")
# Failure text written under ERROR_LOG_FILE is surfaced by the backend in the
# job's failure message.
ERROR_LOG_PATH = os.path.join(OPT_ML, "output")
ERROR_LOG_FILE = os.path.join(ERROR_LOG_PATH, "failure")
SETUP_SCRIPT_PATH = os.path.join(OPT_BRAKET, "additional_setup")
print("Boto3 Version: ", boto3.__version__)
def log_failure_and_exit(*args):
    """
    Log failures to a file so that it can be parsed by the backend service and included in
    failure messages for a job. Exits with code 0.
    Args:
        args: variable list of text to write to the file.
    """
    Path(ERROR_LOG_PATH).mkdir(parents=True, exist_ok=True)
    with open(ERROR_LOG_FILE, 'a') as error_log:
        for text in args:
            # Entries are written without separators; callers embed their own
            # newlines when needed.
            error_log.write(text)
            print(text)
    # Exit 0 so the container itself is not reported as crashed; the failure
    # file carries the error message to the backend.
    sys.exit(0)
def create_paths():
    """
    These paths are created early on so that the rest of the code can assume that the directories
    are available when needed.
    """
    for required_dir in (
        CUSTOMER_CODE_PATH,
        ORIGINAL_CUSTOMER_CODE_PATH,
        EXTRACTED_CUSTOMER_CODE_PATH,
        SETUP_SCRIPT_PATH,
    ):
        Path(required_dir).mkdir(parents=True, exist_ok=True)
def create_symlink():
    """
    The ML paths are inserted by the backend service by default. To prevent confusion we link
    the Braket paths to it (to unify them), and use the Braket paths from now on.
    """
    try:
        os.symlink(OPT_ML, OPT_BRAKET)
    except OSError as e:
        # An already-existing link (EEXIST) is fine, e.g. on container restart;
        # anything else is fatal for the job.
        if e.errno != errno.EEXIST:
            print(f"Got unexpected exception: {e}")
            log_failure_and_exit(f"Symlink failure.\n Exception: {e}")
def download_s3_file(s3_uri : str, local_path : str) -> str:
    """
    Downloads a file to a local path.
    Args:
        s3_uri (str): the S3 URI to get the file from.
        local_path (str) : the local path to download to
    Returns:
        str: the path to the file containing the downloaded path.
    """
    parsed = urlparse(s3_uri, allow_fragments=False)
    bucket, key = parsed.netloc, parsed.path.lstrip("/")
    # Keep the original S3 object name in the local directory.
    destination = os.path.join(local_path, os.path.basename(key))
    boto3.client("s3").download_file(bucket, key, destination)
    return destination
def download_customer_code(s3_uri : str) -> str:
    """
    Downloads the customer code to the original customer path. The code is assumed to be a single
    file in S3. The file may be a compressed archive containing all the customer code.
    Args:
        s3_uri (str): the S3 URI to get the code from.
    Returns:
        str: the path to the file containing the code.
    """
    try:
        downloaded_path = download_s3_file(s3_uri, ORIGINAL_CUSTOMER_CODE_PATH)
    except Exception as e:
        # log_failure_and_exit terminates the process, so no return is needed here.
        log_failure_and_exit(f"Unable to download code.\nException: {e}")
    else:
        return downloaded_path
def unpack_code_and_add_to_path(local_s3_file : str, compression_type : str):
    """
    Unpack the customer code, if necessary. Add the customer code to the system path.
    Args:
        local_s3_file (str): the file representing the customer code.
        compression_type (str): if the customer code is stored in an archive, this value will
            represent the compression type of the archive.
    """
    normalized_type = compression_type.strip().lower() if compression_type else ""
    if normalized_type in ("gzip", "zip"):
        try:
            shutil.unpack_archive(local_s3_file, EXTRACTED_CUSTOMER_CODE_PATH)
        except Exception as e:
            log_failure_and_exit(
                f"Got an exception while trying to unpack archive: {local_s3_file} of type: "
                f"{compression_type}.\nException: {e}"
            )
    else:
        # Plain (uncompressed) file: just move it into the extracted directory.
        shutil.move(local_s3_file, EXTRACTED_CUSTOMER_CODE_PATH)
    sys.path.append(EXTRACTED_CUSTOMER_CODE_PATH)
def kick_off_customer_script(entry_point : str) -> multiprocessing.Process:
    """
    Runs the customer script as a separate process.
    Args:
        entry_point (str): the entry point to the customer code, represented as <module>:<method>.
    Returns:
        Process: the process handle to the running process.
    """
    try:
        module_name, _, method_name = entry_point.partition(":")
        # Resolve "<module>:<method>" to a callable, then run it in a child process.
        target = getattr(importlib.import_module(module_name), method_name)
        customer_code_process = multiprocessing.Process(target=target)
        customer_code_process.start()
    except Exception as e:
        log_failure_and_exit(f"Unable to run job at entry point {entry_point}\nException: {e}")
    return customer_code_process
def join_customer_script(customer_code_process : multiprocessing.Process):
    """
    Joins the process running the customer code.
    Args:
        customer_code_process (Process): the process running the customer code.
    """
    try:
        customer_code_process.join()
    except Exception as e:
        # A failed join means we cannot tell how the customer code ended.
        log_failure_and_exit(f"Job did not exit gracefully.\nException: {e}")
def get_code_setup_parameters() -> Tuple[str, str, str]:
    """
    Returns the code setup parameters:
        s3_uri: the S3 location where the code is stored.
        entry_point: the entrypoint into the code.
        compression_type: the compression used to archive the code (optional)
    These values are stored in environment variables, however, we also allow the storing of
    these values in the hyperparameters to facilitate testing in local mode.
    If the s3_uri or entry_point can not be found, the script will exit with an error.
    Returns:
        str, str, str: the code setup parameters as described above.
    """
    s3_uri = os.getenv('AMZN_BRAKET_SCRIPT_S3_URI')
    entry_point = os.getenv('AMZN_BRAKET_SCRIPT_ENTRY_POINT')
    compression_type = os.getenv('AMZN_BRAKET_SCRIPT_COMPRESSION_TYPE')
    if s3_uri and entry_point:
        return s3_uri, entry_point, compression_type
    # Local-mode fallback: the same values may arrive as SageMaker hyperparameters.
    hyperparameters_env = os.getenv('SM_HPS')
    if hyperparameters_env:
        try:
            hyperparameters = json.loads(hyperparameters_env)
            s3_uri = s3_uri or hyperparameters.get("AMZN_BRAKET_SCRIPT_S3_URI")
            entry_point = entry_point or hyperparameters.get("AMZN_BRAKET_SCRIPT_ENTRY_POINT")
            compression_type = compression_type or hyperparameters.get(
                "AMZN_BRAKET_SCRIPT_COMPRESSION_TYPE"
            )
        except Exception as e:
            # Bug fix: the previous message ("Hyperparameters not specified in
            # env") was misleading and dropped the exception — this branch only
            # runs when SM_HPS *is* set but cannot be parsed.
            log_failure_and_exit(f"Unable to parse hyperparameters.\nException: {e}")
    if not s3_uri:
        log_failure_and_exit("No customer script specified")
    if not entry_point:
        log_failure_and_exit("No customer entry point specified")
    return s3_uri, entry_point, compression_type
def run_customer_code_as_process(entry_point : str) -> int:
    """
    When provided the name of the package and the method to run, we run them as a process.
    Args:
        entry_point (str): the code to run in the format <package>:<method>.
    Returns:
        int: The exit code of the customer code run.
    """
    print("Running Code As Process")
    process = kick_off_customer_script(entry_point)
    join_customer_script(process)
    print("Code Run Finished")
    # exitcode is populated once the child has been joined.
    return process.exitcode
def run_customer_code_as_subprocess(entry_point : str) -> int:
    """
    When provided just the name of the module to run, we run it as a subprocess.
    Args:
        entry_point (str): the name of the module to run.
    Returns:
        int: The exit code of the customer code run.
    """
    print("Running Code As Subprocess")
    try:
        # Bug fix: use the interpreter running this container script rather than
        # whatever "python" resolves to on PATH, so the customer module sees the
        # same environment and installed packages.
        result = subprocess.run(
            [sys.executable, "-m", entry_point], cwd=EXTRACTED_CUSTOMER_CODE_PATH
        )
    except Exception as e:
        log_failure_and_exit(f"Unable to run job at entry point {entry_point}\nException: {e}")
    print("Code Run Finished")
    return result.returncode
def run_customer_code() -> None:
    """
    Downloads and runs the customer code. If the customer code exits with a
    non-zero exit code, this function will log a failure and exit.
    """
    s3_uri, entry_point, compression_type = get_code_setup_parameters()
    unpack_code_and_add_to_path(download_customer_code(s3_uri), compression_type)
    # "<module>:<method>" entry points run in-process; bare module names run
    # via "python -m" in a subprocess.
    runner = (
        run_customer_code_as_process if ":" in entry_point else run_customer_code_as_subprocess
    )
    exit_code = runner(entry_point)
    if exit_code != 0:
        log_failure_and_exit(f"Job at {entry_point} exited with exit code: {exit_code}")
def setup_and_run():
    """
    This method sets up the Braket container, then downloads and runs the customer code.
    """
    print("Beginning Setup")
    # The /opt/braket symlink must exist before create_paths() so the
    # directories are created through the alias into /opt/ml.
    create_symlink()
    create_paths()
    run_customer_code()
if __name__ == "__main__":
    setup_and_run()
|
InMemoryCluster.py | # Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import Queue
import time
import threading
import uuid
import ufora.config.Setup as Setup
import ufora.cumulus.distributed.CumulusActiveMachines as CumulusActiveMachines
import ufora.cumulus.distributed.CumulusGatewayRemote as CumulusGatewayRemote
import ufora.cumulus.distributed.CumulusService as CumulusService
import ufora.distributed.SharedState.Connections.InMemoryChannelFactory \
as InMemorySharedStateChannelFactory
import ufora.distributed.SharedState.Connections.ViewFactory as ViewFactory
import ufora.distributed.SharedState.SharedStateService as SharedStateService
import ufora.networking.MultiChannelListener as MultiChannelListener
import ufora.native.StringChannel as StringChannelNative
import ufora.util.ManagedThread as ManagedThread
import ufora.FORA.VectorDataManager.VectorDataManager as VectorDataManager
import ufora.native.CallbackScheduler as CallbackScheduler
import ufora.native.Cumulus as CumulusNative
# Default resource limits for simulated cluster workers.
DEFAULT_MAX_RAM_CACHE_SIZE = 125 * 1024 * 1024
DEFAULT_VECTOR_RAM_CACHE_SIZE = 100 * 1024 * 1024
DEFAULT_PER_MACHINE_THROUGHPUT = 200 * 1024 * 1024
DEFAULT_THREAD_COUNT = 2
class InMemoryChannelListener(object):
    """
    In-process substitute for a socket listener: registers with an
    InMemoryChannelManager under (address, port) and forwards each incoming
    channel to the registered connection callback.
    """
    def __init__(self, address, port, channelManager):
        self.address = address
        self.port = port
        self.channelManager = channelManager
        self.connectionCallback = None
        # Set once start() has registered this listener with the manager.
        self.isStarted = threading.Event()
    def __str__(self):
        return "InMemoryChannelListener(%s, %d)" % (self.address, self.port)
    def start(self):
        logging.info("Registering channel listener on %s:%d", self.address, self.port)
        self.channelManager.registerChannelListener(self.address,
                                                    self.port,
                                                    self.onChannelConnected)
        self.isStarted.set()
    def stop(self):
        self.channelManager.unregisterChannelListener(self.address, self.port)
    def blockUntilReady(self):
        # Blocks the caller until start() has completed registration.
        t0 = time.time()
        self.isStarted.wait()
        logging.info("Took %s to blockUntilReady", time.time() - t0)
    def registerConnectCallback(self, callback):
        self.connectionCallback = callback
    def onChannelConnected(self, channel):
        # Invoked by the channel manager for every new in-memory connection.
        if self.connectionCallback is not None:
            self.connectionCallback(channel)
class InMemoryChannelManager(object):
    """
    Routes in-memory channels between simulated machines.

    Listeners are keyed by a normalized (str(address), int(port)) tuple; each
    listener gets its own rate-limited channel group so per-machine network
    throughput can be simulated.
    """
    def __init__(self, callbackScheduler, throughput):
        self.lock = threading.Lock()
        self.listeners = {}
        self.rateLimitedChannelGroupsForEachListener = {}
        self.callbackScheduler = callbackScheduler
        self.perMachineThroughput = throughput
    @staticmethod
    def _key(address, port):
        # Normalize once so register/unregister/connect all agree on the key.
        return (str(address), int(port))
    def registerChannelListener(self, address, port, connectionCallback):
        with self.lock:
            key = self._key(address, port)
            # Bug fix: the original asserted on the raw (address, port) pair
            # but stored the normalized key, so a duplicate registration with
            # differently-typed arguments could slip past the assert.
            assert key not in self.listeners
            self.listeners[key] = connectionCallback
            self.rateLimitedChannelGroupsForEachListener[key] = (
                StringChannelNative.createRateLimitedStringChannelGroup(
                    self.callbackScheduler,
                    self.perMachineThroughput
                )
            )
    def unregisterChannelListener(self, address, port):
        with self.lock:
            # Bug fix: look up the normalized key (registration stores the
            # normalized form, so raw-tuple deletion could miss entries).
            key = self._key(address, port)
            if key in self.listeners:
                del self.listeners[key]
                del self.rateLimitedChannelGroupsForEachListener[key]
    def createChannelFactory(self):
        return RateLimitedInMemoryCumulusChannelFactory(self)
    def createChannelToEndpoint(self, ipEndpoint):
        with self.lock:
            assert len(ipEndpoint) == 2
            # we expect addresses to be strings and ports to be ints
            ipEndpoint = self._key(ipEndpoint[0], ipEndpoint[1])
            assert ipEndpoint in self.listeners, (
                "No listeners registered on endpoint: %s. Registered listeners: %s" %
                (ipEndpoint, self.listeners)
            )
            clientChannel, serverChannel = StringChannelNative.InMemoryStringChannel(self.callbackScheduler)
            # The server side is throttled by the listener's channel group.
            rateLimitedServerChannel = (
                self.rateLimitedChannelGroupsForEachListener[ipEndpoint].wrap(
                    serverChannel
                )
            )
            self.listeners[ipEndpoint](rateLimitedServerChannel.makeQueuelike(self.callbackScheduler))
            return clientChannel
class RateLimitedInMemoryCumulusChannelFactory(object):
    """
    Channel factory whose outgoing channels share one rate-limited group,
    modelling the bounded network throughput of a single machine.
    """
    def __init__(self, channelManager):
        self.channelManager = channelManager
        self.rateLimitedChannelGroup = (
            StringChannelNative.createRateLimitedStringChannelGroup(
                self.channelManager.callbackScheduler,
                self.channelManager.perMachineThroughput
            )
        )
    def createChannel(self, ipEndpoint):
        # Wrap the raw in-memory channel so sends on this side are throttled too.
        return self.rateLimitedChannelGroup.wrap(
            self.channelManager.createChannelToEndpoint(ipEndpoint)
        )
# Shared-state keepalive ping interval (seconds) for the in-memory cluster.
IN_MEMORY_CLUSTER_SS_PING_INTERVAL = 10.0
class InMemoryClient(object):
    """
    Minimal cluster-client stand-in for the in-memory simulation: records the
    desired worker counts and forwards them to the simulation object.
    """
    def __init__(self, simulation):
        self.clusterName = 'test'
        self.simulation = simulation
        # Random address so each client looks like a distinct machine.
        self.ownAddress = str(uuid.uuid4())
        # Last published desire, keyed by worker type tuple.
        self.desire = {}
    def getClusterName(self):
        return self.clusterName
    def getAssignedMachineName(self):
        return 'machine@%s' % self.ownAddress
    def getWorkerStatusCounts(self):
        # Desired and actual counts are reported as identical in-memory.
        return self.desire, self.desire
    def getOwnAddressInternal(self):
        return self.ownAddress
    def desireNumberOfWorkers(self, numWorkers, blocking = False, timeout = 10.0):
        self.simulation.desireNumCumuli(numWorkers, blocking, timeout)
        # Fixed synthetic worker type; 3600 mirrors a one-hour lease.
        self.desire[('ec2', 'm1.large', 2)] = (numWorkers, 3600)
        logging.info("DESIRING WORKERS %s", numWorkers)
    def desireNumberOfWorkersByType(self, typeDict, blocking = False, timeout = 10.0):
        self.simulation.desireNumCumuli(sum(typeDict.values()), blocking, timeout)
        self.desire = typeDict
        logging.info("InMemoryClient added workers: %s", typeDict)
class InMemoryClusterManagerConnection(object):
    '''
    Used by Mediator to talk to the inMemory cluster
    '''
    def __init__(self, inMemoryCluster):
        self.inMemoryCluster = inMemoryCluster
        self._onClusterResponse = None
        # Per-user desired machine counts; summed on every publish.
        self.desires = {}
        self.callbackQueue = Queue.Queue()
        self.callbackThread = ManagedThread.ManagedThread(target=self.callbackLoop)
    def registerOnClusterResponseCallback(self, callback):
        self._onClusterResponse = callback
    def callbackLoop(self):
        # Drain callbacks until the None sentinel enqueued by stopService().
        callback = self.callbackQueue.get()
        while callback is not None:
            callback()
            time.sleep(.01)
            callback = self.callbackQueue.get()
    def startService(self):
        self.callbackThread.start()
    def stopService(self):
        self.callbackQueue.put(None)
        self.callbackThread.join()
    def publishDesireToClusterManager(self, user, desire):
        self.desires[user] = desire
        machineSum = 0
        # NOTE: itervalues() is Python 2 only. d[0] appears to be the machine
        # count within each desire tuple — confirm against the Mediator caller.
        for user in self.desires:
            machineSum += sum(d[0] for d in self.desires[user].itervalues())
        self.inMemoryCluster.desireNumCumuli(machineSum)
class InMemoryCluster(object):
    """
    A full Cumulus cluster simulated inside a single process: one shared-state
    manager, an in-memory channel manager for inter-machine traffic, a client
    handle, and a dynamically sized list of worker services (self.cumuli).
    """
    def __init__(self,
                cumulusVectorRamCacheSizeOverride = DEFAULT_VECTOR_RAM_CACHE_SIZE,
                cumulusMaxRamCacheSizeOverride = DEFAULT_MAX_RAM_CACHE_SIZE,
                cumulusThreadCountOverride = DEFAULT_THREAD_COUNT,
                remoteGatewayRamCacheSizeOverride = DEFAULT_MAX_RAM_CACHE_SIZE,
                perMachineThroughput = DEFAULT_PER_MACHINE_THROUGHPUT
                ):
        self.cumulusMaxRamCacheSizeOverride = cumulusMaxRamCacheSizeOverride
        self.cumulusVectorRamCacheSizeOverride = cumulusVectorRamCacheSizeOverride
        self.callbackSchedulerFactory = CallbackScheduler.createSimpleCallbackSchedulerFactory()
        self.callbackScheduler = self.callbackSchedulerFactory.createScheduler("InMemoryCluster", 1)
        self.cumulusThreadCountOverride = cumulusThreadCountOverride
        self.remoteGatewayCacheSize = remoteGatewayRamCacheSizeOverride
        # A single shared-state manager backs every simulated machine.
        self.sharedStateManager = SharedStateService.KeyspaceManager(
            10001,
            1,
            cachePathOverride="",
            pingInterval = IN_MEMORY_CLUSTER_SS_PING_INTERVAL,
            maxOpenFiles=100
            )
        self.sharedStateChannelFactory = (
            InMemorySharedStateChannelFactory.InMemoryChannelFactory(
                self.callbackScheduler,
                self.sharedStateManager
                )
            )
        self.sharedStateViewFactory = ViewFactory.ViewFactory(self.sharedStateChannelFactory)
        self.client = InMemoryClient(self)
        # Worker services currently running; resized by desireNumCumuli().
        self.cumuli = []
        self.nextCumulusAddress = 0
        self.channelManager = InMemoryChannelManager(self.callbackScheduler, perMachineThroughput)
        self.inMemoryDemuxingChannel = \
            InMemorySharedStateChannelFactory.SerializedToManagerChannelFactory(
                self.callbackScheduler,
                self.sharedStateManager,
                "SharedState"
                )
    def disconnectAllWorkersFromSharedState(self):
        self.sharedStateChannelFactory.disconnectAllChannels()
    def createMultiChannelListener(self, callbackScheduler, ports, address):
        # One in-memory listener per port, all bound to the same address.
        def makeChannelListener(cbScheduler, port, portScanIncrement=0):
            return self.createChannelListener(cbScheduler, address, port)
        return MultiChannelListener.MultiChannelListener(callbackScheduler,
                                                         ports,
                                                         makeChannelListener)
    def createChannelListener(self, callbackScheduler, address, port):
        return InMemoryChannelListener(address, port, self.channelManager)
    #def getDesirePublisher(self, user):
        #with self.sharedStateViewFactory:
            #desirePublisher = SynchronousDesirePublisher.SynchronousDesirePublisher(user)
            #desirePublisher.startService()
            #return desirePublisher
    def createCumulusGateway(self, callbackScheduler, vdm=None):
        # NOTE(review): the callbackScheduler and vdm arguments are ignored;
        # the cluster's own scheduler and a fresh VDM are used instead.
        logging.info("InMemoryCluster creating a RemoteGateway")
        return CumulusGatewayRemote.RemoteGateway(
            self.callbackScheduler,
            VectorDataManager.constructVDM(
                self.callbackScheduler,
                self.remoteGatewayCacheSize
                ),
            self.channelManager.createChannelFactory(),
            CumulusActiveMachines.CumulusActiveMachines(
                self.sharedStateViewFactory
                ),
            self.sharedStateViewFactory
            )
    def start(self):
        pass
    def stop(self):
        for service in self.cumuli:
            logging.info("stopping in-memory cumulus: %s", service)
            service.stopService()
    def teardown(self):
        # Drop references and force a GC pass so native resources are released.
        self.client = None
        self.cumuli = None
        self.channelManager = None
        self.sharedStateManager = None
        import gc
        gc.collect()
    def createServiceAndServiceThread(self):
        # Build a per-worker config; a unique disk-cache subdirectory keeps
        # workers from trampling each other's on-disk state.
        config = Setup.config()
        config.cumulusMaxRamCacheMB = self.cumulusMaxRamCacheSizeOverride / 1024 / 1024
        config.cumulusVectorRamCacheMB = self.cumulusVectorRamCacheSizeOverride / 1024 / 1024
        config.cumulusTrackTcmalloc = False
        config.cumulusServiceThreadCount = self.cumulusThreadCountOverride
        config.cumulusDiskCacheStorageSubdirectory = str(uuid.uuid4())
        ownAddress = str(uuid.uuid4())
        callbackScheduler = self.callbackSchedulerFactory.createScheduler(
            "InMemoryClusterChild",
            1)
        channelListener = self.createMultiChannelListener(
            callbackScheduler,
            [Setup.config().cumulusControlPort, Setup.config().cumulusDataPort],
            ownAddress)
        service = CumulusService.CumulusService(
            ownAddress=ownAddress,
            channelListener=channelListener,
            channelFactory=self.channelManager.createChannelFactory(),
            eventHandler=CumulusNative.CumulusWorkerHoldEventsInMemoryEventHandler(),
            callbackScheduler=callbackScheduler,
            diagnosticsDir=None,
            config=config,
            viewFactory=self.sharedStateViewFactory
            )
        service.startService(lambda: None)
        return service
    def desireNumCumuli(self, numNodes, blocking=False, timeout=20.0):
        # Grow or shrink the worker set to exactly numNodes. When blocking,
        # wait (up to timeout) until every other worker has observed each
        # added/removed machine.
        maxTime = time.time() + timeout
        if numNodes > len(self.cumuli):
            newCumuli = []
            while numNodes > len(self.cumuli):
                cumulus = self.createServiceAndServiceThread()
                logging.info("InMemory cluster started a cumulus node: %s, %s", cumulus, cumulus.machineId)
                self.cumuli.append(cumulus)
                newCumuli.append(cumulus)
            if blocking:
                for cumulus in newCumuli:
                    # Wait up to timeout seconds for the other cumuli to discover the new machine
                    success = False
                    while time.time() < maxTime:
                        missing = False
                        for c in self.cumuli:
                            if cumulus == c:
                                continue
                            if cumulus.machineId not in c.connectedMachines:
                                logging.info("Machine %s not connected to %s", cumulus.machineId, c.machineId)
                                missing = True
                        if not missing:
                            success = True
                            break
                        else:
                            time.sleep(0.1)
                    assert success
        if numNodes < len(self.cumuli):
            stoppedCumuli = []
            while numNodes < len(self.cumuli):
                cumulus = self.cumuli.pop()
                cumulus.stopService()
                stoppedCumuli.append(cumulus)
            if blocking:
                for cumulus in stoppedCumuli:
                    # Wait up to timeout seconds for the other cumuli to discover the dropped machine
                    success = False
                    while time.time() < maxTime:
                        present = False
                        for c in self.cumuli:
                            if cumulus == c:
                                continue
                            if cumulus.machineId in c.connectedMachines:
                                present = True
                        if not present:
                            success = True
                            break
                        else:
                            time.sleep(0.1)
                    assert success
                    logging.info("InMemory cluster killed a cumulus node: %s, %s", cumulus, cumulus.machineId)
        assert len(self.cumuli) == numNodes
|
progress_indicator.py | from __future__ import annotations
import re
import threading
import time
from contextlib import contextmanager
from typing import TYPE_CHECKING
from cleo._utils import format_time
from cleo.io.io import IO
if TYPE_CHECKING:
from cleo.io.outputs.output import Output
class ProgressIndicator:
    """
    A process indicator.

    Renders a spinner character plus a message on one line, optionally with
    elapsed time at (very) verbose output levels. Drive it manually with
    start()/advance()/finish(), or use the auto() context manager to spin it
    on a background thread.
    """

    NORMAL = " {indicator} {message}"
    NORMAL_NO_ANSI = " {message}"
    VERBOSE = " {indicator} {message} ({elapsed:6s})"
    VERBOSE_NO_ANSI = " {message} ({elapsed:6s})"
    VERY_VERBOSE = " {indicator} {message} ({elapsed:6s})"
    VERY_VERBOSE_NO_ANSI = " {message} ({elapsed:6s})"

    def __init__(
        self,
        io: IO | Output,
        fmt: str | None = None,
        interval: int = 100,
        values: list[str] | None = None,
    ) -> None:
        """
        Args:
            io: the IO (its error output is used) or a raw Output to draw on.
            fmt: display template; chosen from the class templates when None.
            interval: minimum milliseconds between two redraws.
            values: the spinner frames; at least two are required.
        """
        if isinstance(io, IO):
            io = io.error_output
        self._io = io
        if fmt is None:
            fmt = self._determine_best_format()
        self._fmt = fmt
        if values is None:
            values = ["-", "\\", "|", "/"]
        if len(values) < 2:
            raise ValueError(
                "The progress indicator must have at "
                "least 2 indicator value characters."
            )
        self._interval = interval
        self._values = values
        self._message = None
        self._update_time = None
        self._started = False
        self._current = 0
        # auto() support: stop event and background spinner thread.
        self._auto_running = None
        self._auto_thread = None
        self._start_time = None
        self._last_message_length = 0

    @property
    def message(self) -> str | None:
        """The message currently displayed next to the indicator."""
        return self._message

    def set_message(self, message: str | None) -> None:
        """Replace the displayed message and redraw immediately."""
        self._message = message
        self._display()

    @property
    def current_value(self) -> str:
        """The spinner character for the current animation frame."""
        return self._values[self._current % len(self._values)]

    def start(self, message: str) -> None:
        """
        Begin displaying the indicator with *message*.

        Raises:
            RuntimeError: if the indicator is already running.
        """
        if self._started:
            raise RuntimeError("Progress indicator already started.")
        self._message = message
        self._started = True
        self._start_time = time.time()
        self._update_time = self._get_current_time_in_milliseconds() + self._interval
        self._current = 0
        self._display()

    def advance(self) -> None:
        """Advance the spinner one frame, rate-limited to the redraw interval."""
        if not self._started:
            raise RuntimeError("Progress indicator has not yet been started.")
        # Without ANSI decoration the line cannot be overwritten, so animating
        # would just spam new lines — skip.
        if not self._io.is_decorated():
            return
        current_time = self._get_current_time_in_milliseconds()
        if current_time < self._update_time:
            return
        self._update_time = current_time + self._interval
        self._current += 1
        self._display()

    def finish(self, message: str, reset_indicator: bool = False) -> None:
        """Stop the indicator, displaying *message* as the final line."""
        if not self._started:
            raise RuntimeError("Progress indicator has not yet been started.")
        # Stop the auto() spinner thread if one is running.
        if self._auto_thread is not None:
            self._auto_running.set()
            self._auto_thread.join()
        self._message = message
        if reset_indicator:
            self._current = 0
        self._display()
        self._io.write_line("")
        self._started = False

    @contextmanager
    def auto(self, start_message: str, end_message: str):
        """
        Auto progress.

        Spins the indicator on a background thread for the duration of the
        with-block and shows *end_message* on success; on error the line is
        terminated and the exception re-raised.
        """
        self._auto_running = threading.Event()
        self._auto_thread = threading.Thread(target=self._spin)
        self.start(start_message)
        self._auto_thread.start()
        try:
            yield self
        except (Exception, KeyboardInterrupt):
            self._io.write_line("")
            self._auto_running.set()
            self._auto_thread.join()
            raise
        self.finish(end_message, reset_indicator=True)

    def _spin(self) -> None:
        # Runs on the auto() thread until the stop event is set.
        while not self._auto_running.is_set():
            self.advance()
            time.sleep(0.1)

    def _display(self) -> None:
        if self._io.is_quiet():
            return
        # Expand "{name}" / "{name:format}" placeholders via _formatter_* methods.
        self._overwrite(
            re.sub(
                r"(?i){([a-z\-_]+)(?::([^}]+))?}", self._overwrite_callback, self._fmt
            )
        )

    def _overwrite_callback(self, matches):
        if hasattr(self, f"_formatter_{matches.group(1)}"):
            text = str(getattr(self, f"_formatter_{matches.group(1)}")())
        else:
            # Unknown placeholder: leave it verbatim in the output.
            text = matches.group(0)
        return text

    def _overwrite(self, message) -> None:
        """
        Overwrites a previous message to the output.
        """
        if self._io.is_decorated():
            # Carriage return + "erase line" ANSI sequence, then redraw in place.
            self._io.write("\x0D\x1B[2K")
            self._io.write(message)
        else:
            self._io.write_line(message)

    def _determine_best_format(self):
        decorated = self._io.is_decorated()
        if self._io.is_very_verbose():
            if decorated:
                return self.VERY_VERBOSE
            return self.VERY_VERBOSE_NO_ANSI
        elif self._io.is_verbose():
            if decorated:
                # Bug fix: this branch previously returned VERY_VERBOSE; the
                # verbose level should use the VERBOSE template. (The two
                # templates are textually identical today, so output is
                # unchanged, but the intent is now correct.)
                return self.VERBOSE
            return self.VERBOSE_NO_ANSI
        if decorated:
            return self.NORMAL
        return self.NORMAL_NO_ANSI

    def _get_current_time_in_milliseconds(self) -> int:
        return round(time.time() * 1000)

    def _formatter_indicator(self) -> str:
        return self.current_value

    def _formatter_message(self):
        return self.message

    def _formatter_elapsed(self) -> str:
        return format_time(time.time() - self._start_time)
|
dns.py | from pymongo import MongoClient
import json
from datetime import datetime
import mysql.connector
import logging
from logging.handlers import RotatingFileHandler
from datetime import timezone
from elasticsearch import Elasticsearch
import pythonwhois
from time import sleep
from confluent_kafka import Consumer
logging.basicConfig(filename='log/dnslog.log', level=logging.ERROR,
                    format='%(asctime)s %(levelname)s %(name)s %(message)s')
logger = logging.getLogger("dnslog.log")
handler = RotatingFileHandler("log/dnslog.log", maxBytes=2000, backupCount=25)
# Bug fix: Logger objects are always truthy, so the original "if not logger:"
# guard could never run and the rotating handler was never attached. Guard on
# the handler list instead (also prevents stacking duplicate handlers).
if not logger.handlers:
    logger.addHandler(handler)
with open('config.json') as f:
    config = json.load(f)
def utc_to_local(utc_dt):
    """Interpret a naive datetime as UTC and convert it to the local timezone."""
    aware_utc = utc_dt.replace(tzinfo=timezone.utc)
    return aware_utc.astimezone(tz=None)
def Main():
    """
    Consume Kafka 'domaindetails' messages forever and run a domain-expiry
    check for each message whose feature config enables domain monitoring.
    Stops cleanly on Ctrl-C and always closes the consumer.
    """
    consumer = Consumer({
        'bootstrap.servers': '192.168.100.80:9092',
        'group.id': 'dns',
        'client.id': 'sslcheck',
        'enable.auto.commit': True,
        'auto.commit.interval.ms': 1000,
        'session.timeout.ms': 6000,
        'default.topic.config': {'auto.offset.reset': 'latest'}})
    consumer.subscribe(['domaindetails'])
    try:
        while True:
            msg = consumer.poll(0.1)
            if msg is None:
                continue
            elif not msg.error():
                try:
                    print('Received message: {0}'.format(msg.value()))
                    d = json.loads(msg.value())
                    print(d)
                    if d["featureConfig"]["domainMonitor"] == True:
                        print("true")
                        run((d["domainId"], d["companyDomain"]))
                except Exception as e:
                    # Bug fix: was a bare "except: pass", which silently dropped
                    # malformed messages AND swallowed KeyboardInterrupt before
                    # the outer handler could see it. Log and keep consuming.
                    logger.error("------Main---------" + str(e))
            else:
                print('Error occured: {0}'.format(msg.error().str()))
    except KeyboardInterrupt:
        pass
    finally:
        consumer.close()
    return
def run(key):
    """
    Check domain expiry for one domain and persist the result to MongoDB.

    Args:
        key: tuple of (domain id, hostname).
    """
    ID = key[0]
    hostname = key[1]
    try:
        DomainExpiry_data = DomainExpiry(key)
        data = {}
        data['_id'] = ID
        data['hostname'] = hostname
        data["LastUpdate"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        data['DomainExpiry'] = DomainExpiry_data
        msd(data)
        # elasticsearch(data)
    except Exception as e:
        # Best-effort: log and move on. (The original rebuilt a partial `data`
        # dict here and never used it — dead code, removed.)
        logger.error("------run---------" + str(e))
    return
def bulkrun():
    """Run the expiry check for every domain found in MySQL, one at a time."""
    try:
        for record in mySQL_read():
            run(record)
    except Exception as e:
        logger.error("------Main---------" + str(e))
    return
def mySQL_read():
    """
    Fetch (company_domain, domain_id) rows from the client_domains table.

    Returns:
        list: row tuples; empty list on any error.
    """
    rows = []
    mydb = None
    try:
        mydb = mysql.connector.connect(
            host=config['mysql']['host'],
            user=config['mysql']['user'],
            passwd=config['mysql']['passwd'],
            database=config['mysql']['database']
        )
        mycursor = mydb.cursor()
        mycursor.execute("SELECT company_domain, domain_id from login.client_domains")
        # fetchall() returns the same list of tuples the original fetchone()
        # loop accumulated, in one call.
        rows = mycursor.fetchall()
    except Exception as e:
        logger.error("------mySQL_read---------" + str(e))
        rows = []
    finally:
        # Bug fix: the original leaked the connection (and cursor) on every
        # call; close it whether the query succeeded or not.
        if mydb is not None:
            mydb.close()
    return rows
def DomainExpiry(key):
    """
    WHOIS lookup for (id, hostname); returns a dict with the expiry date, days
    left, registrar and name servers (empty strings on failure). On certain
    failures for multi-label names it retries one label up (a.b.com -> b.com).
    """
    try:
        # Throttle WHOIS queries to avoid rate limiting by upstream servers.
        sleep(25)
        ID=key[0]
        hostname=key[1]
        details = pythonwhois.get_whois(hostname)
        t = details['expiration_date'][0]
        expiry_date=t.strftime('%m/%d/%Y')
        time=details['expiration_date'][0]
        expiry_time=time -datetime.now()
        registrar=details['registrar'][0]
        NS=details['nameservers']
        # Days until expiry, as a string.
        DEI=repr(expiry_time.days)
    except Exception as e:
        # NOTE(review): str(KeyError('expiration_date')) is "'expiration_date'"
        # (with quotes), so this equality check probably never matches and the
        # final else-branch below may be unreachable — confirm intent.
        if str(e) != 'expiration_date':
            if (hostname.count('.') > 1):
                # Retry on the parent domain (strip the leftmost label).
                if hostname.find('.') != -1:
                    n= hostname.find('.')
                    domain_s=hostname[n+1:]
                    val=(ID, domain_s)
                    return (DomainExpiry(val))
                else:
                    expiry_date=''
                    DEI=''
                    registrar=''
                    NS=''
            else:
                expiry_date=''
                DEI=''
                registrar=''
                NS=''
        else:
            logger.error(hostname +"--------DomainExpiry : ----------" + str(e))
            #return (DomainExpiry(key))
            expiry_date=''
            DEI=''
            registrar=''
            NS=''
    json_data={}
    json_data["domainexpiryDate"] = expiry_date
    json_data["domainDaysLeft"] = DEI
    json_data["registrar"]=registrar
    json_data["nameServer"]=NS
    return json_data
def msd(data):
    """Upsert one domain-expiry document into MongoDB, keyed by its '_id'."""
    try:
        client = MongoClient(config['mongodb']['host'],
                             username=config['mongodb']['username'],
                             password=config['mongodb']['password'],
                             authSource=config['mongodb']['authSource'])
        db = client.check
        collection = db.DomainExpiry12345
        x = data
        # Collection.update() was deprecated in PyMongo 3 and removed in 4;
        # update_one(..., upsert=True) is the single-document equivalent of
        # update(..., upsert=True, multi=False).
        collection.update_one({'_id': x['_id']}, {'$set': x}, upsert=True)
    except Exception as e:
        logger.error("---------Mongodb connection :-------------- " + str(e))
    return
def elasticsearch(data):
    """
    Index one document into the 'domainmonitor' Elasticsearch index.
    NOTE: mutates the caller's dict by removing '_id' before indexing.
    """
    try:
        data.pop("_id")
        es=Elasticsearch([{'host':'35.200.240.123','port':9200}])
        res = es.index(index='domainmonitor',doc_type='data',body=data)
    except Exception as e:
        logger.error("---------Elasticsearch :-------------- " +str(e))
    return
# Script entry point: start the Kafka consumer loop.
if __name__ == "__main__":
    Main()
|
LR5.py | """
Licensed under the Unlicense License;
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://unlicense.org
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import threading
import numpy as np
import cv2
import sys
from PyQt5 import QtWidgets
from PyQt5.QtGui import QPixmap
import qimage2ndarray
from keras_preprocessing.image import ImageDataGenerator
from tensorflow.keras import layers
import random
from tkinter import filedialog
import tensorflow as tf
import keras
import gui_5
# Network input geometry: 64x64 RGB images; PREDICT_SHAPE adds the batch axis.
INPUT_SHAPE = (64, 64, 3)
PREDICT_SHAPE = (-1, 64, 64, 3)
# Activations for the conv kernels and the dense (MLP) head, plus optimizer.
KERNEL_ACTIVATION = 'relu'
MLP_ACTIVATION = 'relu'
OPTIMIZER = 'adam'
class LR5(QtWidgets.QMainWindow, gui_5.Ui_MainWindow):
    def __init__(self):
        """Wire up the Qt UI and connect every button to its handler."""
        super().__init__()
        self.setupUi(self)
        self.btn_train_browse.clicked.connect(self.train_browse)
        self.btn_train_load.clicked.connect(self.train_load)
        self.btn_test_browse.clicked.connect(self.test_browse)
        self.btn_test_load.clicked.connect(self.test_load)
        self.btn_tfn_train.clicked.connect(self.tfn_train)
        self.btn_tfn_save.clicked.connect(self.tfn_save)
        self.btn_tfn_load.clicked.connect(self.tfn_load)
        self.btn_tfn_predict.clicked.connect(self.tfn_predict)
        self.btn_tfn_predict_camera.clicked.connect(self.predict_camera)
        self.btn_tfn_predict_camera_stop.clicked.connect(self.predict_camera_stop)
        # Keras directory generators, set by the *_load handlers.
        self.train_data = None
        self.test_data = None
        self.model = None
        # Camera-prediction loop control flag and OpenCV capture handle.
        self.camera_running = False
        self.cv_cap = None
def train_browse(self):
file_path = filedialog.askdirectory()
if file_path is not None:
self.line_train_folder.setText(file_path)
    def train_load(self):
        """Build a shuffled categorical image generator over the training folder."""
        # Pixel values rescaled to [0, 1].
        datagen = ImageDataGenerator(rescale=1 / 255.0)
        self.train_data = datagen.flow_from_directory(
            self.line_train_folder.text(),
            target_size=INPUT_SHAPE[:2],
            batch_size=self.spin_train_n.value(),
            class_mode='categorical',
            shuffle=True
        )
def test_browse(self):
file_path = filedialog.askdirectory()
if file_path is not None:
self.line_test_folder.setText(file_path)
    def test_load(self):
        """Build a categorical image generator over the chosen test folder."""
        # Pixel values rescaled to [0, 1].
        datagen = ImageDataGenerator(rescale=1 / 255.0)
        self.test_data = datagen.flow_from_directory(
            self.line_test_folder.text(),
            target_size=INPUT_SHAPE[:2],
            batch_size=self.spin_test_n.value(),
            class_mode='categorical'
        )
    def tfn_train(self):
        """Build, compile, train and evaluate the CNN on the loaded generators."""
        # Create model
        self.model = tf.keras.models.Sequential([
            layers.Conv2D(64, (6, 6), padding='same', activation=KERNEL_ACTIVATION, input_shape=INPUT_SHAPE),
            layers.Conv2D(64, (6, 6), padding='same', activation=KERNEL_ACTIVATION),
            layers.MaxPooling2D((2, 2)),
            #
            layers.Conv2D(32, (2, 2), padding='same', activation=KERNEL_ACTIVATION),
            layers.Conv2D(32, (2, 2), padding='same', activation=KERNEL_ACTIVATION),
            layers.MaxPooling2D((2, 2)),
            #
            layers.Flatten(),
            layers.Dense(128, activation=MLP_ACTIVATION),
            # Output width matches the number of class folders found at load time.
            layers.Dense(self.train_data.num_classes, activation='softmax')
        ])
        # Compile model
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=OPTIMIZER,
                           metrics=['accuracy'])
        # Train model
        self.model.fit(self.train_data,
                       epochs=self.spin_tfn_epochs.value(),
                       validation_data=self.test_data)
        print('Training done.')
        loss, accuracy = self.model.evaluate(self.test_data, verbose=2)
        print('Accuracy: ' + str(accuracy))
def tfn_save(self):
    """Persist the trained model to LR5_data/model.h5 (HDF5 format)."""
    if self.model is None:
        print('No model to save -- train or load one first.')
        return
    # Ensure the target directory exists; Keras does not create it.
    os.makedirs('LR5_data', exist_ok=True)
    self.model.save('LR5_data/model.h5')
def tfn_load(self):
    """Load a previously saved Keras model from LR5_data/model.h5."""
    self.model = keras.models.load_model('LR5_data/model.h5')
def tfn_predict(self):
    """Classify one randomly chosen test image and display image + labels.

    Fix: the old code collected directory names with a full recursive
    os.walk in arbitrary order, while flow_from_directory assigns class
    indices to the *sorted* top-level directories -- so predicted indices
    could map to the wrong label. Use the sorted top-level dirs only.
    """
    test_root = self.line_test_folder.text()
    class_names = sorted(next(os.walk(test_root))[1])
    random_class = random.randrange(0, self.test_data.num_classes)
    test_path = test_root + '/' + class_names[random_class]
    images = os.listdir(test_path)
    random_image = random.randrange(0, len(images))
    img_array = cv2.cvtColor(
        cv2.imread(os.path.join(test_path, images[random_image])), cv2.COLOR_BGR2RGB)
    new_array = cv2.resize(img_array, INPUT_SHAPE[:2])
    # Show the (nearest-neighbour upscaled) picture in the GUI.
    self.cvl_image.setPixmap(QPixmap.fromImage(qimage2ndarray.array2qimage(
        cv2.resize(img_array, (320, 240), interpolation=cv2.INTER_NEAREST))))
    # Shape to a single-sample batch and normalize exactly like training.
    new_array = np.expand_dims(new_array, axis=0)
    new_array = np.array(new_array).reshape(PREDICT_SHAPE)
    new_array = new_array / 255.0
    result_1 = self.model.predict([new_array])
    result = int(np.argmax(result_1[0]))
    print(class_names[result] + ' / ' + class_names[random_class])
    self.label_tfn_result.setText(class_names[result] + ' on photo. ' + class_names[random_class] + ' in reality.')
def predict_camera(self):
    """Open the selected camera and start the prediction loop in a thread."""
    if self.model is None:
        print('Load or train a model before starting camera prediction.')
        return
    self.camera_running = True
    self.cv_cap = cv2.VideoCapture(self.camera_id.value(), cv2.CAP_DSHOW)
    # Daemon thread so a forgotten "stop" cannot keep the process alive
    # after the main window closes.
    thread = threading.Thread(target=self.camera_prediction, daemon=True)
    thread.start()
def predict_camera_stop(self):
    """Signal the camera loop to exit; camera_prediction releases the capture."""
    self.camera_running = False
def camera_prediction(self):
    """Background loop: grab frames, classify them and update the GUI.

    Runs until camera_running is cleared, then releases the capture.
    NOTE(review): this updates Qt widgets from a worker thread, which Qt
    does not guarantee to be safe -- consider signals/slots instead.
    """
    # flow_from_directory assigns class indices in sorted order, so sort
    # the top-level class directories to keep labels aligned (the old
    # recursive, unsorted os.walk could misalign them).
    class_names = sorted(next(os.walk(self.line_test_folder.text()))[1])
    while self.camera_running:
        ret, img = self.cv_cap.read()
        if not ret:
            # Frame grab failed (camera busy/unplugged) -- previously this
            # crashed in cvtColor on a None frame. Skip and retry.
            continue
        img_array = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        new_array = cv2.resize(img_array, INPUT_SHAPE[:2])
        self.cvl_image.setPixmap(QPixmap.fromImage(qimage2ndarray.array2qimage(
            cv2.resize(img_array, (320, 240), interpolation=cv2.INTER_NEAREST))))
        new_array = np.expand_dims(new_array, axis=0)
        new_array = np.array(new_array).reshape(PREDICT_SHAPE)
        new_array = new_array / 255.0
        result_1 = self.model.predict([new_array])
        result = int(np.argmax(result_1[0]))
        self.label_tfn_result.setText(class_names[result] + ' on photo.')
    self.cv_cap.release()
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the main window
    # and hand control to the event loop.
    app = QtWidgets.QApplication(sys.argv)
    app.setStyle('fusion')
    window = LR5()
    window.show()
    app.exec_()
|
test_smtp_mailer.py | import asyncore
from email import message_from_bytes
import smtpd
from smtplib import SMTPException
import threading
import pytest
from ..mailshake import EmailMessage, SMTPMailer
def make_emails():
    """Return four numbered test messages (Subject-1 .. Subject-4)."""
    emails = []
    for num in range(1, 5):
        emails.append(
            EmailMessage(
                "Subject-%s" % num, "Content", "from@example.com", "to@example.com"
            )
        )
    return emails
# Module-level handle to the fake server; created in setup_module().
smtp_server = None
SMTP_HOST = "127.0.0.1"
SMTP_PORT = 8080
class FakeSMTPServer(smtpd.SMTPServer):
    """In-process SMTP sink: captures every delivered message in `self.sink`.

    NOTE(review): smtpd and asyncore were removed in Python 3.12; this
    server only runs on older interpreters.
    """
    def __init__(self, host, port):
        print("Running fake SMTP server")
        localaddr = (host, port)
        # No relaying -- messages are only stored locally.
        remoteaddr = None
        smtpd.SMTPServer.__init__(self, localaddr, remoteaddr)
        self.flush_sink()
    def flush_sink(self):
        # Discard previously captured messages so each test starts clean.
        self.sink = []
    def process_message(self, peer, from_, to, bmessage, **kwargs):
        # Called by smtpd for each delivered message; parse and record it.
        self.sink.append(message_from_bytes(bmessage))
    def start(self):
        # timeout parameter is important, otherwise code will block 30 seconds after
        # the SMTP channel has been closed
        self.thread = threading.Thread(target=asyncore.loop, kwargs={"timeout": 0.1})
        self.thread.daemon = True
        self.thread.start()
    def stop(self):
        # close the SMTPserver to ensure no channels connect to asyncore
        self.close()
        # now it is safe to wait for the thread to finish,
        # i.e. for asyncore.loop() to exit
        self.thread.join(timeout=0.5)
def setup_module():
    """Start the shared fake SMTP server once for the whole module."""
    global smtp_server
    server = FakeSMTPServer(SMTP_HOST, SMTP_PORT)
    server.start()
    smtp_server = server
def teardown_module():
    """Stop the fake SMTP server, if setup_module managed to create it."""
    global smtp_server
    if smtp_server is None:
        return
    smtp_server.stop()
def test_sending():
    """Four messages sent over three calls all arrive with headers intact."""
    global smtp_server
    smtp_server.flush_sink()
    mailer = SMTPMailer(host=SMTP_HOST, port=SMTP_PORT, use_tls=False)
    messages = make_emails()
    assert mailer.send_messages(messages[0]) == 1
    assert mailer.send_messages(messages[1], messages[2]) == 2
    assert mailer.send_messages(messages[3]) == 1
    sink = smtp_server.sink
    assert len(sink) == 4
    first = sink[0]
    print(first)
    assert first.get_content_type() == "text/plain"
    assert first.get("subject") == "Subject-1"
    assert first.get("from") == "from@example.com"
    assert first.get("to") == "to@example.com"
def test_sending_unicode():
    """Non-ASCII subject, body and recipient survive the trip to the server."""
    global smtp_server
    smtp_server.flush_sink()
    mailer = SMTPMailer(host="127.0.0.1", port=SMTP_PORT, use_tls=False)
    message = EmailMessage(
        "Olé", "Contenido en español", "from@example.com", "toБ@example.com"
    )
    assert mailer.send_messages(message)
    assert len(smtp_server.sink) == 1
def test_notls():
    """Requesting TLS against the plain fake server raises SMTPException."""
    tls_mailer = SMTPMailer(host="127.0.0.1", port=SMTP_PORT, use_tls=True)
    with pytest.raises(SMTPException):
        tls_mailer.open()
    tls_mailer.close()
def test_wrong_host():
    """An unresolvable host makes open() raise."""
    bad_mailer = SMTPMailer(host="123", port=SMTP_PORT, use_tls=False, timeout=0.5)
    with pytest.raises(Exception):
        bad_mailer.open()
    bad_mailer.close()
def test_wrong_port():
    """Connecting to a port nobody listens on makes open() raise."""
    bad_mailer = SMTPMailer(host="127.0.0.1", port=3000, use_tls=False)
    with pytest.raises(Exception):
        bad_mailer.open()
    bad_mailer.close()
def test_fail_silently():
    """With fail_silently=True none of the broken configurations may raise."""
    configs = [
        dict(host="127.0.0.1", port=SMTP_PORT, use_tls=True, fail_silently=True),
        dict(host="123", port=SMTP_PORT, use_tls=False, fail_silently=True, timeout=0.5),
        dict(host="127.0.0.1", port=3000, use_tls=False, fail_silently=True),
    ]
    for config in configs:
        mailer = SMTPMailer(**config)
        mailer.open()
        mailer.close()
def test_batch_too_many_recipients():
    """1500 recipients are split into ceil(1500 / 200) = 8 batched messages."""
    global smtp_server
    smtp_server.flush_sink()
    mailer = SMTPMailer(
        host="127.0.0.1", port=SMTP_PORT, use_tls=False, max_recipients=200
    )
    send_to = ["user{}@example.com".format(i) for i in range(1, 1501)]
    msg = EmailMessage("The Subject", "Content", "from@example.com", send_to)
    assert mailer.send_messages(msg) == 1
    sink = smtp_server.sink
    assert len(sink) == 8
    # Seven full batches of 200 recipients, then the 100-recipient remainder.
    expected_sizes = [200] * 7 + [100]
    for message, size in zip(sink, expected_sizes):
        assert len(message.get("to").split(",")) == size
|
user.py | #!/usr/bin/env python2.7
import argparse # new in Python2.7
import atexit
import logging
import string
import sys
import threading
import time
logging.basicConfig(level=logging.ERROR)
from yapsy.PluginManager import PluginManager
# Load the plugins from the plugin directory.
manager = PluginManager()
if __name__ == '__main__':
    # Entry point: parse CLI options, then (in the following sections) load
    # plugins, open the board and run an interactive command prompt.
    print ("------------user.py-------------")
    parser = argparse.ArgumentParser(description="OpenBCI 'user'")
    parser.add_argument('--board', default="cyton",
                        help="Choose between [cyton] and [ganglion] boards.")
    parser.add_argument('-l', '--list', action='store_true',
                        help="List available plugins.")
    parser.add_argument('-i', '--info', metavar='PLUGIN',
                        help="Show more information about a plugin.")
    parser.add_argument('-p', '--port',
                        help="For Cyton, port to connect to OpenBCI Dongle " +
                        "( ex /dev/ttyUSB0 or /dev/tty.usbserial-* ). For Ganglion, MAC address of the board. For both, AUTO to attempt auto-detection.")
    parser.set_defaults(port="AUTO")
    # baud rate is not currently used
    parser.add_argument('-b', '--baud', default=115200, type=int,
                        help="Baud rate (not currently used)")
    parser.add_argument('--no-filtering', dest='filtering',
                        action='store_false',
                        help="Disable notch filtering")
    parser.set_defaults(filtering=True)
    parser.add_argument('-d', '--daisy', dest='daisy',
                        action='store_true',
                        help="Force daisy mode (cyton board)")
    parser.add_argument('-x', '--aux', dest='aux',
                        action='store_true',
                        help="Enable accelerometer/AUX data (ganglion board)")
    # first argument: plugin name, then parameters for plugin
    parser.add_argument('-a', '--add', metavar=('PLUGIN', 'PARAM'),
                        action='append', nargs='+',
                        help="Select which plugins to activate and set parameters.")
    parser.add_argument('--log', dest='log', action='store_true',
                        help="Log program")
    parser.add_argument('--plugins-path', dest='plugins_path', nargs='+',
                        help="Additional path(s) to look for plugins")
    parser.set_defaults(daisy=False, log=False)
    args = parser.parse_args()
    if not(args.add):
        print ("WARNING: no plugin selected, you will only be able to communicate with the board. You should select at least one plugin with '--add [plugin_name]'. Use '--list' to show available plugins or '--info [plugin_name]' to get more information.")
if args.board == "cyton":
print ("Board type: OpenBCI Cyton (v3 API)")
import openbci.cyton as bci
elif args.board == "ganglion":
print ("Board type: OpenBCI Ganglion")
import openbci.ganglion as bci
else:
raise ValueError('Board type %r was not recognized. Known are 3 and 4' % args.board)
    # Check AUTO port selection, a "None" parameter for the board API
    if "AUTO" == args.port.upper():
        print("Will try do auto-detect board's port. Set it manually with '--port' if it goes wrong.")
        args.port = None
    else:
        print("Port: ", args.port)
    # Plugin search path: the bundled plugins plus any user-supplied paths.
    plugins_paths = ["openbci/plugins"]
    if args.plugins_path:
        plugins_paths += args.plugins_path
    manager.setPluginPlaces(plugins_paths)
    manager.collectPlugins()
    # Print list of available plugins and exit
    if args.list:
        print ("Available plugins:")
        for plugin in manager.getAllPlugins():
            print ("\t- " + plugin.name)
        exit()
    # User wants more info about a plugin...
    if args.info:
        plugin = manager.getPluginByName(args.info)
        if plugin == None:
            # eg: if an import fail inside a plugin, yapsy skip it
            print ("Error: [ " + args.info + " ] not found or could not be loaded. Check name and requirements.")
        else:
            print (plugin.description)
            plugin.plugin_object.show_help()
        exit()
    print ("\n------------SETTINGS-------------")
    print ("Notch filtering:" + str(args.filtering))
    # Logging
    if args.log:
        print ("Logging Enabled: " + str(args.log))
        logging.basicConfig(filename="OBCI.log", format='%(asctime)s - %(levelname)s : %(message)s', level=logging.DEBUG)
        logging.getLogger('yapsy').setLevel(logging.DEBUG)
        logging.info('---------LOG START-------------')
        logging.info(args)
    else:
        print ("user.py: Logging Disabled.")
    print ("\n-------INSTANTIATING BOARD-------")
    # Construct the driver object; the two board classes take different kwargs.
    if args.board == "cyton":
        board = bci.OpenBCICyton(port=args.port,
                                 baud=args.baud,
                                 daisy=args.daisy,
                                 filter_data=args.filtering,
                                 scaled_output=True,
                                 log=args.log)
    elif args.board == "ganglion":
        board = bci.OpenBCIGanglion(port=args.port,
                                    filter_data=args.filtering,
                                    scaled_output=True,
                                    log=args.log,
                                    aux=args.aux)
    # Info about effective number of channels and sampling rate
    if board.daisy:
        print ("Force daisy mode:")
    else:
        print ("No daisy:")
    print (board.getNbEEGChannels(), "EEG channels and", board.getNbAUXChannels(), "AUX channels at", board.getSampleRate(), "Hz.")
    print ("\n------------PLUGINS--------------")
    # Loop round the plugins and print their names.
    print ("Found plugins:")
    for plugin in manager.getAllPlugins():
        print ("[ " + plugin.name + " ]")
    print("\n")
    # Fetch plugins, try to activate them, add to the list if OK
    plug_list = []
    callback_list = []
    if args.add:
        for plug_candidate in args.add:
            # first value: plugin name, then optional arguments
            plug_name = plug_candidate[0]
            plug_args = plug_candidate[1:]
            # Try to find name
            plug = manager.getPluginByName(plug_name)
            if plug == None:
                # eg: if an import fail inside a plugin, yapsy skip it
                print ("Error: [ " + plug_name + " ] not found or could not be loaded. Check name and requirements.")
            else:
                print ("\nActivating [ " + plug_name + " ] plugin...")
                if not plug.plugin_object.pre_activate(plug_args, sample_rate=board.getSampleRate(), eeg_channels=board.getNbEEGChannels(), aux_channels=board.getNbAUXChannels(), imp_channels=board.getNbImpChannels()):
                    print ("Error while activating [ " + plug_name + " ], check output for more info.")
                else:
                    print ("Plugin [ " + plug_name + "] added to the list")
                    plug_list.append(plug.plugin_object)
                    callback_list.append(plug.plugin_object)
    # fun is the callback (list) handed to the streaming loop, or None when
    # no plugin could be activated.
    if len(plug_list) == 0:
        fun = None
    else:
        fun = callback_list
    def cleanUp():
        # atexit handler: disconnect the board and deactivate every plugin.
        board.disconnect()
        print ("Deactivating Plugins...")
        for plug in plug_list:
            plug.deactivate()
        print ("User.py exiting...")
    atexit.register(cleanUp)
    print ("--------------INFO---------------")
    print ("User serial interface enabled...\n\
View command map at http://docs.openbci.com.\n\
Type /start to run (/startimp for impedance \n\
checking, if supported) -- and /stop\n\
before issuing new commands afterwards.\n\
Type /exit to exit. \n\
Board outputs are automatically printed as: \n\
% <tab> message\n\
$$$ signals end of message")
    print("\n-------------BEGIN---------------")
    # Init board state
    # s: stop board streaming; v: soft reset of the 32-bit board (no effect with 8bit board)
    s = 'sv'
    # Tell the board to enable or not daisy module
    if board.daisy:
        s = s + 'C'
    else:
        s = s + 'c'
    # d: Channels settings back to default
    s = s + 'd'
    # Interactive command loop: "/" commands are interpreted locally,
    # anything else is written to the board character by character.
    while(s != "/exit"):
        # Send char and wait for registers to set
        if (not s):
            pass
        elif("help" in s):
            print ("View command map at: \
http://docs.openbci.com/software/01-OpenBCI_SDK.\n\
For user interface: read README or view \
https://github.com/OpenBCI/OpenBCI_Python")
        elif board.streaming and s != "/stop":
            print ("Error: the board is currently streaming data, please type '/stop' before issuing new commands.")
        else:
            # read silently incoming packet if set (used when stream is stopped)
            flush = False
            if('/' == s[0]):
                s = s[1:]
                rec = False  # current command is recognized or not
                # Optional "T:"/"t:" suffix limits streaming duration (lapse).
                # NOTE(review): string.find() only exists in Python 2; these
                # branches would raise AttributeError under Python 3 despite
                # the sys.hexversion checks elsewhere in this script.
                if("T:" in s):
                    lapse = int(s[string.find(s, "T:")+2:])
                    rec = True
                elif("t:" in s):
                    lapse = int(s[string.find(s, "t:")+2:])
                    rec = True
                else:
                    lapse = -1
                if('startimp' in s):
                    if board.getBoardType() == "cyton":
                        print ("Impedance checking not supported on cyton.")
                    else:
                        board.setImpedance(True)
                        if(fun != None):
                            # start streaming in a separate thread so we could always send commands in here
                            boardThread = threading.Thread(target=board.start_streaming, args=(fun, lapse))
                            boardThread.daemon = True  # will stop on exit
                            try:
                                boardThread.start()
                            except:
                                raise
                        else:
                            print ("No function loaded")
                    rec = True
                elif("start" in s):
                    board.setImpedance(False)
                    if(fun != None):
                        # start streaming in a separate thread so we could always send commands in here
                        boardThread = threading.Thread(target=board.start_streaming, args=(fun, lapse))
                        boardThread.daemon = True  # will stop on exit
                        try:
                            boardThread.start()
                        except:
                            raise
                    else:
                        print ("No function loaded")
                    rec = True
                elif('test' in s):
                    test = int(s[s.find("test")+4:])
                    board.test_signal(test)
                    rec = True
                elif('stop' in s):
                    board.stop()
                    rec = True
                    flush = True
                if rec == False:
                    print("Command not recognized...")
            elif s:
                # Raw board command: forward each character to the serial port.
                for c in s:
                    if sys.hexversion > 0x03000000:
                        board.ser_write(bytes(c, 'utf-8'))
                    else:
                        board.ser_write(bytes(c))
                    time.sleep(0.100)
            line = ''
            time.sleep(0.1)  # Wait to see if the board has anything to report
            # The Cyton nicely return incoming packets -- here supposedly messages -- whereas the Ganglion prints incoming ASCII message by itself
            if board.getBoardType() == "cyton":
                while board.ser_inWaiting():
                    c = board.ser_read().decode('utf-8', errors='replace')  # we're supposed to get UTF8 text, but the board might behave otherwise
                    line += c
                    time.sleep(0.001)
                    if (c == '\n') and not flush:
                        print('%\t'+line[:-1])
                        line = ''
            elif board.getBoardType() == "ganglion":
                while board.ser_inWaiting():
                    board.waitForNotifications(0.001)
                if not flush:
                    print(line)
        # Take user input
        #s = input('--> ')
        if sys.hexversion > 0x03000000:
            s = input('--> ')
        else:
            s = raw_input('--> ')
|
test_pr_preview.py | try:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
except ImportError:
# Python 3 case
from http.server import BaseHTTPRequestHandler, HTTPServer
import contextlib
import errno
import json
import os
import shutil
import stat
import subprocess
import tempfile
import threading
# Path to the script under test, resolved relative to this test file.
subject = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), '..', 'pr_preview.py'
)
# All mock servers bind to localhost on an OS-assigned port.
test_host = 'localhost'
def same_members(a, b):
    """Return True when *a* and *b* hold the same elements with the same
    multiplicity, in any order. Works for unhashable elements (dicts)."""
    if len(a) != len(b):
        return False
    remaining = list(a)
    for item in b:
        try:
            remaining.remove(item)
        except ValueError:
            return False
    return not remaining
# When these tests are executed in Windows, files in the temporary git
# repositories may be marked as "read only" at the moment they are intended to
# be deleted. The following handler for `shutil.rmtree` accounts for this by
# making the files writable and attempting to delete them a second time.
#
# Source:
# https://stackoverflow.com/questions/1213706/what-user-do-python-scripts-run-as-in-windows
def handle_remove_readonly(func, path, exc):
    """shutil.rmtree onerror hook: make a read-only entry writable and retry."""
    error = exc[1]
    retryable = (os.rmdir, os.remove, os.unlink)
    if func not in retryable or error.errno != errno.EACCES:
        raise
    os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)  # 0777
    func(path)
class MockHandler(BaseHTTPRequestHandler, object):
    """Handler that matches each request against the server's scripted
    `expected_traffic` table and replays the scripted response."""
    def do_all(self):
        # Matching ignores the query string: method + path + body subset only.
        path = self.path.split('?')[0]
        request_body = None
        if 'Content-Length' in self.headers:
            request_body = self.rfile.read(
                int(self.headers['Content-Length'])
            ).decode('utf-8')
            if self.headers.get('Content-Type') == 'application/json':
                request_body = json.loads(request_body)
        for request, response in self.server.expected_traffic:
            if request[0] != self.command:
                continue
            if request[1] != path:
                continue
            # Every key listed in the expectation must match the body;
            # keys absent from the expectation are ignored.
            body_matches = True
            for key in request[2]:
                body_matches &= request[2][key] == request_body.get(key)
            if not body_matches:
                continue
            break
        else:
            # No scripted entry matched: record the raw request, answer 400.
            request = (self.command, path, request_body)
            response = (400, {})
        self.server.actual_traffic.append((request, response))
        self.send_response(response[0])
        self.end_headers()
        self.wfile.write(json.dumps(response[1]).encode('utf-8'))
    def do_DELETE(self):
        return self.do_all()
    def do_GET(self):
        return self.do_all()
    def do_PATCH(self):
        return self.do_all()
    def do_POST(self):
        return self.do_all()
class MockServer(HTTPServer, object):
    '''HTTP server that responds to all requests with status code 200 and body
    '{}' unless an alternative status code and body are specified for the given
    method and path in the `responses` parameter.'''
    def __init__(self, address, expected_traffic):
        super(MockServer, self).__init__(address, MockHandler)
        self.expected_traffic = expected_traffic
        self.actual_traffic = []
    def __enter__(self):
        # Serve from a background thread so the test body can proceed.
        threading.Thread(target=self.serve_forever).start()
        return self
    def __exit__(self, *exc_info):
        self.shutdown()
class Requests(object):
    """Canonical (method, path, body-subset) triples used both to script the
    mock GitHub API and to assert on the traffic it actually received."""
    get_rate = ('GET', '/rate_limit', {})
    search = ('GET', '/search/issues', {})
    pr_details = ('GET', '/repos/test-org/test-repo/pulls/23', {})
    ref_create_open = (
        'POST', '/repos/test-org/test-repo/git/refs', {'ref':'refs/prs-open/23'}
    )
    ref_create_trusted = (
        'POST',
        '/repos/test-org/test-repo/git/refs',
        {'ref':'refs/prs-trusted-for-preview/23'}
    )
    ref_update_open = (
        'PATCH', '/repos/test-org/test-repo/git/refs/prs-open/23', {}
    )
    ref_update_trusted = (
        'PATCH', '/repos/test-org/test-repo/git/refs/prs-trusted-for-preview/23', {}
    )
    deployment_get = ('GET', '/repos/test-org/test-repo/deployments', {})
    deployment_create = ('POST', '/repos/test-org/test-repo/deployments', {})
    deployment_status_create_pending = (
        'POST',
        '/repos/test-org/test-repo/deployments/24601/statuses',
        {'state':'pending'}
    )
    deployment_status_create_error = (
        'POST',
        '/repos/test-org/test-repo/deployments/24601/statuses',
        {'state':'error'}
    )
    deployment_status_create_success = (
        'POST',
        '/repos/test-org/test-repo/deployments/24601/statuses',
        {'state':'success'}
    )
    preview = ('GET', '/.git/worktrees/45/HEAD', {})
class Responses(object):
    """Reusable scripted responses for the mock GitHub API."""
    # Plenty of remaining quota in both rate-limit buckets.
    no_limit = (200, {
        'resources': {
            'search': {
                'remaining': 100,
                'limit': 100
            },
            'core': {
                'remaining': 100,
                'limit': 100
            }
        }
    })
@contextlib.contextmanager
def temp_repo():
    """Yield a throwaway git repository containing one empty commit."""
    repo_dir = tempfile.mkdtemp()
    def git(*args):
        subprocess.check_call(['git'] + list(args), cwd=repo_dir)
    try:
        git('init')
        git('config', 'user.name', 'example')
        git('config', 'user.email', 'example@example.com')
        git('commit', '--allow-empty', '-m', 'first')
        yield repo_dir
    finally:
        shutil.rmtree(
            repo_dir, ignore_errors=False, onerror=handle_remove_readonly
        )
def synchronize(expected_traffic, refs={}):
    """Run `pr_preview.py synchronize` against a mock GitHub API plus a pair
    of temporary local/remote git repositories.

    Returns (returncode, actual_traffic, remote_refs).
    NOTE(review): the mutable default `refs={}` is only safe because it is
    never mutated here.
    """
    env = {
        'DEPLOY_TOKEN': 'c0ffee'
    }
    env.update(os.environ)
    server = MockServer((test_host, 0), expected_traffic)
    test_port = server.server_address[1]
    remote_refs = {}
    with temp_repo() as local_repo, temp_repo() as remote_repo, server:
        # Give the remote two extra commits so refs can point at HEAD/HEAD~.
        subprocess.check_call(
            ['git', 'commit', '--allow-empty', '-m', 'first'],
            cwd=remote_repo
        )
        subprocess.check_call(
            ['git', 'commit', '--allow-empty', '-m', 'second'],
            cwd=remote_repo
        )
        subprocess.check_call(
            ['git', 'remote', 'add', 'origin', remote_repo], cwd=local_repo
        )
        # Pre-seed the remote with the refs the scenario requires.
        for name, value in refs.items():
            subprocess.check_call(
                ['git', 'update-ref', name, value],
                cwd=remote_repo
            )
        child = subprocess.Popen(
            [
                'python',
                subject,
                '--host',
                'http://{}:{}'.format(test_host, test_port),
                '--github-project',
                'test-org/test-repo',
                'synchronize',
                '--window',
                '3000'
            ],
            cwd=local_repo,
            env=env
        )
        child.communicate()
        # Collect the refs present on the remote after the run, ignoring
        # HEAD and the default branch.
        lines = subprocess.check_output(
            ['git', 'ls-remote', 'origin'], cwd=local_repo
        )
        for line in lines.decode('utf-8').strip().split('\n'):
            revision, ref = line.split()
            if not ref or ref in ('HEAD', 'refs/heads/master'):
                continue
            remote_refs[ref] = revision
    return child.returncode, server.actual_traffic, remote_refs
def detect(event, expected_github_traffic, expected_preview_traffic):
    """Run `pr_preview.py detect` with a synthesized GitHub event file
    against mock GitHub and preview servers.

    Returns (returncode, github_traffic, preview_traffic).
    """
    env = {
        'DEPLOY_TOKEN': 'c0ffee'
    }
    env.update(os.environ)
    github_server = MockServer((test_host, 0), expected_github_traffic)
    github_port = github_server.server_address[1]
    preview_server = MockServer((test_host, 0), expected_preview_traffic)
    preview_port = preview_server.server_address[1]
    with temp_repo() as repo, github_server, preview_server:
        # The subject reads the event payload from GITHUB_EVENT_PATH.
        env['GITHUB_EVENT_PATH'] = repo + '/event.json'
        with open(env['GITHUB_EVENT_PATH'], 'w') as handle:
            handle.write(json.dumps(event))
        child = subprocess.Popen(
            [
                'python',
                subject,
                '--host',
                'http://{}:{}'.format(test_host, github_port),
                '--github-project',
                'test-org/test-repo',
                'detect',
                '--target',
                'http://{}:{}'.format(test_host, preview_port),
                '--timeout',
                '1'
            ],
            cwd=repo,
            env=env
        )
        child.communicate()
    return (
        child.returncode,
        github_server.actual_traffic,
        preview_server.actual_traffic
    )
def test_synchronize_zero_results():
    """An empty search result produces no git or API side effects."""
    expected_traffic = [
        (Requests.get_rate, Responses.no_limit),
        (Requests.search, (
            200,
            {
                'items': [],
                'incomplete_results': False
            }
        ))
    ]
    returncode, actual_traffic, remote_refs = synchronize(expected_traffic)
    assert returncode == 0
    assert same_members(expected_traffic, actual_traffic)
def test_synchronize_fail_search_throttled():
    """A nearly-exhausted search rate limit must abort the run."""
    expected_traffic = [
        (Requests.get_rate, (
            200,
            {
                'resources': {
                    'search': {
                        'remaining': 1,
                        'limit': 10
                    }
                }
            }
        ))
    ]
    returncode, actual_traffic, remote_refs = synchronize(expected_traffic)
    assert returncode != 0
    assert same_members(expected_traffic, actual_traffic)
def test_synchronize_fail_incomplete_results():
    """A truncated search response must abort the synchronization."""
    expected_traffic = [
        (Requests.get_rate, Responses.no_limit),
        (Requests.search, (
            200,
            {
                'items': [],
                'incomplete_results': True
            }
        ))
    ]
    # Fixed typo: the third value was previously unpacked as "remove_refs",
    # inconsistent with every sibling test in this module.
    returncode, actual_traffic, remote_refs = synchronize(expected_traffic)
    assert returncode != 0
    assert same_members(expected_traffic, actual_traffic)
def test_synchronize_ignore_closed():
    """A closed pull request with no existing refs is left untouched."""
    expected_traffic = [
        (Requests.get_rate, Responses.no_limit),
        (Requests.search, (
            200,
            {
                'items': [
                    {
                        'number': 23,
                        'labels': [],
                        'closed_at': '2019-10-28',
                        'user': {'login': 'grace'},
                        'author_association': 'COLLABORATOR'
                    }
                ],
                'incomplete_results': False
            }
        ))
    ]
    returncode, actual_traffic, remote_refs = synchronize(expected_traffic)
    assert returncode == 0
    assert same_members(expected_traffic, actual_traffic)
def test_synchronize_sync_collaborator():
    """An open collaborator PR from the main repo gets refs and a deployment."""
    expected_traffic = [
        (Requests.get_rate, Responses.no_limit),
        (Requests.get_rate, Responses.no_limit),
        (Requests.get_rate, Responses.no_limit),
        (Requests.get_rate, Responses.no_limit),
        (Requests.get_rate, Responses.no_limit),
        (Requests.get_rate, Responses.no_limit),
        (Requests.search, (
            200,
            {
                'items': [
                    {
                        'number': 23,
                        'labels': [],
                        'closed_at': None,
                        'user': {'login': 'grace'},
                        'author_association': 'COLLABORATOR'
                    }
                ],
                'incomplete_results': False
            }
        )),
        (Requests.pr_details, (200,
            {
                'head': {
                    'repo': {
                        'full_name': 'test-org/test-repo'
                    }
                }
            }
        )),
        (Requests.ref_create_open, (200, {})),
        (Requests.ref_create_trusted, (200, {})),
        (Requests.deployment_get, (200, {})),
        (Requests.deployment_create, (200, {}))
    ]
    returncode, actual_traffic, remote_refs = synchronize(expected_traffic)
    assert returncode == 0
    assert same_members(expected_traffic, actual_traffic)
def test_synchronize_ignore_collaborator_bot():
    """Export-bot pull requests are skipped even with collaborator status."""
    expected_traffic = [
        (Requests.get_rate, Responses.no_limit),
        (Requests.search, (
            200,
            {
                'items': [
                    {
                        'number': 23,
                        'labels': [],
                        'closed_at': None,
                        'user': {'login': 'chromium-wpt-export-bot'},
                        'author_association': 'COLLABORATOR'
                    }
                ],
                'incomplete_results': False
            }
        ))
    ]
    returncode, actual_traffic, remote_refs = synchronize(expected_traffic)
    assert returncode == 0
    assert same_members(expected_traffic, actual_traffic)
def test_synchronize_ignore_untrusted_contributor():
    """An unlabeled PR from a plain contributor is not previewed."""
    expected_traffic = [
        (Requests.get_rate, Responses.no_limit),
        (Requests.search, (
            200,
            {
                'items': [
                    {
                        'number': 23,
                        'labels': [],
                        'closed_at': None,
                        'user': {'login': 'grace'},
                        'author_association': 'CONTRIBUTOR'
                    }
                ],
                'incomplete_results': False
            }
        ))
    ]
    returncode, actual_traffic, remote_refs = synchronize(expected_traffic)
    assert returncode == 0
    assert same_members(expected_traffic, actual_traffic)
def test_synchronize_ignore_pull_request_from_fork():
    """PRs whose head lives in a fork are not mirrored or deployed."""
    expected_traffic = [
        (Requests.get_rate, Responses.no_limit),
        (Requests.get_rate, Responses.no_limit),
        (Requests.search, (
            200,
            {
                'items': [
                    {
                        'number': 23,
                        'labels': [],
                        'closed_at': None,
                        'user': {'login': 'grace'},
                        'author_association': 'COLLABORATOR'
                    }
                ],
                'incomplete_results': False
            }
        )),
        (Requests.pr_details, (200,
            {
                'head': {
                    'repo': {
                        'full_name': 'some-other-org/test-repo'
                    }
                }
            }
        )),
    ]
    returncode, actual_traffic, remote_refs = synchronize(expected_traffic)
    assert returncode == 0
    assert same_members(expected_traffic, actual_traffic)
def test_synchronize_sync_trusted_contributor():
    """A contributor PR bearing the "safe for preview" label is synced."""
    expected_traffic = [
        (Requests.get_rate, Responses.no_limit),
        (Requests.get_rate, Responses.no_limit),
        (Requests.get_rate, Responses.no_limit),
        (Requests.get_rate, Responses.no_limit),
        (Requests.get_rate, Responses.no_limit),
        (Requests.get_rate, Responses.no_limit),
        (Requests.search, (
            200,
            {
                'items': [
                    {
                        'number': 23,
                        'labels': [{'name': 'safe for preview'}],
                        'closed_at': None,
                        'user': {'login': 'Hexcles'},
                        'author_association': 'CONTRIBUTOR'
                    }
                ],
                'incomplete_results': False
            }
        )),
        (Requests.pr_details, (200,
            {
                'head': {
                    'repo': {
                        'full_name': 'test-org/test-repo'
                    }
                }
            }
        )),
        (Requests.ref_create_open, (200, {})),
        (Requests.ref_create_trusted, (200, {})),
        (Requests.deployment_get, (200, [])),
        (Requests.deployment_create, (200, {}))
    ]
    returncode, actual_traffic, remote_refs = synchronize(expected_traffic)
    assert returncode == 0
    assert same_members(expected_traffic, actual_traffic)
def test_synchronize_update_collaborator():
    """Stale preview refs are PATCHed and a new deployment is created."""
    expected_traffic = [
        (Requests.get_rate, Responses.no_limit),
        (Requests.get_rate, Responses.no_limit),
        (Requests.get_rate, Responses.no_limit),
        (Requests.get_rate, Responses.no_limit),
        (Requests.get_rate, Responses.no_limit),
        (Requests.get_rate, Responses.no_limit),
        (Requests.search, (200,
            {
                'items': [
                    {
                        'number': 23,
                        'labels': [],
                        'closed_at': None,
                        'user': {'login': 'grace'},
                        'author_association': 'COLLABORATOR'
                    }
                ],
                'incomplete_results': False
            }
        )),
        (Requests.pr_details, (200,
            {
                'head': {
                    'repo': {
                        'full_name': 'test-org/test-repo'
                    }
                }
            }
        )),
        (Requests.deployment_get, (200, [])),
        (Requests.ref_update_open, (200, {})),
        (Requests.ref_update_trusted, (200, {})),
        (Requests.deployment_create, (200, {}))
    ]
    # Preview refs exist but lag one commit behind the PR head.
    refs = {
        'refs/pull/23/head': 'HEAD',
        'refs/prs-open/23': 'HEAD~',
        'refs/prs-trusted-for-preview/23': 'HEAD~'
    }
    returncode, actual_traffic, remote_refs = synchronize(expected_traffic, refs)
    assert returncode == 0
    assert same_members(expected_traffic, actual_traffic)
def test_synchronize_update_member():
    """With an existing deployment, refs are updated but none is re-created."""
    expected_traffic = [
        (Requests.get_rate, Responses.no_limit),
        (Requests.get_rate, Responses.no_limit),
        (Requests.get_rate, Responses.no_limit),
        (Requests.get_rate, Responses.no_limit),
        (Requests.get_rate, Responses.no_limit),
        (Requests.search, (200,
            {
                'items': [
                    {
                        'number': 23,
                        'labels': [],
                        'closed_at': None,
                        'user': {'login': 'grace'},
                        'author_association': 'MEMBER'
                    }
                ],
                'incomplete_results': False
            }
        )),
        (Requests.pr_details, (200,
            {
                'head': {
                    'repo': {
                        'full_name': 'test-org/test-repo'
                    }
                }
            }
        )),
        (Requests.deployment_get, (200, [{'some': 'deployment'}])),
        (Requests.ref_update_open, (200, {})),
        (Requests.ref_update_trusted, (200, {}))
    ]
    # Preview refs exist but lag one commit behind the PR head.
    refs = {
        'refs/pull/23/head': 'HEAD',
        'refs/prs-open/23': 'HEAD~',
        'refs/prs-trusted-for-preview/23': 'HEAD~'
    }
    returncode, actual_traffic, remote_refs = synchronize(expected_traffic, refs)
    assert returncode == 0
    assert same_members(expected_traffic, actual_traffic)
def test_synchronize_delete_collaborator():
    """Closing a PR removes its preview refs from the remote repository."""
    expected_traffic = [
        (Requests.get_rate, Responses.no_limit),
        (Requests.search, (200,
            {
                'items': [
                    {
                        'number': 23,
                        'labels': [],
                        'closed_at': '2019-10-30',
                        'user': {'login': 'grace'},
                        'author_association': 'COLLABORATOR'
                    }
                ],
                'incomplete_results': False
            }
        ))
    ]
    refs = {
        'refs/pull/23/head': 'HEAD',
        'refs/prs-open/23': 'HEAD~',
        'refs/prs-trusted-for-preview/23': 'HEAD~'
    }
    returncode, actual_traffic, remote_refs = synchronize(expected_traffic, refs)
    assert returncode == 0
    assert same_members(expected_traffic, actual_traffic)
    # Only the original pull ref survives; both preview refs were deleted.
    assert list(remote_refs) == ['refs/pull/23/head']
def test_detect_ignore_unknown_env():
    """Deployments for unrelated environments trigger no traffic at all."""
    expected_github_traffic = []
    expected_preview_traffic = []
    event = {
        'deployment': {
            'id': 24601,
            'environment': 'ghosts',
            'sha': '3232'
        }
    }
    returncode, actual_github_traffic, actual_preview_traffic = detect(
        event, expected_github_traffic, expected_preview_traffic
    )
    assert returncode == 0
    assert len(actual_github_traffic) == 0
    assert len(actual_preview_traffic) == 0
def test_detect_fail_search_throttled():
    """A nearly-exhausted core rate limit makes detection fail early."""
    expected_github_traffic = [
        (Requests.get_rate, (
            200,
            {
                'resources': {
                    'core': {
                        'remaining': 1,
                        'limit': 10
                    }
                }
            }
        ))
    ]
    expected_preview_traffic = []
    event = {
        'deployment': {
            'id': 24601,
            'environment': 'wpt-preview-45',
            'sha': '3232'
        }
    }
    returncode, actual_github_traffic, actual_preview_traffic = detect(
        event, expected_github_traffic, expected_preview_traffic
    )
    assert returncode == 1
    assert actual_github_traffic == expected_github_traffic
    assert actual_preview_traffic == expected_preview_traffic
def test_detect_success():
    """A preview answering with the deployed sha yields a success status."""
    expected_github_traffic = [
        (Requests.get_rate, Responses.no_limit),
        (Requests.deployment_status_create_pending, (200, {})),
        (Requests.get_rate, Responses.no_limit),
        (Requests.deployment_status_create_success, (200, {}))
    ]
    expected_preview_traffic = [
        (Requests.preview, (200, 3232))
    ]
    event = {
        'deployment': {
            'id': 24601,
            'environment': 'wpt-preview-45',
            'sha': '3232'
        }
    }
    returncode, actual_github_traffic, actual_preview_traffic = detect(
        event, expected_github_traffic, expected_preview_traffic
    )
    assert returncode == 0
    assert actual_github_traffic == expected_github_traffic
    assert actual_preview_traffic == expected_preview_traffic
def test_detect_timeout_missing():
    """A preview that keeps 404-ing is polled until timeout, then errored."""
    expected_github_traffic = [
        (Requests.get_rate, Responses.no_limit),
        (Requests.deployment_status_create_pending, (200, {})),
        (Requests.get_rate, Responses.no_limit),
        (Requests.deployment_status_create_error, (200, {}))
    ]
    expected_preview_traffic = [
        (Requests.preview, (404, {}))
    ]
    event = {
        'deployment': {
            'id': 24601,
            'environment': 'wpt-preview-45',
            'sha': '3232'
        }
    }
    returncode, actual_github_traffic, actual_preview_traffic = detect(
        event, expected_github_traffic, expected_preview_traffic
    )
    assert returncode == 1
    assert expected_github_traffic == actual_github_traffic
    # The preview endpoint is polled repeatedly; every poll must match.
    ping_count = len(actual_preview_traffic)
    assert ping_count > 0
    assert actual_preview_traffic == expected_preview_traffic * ping_count
def test_detect_timeout_wrong_revision():
    """The preview serves a stale revision: detect() fails after polling repeatedly."""
    event = {
        'deployment': {
            'id': 24601,
            'environment': 'wpt-preview-45',
            'sha': '3232'
        }
    }
    expected_github_traffic = [
        (Requests.get_rate, Responses.no_limit),
        (Requests.deployment_status_create_pending, (200, {})),
        (Requests.get_rate, Responses.no_limit),
        (Requests.deployment_status_create_error, (200, {}))
    ]
    # The preview responds, but with a revision other than the deployed sha.
    expected_preview_traffic = [(Requests.preview, (200, 1234))]

    returncode, actual_github_traffic, actual_preview_traffic = detect(
        event, expected_github_traffic, expected_preview_traffic
    )

    assert returncode == 1
    assert expected_github_traffic == actual_github_traffic
    # detect() polls the preview an implementation-defined number of times;
    # every ping must look the same.
    ping_count = len(actual_preview_traffic)
    assert ping_count > 0
    assert actual_preview_traffic == expected_preview_traffic * ping_count
|
langserver.py | """Langserver support for autocompletions."""
# TODO: CompletionProvider
# TODO: error reporting in gui somehow
from __future__ import annotations
import dataclasses
import errno
import itertools
import logging
import os
import pathlib
import pprint
import queue
import re
import select
import shlex
import signal
import socket
import subprocess
import sys
import threading
import time
from functools import partial
from typing import IO, Dict, List, NamedTuple, Optional, Tuple, Union
if sys.platform != "win32":
import fcntl
import sansio_lsp_client as lsp
from porcupine import get_tab_manager, tabs, textwidget, utils
from porcupine.plugins import autocomplete, underlines
global_log = logging.getLogger(__name__)
# 1024 bytes was way too small, and with this chunk size, it
# still sometimes takes two reads to get everything (that's fine)
CHUNK_SIZE = 64 * 1024
class SubprocessStdIO:
    """Non-blocking byte I/O over a subprocess's stdin and stdout pipes.

    On Windows, a daemon thread pumps stdout into a queue one byte at a
    time; on other platforms, the stdout file descriptor is simply put
    into non-blocking mode with fcntl.
    """

    def __init__(self, process: subprocess.Popen[bytes]) -> None:
        self._process = process

        if sys.platform == "win32":
            self._read_queue: queue.Queue[bytes] = queue.Queue()
            self._running = True
            self._worker_thread = threading.Thread(target=self._stdout_to_read_queue, daemon=True)
            self._worker_thread.start()
        else:
            # this works because we don't use .readline()
            # https://stackoverflow.com/a/1810703
            assert process.stdout is not None
            fileno = process.stdout.fileno()
            old_flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
            new_flags = old_flags | os.O_NONBLOCK
            fcntl.fcntl(fileno, fcntl.F_SETFL, new_flags)

    if sys.platform == "win32":

        def _stdout_to_read_queue(self) -> None:
            # Runs in a daemon thread; exits when the process closes stdout.
            while True:
                # for whatever reason, nothing works unless i go ONE BYTE at a
                # time.... this is a piece of shit
                assert self._process.stdout is not None
                one_fucking_byte = self._process.stdout.read(1)
                if not one_fucking_byte:
                    break
                self._read_queue.put(one_fucking_byte)

    # Return values:
    # - nonempty bytes object: data was read
    # - empty bytes object: process exited
    # - None: no data to read
    def read(self) -> Optional[bytes]:
        if sys.platform == "win32":
            # shitty windows code: drain whatever the worker thread queued so far
            buf = bytearray()
            while True:
                try:
                    buf += self._read_queue.get(block=False)
                except queue.Empty:
                    break
            if self._worker_thread.is_alive() and not buf:
                return None
            return bytes(buf)

        else:
            # non-blocking read: returns None when no data is available
            assert self._process.stdout is not None
            return self._process.stdout.read(CHUNK_SIZE)

    def write(self, bytez: bytes) -> None:
        # Blocking write to the process's stdin.
        assert self._process.stdin is not None
        self._process.stdin.write(bytez)
        self._process.stdin.flush()
def error_says_socket_not_connected(error: OSError) -> bool:
    """Check whether an OSError means "the socket is not connected"."""
    if sys.platform == "win32":
        # WSAENOTCONN
        return error.winerror == 10057
    return error.errno == errno.ENOTCONN
class LocalhostSocketIO:
    """Talks to a langserver over a TCP socket on localhost.

    Connecting and sending happen in a daemon worker thread; receiving is
    done without blocking by using select() before recv().
    """

    def __init__(self, port: int, log: logging.LoggerAdapter) -> None:
        self._sock = socket.socket()

        # This queue solves two problems:
        # - I don't feel like learning to do non-blocking send right now.
        # - It must be possible to .write() before the socket is connected.
        #   The written bytes get sent when the socket connects.
        self._send_queue: queue.Queue[Optional[bytes]] = queue.Queue()

        self._worker_thread = threading.Thread(
            target=self._send_queue_to_socket, args=[port, log], daemon=True
        )
        self._worker_thread.start()

    def _send_queue_to_socket(self, port: int, log: logging.LoggerAdapter) -> None:
        # Worker thread: connect (retrying until the langserver listens),
        # then forward queued bytes; a queued None stops the thread.
        while True:
            try:
                self._sock.connect(("localhost", port))
                log.info(f"connected to localhost:{port}")
                break
            except ConnectionRefusedError:
                log.info(f"connecting to localhost:{port} failed, retrying soon")
                time.sleep(0.5)

        while True:
            bytez = self._send_queue.get()
            if bytez is None:
                break
            self._sock.sendall(bytez)

    def write(self, bytez: bytes) -> None:
        # Queue the bytes; they are sent once the socket is connected.
        self._send_queue.put(bytez)

    # Return values:
    # - nonempty bytes object: data was received
    # - empty bytes object: socket closed
    # - None: no data to receive
    def read(self) -> Optional[bytes]:
        # figure out if we can read from the socket without blocking
        # 0 is timeout, i.e. return immediately
        #
        # TODO: pass the correct non-block flag to recv instead?
        # does that work on windows?
        can_read, can_write, error = select.select([self._sock], [], [], 0)
        if self._sock not in can_read:
            return None

        try:
            result = self._sock.recv(CHUNK_SIZE)
        except OSError as e:
            # recv() before the worker thread has connected raises "not
            # connected"; treat that the same as "no data yet".
            if error_says_socket_not_connected(e):
                return None
            raise e

        if not result:
            assert result == b""
            # stop worker thread
            if self._worker_thread.is_alive():
                self._send_queue.put(None)
        return result
def completion_item_doc_contains_label(doc: str, label: str) -> bool:
    """Check whether *doc* already starts with the completion's *label*.

    For function-like labels such as "foo(int x)", only the part up to and
    including the opening paren needs to match, so that a doc showing a
    different argument list still counts as containing the label.
    """
    # this used to be doc.startswith(label), but see issue #67
    label = label.strip()
    # Fix: label is already stripped above; the two extra .strip() calls in
    # the branches were redundant.
    if "(" in label:
        prefix = label.split("(")[0] + "("
    else:
        prefix = label
    return doc.startswith(prefix)
def get_completion_item_doc(item: lsp.CompletionItem) -> str:
    """Return the documentation text to show for a completion item.

    Falls back to the label when there is no documentation, and prepends
    the label when the documentation doesn't already start with it.
    """
    doc = item.documentation
    if not doc:
        return item.label

    if isinstance(doc, lsp.MarkupContent):
        text = doc.value
    else:
        text = doc

    # try this with clangd
    #
    #    // comment
    #    void foo(int x, char c) { }
    #
    #    int main(void)
    #    {
    #        fo<Tab>
    #    }
    if not completion_item_doc_contains_label(text, item.label):
        text = item.label.strip() + "\n\n" + text
    return text
def exit_code_string(exit_code: int) -> str:
    """Describe a process exit status for log messages.

    Negative exit codes mean the process was killed by a signal; that is
    how subprocess reports signals on POSIX.
    """
    if exit_code < 0:
        signal_number = -exit_code
        message = f"was killed by signal {signal_number}"
        try:
            message += f" ({signal.Signals(signal_number).name})"
        except ValueError:
            # unknown signal, e.g. signal.SIGRTMIN + 5
            pass
        return message
    return f"exited with code {exit_code}"
def _position_tk2lsp(tk_position: Union[str, List[int]]) -> lsp.Position:
    """Convert a Tk text index ("line.column" string or [line, column]) to an LSP position.

    This can't use tab.textwidget.index, because it needs to handle text
    locations that don't exist anymore when text has been deleted.
    """
    if isinstance(tk_position, str):
        line_text, column_text = tk_position.split(".")
        line, column = int(line_text), int(column_text)
    else:
        line, column = tk_position

    # lsp line numbering starts at 0, tk line numbering starts at 1,
    # and both column numberings start at 0
    return lsp.Position(line=line - 1, character=column)
def _position_lsp2tk(lsp_position: lsp.Position) -> str:
return f"{lsp_position.line + 1}.{lsp_position.character}"
def _get_diagnostic_string(diagnostic: lsp.Diagnostic) -> str:
if diagnostic.source is None:
assert diagnostic.message is not None # TODO
return diagnostic.message
return f"{diagnostic.source}: {diagnostic.message}"
@dataclasses.dataclass
class LangServerConfig:
    """Value of a tab's "langserver" setting: how to start and talk to a langserver."""

    command: str  # shell command that starts the langserver process
    language_id: str  # LSP languageId sent in didOpen, e.g. "python"
    port: Optional[int] = None  # if set, talk over localhost TCP instead of stdio
# FIXME: two langservers with same command, same port, different project_root
class LangServerId(NamedTuple):
    """Identifies a running langserver; used as the key in the langservers dict."""

    command: str
    port: Optional[int]
    project_root: pathlib.Path
class LangServer:
    """Wraps one langserver process and the Porcupine tabs it serves.

    Communication goes through self._io, which is either the process's
    stdin/stdout or a localhost TCP socket, depending on whether the
    config specifies a port. Polling is driven by Tk's event loop via
    run_stuff().
    """

    def __init__(
        self, process: subprocess.Popen[bytes], the_id: LangServerId, log: logging.LoggerAdapter
    ) -> None:
        self._process = process
        self._id = the_id
        self._lsp_client = lsp.Client(trace="verbose", root_uri=the_id.project_root.as_uri())
        # maps the id of each sent completion request to the tab and
        # autocomplete request it came from, so the response can be routed back
        self._lsp_id_to_tab_and_request: Dict[
            lsp.Id, Tuple[tabs.FileTab, autocomplete.Request]
        ] = {}
        # LSP wants an increasing version number for each document change
        self._version_counter = itertools.count()
        self.log = log
        self.tabs_opened: Dict[tabs.FileTab, List[utils.TemporaryBind]] = {}
        self._is_shutting_down_cleanly = False

        self._io: Union[SubprocessStdIO, LocalhostSocketIO]
        if the_id.port is None:
            self._io = SubprocessStdIO(process)
        else:
            self._io = LocalhostSocketIO(the_id.port, log)

    def __repr__(self) -> str:
        return (
            f"<{type(self).__name__}: "
            f"PID {self._process.pid}, "
            f"{self._id}, "
            f"{len(self.tabs_opened)} tabs opened>"
        )

    def _is_in_langservers(self) -> bool:
        # This returns False if a langserver died and another one with the same
        # id was launched.
        return langservers.get(self._id, None) is self

    def _get_removed_from_langservers(self) -> None:
        # this is called more than necessary to make sure we don't end up with
        # funny issues caused by unusable langservers
        if self._is_in_langservers():
            self.log.debug("getting removed from langservers")
            del langservers[self._id]

    # returns whether this should be called again later
    def _ensure_langserver_process_quits_soon(self) -> None:
        exit_code = self._process.poll()
        if exit_code is None:
            if self._lsp_client.state == lsp.ClientState.EXITED:
                # process still running, but will exit soon. Let's make sure
                # to log that when it happens so that if it doesn't exit for
                # whatever reason, then that will be visible in logs.
                self.log.debug("langserver process should stop soon")
                get_tab_manager().after(500, self._ensure_langserver_process_quits_soon)
                return

            # langserver doesn't want to exit, let's kill it
            what_closed = "stdout" if self._id.port is None else "socket connection"
            self.log.warning(
                f"killing langserver process {self._process.pid} "
                f"because {what_closed} has closed for some reason"
            )

            self._process.kill()
            exit_code = self._process.wait()

        if self._is_shutting_down_cleanly:
            self.log.info(f"langserver process terminated, {exit_code_string(exit_code)}")
        else:
            self.log.error(
                f"langserver process terminated unexpectedly, {exit_code_string(exit_code)}"
            )

        self._get_removed_from_langservers()

    # returns whether this should be ran again
    def _run_stuff_once(self) -> bool:
        # flush outgoing messages, then handle whatever has arrived
        self._io.write(self._lsp_client.send())
        received_bytes = self._io.read()

        # yes, None and b'' have a different meaning here
        if received_bytes is None:
            # no data received
            return True
        elif received_bytes == b"":
            # stdout or langserver socket is closed. Communicating with the
            # langserver process is impossible, so this LangServer object and
            # the process are useless.
            #
            # TODO: try to restart the langserver process?
            self._ensure_langserver_process_quits_soon()
            return False

        assert received_bytes
        self.log.debug(f"got {len(received_bytes)} bytes of data")

        try:
            lsp_events = self._lsp_client.recv(received_bytes)
        except Exception:
            self.log.exception("error while receiving lsp events")
            lsp_events = []

        for lsp_event in lsp_events:
            try:
                self._handle_lsp_event(lsp_event)
            except Exception:
                self.log.exception("error while handling langserver event")

        return True

    def _send_tab_opened_message(self, tab: tabs.FileTab) -> None:
        # Tell the langserver about a tab's file with textDocument/didOpen.
        config = tab.settings.get("langserver", Optional[LangServerConfig])
        assert isinstance(config, LangServerConfig)
        assert tab.path is not None
        self._lsp_client.did_open(
            lsp.TextDocumentItem(
                uri=tab.path.as_uri(),
                languageId=config.language_id,
                text=tab.textwidget.get("1.0", "end - 1 char"),
                version=next(self._version_counter),
            )
        )

    def _handle_lsp_event(self, lsp_event: lsp.Event) -> None:
        """Dispatch one event received from the langserver."""
        self.log.debug(f"handling event: {lsp_event}")

        if isinstance(lsp_event, lsp.Shutdown):
            self.log.debug("langserver sent Shutdown event")
            self._lsp_client.exit()
            self._get_removed_from_langservers()
            return

        if isinstance(lsp_event, lsp.LogMessage):
            # most langservers seem to use stdio instead of this
            loglevel_dict = {
                lsp.MessageType.LOG: logging.DEBUG,
                lsp.MessageType.INFO: logging.INFO,
                lsp.MessageType.WARNING: logging.WARNING,
                lsp.MessageType.ERROR: logging.ERROR,
            }
            self.log.log(
                loglevel_dict[lsp_event.type], f"message from langserver: {lsp_event.message}"
            )
            return

        # rest of these need the langserver to be active
        if not self._is_in_langservers():
            self.log.warning(f"ignoring event because langserver is shutting down: {lsp_event}")
            return

        if isinstance(lsp_event, lsp.Initialized):
            self.log.info(
                "langserver initialized, capabilities:\n" + pprint.pformat(lsp_event.capabilities)
            )
            # didOpen can only be sent after initialization, so tabs opened
            # before this point are reported now
            for tab in self.tabs_opened.keys():
                self._send_tab_opened_message(tab)
            return

        if isinstance(lsp_event, lsp.Completion):
            tab, req = self._lsp_id_to_tab_and_request.pop(lsp_event.message_id)
            if tab not in self.tabs_opened:
                # I wouldn't be surprised if some langserver sent completions to closed tabs
                self.log.debug(f"Completion sent to closed tab: {lsp_event}")
                return

            # this is "open to interpretation", as the lsp spec says
            # TODO: use textEdit when available (need to find langserver that
            # gives completions with textEdit for that to work)
            before_cursor = tab.textwidget.get(f"{req.cursor_pos} linestart", req.cursor_pos)
            match = re.fullmatch(r".*?(\w*)", before_cursor)
            assert match is not None
            prefix_len = len(match.group(1))

            assert lsp_event.completion_list is not None
            tab.event_generate(
                "<<AutoCompletionResponse>>",
                data=autocomplete.Response(
                    id=req.id,
                    completions=[
                        autocomplete.Completion(
                            display_text=item.label,
                            replace_start=tab.textwidget.index(
                                f"{req.cursor_pos} - {prefix_len} chars"
                            ),
                            replace_end=req.cursor_pos,
                            replace_text=item.insertText or item.label,
                            # TODO: is slicing necessary here?
                            filter_text=(item.filterText or item.insertText or item.label)[
                                prefix_len:
                            ],
                            documentation=get_completion_item_doc(item),
                        )
                        for item in sorted(
                            lsp_event.completion_list.items,
                            key=(lambda item: item.sortText or item.label),
                        )
                    ],
                ),
            )
            return

        if isinstance(lsp_event, lsp.PublishDiagnostics):
            matching_tabs = [
                tab
                for tab in self.tabs_opened.keys()
                if tab.path is not None and tab.path.as_uri() == lsp_event.uri
            ]
            if not matching_tabs:
                # Some langservers send diagnostics to closed tabs
                self.log.debug(f"PublishDiagnostics sent to closed tab: {lsp_event}")
                return
            [tab] = matching_tabs

            tab.event_generate(
                "<<SetUnderlines>>",
                data=underlines.Underlines(
                    id="langserver_diagnostics",
                    underline_list=[
                        underlines.Underline(
                            start=_position_lsp2tk(diagnostic.range.start),
                            end=_position_lsp2tk(diagnostic.range.end),
                            message=_get_diagnostic_string(diagnostic),
                            # TODO: there are plenty of other severities than ERROR and WARNING
                            color=(
                                "red"
                                if diagnostic.severity == lsp.DiagnosticSeverity.ERROR
                                else "orange"
                            ),
                        )
                        for diagnostic in sorted(
                            lsp_event.diagnostics,
                            # error red underlines should be shown over orange warning underlines
                            key=(lambda diagn: diagn.severity),
                            reverse=True,
                        )
                    ],
                ),
            )
            return

        raise NotImplementedError(lsp_event)

    def run_stuff(self) -> None:
        """Poll the langserver repeatedly through Tk's event loop until it dies."""
        if self._run_stuff_once():
            get_tab_manager().after(50, self.run_stuff)

    def open_tab(self, tab: tabs.FileTab) -> None:
        """Start serving completions and change events for *tab*."""
        assert tab not in self.tabs_opened
        self.tabs_opened[tab] = [
            utils.TemporaryBind(tab, "<<AutoCompletionRequest>>", self.request_completions),
            utils.TemporaryBind(
                tab.textwidget, "<<ContentChanged>>", partial(self.send_change_events, tab)
            ),
            utils.TemporaryBind(tab, "<Destroy>", (lambda event: self.forget_tab(tab))),
        ]
        self.log.debug("tab opened")
        # if not yet initialized, the didOpen is sent from the Initialized handler
        if self._lsp_client.state == lsp.ClientState.NORMAL:
            self._send_tab_opened_message(tab)

    def forget_tab(self, tab: tabs.FileTab, *, may_shutdown: bool = True) -> None:
        """Stop serving *tab*; optionally shut down when no tabs remain open."""
        if not self._is_in_langservers():
            self.log.debug(
                "a tab was closed, but langserver process is no longer running (maybe it crashed?)"
            )
            return

        self.log.debug("tab closed")
        for binding in self.tabs_opened.pop(tab):
            binding.unbind()

        if may_shutdown and not self.tabs_opened:
            self.log.info("no more open tabs, shutting down")
            self._is_shutting_down_cleanly = True
            self._get_removed_from_langservers()

            if self._lsp_client.state == lsp.ClientState.NORMAL:
                self._lsp_client.shutdown()
            else:
                # it was never fully started
                self._process.kill()

    def request_completions(self, event: utils.EventWithData) -> None:
        """Handle <<AutoCompletionRequest>>: send a completion request to the langserver."""
        if self._lsp_client.state != lsp.ClientState.NORMAL:
            self.log.warning(
                f"autocompletions requested but langserver state == {self._lsp_client.state!r}"
            )
            return

        tab = event.widget
        assert isinstance(tab, tabs.FileTab) and tab.path is not None

        request = event.data_class(autocomplete.Request)
        lsp_id = self._lsp_client.completions(
            text_document_position=lsp.TextDocumentPosition(
                textDocument=lsp.TextDocumentIdentifier(uri=tab.path.as_uri()),
                position=_position_tk2lsp(request.cursor_pos),
            ),
            context=lsp.CompletionContext(
                # FIXME: this isn't always the case, porcupine can also trigger
                # it automagically
                triggerKind=lsp.CompletionTriggerKind.INVOKED
            ),
        )

        assert lsp_id not in self._lsp_id_to_tab_and_request
        self._lsp_id_to_tab_and_request[lsp_id] = (tab, request)

    def send_change_events(self, tab: tabs.FileTab, event: utils.EventWithData) -> None:
        """Handle <<ContentChanged>>: forward the tab's edits with textDocument/didChange."""
        if self._lsp_client.state != lsp.ClientState.NORMAL:
            # The langserver will receive the actual content of the file once
            # it starts.
            self.log.debug(
                f"not sending change events because langserver state == {self._lsp_client.state!r}"
            )
            return

        assert tab.path is not None
        self._lsp_client.did_change(
            text_document=lsp.VersionedTextDocumentIdentifier(
                uri=tab.path.as_uri(), version=next(self._version_counter)
            ),
            content_changes=[
                lsp.TextDocumentContentChangeEvent(
                    range=lsp.Range(
                        start=_position_tk2lsp(change.start), end=_position_tk2lsp(change.end)
                    ),
                    text=change.new_text,
                )
                for change in event.data_class(textwidget.Changes).change_list
            ],
        )
# All currently running langservers, keyed by their identity. Managed by
# get_lang_server() and LangServer._get_removed_from_langservers().
langservers: Dict[LangServerId, LangServer] = {}


# I was going to add code that checks if two langservers use the same port
# number, but it's unnecessary: if a langserver tries to use a port number that
# is already being used, then it should exit with an error message.
def stream_to_log(stream: IO[bytes], log: logging.LoggerAdapter) -> None:
    """Log every line read from *stream* at INFO level until the stream closes."""
    for raw_line in stream:
        decoded = raw_line.rstrip(b"\r\n").decode("utf-8", errors="replace")
        log.info("langserver logged: " + decoded)
def get_lang_server(tab: tabs.FileTab) -> Optional[LangServer]:
    """Return the langserver to use for *tab*, starting a new process if needed.

    Returns None when the tab has no path, no langserver is configured,
    or starting the langserver process fails. An already-running
    langserver with a matching id is reused.
    """
    if tab.path is None:
        return None
    config = tab.settings.get("langserver", Optional[LangServerConfig])
    if config is None:
        return None
    assert isinstance(config, LangServerConfig)

    project_root = utils.find_project_root(tab.path)
    the_id = LangServerId(config.command, config.port, project_root)
    try:
        # reuse a langserver that is already running with this id
        return langservers[the_id]
    except KeyError:
        pass

    # avoid shell=True on non-windows to get process.pid to do the right thing
    #
    # with shell=True it's the pid of the shell, not the pid of the program
    #
    # on windows, there is no shell and it's all about whether to quote or not
    actual_command: Union[str, List[str]]
    if sys.platform == "win32":
        shell = True
        actual_command = config.command
    else:
        shell = False
        actual_command = shlex.split(config.command)

    try:
        if the_id.port is None:
            # langserver writes log messages to stderr
            process = subprocess.Popen(
                actual_command,
                shell=shell,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
        else:
            # most langservers log to stderr, but also watch stdout
            process = subprocess.Popen(
                actual_command, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
            )
    except (OSError, subprocess.CalledProcessError):
        global_log.exception(f"failed to start langserver with command {config.command!r}")
        return None

    # prefix every log message from this langserver with its PID
    log = logging.LoggerAdapter(global_log, {})
    log.process = lambda msg, kwargs: (f"(PID={process.pid}) {msg}", kwargs)  # type: ignore
    log.info(
        f"Langserver process started with command '{config.command}', project root '{project_root}'"
    )

    # forward the langserver's own log output to Porcupine's log
    logging_stream = process.stderr if the_id.port is None else process.stdout
    assert logging_stream is not None
    threading.Thread(target=stream_to_log, args=[logging_stream, log], daemon=True).start()

    langserver = LangServer(process, the_id, log)
    langserver.run_stuff()
    langservers[the_id] = langserver
    return langserver
# Switch the tab to another langserver, starting one if needed
def switch_langservers(
    tab: tabs.FileTab, called_because_path_changed: bool, junk: object = None
) -> None:
    """Attach *tab* to the langserver its settings ask for, detaching any old one.

    When the path changed but the langserver stays the same, the document
    is closed and reopened so the langserver learns the new path.
    """
    old = None
    for candidate in langservers.values():
        if tab in candidate.tabs_opened:
            old = candidate
            break
    new = get_lang_server(tab)

    same_server = old is new
    if called_because_path_changed and same_server and old is not None:
        old.log.info("Path changed, closing and reopening the tab")
        old.forget_tab(tab, may_shutdown=False)
        new.open_tab(tab)

    if not same_server:
        global_log.info(f"Switching langservers: {old} --> {new}")
        if old is not None:
            old.forget_tab(tab)
        if new is not None:
            new.open_tab(tab)
def on_new_tab(tab: tabs.Tab) -> None:
    """Register the langserver setting and switching callbacks on new file tabs."""
    if not isinstance(tab, tabs.FileTab):
        return
    tab.settings.add_option("langserver", None, Optional[LangServerConfig])
    tab.bind(
        "<<TabSettingChanged:langserver>>", partial(switch_langservers, tab, False), add=True
    )
    tab.bind("<<PathChanged>>", partial(switch_langservers, tab, True), add=True)
    switch_langservers(tab, False)
def setup() -> None:
    """Plugin entry point: start watching for newly opened tabs."""
    tab_manager = get_tab_manager()
    tab_manager.add_tab_callback(on_new_tab)
|
streamTest-threading.py | """
This example uses Python's built-in threading module to reach faster streaming
speeds than streamTest.py.
On a Ubuntu 9.10 machine with a AMD Athlon(tm) 64 X2 Dual Core Processor 5200+,
we got speeds up to 50kHz.
On a Mac OS 10.6 machine with a 2.53 GHz Intel Core 2 Duo Processor, we got
speeds up to 50kHz.
On a Mac OS 10.5 machine with a 1.42 GHz G4 Processor, we saw max speeds of
about 40kHz.
"""
import u3, u6, ue9, LabJackPython
from time import sleep
from datetime import datetime
import struct
import threading
import queue
import ctypes, copy, sys
# MAX_REQUESTS is the number of packets to be read.
MAX_REQUESTS = 2500
d = None
################################################################################
## U3
## Uncomment these lines to stream from a U3
################################################################################
## At high frequencies ( >5 kHz), the number of samples will be MAX_REQUESTS times 48 (packets per request) times 25 (samples per packet)
#d = u3.U3()
#
## to learn the if the U3 is an HV
#d.configU3()
#
## For applying the proper calibration to readings.
#d.getCalibrationData()
#
## Set the FIO0 to Analog
#d.configIO(FIOAnalog = 1)
#
#print "configuring U3 stream"
#d.streamConfig( NumChannels = 1, PChannels = [ 0 ], NChannels = [ 31 ], Resolution = 3, SampleFrequency = 20000 )
################################################################################
## U6
## Uncomment these lines to stream from a U6
################################################################################
## At high frequencies ( >5 kHz), the number of samples will be MAX_REQUESTS times 48 (packets per request) times 25 (samples per packet)
#d = u6.U6()
#
## For applying the proper calibration to readings.
#d.getCalibrationData()
#
#print "configuring U6 stream"
#d.streamConfig( NumChannels = 1, ChannelNumbers = [ 0 ], ChannelOptions = [ 0 ], SettlingFactor = 1, ResolutionIndex = 1, SampleFrequency = 50000 )
################################################################################
## UE9
## Uncomment these lines to stream from a UE9
################################################################################
# At 150 Hz or higher frequencies, the number of samples will be MAX_REQUESTS times 10 (packets per request) times 16 (samples per packet).
#d = ue9.UE9()
#
## For applying the proper calibration to readings.
#d.getCalibrationData()
#
#print "configuring UE9 stream"
#
#d.streamConfig( NumChannels = 1, ChannelNumbers = [ 0 ], ChannelOptions = [ 0 ], SettlingTime = 0, Resolution = 12, SampleFrequency = 50000 )
# No device was selected above: the user must uncomment one of the device
# sections in this file before running it.
if d is None:
    print("Configure a device first.\nPlease open streamTest-threading.py in a text editor and uncomment the lines for your device, starting at about line 16.\n\nExiting...")
    sys.exit(0)
class StreamDataReader(object):
    """Reads raw stream data from a LabJack device into a Queue from a worker thread."""

    def __init__(self, device):
        self.device = device
        self.data = queue.Queue()  # raw (unconverted) stream packets
        self.dataCount = 0  # number of stream requests read so far
        self.missed = 0  # samples lost due to errors
        self.running = False  # cleared (here or by the main thread) to stop streaming

    def readStreamData(self):
        """Thread target: stream until MAX_REQUESTS requests or self.running is cleared."""
        self.running = True

        start = datetime.now()
        self.device.streamStart()
        while self.running:
            # Calling with convert = False, because we are going to convert in
            # the main thread.
            returnDict = next(self.device.streamData(convert = False))

            self.data.put_nowait(copy.deepcopy(returnDict))

            self.dataCount += 1
            if self.dataCount > MAX_REQUESTS:
                self.running = False

        print("stream stopped.")
        self.device.streamStop()
        stop = datetime.now()

        # Print a summary of the run: sample counts, losses and throughput.
        total = self.dataCount * self.device.packetsPerRequest * self.device.streamSamplesPerPacket
        print("%s requests with %s packets per request with %s samples per packet = %s samples total." % ( self.dataCount, d.packetsPerRequest, d.streamSamplesPerPacket, total ))
        print("%s samples were lost due to errors." % self.missed)
        total -= self.missed
        print("Adjusted number of samples = %s" % total)

        runTime = (stop-start).seconds + float((stop-start).microseconds)/1000000
        print("The experiment took %s seconds." % runTime)
        print("%s samples / %s seconds = %s Hz" % ( total, runTime, float(total)/runTime ))
sdr = StreamDataReader(d)

sdrThread = threading.Thread(target = sdr.readStreamData)

# Start the stream and begin loading the result into a Queue
sdrThread.start()

errors = 0
missed = 0
# Main thread: pull raw packets off the queue, convert them to voltages,
# and print running statistics until the reader thread stops.
while True:
    try:
        # Check if the thread is still running
        if not sdr.running:
            break

        # Pull results out of the Queue in a blocking manner.
        result = sdr.data.get(True, 1)

        # If there were errors, print that.
        if result['errors'] != 0:
            errors += result['errors']
            missed += result['missed']
            print("+++++ Total Errors: %s, Total Missed: %s" % (errors, missed))

        # Convert the raw bytes (result['result']) to voltage data.
        r = d.processStreamData(result['result'])

        # Do some processing on the data to show off.
        print("Average of", len(r['AIN0']), "reading(s):", sum(r['AIN0'])/len(r['AIN0']))
    except queue.Empty:
        print("Queue is empty. Stopping...")
        sdr.running = False
        break
    except KeyboardInterrupt:
        # Ask the reader thread to stop, then keep draining the queue.
        sdr.running = False
    except Exception as e:
        print(type(e), e)
        sdr.running = False
        break
|
rebalanceout.py | import time
from threading import Thread
import threading
from rebalance.rebalance_base import RebalanceBaseTest
from couchbase_helper.documentgenerator import BlobGenerator
from membase.api.rest_client import RestConnection
from membase.helper.rebalance_helper import RebalanceHelper
from remote.remote_util import RemoteMachineShellConnection
from membase.api.exception import RebalanceFailedException
from membase.helper.cluster_helper import ClusterOperationHelper
from memcached.helper.kvstore import KVStore
class RebalanceOutTests(RebalanceBaseTest):
    def setUp(self):
        """Delegate to RebalanceBaseTest: set up the cluster, buckets and test params."""
        super(RebalanceOutTests, self).setUp()
    def tearDown(self):
        """Delegate to RebalanceBaseTest: clean up the cluster after the test."""
        super(RebalanceOutTests, self).tearDown()
"""Rebalances nodes out of a cluster while doing docs ops:create, delete, update.
This test begins with all servers clustered together and loads a user defined
number of items into the cluster. Before rebalance we perform docs ops(add/remove/update/read)
in the cluster( operate with a half of items that were loaded before).It then remove nodes_out
from the cluster at a time and rebalances. Once the cluster has been rebalanced we wait for the
disk queues to drain, and then verify that there has been no data loss, sum(curr_items) match the
curr_items_total. We also check for data and its meta-data, vbucket sequene numbers"""
    def rebalance_out_after_ops(self):
        """Run doc ops first, then rebalance nodes_out nodes out and verify no data loss."""
        gen_delete = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items // 2, end=self.num_items)
        gen_create = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items + 1, end=self.num_items * 3 // 2)
        # define which doc's ops will be performed during rebalancing
        # allows multiple of them but one by one
        tasks = []
        if(self.doc_ops is not None):
            if("update" in self.doc_ops):
                tasks += self._async_load_all_buckets(self.master, self.gen_update, "update", 0)
            if("create" in self.doc_ops):
                tasks += self._async_load_all_buckets(self.master, gen_create, "create", 0)
            if("delete" in self.doc_ops):
                tasks += self._async_load_all_buckets(self.master, gen_delete, "delete", 0)
        for task in tasks:
            task.result()
        servs_out = [self.servers[self.num_servers - i - 1] for i in range(self.nodes_out)]
        self._verify_stats_all_buckets(self.servers[:self.num_servers], timeout=120)
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
        # Snapshot failover logs, vbucket seqnos and the data set before the
        # rebalance so they can be compared against the post-rebalance state.
        prev_failover_stats = self.get_failovers_logs(self.servers[:self.num_servers], self.buckets)
        prev_vbucket_stats = self.get_vbucket_seqnos(self.servers[:self.num_servers], self.buckets)
        record_data_set = self.get_data_set_all(self.servers[:self.num_servers], self.buckets)
        self.compare_vbucketseq_failoverlogs(prev_vbucket_stats, prev_failover_stats)
        rebalance = self.cluster.async_rebalance(self.servers[:1], [], servs_out)
        rebalance.result()
        # Verify cluster health and that stats/data match the pre-rebalance snapshot.
        self._verify_stats_all_buckets(self.servers[:self.num_servers - self.nodes_out], timeout=120)
        self.verify_cluster_stats(self.servers[:self.num_servers - self.nodes_out], check_ep_items_remaining=True)
        new_failover_stats = self.compare_failovers_logs(prev_failover_stats, self.servers[:self.num_servers - self.nodes_out], self.buckets)
        new_vbucket_stats = self.compare_vbucket_seqnos(prev_vbucket_stats, self.servers[:self.num_servers - self.nodes_out], self.buckets, perNode=False)
        self.sleep(60)
        self.data_analysis_all(record_data_set, self.servers[:self.num_servers - self.nodes_out], self.buckets)
        self.compare_vbucketseq_failoverlogs(new_vbucket_stats, new_failover_stats)
        self.verify_unacked_bytes_all_buckets()
        nodes = self.get_nodes_in_cluster(self.master)
        self.vb_distribution_analysis(servers=nodes, buckets=self.buckets, std=1.0, total_vbuckets=self.total_vbuckets)
"""Rebalances nodes out with failover and full recovery add back of a node
This test begins with all servers clustered together and loads a user defined
number of items into the cluster. Before rebalance we perform docs ops(add/remove/update/read)
in the cluster( operate with a half of items that were loaded before).It then remove nodes_out
from the cluster at a time and rebalances. Once the cluster has been rebalanced we wait for the
disk queues to drain, and then verify that there has been no data loss, sum(curr_items) match the
curr_items_total. We also check for data and its meta-data, vbucket sequene numbers"""
    def rebalance_out_with_failover_full_addback_recovery(self):
        """Fail over a node, mark it for full recovery, rebalance nodes out and verify."""
        gen_delete = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items // 2, end=self.num_items)
        gen_create = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items + 1, end=self.num_items * 3 // 2)
        # define which doc's ops will be performed during rebalancing
        # allows multiple of them but one by one
        tasks = []
        if(self.doc_ops is not None):
            if("update" in self.doc_ops):
                tasks += self._async_load_all_buckets(self.master, self.gen_update, "update", 0)
            if("create" in self.doc_ops):
                tasks += self._async_load_all_buckets(self.master, gen_create, "create", 0)
            if("delete" in self.doc_ops):
                tasks += self._async_load_all_buckets(self.master, gen_delete, "delete", 0)
        for task in tasks:
            task.result()
        servs_out = [self.servers[self.num_servers - i - 1] for i in range(self.nodes_out)]
        self._verify_stats_all_buckets(self.servers[:self.num_servers], timeout=120)
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
        self.rest = RestConnection(self.master)
        chosen = RebalanceHelper.pick_nodes(self.master, howmany=1)
        self.sleep(20)
        # Snapshot failover logs, vbucket seqnos and the data set before the
        # failover/rebalance so they can be compared afterwards.
        prev_failover_stats = self.get_failovers_logs(self.servers[:self.num_servers], self.buckets)
        prev_vbucket_stats = self.get_vbucket_seqnos(self.servers[:self.num_servers], self.buckets)
        record_data_set = self.get_data_set_all(self.servers[:self.num_servers], self.buckets)
        self.compare_vbucketseq_failoverlogs(prev_vbucket_stats, prev_failover_stats)
        # Mark Node for failover
        success_failed_over = self.rest.fail_over(chosen[0].id, graceful=False)
        self.wait_for_failover_or_assert(expected_failover_count=1)
        # Mark Node for full recovery
        if success_failed_over:
            self.rest.set_recovery_type(otpNode=chosen[0].id, recoveryType="full")
        rebalance = self.cluster.async_rebalance(self.servers[:1], [], servs_out)
        rebalance.result()
        # Verify cluster health and compare against the pre-rebalance snapshot.
        self.verify_cluster_stats(self.servers[:self.num_servers - self.nodes_out], check_ep_items_remaining=True)
        self.compare_failovers_logs(prev_failover_stats, self.servers[:self.num_servers - self.nodes_out], self.buckets)
        self.sleep(30)
        self.data_analysis_all(record_data_set, self.servers[:self.num_servers - self.nodes_out], self.buckets)
        self.verify_unacked_bytes_all_buckets()
        nodes = self.get_nodes_in_cluster(self.master)
        self.vb_distribution_analysis(servers=nodes, buckets=self.buckets, std=1.0, total_vbuckets=self.total_vbuckets)
"""Rebalances nodes out with failover
This test begins with all servers clustered together and loads a user defined
number of items into the cluster. Before rebalance we perform docs ops(add/remove/update/read)
in the cluster( operate with a half of items that were loaded before).It then remove nodes_out
from the cluster at a time and rebalances. Once the cluster has been rebalanced we wait for the
disk queues to drain, and then verify that there has been no data loss, sum(curr_items) match the
curr_items_total. We also check for data and its meta-data, vbucket sequene numbers"""
def rebalance_out_with_failover(self):
    """Rebalance out a node together with a failed-over node.

    Flow: run the configured doc ops to completion, snapshot failover
    logs / vbucket seqnos / data set, fail over one node (graceful or
    hard per the 'fail_over' test param), then rebalance out both the
    failed-over node and the last initial node in a single rebalance.
    Finally verify cluster stats, data integrity and vbucket distribution.
    """
    fail_over = self.input.param("fail_over", False)
    self.rest = RestConnection(self.master)
    # Delete the upper half of the initial items; create a fresh upper range.
    gen_delete = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items // 2, end=self.num_items)
    gen_create = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items + 1, end=self.num_items * 3 // 2)
    # define which doc's ops will be performed during rebalancing
    # allows multiple of them but one by one
    tasks = []
    if(self.doc_ops is not None):
        if("update" in self.doc_ops):
            tasks += self._async_load_all_buckets(self.master, self.gen_update, "update", 0)
        if("create" in self.doc_ops):
            tasks += self._async_load_all_buckets(self.master, gen_create, "create", 0)
        if("delete" in self.doc_ops):
            tasks += self._async_load_all_buckets(self.master, gen_delete, "delete", 0)
    # Doc ops finish *before* the failover/rebalance below starts.
    for task in tasks:
        task.result()
    ejectedNode = self.find_node_info(self.master, self.servers[self.nodes_init - 1])
    self._verify_stats_all_buckets(self.servers[:self.num_servers], timeout=120)
    self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
    self.sleep(20)
    # Snapshot pre-failover state for post-rebalance comparison.
    prev_failover_stats = self.get_failovers_logs(self.servers[:self.nodes_init], self.buckets)
    prev_vbucket_stats = self.get_vbucket_seqnos(self.servers[:self.nodes_init], self.buckets)
    record_data_set = self.get_data_set_all(self.servers[:self.nodes_init], self.buckets)
    self.compare_vbucketseq_failoverlogs(prev_vbucket_stats, prev_failover_stats)
    self.rest = RestConnection(self.master)
    chosen = RebalanceHelper.pick_nodes(self.master, howmany=1)
    new_server_list = self.add_remove_servers(self.servers, self.servers[:self.nodes_init], [self.servers[self.nodes_init - 1], chosen[0]], [])
    # Mark Node for failover
    success_failed_over = self.rest.fail_over(chosen[0].id, graceful=fail_over)
    self.wait_for_failover_or_assert(expected_failover_count=1)
    self.nodes = self.rest.node_statuses()
    # Eject the failed-over node and the pre-selected node in one rebalance.
    self.rest.rebalance(otpNodes=[node.id for node in self.nodes], ejectedNodes=[chosen[0].id, ejectedNode.id])
    self.assertTrue(self.rest.monitorRebalance(stop_if_loop=True), msg="Rebalance failed")
    self.verify_cluster_stats(new_server_list, check_ep_items_remaining=True)
    self.sleep(30)
    self.data_analysis_all(record_data_set, new_server_list, self.buckets)
    self.verify_unacked_bytes_all_buckets()
    nodes = self.get_nodes_in_cluster(self.master)
    self.vb_distribution_analysis(servers=nodes, buckets=self.buckets, std=1.0, total_vbuckets=self.total_vbuckets)
"""Rebalances nodes out of a cluster while doing docs ops:create, delete, update.
This test begins with all servers clustered together and loads a user defined
number of items into the cluster. It then remove nodes_out from the cluster at a time
and rebalances. During the rebalance we perform docs ops(add/remove/update/read)
in the cluster( operate with a half of items that were loaded before).
Once the cluster has been rebalanced we wait for the disk queues to drain,
and then verify that there has been no data loss, sum(curr_items) match the curr_items_total.
Once all nodes have been rebalanced the test is finished."""
def rebalance_out_with_ops(self):
    """Rebalance nodes_out nodes out of the cluster while doc ops run.

    Starts the configured doc ops (update/create/delete) asynchronously,
    rebalances servs_out out concurrently, waits for both, then verifies
    cluster stats, unacked bytes and seq_no snap_start/stop integrity.
    """
    tasks = list()
    # Delete the upper half of the initial items; create a fresh upper range.
    gen_delete = BlobGenerator('mike', 'mike-', self.value_size,
                               start=self.num_items // 2,
                               end=self.num_items)
    gen_create = BlobGenerator('mike', 'mike-', self.value_size,
                               start=self.num_items + 1,
                               end=self.num_items * 3 // 2)
    # The last nodes_out servers are the ones rebalanced out.
    servs_out = [self.servers[self.num_servers - i - 1]
                 for i in range(self.nodes_out)]
    # define which doc's ops will be performed during rebalancing
    # allows multiple of them but one by one
    if self.doc_ops is not None:
        if "update" in self.doc_ops:
            tasks += self._async_load_all_buckets(
                self.master, self.gen_update, "update", 0)
        if "create" in self.doc_ops:
            tasks += self._async_load_all_buckets(
                self.master, gen_create, "create", 0)
        if "delete" in self.doc_ops:
            tasks += self._async_load_all_buckets(
                self.master, gen_delete, "delete", 0)
    # Doc ops and the rebalance run concurrently.
    rebalance = self.cluster.async_rebalance(
        self.servers[:1], [], servs_out,
        sleep_before_rebalance=self.sleep_before_rebalance)
    rebalance.result()
    for task in tasks:
        task.result()
    self.verify_cluster_stats(self.servers[:self.num_servers - self.nodes_out])
    self.verify_unacked_bytes_all_buckets()
    # Validate seq_no snap_start/stop values after rebalance
    self.check_snap_start_corruption()
"""Rebalances nodes out of a cluster while doing docs ops:create, delete, update along with compaction.
This test begins with all servers clustered together and loads a user defined
number of items into the cluster. It then remove nodes_out from the cluster at a time
and rebalances. During the rebalance we perform docs ops(add/remove/update/read)
in the cluster( operate with a half of items that were loaded before).
Once the cluster has been rebalanced we wait for the disk queues to drain,
and then verify that there has been no data loss, sum(curr_items) match the curr_items_total.
Once all nodes have been rebalanced the test is finished."""
def rebalance_out_with_compaction_and_ops(self):
    """Rebalance nodes out while bucket compaction and doc ops all run.

    The rebalance, one compaction task per bucket, and the configured
    doc ops are started as parallel async tasks and then awaited together.
    """
    # Delete the upper half of the initial items; create a fresh upper range.
    gen_delete = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items // 2, end=self.num_items)
    gen_create = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items + 1, end=self.num_items * 3 // 2)
    servs_out = [self.servers[self.num_servers - i - 1] for i in range(self.nodes_out)]
    # Rebalance-out and per-bucket compaction run as parallel async tasks.
    tasks = [self.cluster.async_rebalance(self.servers[:1], [], servs_out)]
    for bucket in self.buckets:
        tasks.append(self.cluster.async_compact_bucket(self.master, bucket))
    # define which doc's ops will be performed during rebalancing
    # allows multiple of them but one by one
    if(self.doc_ops is not None):
        if("update" in self.doc_ops):
            tasks += self._async_load_all_buckets(self.master, self.gen_update, "update", 0)
        if("create" in self.doc_ops):
            tasks += self._async_load_all_buckets(self.master, gen_create, "create", 0)
        if("delete" in self.doc_ops):
            tasks += self._async_load_all_buckets(self.master, gen_delete, "delete", 0)
    for task in tasks:
        task.result()
    self.verify_cluster_stats(self.servers[:self.num_servers - self.nodes_out])
    self.verify_unacked_bytes_all_buckets()
"""Rebalances nodes from a cluster during getting random keys.
This test begins with all servers clustered together and loads a user defined
number of items into the cluster. Then we send requests to all nodes in the cluster
to get random key values. Next step is remove nodes_out from the cluster
and rebalance it. During rebalancing we get random keys from all nodes and verify
that are different every time. Once the cluster has been rebalanced
we again get random keys from all new nodes in the cluster,
than we wait for the disk queues to drain, and then verify that there has been no data loss,
sum(curr_items) match the curr_items_total."""
def rebalance_out_get_random_key(self):
    """Rebalance nodes out while repeatedly fetching random keys.

    First fetches a random key from every node, then rebalances servs_out
    out while polling the remaining nodes' getRandomKey endpoint, checking
    that the returned keys change between iterations.
    """
    servs_out = [self.servers[self.num_servers - i - 1] for i in range(self.nodes_out)]
    # get random keys for new added nodes
    rest_cons = [RestConnection(self.servers[i]) for i in range(self.num_servers)]
    list_threads = []
    for rest in rest_cons:
        t = Thread(target=rest.get_random_key,
                   name="get_random_key",
                   args=(self.default_bucket_name,))
        list_threads.append(t)
        t.start()
    [t.join() for t in list_threads]
    # Only the nodes that will remain in the cluster are polled below.
    rest_cons = [RestConnection(self.servers[i]) for i in range(self.num_servers - self.nodes_out)]
    rebalance = self.cluster.async_rebalance(self.servers[:self.num_servers], [], servs_out)
    self.sleep(2)
    result = []
    num_iter = 0
    # get random keys for each node during rebalancing
    while rest_cons[0]._rebalance_progress_status() == 'running' and num_iter < 100:
        list_threads = []
        temp_result = []
        self.log.info("getting random keys for all nodes in cluster....")
        for rest in rest_cons:
            t = Thread(target=rest.get_random_key,
                       name="get_random_key",
                       args=(self.default_bucket_name,))
            list_threads.append(t)
            # NOTE(review): temp_result is fed by this synchronous call;
            # the threads' return values are discarded. Confirm whether the
            # threads exist only to generate concurrent load.
            temp_result.append(rest.get_random_key(self.default_bucket_name))
            t.start()
        [t.join() for t in list_threads]
        if tuple(temp_result) == tuple(result):
            # NOTE(review): log.exception only logs here -- no exception is
            # active and the test is not failed when keys do not change.
            self.log.exception("random keys are not changed")
        else:
            result = temp_result
        num_iter += 1
    rebalance.result()
    self.verify_cluster_stats(self.servers[:self.num_servers - self.nodes_out])
    self.verify_unacked_bytes_all_buckets()
"""Rebalances nodes out of a cluster while doing docs' ops.
This test begins with all servers clustered together and loads a user
defined number of items into the cluster. It then removes two nodes at a
time from the cluster and rebalances. During the rebalance we update
(all of the items in the cluster)/delete(num_items/(num_servers -1) in
each iteration)/create(a half of initial items in each iteration).
Once the cluster has been rebalanced the test waits for the disk queues
to drain and then verifies that there has been no data loss,
sum(curr_items) match the curr_items_total.
Once all nodes have been rebalanced out of the cluster the test finishes.
"""
def incremental_rebalance_out_with_ops(self):
    """Rebalance two nodes at a time out of the cluster while doc ops run.

    Each iteration starts at most one async doc-op load (update, create or
    delete -- only the first matching op in self.doc_ops is used), then
    rebalances servers[i:i+2] out of the first i+2 servers and waits for
    both the rebalance and the load before verifying cluster stats.

    Fixes a Python 2 -> 3 porting bug: the generator ranges used floor
    division with *float* operands ('// 2.0' and '// (n - 1.0)'), which
    produced float start/end values and floored the intended fractional
    delete ratio to 0.0/1.0 instead of i/(n-1).
    """
    batch_size = 1000
    for i in reversed(list(range(1, self.num_servers, 2))):
        if i == 1:
            # Final iteration shrinks the cluster down to one node; use
            # single-item batches for the load.
            batch_size = 1
        tasks = list()
        if self.doc_ops is not None:
            # define which doc_ops will be performed during rebalancing
            # only one type of ops can be passed
            if "update" in self.doc_ops:
                # 1/2th of data will be updated in each iteration
                tasks += self._async_load_all_buckets(
                    self.master, self.gen_update, "update", 0,
                    batch_size=batch_size)
            elif "create" in self.doc_ops:
                # 1/2th of initial data will be added in each iteration
                # (integer floor division: '// 2.0' previously yielded floats)
                gen_create = BlobGenerator(
                    'mike', 'mike-', self.value_size,
                    start=self.num_items * (self.num_servers - i) // 2,
                    end=self.num_items * (self.num_servers - i + 1) // 2)
                tasks += self._async_load_all_buckets(
                    self.master, gen_create, "create", 0,
                    batch_size=batch_size)
            elif "delete" in self.doc_ops:
                # 1/(num_servers) of initial data will be removed after each
                # iteration; at the end we should get an empty base (or a
                # couple of items). True division restores the fractional
                # ratio the original Python 2 '/' computed.
                gen_delete = BlobGenerator(
                    'mike', 'mike-', self.value_size,
                    start=int(self.num_items * (1 - i / (self.num_servers - 1.0))) + 1,
                    end=int(self.num_items * (1 - (i - 1) / (self.num_servers - 1.0))))
                tasks += self._async_load_all_buckets(
                    self.master, gen_delete, "delete", 0,
                    batch_size=batch_size)
        rebalance = self.cluster.async_rebalance(
            self.servers[:i], [], self.servers[i:i + 2],
            sleep_before_rebalance=self.sleep_before_rebalance)
        try:
            rebalance.result()
            for task in tasks:
                task.result()
        except Exception as ex:
            # Cancel outstanding work so a failed rebalance does not leave
            # loader tasks running in the background.
            rebalance.cancel()
            for task in tasks:
                task.cancel()
            raise ex
        self.verify_cluster_stats(self.servers[:i])
        # Validate seq_no snap_start/stop values after rebalance
        self.check_snap_start_corruption()
    self.verify_unacked_bytes_all_buckets()
"""Rebalances nodes out of a cluster during view queries.
This test begins with all servers clustered together and loads a user defined
number of items into the cluster. It creates num_views as
development/production view with default map view funcs(is_dev_ddoc = True by default).
It then removes nodes_out nodes at a time and rebalances that node from the cluster.
During the rebalancing we perform view queries for all views and verify the expected number
of docs for them. Perform the same view queries after cluster has been completed. Then we wait for
the disk queues to drain, and then verify that there has been no data loss,sum(curr_items) match
the curr_items_total. Once successful view queries the test is finished."""
def rebalance_out_with_queries(self):
    """Rebalance nodes_out nodes out while running view queries throughout.

    Creates num_views views per bucket, builds the indexes, verifies query
    results before, during and after the rebalance, then checks cluster
    stats and unacked bytes.
    """
    num_views = self.input.param("num_views", 5)
    is_dev_ddoc = self.input.param("is_dev_ddoc", False)
    ddoc_name = "ddoc1"
    # Development design docs are addressed with a "dev_" prefix.
    prefix = ("", "dev_")[is_dev_ddoc]
    query = {}
    query["connectionTimeout"] = 60000;
    query["full_set"] = "true"
    views = []
    tasks = []
    for bucket in self.buckets:
        temp = self.make_default_views(self.default_view_name, num_views, is_dev_ddoc)
        temp_tasks = self.async_create_views(self.master, ddoc_name, temp, bucket)
        views += temp
        tasks += temp_tasks
    timeout = None
    # Scale the query-verification timeout with data size when not in DGM.
    if self.active_resident_threshold == 0:
        timeout = max(self.wait_timeout * 4, len(self.buckets) * self.wait_timeout * self.num_items // 50000)
    for task in tasks:
        task.result(self.wait_timeout * 20)
    for bucket in self.buckets:
        for view in views:
            # run queries to create indexes
            self.cluster.query_view(self.master, prefix + ddoc_name, view.name, query)
    # Wait for the indexer to finish before verifying query results.
    active_tasks = self.cluster.async_monitor_active_task(self.servers, "indexer", "_design/" + prefix + ddoc_name, wait_task=False)
    for active_task in active_tasks:
        result = active_task.result()
        self.assertTrue(result)
    expected_rows = None
    if self.max_verify:
        expected_rows = self.max_verify
        query["limit"] = expected_rows
    query["stale"] = "false"
    for bucket in self.buckets:
        self.perform_verify_queries(num_views, prefix, ddoc_name, query, bucket=bucket, wait_time=timeout, expected_rows=expected_rows)
    servs_out = self.servers[-self.nodes_out:]
    rebalance = self.cluster.async_rebalance([self.master], [], servs_out)
    self.sleep(self.wait_timeout // 5)
    # see that the result of view queries are the same as expected during the test
    for bucket in self.buckets:
        self.perform_verify_queries(num_views, prefix, ddoc_name, query, bucket=bucket, wait_time=timeout, expected_rows=expected_rows)
    # verify view queries results after rebalancing
    rebalance.result()
    for bucket in self.buckets:
        self.perform_verify_queries(num_views, prefix, ddoc_name, query, bucket=bucket, wait_time=timeout, expected_rows=expected_rows)
    self.verify_cluster_stats(self.servers[:self.num_servers - self.nodes_out])
    self.verify_unacked_bytes_all_buckets()
"""Rebalances nodes out of a cluster during view queries incrementally.
This test begins with all servers clustered together and loading a given number of items
into the cluster. It creates num_views as development/production view with
default map view funcs(is_dev_ddoc = True by default). It then adds two nodes at a time and
rebalances that node into the cluster. During the rebalancing we perform view queries
for all views and verify the expected number of docs for them.
Perform the same view queries after cluster has been completed. Then we wait for
the disk queues to drain, and then verify that there has been no data loss, sum(curr_items) match
the curr_items_total. Once all nodes have been rebalanced in the test is finished."""
def incremental_rebalance_out_with_queries(self):
    """Rebalance out two nodes at a time while running view queries.

    Creates num_views views on the default bucket, builds the indexes,
    then for each shrink step verifies query results during and after
    the rebalance, followed by cluster-stats verification.
    """
    num_views = self.input.param("num_views", 5)
    is_dev_ddoc = self.input.param("is_dev_ddoc", True)
    views = self.make_default_views(self.default_view_name, num_views, is_dev_ddoc)
    ddoc_name = "ddoc1"
    prefix = ("", "dev_")[is_dev_ddoc]
    # increase timeout for big data
    timeout = None
    if self.active_resident_threshold == 0:
        timeout = max(self.wait_timeout * 5, self.wait_timeout * self.num_items // 25000)
    query = {}
    query["connectionTimeout"] = 60000;
    query["full_set"] = "true"
    tasks = []
    tasks = self.async_create_views(self.master, ddoc_name, views, self.default_bucket_name)
    for task in tasks:
        task.result(self.wait_timeout * 2)
    for view in views:
        # run queries to create indexes
        self.cluster.query_view(self.master, prefix + ddoc_name, view.name, query, timeout=self.wait_timeout * 2)
    # Poll the indexer a few times to let index building settle.
    for i in range(3):
        active_tasks = self.cluster.async_monitor_active_task(self.servers, "indexer", "_design/" + prefix + ddoc_name, wait_task=False)
        for active_task in active_tasks:
            # NOTE(review): unlike rebalance_out_with_queries, the result is
            # not asserted here -- confirm whether that is intentional.
            result = active_task.result()
        self.sleep(2)
    expected_rows = None
    if self.max_verify:
        expected_rows = self.max_verify
        query["limit"] = expected_rows
    query["stale"] = "false"
    self.perform_verify_queries(num_views, prefix, ddoc_name, query, wait_time=timeout, expected_rows=expected_rows)
    query["stale"] = "update_after"
    for i in reversed(list(range(1, self.num_servers, 2))):
        rebalance = self.cluster.async_rebalance(self.servers[:i], [], self.servers[i:i + 2])
        self.sleep(self.wait_timeout // 5)
        # see that the result of view queries are the same as expected during the test
        self.perform_verify_queries(num_views, prefix, ddoc_name, query, wait_time=timeout, expected_rows=expected_rows)
        # verify view queries results after rebalancing
        rebalance.result()
        self.perform_verify_queries(num_views, prefix, ddoc_name, query, wait_time=timeout, expected_rows=expected_rows)
        self.verify_cluster_stats(self.servers[:i])
    self.verify_unacked_bytes_all_buckets()
"""Rebalances nodes into a cluster when one node is warming up.
This test begins with loads a user defined number of items into the cluster
and all servers clustered together. Next steps are: stop defined
node(master_restart = False by default), wait 20 sec and start the stopped node.
Without waiting for the node to start up completely, rebalance out servs_out servers.
Expect that rebalance is failed. Wait for warmup completed and start
rebalance with the same configuration. Once the cluster has been rebalanced
we wait for the disk queues to drain, and then verify that there has been no data loss,
sum(curr_items) match the curr_items_total."""
def rebalance_out_with_warming_up(self):
    """Rebalance out while one node is still warming up.

    Restarts couchbase on the chosen node (master when 'master_restart'
    is set), immediately attempts a rebalance-out, and -- if that fails
    as expected -- waits for warmup to complete and retries.
    """
    master_restart = self.input.param("master_restart", False)
    if master_restart:
        warmup_node = self.master
    else:
        warmup_node = self.servers[len(self.servers) - self.nodes_out - 1]
    servs_out = self.servers[len(self.servers) - self.nodes_out:]
    # Restart couchbase on the node so it is warming up during the rebalance.
    shell = RemoteMachineShellConnection(warmup_node)
    shell.stop_couchbase()
    self.sleep(20)
    shell.start_couchbase()
    shell.disconnect()
    try:
        rebalance = self.cluster.async_rebalance(self.servers, [], servs_out)
        rebalance.result()
    except RebalanceFailedException:
        # Expected: the rebalance may fail before warmup has finished.
        self.log.info("rebalance was failed as expected")
        self.assertTrue(ClusterOperationHelper._wait_warmup_completed(self, [warmup_node], \
                        self.default_bucket_name, wait_time=self.wait_timeout * 10))
        self.log.info("second attempt to rebalance")
        rebalance = self.cluster.async_rebalance(self.servers, [], servs_out)
        rebalance.result()
    self.verify_cluster_stats(self.servers[:len(self.servers) - self.nodes_out])
    self.verify_unacked_bytes_all_buckets()
"""Rebalances nodes out of cluster during ddoc compaction.
This test begins with all servers clustered together and loads a user defined
number of items into the cluster. It creates num_views as development/production
view with default map view funcs(is_dev_ddoc = True by default).
Then we disabled compaction for ddoc. While we don't reach expected fragmentation
for ddoc we update docs and perform view queries. We rebalance in nodes_in nodes
and start compaction when fragmentation was reached fragmentation_value.
During the rebalancing we wait while compaction will be completed.
After rebalancing and compaction we wait for the disk queues to drain, and then
verify that there has been no data loss, sum(curr_items) match the curr_items_total."""
def rebalance_out_with_ddoc_compaction(self):
    """Rebalance nodes out while a design-doc compaction is in flight.

    Disables auto-compaction, drives update load until the ddoc reaches
    'fragmentation_value' percent fragmentation, verifies view queries,
    then starts ddoc compaction and a rebalance-out concurrently and
    checks that both complete successfully.
    """
    num_views = self.input.param("num_views", 5)
    fragmentation_value = self.input.param("fragmentation_value", 80)
    # now dev_ indexes are not auto-updated, doesn't work with dev view
    is_dev_ddoc = False
    views = self.make_default_views(self.default_view_name, num_views, is_dev_ddoc)
    ddoc_name = "ddoc1"
    prefix = ("", "dev_")[is_dev_ddoc]
    query = {}
    query["connectionTimeout"] = 60000;
    query["full_set"] = "true"
    expected_rows = None
    if self.max_verify:
        expected_rows = self.max_verify
        query["limit"] = expected_rows
    tasks = []
    tasks = self.async_create_views(self.master, ddoc_name, views, self.default_bucket_name, with_query=False)
    for task in tasks:
        task.result(self.wait_timeout * 2)
    # Auto-compaction must be off so fragmentation can build to the target.
    self.disable_compaction()
    fragmentation_monitor = self.cluster.async_monitor_view_fragmentation(self.master,
        prefix + ddoc_name, fragmentation_value, self.default_bucket_name)
    end_time = time.time() + self.wait_timeout * 30
    # generate load until fragmentation reached
    while fragmentation_monitor.state != "FINISHED" and end_time > time.time():
        # update docs to create fragmentation
        """
        it's better to use _load_all_buckets instead of _async_load_all_buckets
        it's workaround: unable to load data when disable_compaction
        now we have a lot of issues for views & compaction....
        """
        update_tasks = self._async_load_all_buckets(self.master, self.gen_update, "update", 0)
        for update_task in update_tasks:
            update_task.result(600)
        for view in views:
            # run queries to create indexes
            try:
                self.log.info("query view {0}/{1}".format(prefix + ddoc_name, view.name))
                self.cluster.query_view(self.master, prefix + ddoc_name, view.name, query)
            except SetViewInfoNotFound as e:
                self.log.warning("exception on self.cluster.query_view")
                fragmentation_monitor.cancel()
                raise e
    if end_time < time.time() and fragmentation_monitor.state != "FINISHED":
        self.fail("impossible to reach compaction value {0} after {1} sec".
                  format(fragmentation_value, (self.wait_timeout * 30)))
    fragmentation_monitor.result()
    # Poll the indexer a few times to let index building settle.
    for i in range(3):
        active_tasks = self.cluster.async_monitor_active_task(self.servers, "indexer", "_design/" + ddoc_name, wait_task=False)
        for active_task in active_tasks:
            result = active_task.result()
            self.assertTrue(result)
        self.sleep(2)
    query["stale"] = "false"
    self.perform_verify_queries(num_views, prefix, ddoc_name, query, wait_time=self.wait_timeout * 5, expected_rows=expected_rows)
    # Start ddoc compaction, then rebalance out while it is running.
    compaction_task = self.cluster.async_compact_view(self.master, prefix + ddoc_name, self.default_bucket_name, with_rebalance=True)
    servs_out = self.servers[-self.nodes_out:]
    rebalance = self.cluster.async_rebalance([self.master], [], servs_out)
    result = compaction_task.result(self.wait_timeout * 10)
    self.assertTrue(result)
    rebalance.result()
    self.verify_cluster_stats(self.servers[:self.num_servers - self.nodes_out])
    self.verify_unacked_bytes_all_buckets()
"""Rebalances nodes out of a cluster while doing mutations and deletions.
This test begins with all servers clustered together and loads a user defined
number of items into the cluster. It then removes one node at a time from the
cluster and rebalances. During the rebalance we update half of the items in the
cluster and delete the other half. Once the cluster has been rebalanced the test
recreates all of the deleted items, waits for the disk queues to drain, and then
verifies that there has been no data loss, sum(curr_items) match the curr_items_total.
Once all nodes have been rebalanced out of the cluster the test finishes."""
def incremental_rebalance_out_with_mutation_and_deletion(self):
    """Rebalance one node out at a time with concurrent updates and deletes.

    Each iteration updates items (kv_store 1) and deletes the gen_2 range
    (tracked in kv_store 2) while the rebalance runs, then recreates the
    deleted range and verifies cluster stats.
    """
    gen_2 = BlobGenerator('rebalance-del', 'rebalance-del-', self.value_size, start=self.num_items // 2 + 2000,
                          end=self.num_items)
    batch_size = 1000
    for i in reversed(list(range(self.num_servers))[1:]):
        # don't use batch for rebalance out 2-1 nodes
        for bucket in self.buckets:
            # kvs[2] tracks the docs deleted this iteration separately
            # from the main kv store (kvs[1]).
            bucket.kvs[2] = KVStore()
        tasks = [self.cluster.async_rebalance(self.servers[:i], [], [self.servers[i]])]
        tasks += self._async_load_all_buckets(self.master, self.gen_update, "update", 0, kv_store=1, batch_size=batch_size, timeout_secs=60)
        tasks += self._async_load_all_buckets(self.master, gen_2, "delete", 0, kv_store=2, batch_size=batch_size, timeout_secs=60)
        for task in tasks:
            task.result()
        self.sleep(5)
        # Recreate the deleted range so the next iteration starts full.
        self._load_all_buckets(self.master, gen_2, "create", 0)
        self.verify_cluster_stats(self.servers[:i])
    self.verify_unacked_bytes_all_buckets()
"""Rebalances nodes out of a cluster while doing mutations and expirations.
This test begins with all servers clustered together and loads a user defined number
of items into the cluster. It then removes one node at a time from the cluster and
rebalances. During the rebalance we update all of the items in the cluster and set
half of the items to expire in 5 seconds. Once the cluster has been rebalanced the
test recreates all of the expired items, waits for the disk queues to drain, and then
verifies that there has been no data loss, sum(curr_items) match the curr_items_total.
Once all nodes have been rebalanced out of the cluster the test finishes."""
def incremental_rebalance_out_with_mutation_and_expiration(self):
    """Rebalance one node out at a time with concurrent updates and expiry.

    Each iteration updates all items and re-writes the gen_2 range with a
    5-second expiry (the fourth _load_all_buckets argument -- presumably a
    TTL; confirm against the loader's signature), then recreates the
    expired range and verifies cluster stats.
    """
    gen_2 = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items // 2 + 2000,
                          end=self.num_items)
    batch_size = 1000
    for i in reversed(list(range(self.num_servers))[2:]):
        # don't use batch for rebalance out 2-1 nodes
        rebalance = self.cluster.async_rebalance(self.servers[:i], [], [self.servers[i]])
        self._load_all_buckets(self.master, self.gen_update, "update", 0, batch_size=batch_size, timeout_secs=60)
        self._load_all_buckets(self.master, gen_2, "update", 5, batch_size=batch_size, timeout_secs=60)
        rebalance.result()
        self.sleep(5)
        # Recreate the expired range so the next iteration starts full.
        self._load_all_buckets(self.master, gen_2, "create", 0)
        self.verify_cluster_stats(self.servers[:i])
    self.verify_unacked_bytes_all_buckets()
def rebalance_out_with_compaction_and_expiration_ops(self):
    """Rebalance out under heavy expiry load with forced compaction running.

    Spawns loader threads writing items with short expirations, waits for
    the first wave, then runs a second wave concurrently with the
    rebalance-out and a background compaction thread.
    """
    self.total_loader_threads = self.input.param("total_loader_threads", 10)
    self.expiry_items = self.input.param("expiry_items", 100000)
    self.max_expiry = self.input.param("max_expiry", 30)
    thread_list = []
    # Park the expiry pager far in the future so expired items are not
    # purged in the middle of the test.
    self._expiry_pager(self.master, val=1000000)
    for bucket in self.buckets:
        RestConnection(self.master).set_auto_compaction(dbFragmentThreshold=100, bucket = bucket.name)
    num_items = self.expiry_items
    expiry_range = self.max_expiry
    # NOTE(review): range(1, n) starts n-1 loader threads, not n --
    # confirm whether this off-by-one is intentional.
    for x in range(1, self.total_loader_threads):
        t = threading.Thread(target=self.run_mc_bin_client, args = (num_items, expiry_range))
        t.daemon = True
        t.start()
        thread_list.append(t)
    for t in thread_list:
        t.join()
    # Second wave of loaders runs concurrently with the rebalance below.
    for x in range(1, self.total_loader_threads):
        t = threading.Thread(target=self.run_mc_bin_client, args = (num_items, expiry_range))
        t.daemon = True
        t.start()
        thread_list.append(t)
    self.sleep(20)
    tasks = []
    servs_out = self.servers[len(self.servers) - self.nodes_out:]
    tasks = [self.cluster.async_rebalance(self.servers[:1], [], servs_out)]
    # Compaction runs in its own daemon thread alongside the rebalance.
    t = threading.Thread(target=self._run_compaction)
    t.daemon = True
    t.start()
    thread_list.append(t)
    for task in tasks:
        task.result()
    for t in thread_list:
        t.join()
|
pickletester.py | import collections
import copyreg
import dbm
import io
import functools
import os
import math
import pickle
import pickletools
import shutil
import struct
import sys
import threading
import unittest
import weakref
from textwrap import dedent
from http.cookies import SimpleCookie
try:
import _testbuffer
except ImportError:
_testbuffer = None
from test import support
from test.support import (
TestFailed, TESTFN, run_with_locale, no_tracing,
_2G, _4G, bigmemtest, reap_threads, forget,
save_restore_warnings_filters,
check_impl_detail, impl_detail
)
from pickle import bytes_types
# bpo-41003: Save/restore warnings filters to leave them unchanged.
# Ignore filters installed by numpy.
try:
with save_restore_warnings_filters():
import numpy as np
except ImportError:
np = None
# Skip decorator for tests that only make sense on 32-bit builds
# (sys.maxsize fits in 32 bits there).
requires_32b = unittest.skipUnless(sys.maxsize < 2**32,
                                   "test is only meaningful on 32-bit builds")
# Tests that try a number of pickle protocols should have a
#     for proto in protocols:
# kind of outer loop.
protocols = range(pickle.HIGHEST_PROTOCOL + 1)
# Return True if opcode code appears in the pickle, else False.
def opcode_in_pickle(code, pickle):
    """Report whether opcode *code* (a length-1 bytes) occurs in *pickle*."""
    wanted = code.decode("latin-1")
    return any(op.code == wanted
               for op, _arg, _pos in pickletools.genops(pickle))
# Return the number of times opcode code appears in pickle.
def count_opcode(code, pickle):
    """Count how many times opcode *code* occurs in the bytestring *pickle*."""
    wanted = code.decode("latin-1")
    return sum(1 for op, _arg, _pos in pickletools.genops(pickle)
               if op.code == wanted)
def identity(x):
    """Return *x* unchanged (a no-op callable used by the tests)."""
    return x
class UnseekableIO(io.BytesIO):
    """A BytesIO variant that refuses peeking and all seek/tell operations."""

    def seekable(self):
        return False

    def peek(self, *args):
        raise NotImplementedError

    def seek(self, *args):
        raise io.UnsupportedOperation

    def tell(self):
        raise io.UnsupportedOperation
class MinimalIO(object):
    """
    A file-like object that doesn't support readinto().
    """

    def __init__(self, *args):
        self._bio = io.BytesIO(*args)
        # Delegate by binding the underlying methods directly; readinto()
        # is deliberately not exposed.
        for name in ("getvalue", "read", "readline", "write"):
            setattr(self, name, getattr(self._bio, name))
# We can't very well test the extension registry without putting known stuff
# in it, but we have to be careful to restore its original state. Code
# should do this:
#
#     e = ExtensionSaver(extension_code)
#     try:
#         fiddle w/ the extension registry's stuff for extension_code
#     finally:
#         e.restore()
class ExtensionSaver:
    """Snapshot and temporarily clear one copyreg extension registration."""

    def __init__(self, code):
        # Remember any current registration for *code* and remove it.
        self.code = code
        self.pair = copyreg._inverted_registry.get(code)
        if self.pair is not None:
            copyreg.remove_extension(self.pair[0], self.pair[1], code)

    def restore(self):
        """Drop whatever is registered now and reinstate the saved pair."""
        code = self.code
        current = copyreg._inverted_registry.get(code)
        if current is not None:
            copyreg.remove_extension(current[0], current[1], code)
        if self.pair is not None:
            copyreg.add_extension(self.pair[0], self.pair[1], code)
class C:
    """Value-style object: instances compare equal iff their __dict__s match."""

    def __eq__(self, other):
        return self.__dict__ == other.__dict__
class D(C):
    """C subclass whose constructor accepts (and ignores) one argument."""

    def __init__(self, arg):
        pass
class E(C):
    """C subclass using the legacy __getinitargs__ pickle protocol."""

    def __getinitargs__(self):
        # Unpickling re-invokes __init__ with these (empty) arguments.
        return ()
# Simple mutable object.
class Object:
    """An empty class; instances serve as plain attribute bags."""
# Hashable immutable key object containing unhashable mutable data.
class K:
    """Wrapper whose pickling recreates the object from its stored value."""

    def __init__(self, value):
        self.value = value

    def __reduce__(self):
        # Shouldn't support the recursion itself
        return K, (self.value,)
import __main__
# Publish the helper classes as attributes of __main__ and relabel them as
# defined there, so that pickling records them by reference as __main__.C,
# __main__.D and __main__.E.
__main__.C = C
C.__module__ = "__main__"
__main__.D = D
D.__module__ = "__main__"
__main__.E = E
E.__module__ = "__main__"
class myint(int):
    """int subclass that also remembers its value's string form in .str."""

    def __init__(self, x):
        self.str = str(x)
class initarg(C):
    """C subclass pickled via the legacy __getinitargs__ protocol."""

    def __init__(self, a, b):
        self.a = a
        self.b = b

    def __getinitargs__(self):
        # Unpickling will call __init__(a, b) with these values.
        return self.a, self.b
class metaclass(type):
    """A trivial custom metaclass."""
class use_metaclass(object, metaclass=metaclass):
    """A class whose type is the custom ``metaclass``."""
class pickling_metaclass(type):
    """Metaclass whose classes pickle via create_dynamic_class."""

    def __eq__(self, other):
        # Classes are equal when made by the same metaclass with the same
        # reconstruction arguments.
        if type(self) != type(other):
            return False
        return self.reduce_args == other.reduce_args

    def __reduce__(self):
        return (create_dynamic_class, self.reduce_args)
def create_dynamic_class(name, bases):
    """Recreate a class produced by pickling_metaclass from its reduce args."""
    klass = pickling_metaclass(name, bases, {})
    klass.reduce_args = (name, bases)
    return klass
class ZeroCopyBytes(bytes):
    """bytes subclass that pickles zero-copy under protocol 5."""

    readonly = True
    c_contiguous = True
    f_contiguous = True
    zero_copy_reconstruct = True

    def __reduce_ex__(self, protocol):
        if protocol < 5:
            # No out-of-band buffers before protocol 5: copy to plain bytes.
            return type(self)._reconstruct, (bytes(self),)
        return type(self)._reconstruct, (pickle.PickleBuffer(self),), None

    def __repr__(self):
        return "{}({!r})".format(self.__class__.__name__, bytes(self))

    __str__ = __repr__

    @classmethod
    def _reconstruct(cls, obj):
        with memoryview(obj) as m:
            underlying = m.obj
            if type(underlying) is cls:
                # Zero-copy: the buffer already is an instance of this class.
                return underlying
            return cls(underlying)
class ZeroCopyBytearray(bytearray):
    """bytearray subclass that pickles zero-copy under protocol 5."""

    readonly = False
    c_contiguous = True
    f_contiguous = True
    zero_copy_reconstruct = True

    def __reduce_ex__(self, protocol):
        if protocol < 5:
            # No out-of-band buffers before protocol 5: copy to plain bytes.
            return type(self)._reconstruct, (bytes(self),)
        return type(self)._reconstruct, (pickle.PickleBuffer(self),), None

    def __repr__(self):
        return "{}({!r})".format(self.__class__.__name__, bytes(self))

    __str__ = __repr__

    @classmethod
    def _reconstruct(cls, obj):
        with memoryview(obj) as m:
            underlying = m.obj
            if type(underlying) is cls:
                # Zero-copy: the buffer already is an instance of this class.
                return underlying
            return cls(underlying)
if _testbuffer is not None:
    class PicklableNDArray:
        # A not-really-zero-copy picklable ndarray, as the ndarray()
        # constructor doesn't allow for it
        zero_copy_reconstruct = False

        def __init__(self, *args, **kwargs):
            # Wrap a _testbuffer.ndarray built from the given arguments.
            self.array = _testbuffer.ndarray(*args, **kwargs)

        def __getitem__(self, idx):
            # Indexing returns a new wrapper around the indexed ndarray.
            cls = type(self)
            new = cls.__new__(cls)
            new.array = self.array[idx]
            return new

        @property
        def readonly(self):
            return self.array.readonly

        @property
        def c_contiguous(self):
            return self.array.c_contiguous

        @property
        def f_contiguous(self):
            return self.array.f_contiguous

        def __eq__(self, other):
            # Equal iff format, geometry, writability and raw bytes all match.
            if not isinstance(other, PicklableNDArray):
                return NotImplemented
            return (other.array.format == self.array.format and
                    other.array.shape == self.array.shape and
                    other.array.strides == self.array.strides and
                    other.array.readonly == self.array.readonly and
                    other.array.tobytes() == self.array.tobytes())

        def __ne__(self, other):
            if not isinstance(other, PicklableNDArray):
                return NotImplemented
            return not (self == other)

        def __repr__(self):
            return (f"{type(self)}(shape={self.array.shape},"
                    f"strides={self.array.strides}, "
                    f"bytes={self.array.tobytes()})")

        def __reduce_ex__(self, protocol):
            if not self.array.contiguous:
                raise NotImplementedError("Reconstructing a non-contiguous "
                                          "ndarray does not seem possible")
            ndarray_kwargs = {"shape": self.array.shape,
                              "strides": self.array.strides,
                              "format": self.array.format,
                              "flags": (0 if self.readonly
                                        else _testbuffer.ND_WRITABLE)}
            pb = pickle.PickleBuffer(self.array)
            if protocol >= 5:
                # Protocol 5 can carry the buffer out-of-band.
                return (type(self)._reconstruct,
                        (pb, ndarray_kwargs))
            else:
                # Need to serialize the bytes in physical order
                with pb.raw() as m:
                    return (type(self)._reconstruct,
                            (m.tobytes(), ndarray_kwargs))

        @classmethod
        def _reconstruct(cls, obj, kwargs):
            with memoryview(obj) as m:
                # For some reason, ndarray() wants a list of integers...
                # XXX This only works if format == 'B'
                items = list(m.tobytes())
                return cls(items, **kwargs)
# DATA0 .. DATA4 are the pickles we expect under the various protocols, for
# the object returned by create_data().
DATA0 = (
b'(lp0\nL0L\naL1L\naF2.0\n'
b'ac__builtin__\ncomple'
b'x\np1\n(F3.0\nF0.0\ntp2\n'
b'Rp3\naL1L\naL-1L\naL255'
b'L\naL-255L\naL-256L\naL'
b'65535L\naL-65535L\naL-'
b'65536L\naL2147483647L'
b'\naL-2147483647L\naL-2'
b'147483648L\na(Vabc\np4'
b'\ng4\nccopy_reg\n_recon'
b'structor\np5\n(c__main'
b'__\nC\np6\nc__builtin__'
b'\nobject\np7\nNtp8\nRp9\n'
b'(dp10\nVfoo\np11\nL1L\ns'
b'Vbar\np12\nL2L\nsbg9\ntp'
b'13\nag13\naL5L\na.'
)
# Disassembly of DATA0
DATA0_DIS = """\
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: L LONG 0
9: a APPEND
10: L LONG 1
14: a APPEND
15: F FLOAT 2.0
20: a APPEND
21: c GLOBAL '__builtin__ complex'
42: p PUT 1
45: ( MARK
46: F FLOAT 3.0
51: F FLOAT 0.0
56: t TUPLE (MARK at 45)
57: p PUT 2
60: R REDUCE
61: p PUT 3
64: a APPEND
65: L LONG 1
69: a APPEND
70: L LONG -1
75: a APPEND
76: L LONG 255
82: a APPEND
83: L LONG -255
90: a APPEND
91: L LONG -256
98: a APPEND
99: L LONG 65535
107: a APPEND
108: L LONG -65535
117: a APPEND
118: L LONG -65536
127: a APPEND
128: L LONG 2147483647
141: a APPEND
142: L LONG -2147483647
156: a APPEND
157: L LONG -2147483648
171: a APPEND
172: ( MARK
173: V UNICODE 'abc'
178: p PUT 4
181: g GET 4
184: c GLOBAL 'copy_reg _reconstructor'
209: p PUT 5
212: ( MARK
213: c GLOBAL '__main__ C'
225: p PUT 6
228: c GLOBAL '__builtin__ object'
248: p PUT 7
251: N NONE
252: t TUPLE (MARK at 212)
253: p PUT 8
256: R REDUCE
257: p PUT 9
260: ( MARK
261: d DICT (MARK at 260)
262: p PUT 10
266: V UNICODE 'foo'
271: p PUT 11
275: L LONG 1
279: s SETITEM
280: V UNICODE 'bar'
285: p PUT 12
289: L LONG 2
293: s SETITEM
294: b BUILD
295: g GET 9
298: t TUPLE (MARK at 172)
299: p PUT 13
303: a APPEND
304: g GET 13
308: a APPEND
309: L LONG 5
313: a APPEND
314: . STOP
highest protocol among opcodes = 0
"""
DATA1 = (
b']q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c__'
b'builtin__\ncomplex\nq\x01'
b'(G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00t'
b'q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ'
b'\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff'
b'\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00ab'
b'cq\x04h\x04ccopy_reg\n_reco'
b'nstructor\nq\x05(c__main'
b'__\nC\nq\x06c__builtin__\n'
b'object\nq\x07Ntq\x08Rq\t}q\n('
b'X\x03\x00\x00\x00fooq\x0bK\x01X\x03\x00\x00\x00bar'
b'q\x0cK\x02ubh\ttq\rh\rK\x05e.'
)
# Disassembly of DATA1
DATA1_DIS = """\
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 0
6: K BININT1 1
8: G BINFLOAT 2.0
17: c GLOBAL '__builtin__ complex'
38: q BINPUT 1
40: ( MARK
41: G BINFLOAT 3.0
50: G BINFLOAT 0.0
59: t TUPLE (MARK at 40)
60: q BINPUT 2
62: R REDUCE
63: q BINPUT 3
65: K BININT1 1
67: J BININT -1
72: K BININT1 255
74: J BININT -255
79: J BININT -256
84: M BININT2 65535
87: J BININT -65535
92: J BININT -65536
97: J BININT 2147483647
102: J BININT -2147483647
107: J BININT -2147483648
112: ( MARK
113: X BINUNICODE 'abc'
121: q BINPUT 4
123: h BINGET 4
125: c GLOBAL 'copy_reg _reconstructor'
150: q BINPUT 5
152: ( MARK
153: c GLOBAL '__main__ C'
165: q BINPUT 6
167: c GLOBAL '__builtin__ object'
187: q BINPUT 7
189: N NONE
190: t TUPLE (MARK at 152)
191: q BINPUT 8
193: R REDUCE
194: q BINPUT 9
196: } EMPTY_DICT
197: q BINPUT 10
199: ( MARK
200: X BINUNICODE 'foo'
208: q BINPUT 11
210: K BININT1 1
212: X BINUNICODE 'bar'
220: q BINPUT 12
222: K BININT1 2
224: u SETITEMS (MARK at 199)
225: b BUILD
226: h BINGET 9
228: t TUPLE (MARK at 112)
229: q BINPUT 13
231: h BINGET 13
233: K BININT1 5
235: e APPENDS (MARK at 3)
236: . STOP
highest protocol among opcodes = 1
"""
DATA2 = (
b'\x80\x02]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'__builtin__\ncomplex\n'
b'q\x01G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x86q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xff'
b'J\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff'
b'\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00a'
b'bcq\x04h\x04c__main__\nC\nq\x05'
b')\x81q\x06}q\x07(X\x03\x00\x00\x00fooq\x08K\x01'
b'X\x03\x00\x00\x00barq\tK\x02ubh\x06tq\nh'
b'\nK\x05e.'
)
# Disassembly of DATA2
DATA2_DIS = """\
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL '__builtin__ complex'
40: q BINPUT 1
42: G BINFLOAT 3.0
51: G BINFLOAT 0.0
60: \x86 TUPLE2
61: q BINPUT 2
63: R REDUCE
64: q BINPUT 3
66: K BININT1 1
68: J BININT -1
73: K BININT1 255
75: J BININT -255
80: J BININT -256
85: M BININT2 65535
88: J BININT -65535
93: J BININT -65536
98: J BININT 2147483647
103: J BININT -2147483647
108: J BININT -2147483648
113: ( MARK
114: X BINUNICODE 'abc'
122: q BINPUT 4
124: h BINGET 4
126: c GLOBAL '__main__ C'
138: q BINPUT 5
140: ) EMPTY_TUPLE
141: \x81 NEWOBJ
142: q BINPUT 6
144: } EMPTY_DICT
145: q BINPUT 7
147: ( MARK
148: X BINUNICODE 'foo'
156: q BINPUT 8
158: K BININT1 1
160: X BINUNICODE 'bar'
168: q BINPUT 9
170: K BININT1 2
172: u SETITEMS (MARK at 147)
173: b BUILD
174: h BINGET 6
176: t TUPLE (MARK at 113)
177: q BINPUT 10
179: h BINGET 10
181: K BININT1 5
183: e APPENDS (MARK at 5)
184: . STOP
highest protocol among opcodes = 2
"""
DATA3 = (
b'\x80\x03]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'builtins\ncomplex\nq\x01G'
b'@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00\x86q\x02'
b'Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ\x00\xff'
b'\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff\xff\x7f'
b'J\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00abcq'
b'\x04h\x04c__main__\nC\nq\x05)\x81q'
b'\x06}q\x07(X\x03\x00\x00\x00barq\x08K\x02X\x03\x00'
b'\x00\x00fooq\tK\x01ubh\x06tq\nh\nK\x05'
b'e.'
)
# Disassembly of DATA3
DATA3_DIS = """\
0: \x80 PROTO 3
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL 'builtins complex'
37: q BINPUT 1
39: G BINFLOAT 3.0
48: G BINFLOAT 0.0
57: \x86 TUPLE2
58: q BINPUT 2
60: R REDUCE
61: q BINPUT 3
63: K BININT1 1
65: J BININT -1
70: K BININT1 255
72: J BININT -255
77: J BININT -256
82: M BININT2 65535
85: J BININT -65535
90: J BININT -65536
95: J BININT 2147483647
100: J BININT -2147483647
105: J BININT -2147483648
110: ( MARK
111: X BINUNICODE 'abc'
119: q BINPUT 4
121: h BINGET 4
123: c GLOBAL '__main__ C'
135: q BINPUT 5
137: ) EMPTY_TUPLE
138: \x81 NEWOBJ
139: q BINPUT 6
141: } EMPTY_DICT
142: q BINPUT 7
144: ( MARK
145: X BINUNICODE 'bar'
153: q BINPUT 8
155: K BININT1 2
157: X BINUNICODE 'foo'
165: q BINPUT 9
167: K BININT1 1
169: u SETITEMS (MARK at 144)
170: b BUILD
171: h BINGET 6
173: t TUPLE (MARK at 110)
174: q BINPUT 10
176: h BINGET 10
178: K BININT1 5
180: e APPENDS (MARK at 5)
181: . STOP
highest protocol among opcodes = 2
"""
DATA4 = (
b'\x80\x04\x95\xa8\x00\x00\x00\x00\x00\x00\x00]\x94(K\x00K\x01G@'
b'\x00\x00\x00\x00\x00\x00\x00\x8c\x08builtins\x94\x8c\x07'
b'complex\x94\x93\x94G@\x08\x00\x00\x00\x00\x00\x00G'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x86\x94R\x94K\x01J\xff\xff\xff\xffK'
b'\xffJ\x01\xff\xff\xffJ\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ'
b'\x00\x00\xff\xffJ\xff\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80('
b'\x8c\x03abc\x94h\x06\x8c\x08__main__\x94\x8c'
b'\x01C\x94\x93\x94)\x81\x94}\x94(\x8c\x03bar\x94K\x02\x8c'
b'\x03foo\x94K\x01ubh\nt\x94h\x0eK\x05e.'
)
# Disassembly of DATA4
DATA4_DIS = """\
0: \x80 PROTO 4
2: \x95 FRAME 168
11: ] EMPTY_LIST
12: \x94 MEMOIZE
13: ( MARK
14: K BININT1 0
16: K BININT1 1
18: G BINFLOAT 2.0
27: \x8c SHORT_BINUNICODE 'builtins'
37: \x94 MEMOIZE
38: \x8c SHORT_BINUNICODE 'complex'
47: \x94 MEMOIZE
48: \x93 STACK_GLOBAL
49: \x94 MEMOIZE
50: G BINFLOAT 3.0
59: G BINFLOAT 0.0
68: \x86 TUPLE2
69: \x94 MEMOIZE
70: R REDUCE
71: \x94 MEMOIZE
72: K BININT1 1
74: J BININT -1
79: K BININT1 255
81: J BININT -255
86: J BININT -256
91: M BININT2 65535
94: J BININT -65535
99: J BININT -65536
104: J BININT 2147483647
109: J BININT -2147483647
114: J BININT -2147483648
119: ( MARK
120: \x8c SHORT_BINUNICODE 'abc'
125: \x94 MEMOIZE
126: h BINGET 6
128: \x8c SHORT_BINUNICODE '__main__'
138: \x94 MEMOIZE
139: \x8c SHORT_BINUNICODE 'C'
142: \x94 MEMOIZE
143: \x93 STACK_GLOBAL
144: \x94 MEMOIZE
145: ) EMPTY_TUPLE
146: \x81 NEWOBJ
147: \x94 MEMOIZE
148: } EMPTY_DICT
149: \x94 MEMOIZE
150: ( MARK
151: \x8c SHORT_BINUNICODE 'bar'
156: \x94 MEMOIZE
157: K BININT1 2
159: \x8c SHORT_BINUNICODE 'foo'
164: \x94 MEMOIZE
165: K BININT1 1
167: u SETITEMS (MARK at 150)
168: b BUILD
169: h BINGET 10
171: t TUPLE (MARK at 119)
172: \x94 MEMOIZE
173: h BINGET 14
175: K BININT1 5
177: e APPENDS (MARK at 13)
178: . STOP
highest protocol among opcodes = 4
"""
# The fixtures below are pickles produced by Python 2.x; they exercise the
# 2.x -> 3.x compatibility mappings applied during unpickling.
# set([1,2]) pickled from 2.x with protocol 2
DATA_SET = b'\x80\x02c__builtin__\nset\nq\x00]q\x01(K\x01K\x02e\x85q\x02Rq\x03.'
# xrange(5) pickled from 2.x with protocol 2 (unpickles as range)
DATA_XRANGE = b'\x80\x02c__builtin__\nxrange\nq\x00K\x00K\x05K\x01\x87q\x01Rq\x02.'
# a SimpleCookie() object pickled from 2.x with protocol 2
DATA_COOKIE = (b'\x80\x02cCookie\nSimpleCookie\nq\x00)\x81q\x01U\x03key'
               b'q\x02cCookie\nMorsel\nq\x03)\x81q\x04(U\x07commentq\x05U'
               b'\x00q\x06U\x06domainq\x07h\x06U\x06secureq\x08h\x06U\x07'
               b'expiresq\th\x06U\x07max-ageq\nh\x06U\x07versionq\x0bh\x06U'
               b'\x04pathq\x0ch\x06U\x08httponlyq\rh\x06u}q\x0e(U\x0b'
               b'coded_valueq\x0fU\x05valueq\x10h\x10h\x10h\x02h\x02ubs}q\x11b.')
# set([3]) pickled from 2.x with protocol 2
DATA_SET2 = b'\x80\x02c__builtin__\nset\nq\x00]q\x01K\x03a\x85q\x02Rq\x03.'
# Exception types that exist in both 2.x and 3.x and can be pickled from
# 2.x without constructor arguments; substituted into exception_pickle below.
python2_exceptions_without_args = (
    ArithmeticError,
    AssertionError,
    AttributeError,
    BaseException,
    BufferError,
    BytesWarning,
    DeprecationWarning,
    EOFError,
    EnvironmentError,
    Exception,
    FloatingPointError,
    FutureWarning,
    GeneratorExit,
    IOError,
    ImportError,
    ImportWarning,
    IndentationError,
    IndexError,
    KeyError,
    KeyboardInterrupt,
    LookupError,
    MemoryError,
    NameError,
    NotImplementedError,
    OSError,
    OverflowError,
    PendingDeprecationWarning,
    ReferenceError,
    RuntimeError,
    RuntimeWarning,
    # StandardError is gone in Python 3, we map it to Exception
    StopIteration,
    SyntaxError,
    SyntaxWarning,
    SystemError,
    SystemExit,
    TabError,
    TypeError,
    UnboundLocalError,
    UnicodeError,
    UnicodeWarning,
    UserWarning,
    ValueError,
    Warning,
    ZeroDivisionError,
)
# Template: the b'?' placeholder is replaced with an exception class name.
exception_pickle = b'\x80\x02cexceptions\n?\nq\x00)Rq\x01.'
# UnicodeEncodeError object pickled from 2.x with protocol 2
DATA_UEERR = (b'\x80\x02cexceptions\nUnicodeEncodeError\n'
              b'q\x00(U\x05asciiq\x01X\x03\x00\x00\x00fooq\x02K\x00K\x01'
              b'U\x03badq\x03tq\x04Rq\x05.')
def create_data():
    """Return the reference object graph whose pickles are DATA0..DATA4.

    The graph mixes floats, a complex number, integers straddling
    cPickle's internal encoding-size cutoffs, shared references (the same
    tuple appears twice) and a C instance carrying a __dict__.
    """
    inst = C()
    inst.foo = 1
    inst.bar = 2
    data = [0, 1, 2.0, 3.0 + 0j]
    data += [1, -1]
    # Append integer test cases at cPickle.c's internal size cutoffs:
    # 1-byte, 2-byte and 4-byte encodings, each with its negative edges.
    for bound in (0xff, 0xffff, 0x7fffffff):
        data += [bound, -bound, -bound - 1]
    shared = ('abc', 'abc', inst, inst)
    data.append(shared)
    data.append(shared)
    data.append(5)
    return data
class AbstractUnpickleTests:
    """Unpickling tests shared by the pickle/cPickle test subclasses."""
    # Subclass must define self.loads.
    _testdata = create_data()
    def assert_is_copy(self, obj, objcopy, msg=None):
        """Utility method to verify if two objects are copies of each others.
        """
        if msg is None:
            msg = "{!r} is not a copy of {!r}".format(obj, objcopy)
        # A copy must be equal and of the exact same type...
        self.assertEqual(obj, objcopy, msg=msg)
        self.assertIs(type(obj), type(objcopy), msg=msg)
        if hasattr(obj, '__dict__'):
            # ...with equal but distinct attribute dicts (no sharing)...
            self.assertDictEqual(obj.__dict__, objcopy.__dict__, msg=msg)
            self.assertIsNot(obj.__dict__, objcopy.__dict__, msg=msg)
        if hasattr(obj, '__slots__'):
            # ...and the same slots, set/unset and valued identically.
            self.assertListEqual(obj.__slots__, objcopy.__slots__, msg=msg)
            for slot in obj.__slots__:
                self.assertEqual(
                    hasattr(obj, slot), hasattr(objcopy, slot), msg=msg)
                self.assertEqual(getattr(obj, slot, None),
                                 getattr(objcopy, slot, None), msg=msg)
    def check_unpickling_error(self, errors, data):
        # Assert that unpickling *data* raises one of *errors*; in verbose
        # mode also report which exception each bad pickle produced.
        with self.subTest(data=data), \
             self.assertRaises(errors):
            try:
                self.loads(data)
            except BaseException as exc:
                if support.verbose > 1:
                    print('%-32r - %s: %s' %
                          (data, exc.__class__.__name__, exc))
                raise
    # Each canned DATA<n> pickle must unpickle to a copy of the reference
    # object graph produced by create_data().
    def test_load_from_data0(self):
        self.assert_is_copy(self._testdata, self.loads(DATA0))
    def test_load_from_data1(self):
        self.assert_is_copy(self._testdata, self.loads(DATA1))
    def test_load_from_data2(self):
        self.assert_is_copy(self._testdata, self.loads(DATA2))
    def test_load_from_data3(self):
        self.assert_is_copy(self._testdata, self.loads(DATA3))
    def test_load_from_data4(self):
        self.assert_is_copy(self._testdata, self.loads(DATA4))
    def test_load_classic_instance(self):
        # See issue5180.  Test loading 2.x pickles that
        # contain an instance of old style class.
        # The b'X' placeholder in each template is replaced by the class name.
        for X, args in [(C, ()), (D, ('x',)), (E, ())]:
            xname = X.__name__.encode('ascii')
            # Protocol 0 (text mode pickle):
            """
            0: ( MARK
            1: i     INST '__main__ X' (MARK at 0)
           13: p PUT 0
           16: ( MARK
           17: d     DICT (MARK at 16)
           18: p PUT 1
           21: b BUILD
           22: . STOP
            """
            pickle0 = (b"(i__main__\n"
                       b"X\n"
                       b"p0\n"
                       b"(dp1\nb.").replace(b'X', xname)
            self.assert_is_copy(X(*args), self.loads(pickle0))
            # Protocol 1 (binary mode pickle)
            """
            0: ( MARK
            1: c     GLOBAL '__main__ X'
           13: q     BINPUT 0
           15: o     OBJ (MARK at 0)
           16: q BINPUT 1
           18: } EMPTY_DICT
           19: q BINPUT 2
           21: b BUILD
           22: . STOP
            """
            pickle1 = (b'(c__main__\n'
                       b'X\n'
                       b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
            self.assert_is_copy(X(*args), self.loads(pickle1))
            # Protocol 2 (pickle2 = b'\x80\x02' + pickle1)
            """
            0: \x80 PROTO 2
            2: ( MARK
            3: c     GLOBAL '__main__ X'
           15: q     BINPUT 0
           17: o     OBJ (MARK at 2)
           18: q BINPUT 1
           20: } EMPTY_DICT
           21: q BINPUT 2
           23: b BUILD
           24: . STOP
            """
            pickle2 = (b'\x80\x02(c__main__\n'
                       b'X\n'
                       b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
            self.assert_is_copy(X(*args), self.loads(pickle2))
    def test_maxint64(self):
        # The text-mode INT opcode must handle the largest 64-bit value
        # and reject trailing garbage after the digits.
        maxint64 = (1 << 63) - 1
        data = b'I' + str(maxint64).encode("ascii") + b'\n.'
        got = self.loads(data)
        self.assert_is_copy(maxint64, got)
        # Try too with a bogus literal.
        data = b'I' + str(maxint64).encode("ascii") + b'JUNK\n.'
        self.check_unpickling_error(ValueError, data)
    def test_unpickle_from_2x(self):
        # Unpickle non-trivial data from Python 2.x.
        loaded = self.loads(DATA_SET)
        self.assertEqual(loaded, set([1, 2]))
        # 2.x xrange maps to 3.x range.
        loaded = self.loads(DATA_XRANGE)
        self.assertEqual(type(loaded), type(range(0)))
        self.assertEqual(list(loaded), list(range(5)))
        loaded = self.loads(DATA_COOKIE)
        self.assertEqual(type(loaded), SimpleCookie)
        self.assertEqual(list(loaded.keys()), ["key"])
        self.assertEqual(loaded["key"].value, "value")
        # Exception objects without arguments pickled from 2.x with protocol 2
        for exc in python2_exceptions_without_args:
            data = exception_pickle.replace(b'?', exc.__name__.encode("ascii"))
            loaded = self.loads(data)
            self.assertIs(type(loaded), exc)
        # StandardError is mapped to Exception, test that separately
        loaded = self.loads(exception_pickle.replace(b'?', b'StandardError'))
        self.assertIs(type(loaded), Exception)
        loaded = self.loads(DATA_UEERR)
        self.assertIs(type(loaded), UnicodeEncodeError)
        self.assertEqual(loaded.object, "foo")
        self.assertEqual(loaded.encoding, "ascii")
        self.assertEqual(loaded.start, 0)
        self.assertEqual(loaded.end, 1)
        self.assertEqual(loaded.reason, "bad")
    # With encoding="bytes", 2.x str pickles decode to bytes and 2.x
    # unicode pickles decode to str.
    def test_load_python2_str_as_bytes(self):
        # From Python 2: pickle.dumps('a\x00\xa0', protocol=0)
        self.assertEqual(self.loads(b"S'a\\x00\\xa0'\n.",
                                    encoding="bytes"), b'a\x00\xa0')
        # From Python 2: pickle.dumps('a\x00\xa0', protocol=1)
        self.assertEqual(self.loads(b'U\x03a\x00\xa0.',
                                    encoding="bytes"), b'a\x00\xa0')
        # From Python 2: pickle.dumps('a\x00\xa0', protocol=2)
        self.assertEqual(self.loads(b'\x80\x02U\x03a\x00\xa0.',
                                    encoding="bytes"), b'a\x00\xa0')
    def test_load_python2_unicode_as_str(self):
        # From Python 2: pickle.dumps(u'π', protocol=0)
        self.assertEqual(self.loads(b'V\\u03c0\n.',
                                    encoding='bytes'), 'π')
        # From Python 2: pickle.dumps(u'π', protocol=1)
        self.assertEqual(self.loads(b'X\x02\x00\x00\x00\xcf\x80.',
                                    encoding="bytes"), 'π')
        # From Python 2: pickle.dumps(u'π', protocol=2)
        self.assertEqual(self.loads(b'\x80\x02X\x02\x00\x00\x00\xcf\x80.',
                                    encoding="bytes"), 'π')
    def test_load_long_python2_str_as_bytes(self):
        # From Python 2: pickle.dumps('x' * 300, protocol=1)
        self.assertEqual(self.loads(pickle.BINSTRING +
                                    struct.pack("<I", 300) +
                                    b'x' * 300 + pickle.STOP,
                                    encoding='bytes'), b'x' * 300)
    def test_constants(self):
        # NONE, NEWTRUE/NEWFALSE and the protocol-0 INT encodings of bools.
        self.assertIsNone(self.loads(b'N.'))
        self.assertIs(self.loads(b'\x88.'), True)
        self.assertIs(self.loads(b'\x89.'), False)
        self.assertIs(self.loads(b'I01\n.'), True)
        self.assertIs(self.loads(b'I00\n.'), False)
    def test_empty_bytestring(self):
        # issue 11286
        empty = self.loads(b'\x80\x03U\x00q\x00.', encoding='koi8-r')
        self.assertEqual(empty, '')
    def test_short_binbytes(self):
        dumped = b'\x80\x03C\x04\xe2\x82\xac\x00.'
        self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
    def test_binbytes(self):
        dumped = b'\x80\x03B\x04\x00\x00\x00\xe2\x82\xac\x00.'
        self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
    @requires_32b
    def test_negative_32b_binbytes(self):
        # On 32-bit builds, a BINBYTES of 2**31 or more is refused
        dumped = b'\x80\x03B\xff\xff\xff\xffxyzq\x00.'
        self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
                                    dumped)
    @requires_32b
    def test_negative_32b_binunicode(self):
        # On 32-bit builds, a BINUNICODE of 2**31 or more is refused
        dumped = b'\x80\x03X\xff\xff\xff\xffxyzq\x00.'
        self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
                                    dumped)
    def test_short_binunicode(self):
        dumped = b'\x80\x04\x8c\x04\xe2\x82\xac\x00.'
        self.assertEqual(self.loads(dumped), '\u20ac\x00')
    def test_misc_get(self):
        # GET/BINGET/LONG_BINGET referencing memo slots that were never PUT.
        self.check_unpickling_error(pickle.UnpicklingError, b'g0\np0')
        self.check_unpickling_error(pickle.UnpicklingError, b'jens:')
        self.check_unpickling_error(pickle.UnpicklingError, b'hens:')
        self.assert_is_copy([(100,), (100,)],
                            self.loads(b'((Kdtp0\nh\x00l.))'))
    def test_binbytes8(self):
        dumped = b'\x80\x04\x8e\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
        self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
    def test_binunicode8(self):
        dumped = b'\x80\x04\x8d\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
        self.assertEqual(self.loads(dumped), '\u20ac\x00')
    def test_bytearray8(self):
        dumped = b'\x80\x05\x96\x03\x00\x00\x00\x00\x00\x00\x00xxx.'
        self.assertEqual(self.loads(dumped), bytearray(b'xxx'))
    @requires_32b
    def test_large_32b_binbytes8(self):
        dumped = b'\x80\x04\x8e\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
        self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
                                    dumped)
    @requires_32b
    def test_large_32b_bytearray8(self):
        dumped = b'\x80\x05\x96\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
        self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
                                    dumped)
    @requires_32b
    def test_large_32b_binunicode8(self):
        dumped = b'\x80\x04\x8d\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
        self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
                                    dumped)
    # Memo opcodes: each variant must make the second reference identical
    # (the same object), not merely equal.
    def test_get(self):
        pickled = b'((lp100000\ng100000\nt.'
        unpickled = self.loads(pickled)
        self.assertEqual(unpickled, ([],)*2)
        self.assertIs(unpickled[0], unpickled[1])
    def test_binget(self):
        pickled = b'(]q\xffh\xfft.'
        unpickled = self.loads(pickled)
        self.assertEqual(unpickled, ([],)*2)
        self.assertIs(unpickled[0], unpickled[1])
    def test_long_binget(self):
        pickled = b'(]r\x00\x00\x01\x00j\x00\x00\x01\x00t.'
        unpickled = self.loads(pickled)
        self.assertEqual(unpickled, ([],)*2)
        self.assertIs(unpickled[0], unpickled[1])
    def test_dup(self):
        pickled = b'((l2t.'
        unpickled = self.loads(pickled)
        self.assertEqual(unpickled, ([],)*2)
        self.assertIs(unpickled[0], unpickled[1])
    def test_negative_put(self):
        # Issue #12847
        dumped = b'Va\np-1\n.'
        self.check_unpickling_error(ValueError, dumped)
    @requires_32b
    def test_negative_32b_binput(self):
        # Issue #12847
        dumped = b'\x80\x03X\x01\x00\x00\x00ar\xff\xff\xff\xff.'
        self.check_unpickling_error(ValueError, dumped)
    def test_badly_escaped_string(self):
        # Protocol-0 STRING with a dangling backslash escape.
        self.check_unpickling_error(ValueError, b"S'\\'\n.")
    def test_badly_quoted_string(self):
        # Issue #17710
        badpickles = [b"S'\n.",
                      b'S"\n.',
                      b'S\' \n.',
                      b'S" \n.',
                      b'S\'"\n.',
                      b'S"\'\n.',
                      b"S' ' \n.",
                      b'S" " \n.',
                      b"S ''\n.",
                      b'S ""\n.',
                      b'S \n.',
                      b'S\n.',
                      b'S.']
        for p in badpickles:
            self.check_unpickling_error(pickle.UnpicklingError, p)
    def test_correctly_quoted_string(self):
        goodpickles = [(b"S''\n.", ''),
                       (b'S""\n.', ''),
                       (b'S"\\n"\n.', '\n'),
                       (b"S'\\n'\n.", '\n')]
        for p, expected in goodpickles:
            self.assertEqual(self.loads(p), expected)
    def test_frame_readline(self):
        # A line-based opcode (INT) inside a protocol-4 frame must still be
        # readable via readline.
        pickled = b'\x80\x04\x95\x05\x00\x00\x00\x00\x00\x00\x00I42\n.'
        #    0: \x80 PROTO      4
        #    2: \x95 FRAME      5
        #   11: I    INT        42
        #   15: .    STOP
        self.assertEqual(self.loads(pickled), 42)
    def test_compat_unpickle(self):
        # 2.x module/name pairs must be remapped to their 3.x equivalents.
        # xrange(1, 7)
        pickled = b'\x80\x02c__builtin__\nxrange\nK\x01K\x07K\x01\x87R.'
        unpickled = self.loads(pickled)
        self.assertIs(type(unpickled), range)
        self.assertEqual(unpickled, range(1, 7))
        self.assertEqual(list(unpickled), [1, 2, 3, 4, 5, 6])
        # reduce
        pickled = b'\x80\x02c__builtin__\nreduce\n.'
        self.assertIs(self.loads(pickled), functools.reduce)
        # whichdb.whichdb
        pickled = b'\x80\x02cwhichdb\nwhichdb\n.'
        self.assertIs(self.loads(pickled), dbm.whichdb)
        # Exception(), StandardError()
        for name in (b'Exception', b'StandardError'):
            pickled = (b'\x80\x02cexceptions\n' + name + b'\nU\x03ugh\x85R.')
            unpickled = self.loads(pickled)
            self.assertIs(type(unpickled), Exception)
            self.assertEqual(str(unpickled), 'ugh')
        # UserDict.UserDict({1: 2}), UserDict.IterableUserDict({1: 2})
        for name in (b'UserDict', b'IterableUserDict'):
            pickled = (b'\x80\x02(cUserDict\n' + name +
                       b'\no}U\x04data}K\x01K\x02ssb.')
            unpickled = self.loads(pickled)
            self.assertIs(type(unpickled), collections.UserDict)
            self.assertEqual(unpickled, collections.UserDict({1: 2}))
    def test_bad_reduce(self):
        # REDUCE with a valid callable succeeds; non-callables or a
        # non-tuple argument list must fail.
        self.assertEqual(self.loads(b'cbuiltins\nint\n)R.'), 0)
        self.check_unpickling_error(TypeError, b'N)R.')
        self.check_unpickling_error(TypeError, b'cbuiltins\nint\nNR.')
    def test_bad_newobj(self):
        error = (pickle.UnpicklingError, TypeError)
        self.assertEqual(self.loads(b'cbuiltins\nint\n)\x81.'), 0)
        self.check_unpickling_error(error, b'cbuiltins\nlen\n)\x81.')
        self.check_unpickling_error(error, b'cbuiltins\nint\nN\x81.')
    def test_bad_newobj_ex(self):
        error = (pickle.UnpicklingError, TypeError)
        self.assertEqual(self.loads(b'cbuiltins\nint\n)}\x92.'), 0)
        self.check_unpickling_error(error, b'cbuiltins\nlen\n)}\x92.')
        self.check_unpickling_error(error, b'cbuiltins\nint\nN}\x92.')
        self.check_unpickling_error(error, b'cbuiltins\nint\n)N\x92.')
    def test_bad_stack(self):
        # Opcodes executed with fewer stack operands than they require.
        badpickles = [
            b'.',                       # STOP
            b'0',                       # POP
            b'1',                       # POP_MARK
            b'2',                       # DUP
            b'(2',
            b'R',                       # REDUCE
            b')R',
            b'a',                       # APPEND
            b'Na',
            b'b',                       # BUILD
            b'Nb',
            b'd',                       # DICT
            b'e',                       # APPENDS
            b'(e',
            b'ibuiltins\nlist\n',       # INST
            b'l',                       # LIST
            b'o',                       # OBJ
            b'(o',
            b'p1\n',                    # PUT
            b'q\x00',                   # BINPUT
            b'r\x00\x00\x00\x00',       # LONG_BINPUT
            b's',                       # SETITEM
            b'Ns',
            b'NNs',
            b't',                       # TUPLE
            b'u',                       # SETITEMS
            b'(u',
            b'}(Nu',
            b'\x81',                    # NEWOBJ
            b')\x81',
            b'\x85',                    # TUPLE1
            b'\x86',                    # TUPLE2
            b'N\x86',
            b'\x87',                    # TUPLE3
            b'N\x87',
            b'NN\x87',
            b'\x90',                    # ADDITEMS
            b'(\x90',
            b'\x91',                    # FROZENSET
            b'\x92',                    # NEWOBJ_EX
            b')}\x92',
            b'\x93',                    # STACK_GLOBAL
            b'Vlist\n\x93',
            b'\x94',                    # MEMOIZE
        ]
        for p in badpickles:
            self.check_unpickling_error(self.bad_stack_errors, p)
    def test_bad_mark(self):
        # A MARK object sitting where an opcode expects a real operand.
        badpickles = [
            b'N(.',                     # STOP
            b'N(2',                     # DUP
            b'cbuiltins\nlist\n)(R',    # REDUCE
            b'cbuiltins\nlist\n()R',
            b']N(a',                    # APPEND
                                        # BUILD
            b'cbuiltins\nValueError\n)R}(b',
            b'cbuiltins\nValueError\n)R(}b',
            b'(Nd',                     # DICT
            b'N(p1\n',                  # PUT
            b'N(q\x00',                 # BINPUT
            b'N(r\x00\x00\x00\x00',     # LONG_BINPUT
            b'}NN(s',                   # SETITEM
            b'}N(Ns',
            b'}(NNs',
            b'}((u',                    # SETITEMS
            b'cbuiltins\nlist\n)(\x81', # NEWOBJ
            b'cbuiltins\nlist\n()\x81',
            b'N(\x85',                  # TUPLE1
            b'NN(\x86',                 # TUPLE2
            b'N(N\x86',
            b'NNN(\x87',                # TUPLE3
            b'NN(N\x87',
            b'N(NN\x87',
            b']((\x90',                 # ADDITEMS
                                        # NEWOBJ_EX
            b'cbuiltins\nlist\n)}(\x92',
            b'cbuiltins\nlist\n)(}\x92',
            b'cbuiltins\nlist\n()}\x92',
                                        # STACK_GLOBAL
            b'Vbuiltins\n(Vlist\n\x93',
            b'Vbuiltins\nVlist\n(\x93',
            b'N(\x94',                  # MEMOIZE
        ]
        for p in badpickles:
            self.check_unpickling_error(self.bad_stack_errors, p)
    def test_truncated_data(self):
        # Pickles cut off mid-opcode or mid-argument must raise the
        # subclass-specified truncation errors, never crash or hang.
        self.check_unpickling_error(EOFError, b'')
        self.check_unpickling_error(EOFError, b'N')
        badpickles = [
            b'B',                       # BINBYTES
            b'B\x03\x00\x00',
            b'B\x03\x00\x00\x00',
            b'B\x03\x00\x00\x00ab',
            b'C',                       # SHORT_BINBYTES
            b'C\x03',
            b'C\x03ab',
            b'F',                       # FLOAT
            b'F0.0',
            b'F0.00',
            b'G',                       # BINFLOAT
            b'G\x00\x00\x00\x00\x00\x00\x00',
            b'I',                       # INT
            b'I0',
            b'J',                       # BININT
            b'J\x00\x00\x00',
            b'K',                       # BININT1
            b'L',                       # LONG
            b'L0',
            b'L10',
            b'L0L',
            b'L10L',
            b'M',                       # BININT2
            b'M\x00',
            # b'P',                     # PERSID
            # b'Pabc',
            b'S',                       # STRING
            b"S'abc'",
            b'T',                       # BINSTRING
            b'T\x03\x00\x00',
            b'T\x03\x00\x00\x00',
            b'T\x03\x00\x00\x00ab',
            b'U',                       # SHORT_BINSTRING
            b'U\x03',
            b'U\x03ab',
            b'V',                       # UNICODE
            b'Vabc',
            b'X',                       # BINUNICODE
            b'X\x03\x00\x00',
            b'X\x03\x00\x00\x00',
            b'X\x03\x00\x00\x00ab',
            b'(c',                      # GLOBAL
            b'(cbuiltins',
            b'(cbuiltins\n',
            b'(cbuiltins\nlist',
            b'Ng',                      # GET
            b'Ng0',
            b'(i',                      # INST
            b'(ibuiltins',
            b'(ibuiltins\n',
            b'(ibuiltins\nlist',
            b'Nh',                      # BINGET
            b'Nj',                      # LONG_BINGET
            b'Nj\x00\x00\x00',
            b'Np',                      # PUT
            b'Np0',
            b'Nq',                      # BINPUT
            b'Nr',                      # LONG_BINPUT
            b'Nr\x00\x00\x00',
            b'\x80',                    # PROTO
            b'\x82',                    # EXT1
            b'\x83',                    # EXT2
            b'\x84\x01',
            b'\x84',                    # EXT4
            b'\x84\x01\x00\x00',
            b'\x8a',                    # LONG1
            b'\x8b',                    # LONG4
            b'\x8b\x00\x00\x00',
            b'\x8c',                    # SHORT_BINUNICODE
            b'\x8c\x03',
            b'\x8c\x03ab',
            b'\x8d',                    # BINUNICODE8
            b'\x8d\x03\x00\x00\x00\x00\x00\x00',
            b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00',
            b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00ab',
            b'\x8e',                    # BINBYTES8
            b'\x8e\x03\x00\x00\x00\x00\x00\x00',
            b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00',
            b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00ab',
            b'\x96',                    # BYTEARRAY8
            b'\x96\x03\x00\x00\x00\x00\x00\x00',
            b'\x96\x03\x00\x00\x00\x00\x00\x00\x00',
            b'\x96\x03\x00\x00\x00\x00\x00\x00\x00ab',
            b'\x95',                    # FRAME
            b'\x95\x02\x00\x00\x00\x00\x00\x00',
            b'\x95\x02\x00\x00\x00\x00\x00\x00\x00',
            b'\x95\x02\x00\x00\x00\x00\x00\x00\x00N',
        ]
        for p in badpickles:
            self.check_unpickling_error(self.truncated_errors, p)
    @reap_threads
    def test_unpickle_module_race(self):
        # https://bugs.python.org/issue34572
        # Two threads unpickle an object whose module is not yet imported.
        # The importing thread blocks inside the `locker` barrier, so the
        # second thread must wait on the import lock instead of failing.
        locker_module = dedent("""
        import threading
        barrier = threading.Barrier(2)
        """)
        locking_import_module = dedent("""
        import locker
        locker.barrier.wait()
        class ToBeUnpickled(object):
            pass
        """)
        os.mkdir(TESTFN)
        self.addCleanup(shutil.rmtree, TESTFN)
        sys.path.insert(0, TESTFN)
        self.addCleanup(sys.path.remove, TESTFN)
        with open(os.path.join(TESTFN, "locker.py"), "wb") as f:
            f.write(locker_module.encode('utf-8'))
        with open(os.path.join(TESTFN, "locking_import.py"), "wb") as f:
            f.write(locking_import_module.encode('utf-8'))
        self.addCleanup(forget, "locker")
        self.addCleanup(forget, "locking_import")
        import locker
        pickle_bytes = (
            b'\x80\x03clocking_import\nToBeUnpickled\nq\x00)\x81q\x01.')
        # Then try to unpickle two of these simultaneously
        # One of them will cause the module import, and we want it to block
        # until the other one either:
        #   - fails (before the patch for this issue)
        #   - blocks on the import lock for the module, as it should
        results = []
        barrier = threading.Barrier(3)
        def t():
            # This ensures the threads have all started
            # presumably barrier release is faster than thread startup
            barrier.wait()
            results.append(pickle.loads(pickle_bytes))
        t1 = threading.Thread(target=t)
        t2 = threading.Thread(target=t)
        t1.start()
        t2.start()
        barrier.wait()
        # could have delay here
        locker.barrier.wait()
        t1.join()
        t2.join()
        from locking_import import ToBeUnpickled
        self.assertEqual(
            [type(x) for x in results],
            [ToBeUnpickled] * 2)
class AbstractPickleTests:
    """Round-trip (dumps + loads) tests shared by the test subclasses."""
    # Subclass must define self.dumps, self.loads.
    optimized = False
    # Reuse the reference object graph and copy checker from the
    # unpickling suite.
    _testdata = AbstractUnpickleTests._testdata
    def setUp(self):
        pass
    assert_is_copy = AbstractUnpickleTests.assert_is_copy
    def test_misc(self):
        # test various datatypes not tested by testdata
        for proto in protocols:
            # int subclass
            x = myint(4)
            s = self.dumps(x, proto)
            y = self.loads(s)
            self.assert_is_copy(x, y)
            # nested tuple
            x = (1, ())
            s = self.dumps(x, proto)
            y = self.loads(s)
            self.assert_is_copy(x, y)
            # class taking __init__ arguments
            x = initarg(1, x)
            s = self.dumps(x, proto)
            y = self.loads(s)
            self.assert_is_copy(x, y)
        # XXX test __reduce__ protocol?
    def test_roundtrip_equality(self):
        # The reference object graph must survive dumps/loads on every
        # supported protocol.
        expected = self._testdata
        for proto in protocols:
            s = self.dumps(expected, proto)
            got = self.loads(s)
            self.assert_is_copy(expected, got)
    # There are gratuitous differences between pickles produced by
    # pickle and cPickle, largely because cPickle starts PUT indices at
    # 1 and pickle starts them at 0. See XXX comment in cPickle's put2() --
    # there's a comment with an exclamation point there whose meaning
    # is a mystery. cPickle also suppresses PUT for objects with a refcount
    # of 1.
    def dont_test_disassembly(self):
        # Disabled (dont_ prefix): see the PUT-index differences noted above.
        from io import StringIO
        from pickletools import dis
        for proto, expected in (0, DATA0_DIS), (1, DATA1_DIS):
            s = self.dumps(self._testdata, proto)
            filelike = StringIO()
            dis(s, out=filelike)
            got = filelike.getvalue()
            self.assertEqual(expected, got)
    def _test_recursive_list(self, cls, aslist=identity, minprotocol=0):
        # List containing itself.
        # *aslist* extracts the underlying list from list-like wrappers.
        l = cls()
        l.append(l)
        for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
            s = self.dumps(l, proto)
            x = self.loads(s)
            self.assertIsInstance(x, cls)
            y = aslist(x)
            self.assertEqual(len(y), 1)
            # The self-reference must be restored as identity, not a copy.
            self.assertIs(y[0], x)
    def test_recursive_list(self):
        self._test_recursive_list(list)
    def test_recursive_list_subclass(self):
        self._test_recursive_list(MyList, minprotocol=2)
    def test_recursive_list_like(self):
        self._test_recursive_list(REX_six, aslist=lambda x: x.items)
    def _test_recursive_tuple_and_list(self, cls, aslist=identity, minprotocol=0):
        # Tuple containing a list containing the original tuple.
        t = (cls(),)
        t[0].append(t)
        for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
            s = self.dumps(t, proto)
            x = self.loads(s)
            self.assertIsInstance(x, tuple)
            self.assertEqual(len(x), 1)
            self.assertIsInstance(x[0], cls)
            y = aslist(x[0])
            self.assertEqual(len(y), 1)
            # The cycle must close back onto the reconstructed tuple itself.
            self.assertIs(y[0], x)
        # List containing a tuple containing the original list.
        t, = t
        for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
            s = self.dumps(t, proto)
            x = self.loads(s)
            self.assertIsInstance(x, cls)
            y = aslist(x)
            self.assertEqual(len(y), 1)
            self.assertIsInstance(y[0], tuple)
            self.assertEqual(len(y[0]), 1)
            self.assertIs(y[0][0], x)
    def test_recursive_tuple_and_list(self):
        self._test_recursive_tuple_and_list(list)
    def test_recursive_tuple_and_list_subclass(self):
        self._test_recursive_tuple_and_list(MyList, minprotocol=2)
    def test_recursive_tuple_and_list_like(self):
        self._test_recursive_tuple_and_list(REX_six, aslist=lambda x: x.items)
    def _test_recursive_dict(self, cls, asdict=identity, minprotocol=0):
        # Dict containing itself.
        # *asdict* extracts the underlying dict from dict-like wrappers.
        d = cls()
        d[1] = d
        for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
            s = self.dumps(d, proto)
            x = self.loads(s)
            self.assertIsInstance(x, cls)
            y = asdict(x)
            self.assertEqual(list(y.keys()), [1])
            # The self-reference must be restored as identity, not a copy.
            self.assertIs(y[1], x)
    def test_recursive_dict(self):
        self._test_recursive_dict(dict)
    def test_recursive_dict_subclass(self):
        self._test_recursive_dict(MyDict, minprotocol=2)
    def test_recursive_dict_like(self):
        self._test_recursive_dict(REX_seven, asdict=lambda x: x.table)
    def _test_recursive_tuple_and_dict(self, cls, asdict=identity, minprotocol=0):
        # Tuple containing a dict containing the original tuple.
        t = (cls(),)
        t[0][1] = t
        for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
            s = self.dumps(t, proto)
            x = self.loads(s)
            self.assertIsInstance(x, tuple)
            self.assertEqual(len(x), 1)
            self.assertIsInstance(x[0], cls)
            y = asdict(x[0])
            self.assertEqual(list(y), [1])
            # The cycle must close back onto the reconstructed tuple itself.
            self.assertIs(y[1], x)
        # Dict containing a tuple containing the original dict.
        t, = t
        for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
            s = self.dumps(t, proto)
            x = self.loads(s)
            self.assertIsInstance(x, cls)
            y = asdict(x)
            self.assertEqual(list(y), [1])
            self.assertIsInstance(y[1], tuple)
            self.assertEqual(len(y[1]), 1)
            self.assertIs(y[1][0], x)
    def test_recursive_tuple_and_dict(self):
        self._test_recursive_tuple_and_dict(dict)
    def test_recursive_tuple_and_dict_subclass(self):
        self._test_recursive_tuple_and_dict(MyDict, minprotocol=2)
    def test_recursive_tuple_and_dict_like(self):
        self._test_recursive_tuple_and_dict(REX_seven, asdict=lambda x: x.table)
def _test_recursive_dict_key(self, cls, asdict=identity, minprotocol=0):
    # Dict containing an immutable object (as key) containing the original
    # dict.
    d = cls()
    d[K(d)] = 1
    for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
        s = self.dumps(d, proto)
        x = self.loads(s)
        self.assertIsInstance(x, cls)
        y = asdict(x)
        self.assertEqual(len(y.keys()), 1)
        self.assertIsInstance(list(y.keys())[0], K)
        # The key's wrapped value must be the unpickled dict itself.
        self.assertIs(list(y.keys())[0].value, x)

def test_recursive_dict_key(self):
    self._test_recursive_dict_key(dict)

def test_recursive_dict_subclass_key(self):
    self._test_recursive_dict_key(MyDict, minprotocol=2)

def test_recursive_dict_like_key(self):
    self._test_recursive_dict_key(REX_seven, asdict=lambda x: x.table)

def _test_recursive_tuple_and_dict_key(self, cls, asdict=identity, minprotocol=0):
    # Tuple containing a dict containing an immutable object (as key)
    # containing the original tuple.
    t = (cls(),)
    t[0][K(t)] = 1
    for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
        s = self.dumps(t, proto)
        x = self.loads(s)
        self.assertIsInstance(x, tuple)
        self.assertEqual(len(x), 1)
        self.assertIsInstance(x[0], cls)
        y = asdict(x[0])
        self.assertEqual(len(y), 1)
        self.assertIsInstance(list(y.keys())[0], K)
        self.assertIs(list(y.keys())[0].value, x)
    # Dict containing an immutable object (as key) containing a tuple
    # containing the original dict.
    t, = t
    for proto in range(minprotocol, pickle.HIGHEST_PROTOCOL + 1):
        s = self.dumps(t, proto)
        x = self.loads(s)
        self.assertIsInstance(x, cls)
        y = asdict(x)
        self.assertEqual(len(y), 1)
        self.assertIsInstance(list(y.keys())[0], K)
        self.assertIs(list(y.keys())[0].value[0], x)

def test_recursive_tuple_and_dict_key(self):
    self._test_recursive_tuple_and_dict_key(dict)

def test_recursive_tuple_and_dict_subclass_key(self):
    self._test_recursive_tuple_and_dict_key(MyDict, minprotocol=2)

def test_recursive_tuple_and_dict_like_key(self):
    self._test_recursive_tuple_and_dict_key(REX_seven, asdict=lambda x: x.table)
def test_recursive_set(self):
    # Sets cannot be pickled recursively before protocol 4, hence the
    # range starting at 4 below.
    # Set containing an immutable object containing the original set.
    y = set()
    y.add(K(y))
    for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
        s = self.dumps(y, proto)
        x = self.loads(s)
        self.assertIsInstance(x, set)
        self.assertEqual(len(x), 1)
        self.assertIsInstance(list(x)[0], K)
        self.assertIs(list(x)[0].value, x)
    # Immutable object containing a set containing the original object.
    y, = y
    for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
        s = self.dumps(y, proto)
        x = self.loads(s)
        self.assertIsInstance(x, K)
        self.assertIsInstance(x.value, set)
        self.assertEqual(len(x.value), 1)
        self.assertIs(list(x.value)[0], x)

def test_recursive_inst(self):
    # Mutable object containing itself.
    i = Object()
    i.attr = i
    for proto in protocols:
        s = self.dumps(i, proto)
        x = self.loads(s)
        self.assertIsInstance(x, Object)
        self.assertEqual(dir(x), dir(i))
        self.assertIs(x.attr, x)

def test_recursive_multi(self):
    # Three-object cycle: list -> instance -> dict -> same list.
    l = []
    d = {1:l}
    i = Object()
    i.attr = d
    l.append(i)
    for proto in protocols:
        s = self.dumps(l, proto)
        x = self.loads(s)
        self.assertIsInstance(x, list)
        self.assertEqual(len(x), 1)
        self.assertEqual(dir(x[0]), dir(i))
        self.assertEqual(list(x[0].attr.keys()), [1])
        # The cycle must close on the unpickled list, not a copy.
        self.assertIs(x[0].attr[1], x)
def _test_recursive_collection_and_inst(self, factory):
    # Generic driver: `factory` builds a collection from a one-element
    # iterable; both directions of the instance<->collection cycle are
    # checked for identity after a round-trip.
    # Mutable object containing a collection containing the original
    # object.
    o = Object()
    o.attr = factory([o])
    t = type(o.attr)
    for proto in protocols:
        s = self.dumps(o, proto)
        x = self.loads(s)
        self.assertIsInstance(x.attr, t)
        self.assertEqual(len(x.attr), 1)
        self.assertIsInstance(list(x.attr)[0], Object)
        self.assertIs(list(x.attr)[0], x)
    # Collection containing a mutable object containing the original
    # collection.
    o = o.attr
    for proto in protocols:
        s = self.dumps(o, proto)
        x = self.loads(s)
        self.assertIsInstance(x, t)
        self.assertEqual(len(x), 1)
        self.assertIsInstance(list(x)[0], Object)
        self.assertIs(list(x)[0].attr, x)

def test_recursive_list_and_inst(self):
    self._test_recursive_collection_and_inst(list)

def test_recursive_tuple_and_inst(self):
    self._test_recursive_collection_and_inst(tuple)

def test_recursive_dict_and_inst(self):
    # dict.fromkeys([o]) yields a dict keyed by the instance.
    self._test_recursive_collection_and_inst(dict.fromkeys)

def test_recursive_set_and_inst(self):
    self._test_recursive_collection_and_inst(set)

def test_recursive_frozenset_and_inst(self):
    self._test_recursive_collection_and_inst(frozenset)

def test_recursive_list_subclass_and_inst(self):
    self._test_recursive_collection_and_inst(MyList)

def test_recursive_tuple_subclass_and_inst(self):
    self._test_recursive_collection_and_inst(MyTuple)

def test_recursive_dict_subclass_and_inst(self):
    self._test_recursive_collection_and_inst(MyDict.fromkeys)

def test_recursive_set_subclass_and_inst(self):
    self._test_recursive_collection_and_inst(MySet)

def test_recursive_frozenset_subclass_and_inst(self):
    self._test_recursive_collection_and_inst(MyFrozenSet)
def test_recursive_inst_state(self):
    # REX_state pickles via __getstate__/__setstate__; check that a
    # self-referential state round-trips with identity preserved.
    # Mutable object containing itself.
    y = REX_state()
    y.state = y
    for proto in protocols:
        s = self.dumps(y, proto)
        x = self.loads(s)
        self.assertIsInstance(x, REX_state)
        self.assertIs(x.state, x)

def test_recursive_tuple_and_inst_state(self):
    # Tuple containing a mutable object containing the original tuple.
    t = (REX_state(),)
    t[0].state = t
    for proto in protocols:
        s = self.dumps(t, proto)
        x = self.loads(s)
        self.assertIsInstance(x, tuple)
        self.assertEqual(len(x), 1)
        self.assertIsInstance(x[0], REX_state)
        self.assertIs(x[0].state, x)
    # Mutable object containing a tuple containing the object.
    t, = t
    for proto in protocols:
        s = self.dumps(t, proto)
        x = self.loads(s)
        self.assertIsInstance(x, REX_state)
        self.assertIsInstance(x.state, tuple)
        self.assertEqual(len(x.state), 1)
        self.assertIs(x.state[0], x)
def test_unicode(self):
    # Edge-case strings: backslash sequences that must not be
    # re-interpreted, embedded newline, non-BMP chars, and lone
    # surrogates.
    endcases = ['', '<\\u>', '<\\\u1234>', '<\n>',
                '<\\>', '<\\\U00012345>',
                # surrogates
                '<\udc80>']
    for proto in protocols:
        for u in endcases:
            p = self.dumps(u, proto)
            u2 = self.loads(p)
            self.assert_is_copy(u, u2)

def test_unicode_high_plane(self):
    # A single astral-plane (non-BMP) character.
    t = '\U00012345'
    for proto in protocols:
        p = self.dumps(t, proto)
        t2 = self.loads(p)
        self.assert_is_copy(t, t2)

def test_bytes(self):
    # Round-trip bytes of various lengths and every possible byte value.
    for proto in protocols:
        for s in b'', b'xyz', b'xyz'*100:
            p = self.dumps(s, proto)
            self.assert_is_copy(s, self.loads(p))
        for s in [bytes([i]) for i in range(256)]:
            p = self.dumps(s, proto)
            self.assert_is_copy(s, self.loads(p))
        for s in [bytes([i, i]) for i in range(256)]:
            p = self.dumps(s, proto)
            self.assert_is_copy(s, self.loads(p))
def test_bytearray(self):
    # bytearray must round-trip as a distinct (not aliased) object, and
    # the wire encoding differs by protocol: a global reference through
    # proto 4, the dedicated BYTEARRAY8 opcode in proto 5.
    for proto in protocols:
        for s in b'', b'xyz', b'xyz'*100:
            b = bytearray(s)
            p = self.dumps(b, proto)
            bb = self.loads(p)
            self.assertIsNot(bb, b)
            self.assert_is_copy(b, bb)
            if proto <= 3:
                # bytearray is serialized using a global reference
                self.assertIn(b'bytearray', p)
                self.assertTrue(opcode_in_pickle(pickle.GLOBAL, p))
            elif proto == 4:
                self.assertIn(b'bytearray', p)
                self.assertTrue(opcode_in_pickle(pickle.STACK_GLOBAL, p))
            elif proto == 5:
                self.assertNotIn(b'bytearray', p)
                self.assertTrue(opcode_in_pickle(pickle.BYTEARRAY8, p))
def test_ints(self):
    # Halving from sys.maxsize down to 1 covers every bit width on this
    # platform, positive and negative.
    for proto in protocols:
        n = sys.maxsize
        while n:
            for expected in (-n, n):
                s = self.dumps(expected, proto)
                n2 = self.loads(s)
                self.assert_is_copy(expected, n2)
            n = n >> 1
def test_long(self):
    """Round-trip ints straddling the LONG1/LONG4 encoding boundary.

    256 argument bytes is where the LONG4 opcode begins, so bit widths
    around 8*254..8*257 exercise both encodings, plus a huge value as a
    stress test for protocol 2.
    """
    for proto in protocols:
        # 256 bytes is where LONG4 begins.
        for nbits in 1, 8, 8*254, 8*255, 8*256, 8*257:
            nbase = 1 << nbits
            for npos in nbase-1, nbase, nbase+1:
                for n in npos, -npos:
                    # Local renamed from `pickle` to `p`: the original
                    # shadowed the module-level `pickle` import inside
                    # this method.
                    p = self.dumps(n, proto)
                    got = self.loads(p)
                    self.assert_is_copy(n, got)
    # Try a monster.  This is quadratic-time in protos 0 & 1, so don't
    # bother with those.
    nbase = int("deadbeeffeedface", 16)
    nbase += nbase << 1000000
    for n in nbase, -nbase:
        p = self.dumps(n, 2)
        got = self.loads(p)
        # assert_is_copy is very expensive here as it precomputes
        # a failure message by computing the repr() of n and got,
        # we just do the check ourselves.
        self.assertIs(type(got), int)
        self.assertEqual(n, got)
def test_float(self):
    """Round-trip a spread of floats (zero, subnormals, tiny, typical,
    huge) and their negations across all protocols."""
    test_values = [0.0, 4.94e-324, 1e-310, 7e-308, 6.626e-34, 0.1, 0.5,
                   3.14, 263.44582062374053, 6.022e23, 1e30]
    test_values = test_values + [-x for x in test_values]
    for proto in protocols:
        for value in test_values:
            # Local renamed from `pickle` to `p`: the original shadowed
            # the module-level `pickle` import inside this method.
            p = self.dumps(value, proto)
            got = self.loads(p)
            self.assert_is_copy(value, got)
@run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
def test_float_format(self):
    # make sure that floats are formatted locale independent with proto 0
    # (de_DE/fr_FR use ',' as the decimal separator; the pickle must
    # still use '.').
    self.assertEqual(self.dumps(1.2, 0)[0:3], b'F1.')

def test_reduce(self):
    # AAA.__reduce__ reconstructs to the module-level constant REDUCE_A.
    for proto in protocols:
        inst = AAA()
        dumped = self.dumps(inst, proto)
        loaded = self.loads(dumped)
        self.assertEqual(loaded, REDUCE_A)

def test_getinitargs(self):
    # Classes providing __getinitargs__ are re-created via __init__.
    for proto in protocols:
        inst = initarg(1, 2)
        dumped = self.dumps(inst, proto)
        loaded = self.loads(dumped)
        self.assert_is_copy(inst, loaded)

def test_metaclass(self):
    # Instances of classes with a custom metaclass keep their class.
    a = use_metaclass()
    for proto in protocols:
        s = self.dumps(a, proto)
        b = self.loads(s)
        self.assertEqual(a.__class__, b.__class__)

def test_dynamic_class(self):
    # A dynamically created class pickled through a copyreg-registered
    # metaclass reducer must round-trip to the same class object.
    a = create_dynamic_class("my_dynamic_class", (object,))
    copyreg.pickle(pickling_metaclass, pickling_metaclass.__reduce__)
    for proto in protocols:
        s = self.dumps(a, proto)
        b = self.loads(s)
        self.assertEqual(a, b)
        self.assertIs(type(a), type(b))
def test_structseq(self):
    # Structseq types (time.struct_time, os.stat_result, ...) must
    # round-trip; statvfs is platform-dependent, hence the hasattr guard.
    import time
    import os
    t = time.localtime()
    for proto in protocols:
        s = self.dumps(t, proto)
        u = self.loads(s)
        self.assert_is_copy(t, u)
        t = os.stat(os.curdir)
        s = self.dumps(t, proto)
        u = self.loads(s)
        self.assert_is_copy(t, u)
        if hasattr(os, "statvfs"):
            t = os.statvfs(os.curdir)
            s = self.dumps(t, proto)
            u = self.loads(s)
            self.assert_is_copy(t, u)

def test_ellipsis(self):
    # The Ellipsis singleton must unpickle to the identical object.
    for proto in protocols:
        s = self.dumps(..., proto)
        u = self.loads(s)
        self.assertIs(..., u)

def test_notimplemented(self):
    # The NotImplemented singleton must unpickle to the identical object.
    for proto in protocols:
        s = self.dumps(NotImplemented, proto)
        u = self.loads(s)
        self.assertIs(NotImplemented, u)

def test_singleton_types(self):
    # Issue #6477: Test that types of built-in singletons can be pickled.
    singletons = [None, ..., NotImplemented]
    for singleton in singletons:
        for proto in protocols:
            s = self.dumps(type(singleton), proto)
            u = self.loads(s)
            self.assertIs(type(singleton), u)
# Tests for protocol 2

def test_proto(self):
    # Protocol 2+ pickles must start with a PROTO opcode carrying the
    # protocol number; older protocols emit none.  An out-of-range
    # protocol byte must be rejected on load.
    for proto in protocols:
        pickled = self.dumps(None, proto)
        if proto >= 2:
            proto_header = pickle.PROTO + bytes([proto])
            self.assertTrue(pickled.startswith(proto_header))
        else:
            self.assertEqual(count_opcode(pickle.PROTO, pickled), 0)
    oob = protocols[-1] + 1     # a future protocol
    build_none = pickle.NONE + pickle.STOP
    badpickle = pickle.PROTO + bytes([oob]) + build_none
    try:
        self.loads(badpickle)
    except ValueError as err:
        self.assertIn("unsupported pickle protocol", str(err))
    else:
        self.fail("expected bad protocol number to raise ValueError")

def test_long1(self):
    # Proto 2+ should encode this medium-size int with LONG1.
    x = 12345678910111213141516178920
    for proto in protocols:
        s = self.dumps(x, proto)
        y = self.loads(s)
        self.assert_is_copy(x, y)
        self.assertEqual(opcode_in_pickle(pickle.LONG1, s), proto >= 2)

def test_long4(self):
    # Shifting past 256 bytes forces the LONG4 opcode on proto 2+.
    x = 12345678910111213141516178920 << (256*8)
    for proto in protocols:
        s = self.dumps(x, proto)
        y = self.loads(s)
        self.assert_is_copy(x, y)
        self.assertEqual(opcode_in_pickle(pickle.LONG4, s), proto >= 2)
def test_short_tuples(self):
    # Protocol 2 introduced TUPLE1/2/3 for short tuples; verify the
    # exact opcode used for each (protocol, length) pair.  Protocols
    # above 3 reuse the proto-3 opcodes, hence min(proto, 3) below.
    # Map (proto, len(tuple)) to expected opcode.
    expected_opcode = {(0, 0): pickle.TUPLE,
                       (0, 1): pickle.TUPLE,
                       (0, 2): pickle.TUPLE,
                       (0, 3): pickle.TUPLE,
                       (0, 4): pickle.TUPLE,

                       (1, 0): pickle.EMPTY_TUPLE,
                       (1, 1): pickle.TUPLE,
                       (1, 2): pickle.TUPLE,
                       (1, 3): pickle.TUPLE,
                       (1, 4): pickle.TUPLE,

                       (2, 0): pickle.EMPTY_TUPLE,
                       (2, 1): pickle.TUPLE1,
                       (2, 2): pickle.TUPLE2,
                       (2, 3): pickle.TUPLE3,
                       (2, 4): pickle.TUPLE,

                       (3, 0): pickle.EMPTY_TUPLE,
                       (3, 1): pickle.TUPLE1,
                       (3, 2): pickle.TUPLE2,
                       (3, 3): pickle.TUPLE3,
                       (3, 4): pickle.TUPLE,
                      }
    a = ()
    b = (1,)
    c = (1, 2)
    d = (1, 2, 3)
    e = (1, 2, 3, 4)
    for proto in protocols:
        for x in a, b, c, d, e:
            s = self.dumps(x, proto)
            y = self.loads(s)
            self.assert_is_copy(x, y)
            expected = expected_opcode[min(proto, 3), len(x)]
            self.assertTrue(opcode_in_pickle(expected, s))
def test_singletons(self):
    """Check the exact opcode used for None/True/False per protocol,
    and that each unpickles to the identical singleton.

    Protocol 2 introduced NEWTRUE/NEWFALSE; protocols above 3 reuse
    the proto-3 opcodes, hence ``min(proto, 3)`` below.
    """
    # Map (proto, singleton) to expected opcode.
    expected_opcode = {(0, None): pickle.NONE,
                       (1, None): pickle.NONE,
                       (2, None): pickle.NONE,
                       (3, None): pickle.NONE,

                       (0, True): pickle.INT,
                       (1, True): pickle.INT,
                       (2, True): pickle.NEWTRUE,
                       (3, True): pickle.NEWTRUE,

                       (0, False): pickle.INT,
                       (1, False): pickle.INT,
                       (2, False): pickle.NEWFALSE,
                       (3, False): pickle.NEWFALSE,
                      }
    for proto in protocols:
        for x in None, False, True:
            s = self.dumps(x, proto)
            y = self.loads(s)
            # assertIs instead of assertTrue(x is y, ...): same check,
            # but the identity failure message is self-explanatory.
            self.assertIs(x, y, (proto, x, s, y))
            expected = expected_opcode[min(proto, 3), x]
            self.assertTrue(opcode_in_pickle(expected, s))
def test_newobj_tuple(self):
    # Tuple subclass with instance attributes must keep both its
    # contents and its __dict__ across a round-trip.
    x = MyTuple([1, 2, 3])
    x.foo = 42
    x.bar = "hello"
    for proto in protocols:
        s = self.dumps(x, proto)
        y = self.loads(s)
        self.assert_is_copy(x, y)

def test_newobj_list(self):
    # Same as above for a list subclass.
    x = MyList([1, 2, 3])
    x.foo = 42
    x.bar = "hello"
    for proto in protocols:
        s = self.dumps(x, proto)
        y = self.loads(s)
        self.assert_is_copy(x, y)

def test_newobj_generic(self):
    # Every builtin subclass in `myclasses` must round-trip with both
    # its base value and its instance __dict__ intact.
    for proto in protocols:
        for C in myclasses:
            B = C.__base__
            x = C(C.sample)
            x.foo = 42
            s = self.dumps(x, proto)
            y = self.loads(s)
            detail = (proto, C, B, x, y, type(y))
            self.assert_is_copy(x, y) # XXX revisit
            self.assertEqual(B(x), B(y), detail)
            self.assertEqual(x.__dict__, y.__dict__, detail)
def test_newobj_proxies(self):
    # NEWOBJ should use the __class__ rather than the raw type
    classes = myclasses[:]
    # Cannot create weakproxies to these classes
    for c in (MyInt, MyTuple):
        classes.remove(c)
    for proto in protocols:
        for C in classes:
            B = C.__base__
            x = C(C.sample)
            x.foo = 42
            p = weakref.proxy(x)
            s = self.dumps(p, proto)
            y = self.loads(s)
            self.assertEqual(type(y), type(x))      # rather than type(p)
            detail = (proto, C, B, x, y, type(y))
            self.assertEqual(B(x), B(y), detail)
            self.assertEqual(x.__dict__, y.__dict__, detail)

def test_newobj_overridden_new(self):
    # Test that Python class with C implemented __new__ is pickleable
    for proto in protocols:
        x = MyIntWithNew2(1)
        x.foo = 42
        s = self.dumps(x, proto)
        y = self.loads(s)
        self.assertIs(type(y), MyIntWithNew2)
        self.assertEqual(int(y), 1)
        self.assertEqual(y.foo, 42)

def test_newobj_not_class(self):
    # Issue 24552: unpickling must fail cleanly if the global that
    # NEWOBJ refers to has been rebound to a non-class in the meantime.
    global SimpleNewObj
    save = SimpleNewObj
    o = SimpleNewObj.__new__(SimpleNewObj)
    b = self.dumps(o, 4)
    try:
        SimpleNewObj = 42
        self.assertRaises((TypeError, pickle.UnpicklingError), self.loads, b)
    finally:
        # Always restore the module global, even if the assert fails.
        SimpleNewObj = save
# Register a type with copyreg, with extension code extcode.  Pickle
# an object of that type.  Check that the resulting pickle uses opcode
# (EXT[124]) under proto 2, and not in proto 1.

def produce_global_ext(self, extcode, opcode):
    # ExtensionSaver snapshots and later restores any pre-existing
    # registration for `extcode`, keeping the copyreg tables clean.
    e = ExtensionSaver(extcode)
    try:
        copyreg.add_extension(__name__, "MyList", extcode)
        x = MyList([1, 2, 3])
        x.foo = 42
        x.bar = "hello"

        # Dump using protocol 1 for comparison.
        s1 = self.dumps(x, 1)
        # Proto 1 has no extension registry: the module and class names
        # appear literally in the pickle.
        self.assertIn(__name__.encode("utf-8"), s1)
        self.assertIn(b"MyList", s1)
        self.assertFalse(opcode_in_pickle(opcode, s1))
        y = self.loads(s1)
        self.assert_is_copy(x, y)

        # Dump using protocol 2 for test.
        s2 = self.dumps(x, 2)
        # Proto 2 replaces the names with the registered EXT opcode.
        self.assertNotIn(__name__.encode("utf-8"), s2)
        self.assertNotIn(b"MyList", s2)
        self.assertEqual(opcode_in_pickle(opcode, s2), True, repr(s2))
        y = self.loads(s2)
        self.assert_is_copy(x, y)
    finally:
        e.restore()

def test_global_ext1(self):
    self.produce_global_ext(0x00000001, pickle.EXT1)  # smallest EXT1 code
    self.produce_global_ext(0x000000ff, pickle.EXT1)  # largest EXT1 code

def test_global_ext2(self):
    self.produce_global_ext(0x00000100, pickle.EXT2)  # smallest EXT2 code
    self.produce_global_ext(0x0000ffff, pickle.EXT2)  # largest EXT2 code
    self.produce_global_ext(0x0000abcd, pickle.EXT2)  # check endianness

def test_global_ext4(self):
    self.produce_global_ext(0x00010000, pickle.EXT4)  # smallest EXT4 code
    self.produce_global_ext(0x7fffffff, pickle.EXT4)  # largest EXT4 code
    self.produce_global_ext(0x12abcdef, pickle.EXT4)  # check endianness
def test_list_chunking(self):
    """Check APPENDS-based chunking of lists.

    Protocol 0 never uses APPENDS; for proto > 0 a small list fits in a
    single APPENDS while a large one must be split into several chunks.
    """
    n = 10  # too small to chunk
    x = list(range(n))
    for proto in protocols:
        s = self.dumps(x, proto)
        y = self.loads(s)
        self.assert_is_copy(x, y)
        num_appends = count_opcode(pickle.APPENDS, s)
        # bool compares equal to 0/1: exactly one APPENDS iff proto > 0.
        self.assertEqual(num_appends, proto > 0)
    n = 2500  # expect at least two chunks when proto > 0
    x = list(range(n))
    for proto in protocols:
        s = self.dumps(x, proto)
        y = self.loads(s)
        self.assert_is_copy(x, y)
        num_appends = count_opcode(pickle.APPENDS, s)
        if proto == 0:
            self.assertEqual(num_appends, 0)
        else:
            # assertGreaterEqual instead of assertTrue(... >= 2): reports
            # the actual count on failure and matches test_set_chunking.
            self.assertGreaterEqual(num_appends, 2)
def test_dict_chunking(self):
    """Check SETITEMS-based chunking of dicts.

    Protocol 0 never uses SETITEMS; for proto > 0 a small dict fits in a
    single SETITEMS while a large one must be split into several chunks.
    """
    n = 10  # too small to chunk
    x = dict.fromkeys(range(n))
    for proto in protocols:
        s = self.dumps(x, proto)
        self.assertIsInstance(s, bytes_types)
        y = self.loads(s)
        self.assert_is_copy(x, y)
        num_setitems = count_opcode(pickle.SETITEMS, s)
        # bool compares equal to 0/1: exactly one SETITEMS iff proto > 0.
        self.assertEqual(num_setitems, proto > 0)
    n = 2500  # expect at least two chunks when proto > 0
    x = dict.fromkeys(range(n))
    for proto in protocols:
        s = self.dumps(x, proto)
        y = self.loads(s)
        self.assert_is_copy(x, y)
        num_setitems = count_opcode(pickle.SETITEMS, s)
        if proto == 0:
            self.assertEqual(num_setitems, 0)
        else:
            # assertGreaterEqual instead of assertTrue(... >= 2): reports
            # the actual count on failure and matches test_set_chunking.
            self.assertGreaterEqual(num_setitems, 2)
def test_set_chunking(self):
    # Sets are pickled with ADDITEMS only from protocol 4 on; older
    # protocols reconstruct them differently, hence zero ADDITEMS there.
    n = 10  # too small to chunk
    x = set(range(n))
    for proto in protocols:
        s = self.dumps(x, proto)
        y = self.loads(s)
        self.assert_is_copy(x, y)
        num_additems = count_opcode(pickle.ADDITEMS, s)
        if proto < 4:
            self.assertEqual(num_additems, 0)
        else:
            self.assertEqual(num_additems, 1)
    n = 2500  # expect at least two chunks when proto >= 4
    x = set(range(n))
    for proto in protocols:
        s = self.dumps(x, proto)
        y = self.loads(s)
        self.assert_is_copy(x, y)
        num_additems = count_opcode(pickle.ADDITEMS, s)
        if proto < 4:
            self.assertEqual(num_additems, 0)
        else:
            self.assertGreaterEqual(num_additems, 2)
def test_simple_newobj(self):
    # __new__ with one int arg: expect NEWOBJ from proto 2 on, never
    # NEWOBJ_EX; loading must not call __init__ (which would raise).
    x = SimpleNewObj.__new__(SimpleNewObj, 0xface)  # avoid __init__
    x.abc = 666
    for proto in protocols:
        with self.subTest(proto=proto):
            s = self.dumps(x, proto)
            if proto < 1:
                self.assertIn(b'\nI64206', s)  # INT
            else:
                self.assertIn(b'M\xce\xfa', s)  # BININT2
            self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
                             2 <= proto)
            self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
            y = self.loads(s)   # will raise TypeError if __init__ called
            self.assert_is_copy(x, y)

def test_complex_newobj(self):
    # __getnewargs__ returns a transformed value ('FACE'); verify the
    # transformed argument's byte-level encoding per protocol.
    x = ComplexNewObj.__new__(ComplexNewObj, 0xface)  # avoid __init__
    x.abc = 666
    for proto in protocols:
        with self.subTest(proto=proto):
            s = self.dumps(x, proto)
            if proto < 1:
                self.assertIn(b'\nI64206', s)  # INT
            elif proto < 2:
                self.assertIn(b'M\xce\xfa', s)  # BININT2
            elif proto < 4:
                self.assertIn(b'X\x04\x00\x00\x00FACE', s)  # BINUNICODE
            else:
                self.assertIn(b'\x8c\x04FACE', s)  # SHORT_BINUNICODE
            self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
                             2 <= proto)
            self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
            y = self.loads(s)   # will raise TypeError if __init__ called
            self.assert_is_copy(x, y)

def test_complex_newobj_ex(self):
    # __getnewargs_ex__ forces NEWOBJ_EX, which only exists in proto 4+.
    x = ComplexNewObjEx.__new__(ComplexNewObjEx, 0xface)  # avoid __init__
    x.abc = 666
    for proto in protocols:
        with self.subTest(proto=proto):
            s = self.dumps(x, proto)
            if proto < 1:
                self.assertIn(b'\nI64206', s)  # INT
            elif proto < 2:
                self.assertIn(b'M\xce\xfa', s)  # BININT2
            elif proto < 4:
                self.assertIn(b'X\x04\x00\x00\x00FACE', s)  # BINUNICODE
            else:
                self.assertIn(b'\x8c\x04FACE', s)  # SHORT_BINUNICODE
            self.assertFalse(opcode_in_pickle(pickle.NEWOBJ, s))
            self.assertEqual(opcode_in_pickle(pickle.NEWOBJ_EX, s),
                             4 <= proto)
            y = self.loads(s)   # will raise TypeError if __init__ called
            self.assert_is_copy(x, y)

def test_newobj_list_slots(self):
    # List subclass with __slots__: attributes live in slots, not
    # __dict__, and must still round-trip (proto 2 exercises NEWOBJ).
    x = SlotList([1, 2, 3])
    x.foo = 42
    x.bar = "hello"
    s = self.dumps(x, 2)
    y = self.loads(s)
    self.assert_is_copy(x, y)
def test_reduce_overrides_default_reduce_ex(self):
    # A class defining only __reduce__ must have it called exactly once
    # per dump; the counter is instance state, so the loaded copy's
    # counter reflects its pickled value, not a new call.
    for proto in protocols:
        x = REX_one()
        self.assertEqual(x._reduce_called, 0)
        s = self.dumps(x, proto)
        self.assertEqual(x._reduce_called, 1)
        y = self.loads(s)
        self.assertEqual(y._reduce_called, 0)

def test_reduce_ex_called(self):
    # __reduce_ex__ receives the protocol number actually in use.
    for proto in protocols:
        x = REX_two()
        self.assertEqual(x._proto, None)
        s = self.dumps(x, proto)
        self.assertEqual(x._proto, proto)
        y = self.loads(s)
        self.assertEqual(y._proto, None)

def test_reduce_ex_overrides_reduce(self):
    # When both are defined, __reduce_ex__ wins over __reduce__.
    for proto in protocols:
        x = REX_three()
        self.assertEqual(x._proto, None)
        s = self.dumps(x, proto)
        self.assertEqual(x._proto, proto)
        y = self.loads(s)
        self.assertEqual(y._proto, None)

def test_reduce_ex_calls_base(self):
    # REX_four delegates to object.__reduce_ex__; the recorded protocol
    # survives the round-trip because it is part of the state.
    for proto in protocols:
        x = REX_four()
        self.assertEqual(x._proto, None)
        s = self.dumps(x, proto)
        self.assertEqual(x._proto, proto)
        y = self.loads(s)
        self.assertEqual(y._proto, proto)

def test_reduce_calls_base(self):
    # REX_five delegates __reduce__ to the base implementation; the call
    # counter is pickled after being incremented, so the copy shows 1.
    for proto in protocols:
        x = REX_five()
        self.assertEqual(x._reduce_called, 0)
        s = self.dumps(x, proto)
        self.assertEqual(x._reduce_called, 1)
        y = self.loads(s)
        self.assertEqual(y._reduce_called, 1)
@no_tracing
def test_bad_getattr(self):
    # Issue #3514: crash when there is an infinite loop in __getattr__
    x = BadGetattr()
    for proto in protocols:
        with support.infinite_recursion():
            self.assertRaises(RuntimeError, self.dumps, x, proto)

def test_reduce_bad_iterator(self):
    # Issue4176: crash when 4th and 5th items of __reduce__()
    # are not iterators
    class C(object):
        def __reduce__(self):
            # 4th item is not an iterator
            return list, (), None, [], None
    class D(object):
        def __reduce__(self):
            # 5th item is not an iterator
            return dict, (), None, None, []

    # Python implementation is less strict and also accepts iterables.
    # Either outcome (success or PicklingError) is acceptable; only a
    # crash or an unexpected exception type is a failure.
    for proto in protocols:
        try:
            self.dumps(C(), proto)
        except pickle.PicklingError:
            pass
        try:
            self.dumps(D(), proto)
        except pickle.PicklingError:
            pass
def test_many_puts_and_gets(self):
    # Test that internal data structures correctly deal with lots of
    # puts/gets.  Three equal-but-distinct dicts force heavy memo usage.
    keys = ("aaa" + str(i) for i in range(100))
    large_dict = dict((k, [4, 5, 6]) for k in keys)
    obj = [dict(large_dict), dict(large_dict), dict(large_dict)]

    for proto in protocols:
        with self.subTest(proto=proto):
            dumped = self.dumps(obj, proto)
            loaded = self.loads(dumped)
            self.assert_is_copy(obj, loaded)

@impl_detail("pypy does not store attribute names", pypy=False)
def test_attribute_name_interning(self):
    # Test that attribute names of pickled objects are interned when
    # unpickling.
    for proto in protocols:
        x = C()
        x.foo = 42
        x.bar = "hello"
        s = self.dumps(x, proto)
        y = self.loads(s)
        x_keys = sorted(x.__dict__)
        y_keys = sorted(y.__dict__)
        for x_key, y_key in zip(x_keys, y_keys):
            # Identity (not just equality) proves the names were interned.
            self.assertIs(x_key, y_key)

@impl_detail("This test is too strong indeed", pypy=False)
def test_pickle_to_2x(self):
    # Pickle non-trivial data with protocol 2, expecting that it yields
    # the same result as Python 2.x did.
    # NOTE: this test is a bit too strong since we can produce different
    # bytecode that 2.x will still understand.
    dumped = self.dumps(range(5), 2)
    self.assertEqual(dumped, DATA_XRANGE)
    dumped = self.dumps(set([3]), 2)
    self.assertEqual(dumped, DATA_SET2)

def test_large_pickles(self):
    # Test the correctness of internal buffering routines when handling
    # large data.
    for proto in protocols:
        data = (1, min, b'xy' * (30 * 1024), len)
        dumped = self.dumps(data, proto)
        loaded = self.loads(dumped)
        self.assertEqual(len(loaded), len(data))
        self.assertEqual(loaded, data)
def test_int_pickling_efficiency(self):
    # Test compacity of int representation (see issue #12744)
    for proto in protocols:
        with self.subTest(proto=proto):
            pickles = [self.dumps(2**n, proto) for n in range(70)]
            sizes = list(map(len, pickles))
            # the size function is monotonic
            self.assertEqual(sorted(sizes), sizes)
            if proto >= 2:
                # Proto 2+ should never fall back to the text LONG opcode.
                for p in pickles:
                    self.assertFalse(opcode_in_pickle(pickle.LONG, p))

def _check_pickling_with_opcode(self, obj, opcode, proto):
    # Helper: dump `obj`, assert the given opcode appears in the pickle,
    # and that it still round-trips to an equal object.
    pickled = self.dumps(obj, proto)
    self.assertTrue(opcode_in_pickle(opcode, pickled))
    unpickled = self.loads(pickled)
    self.assertEqual(obj, unpickled)

def test_appends_on_non_lists(self):
    # Issue #17720: list-like objects (with .append) use APPEND/APPENDS.
    obj = REX_six([1, 2, 3])
    for proto in protocols:
        if proto == 0:
            self._check_pickling_with_opcode(obj, pickle.APPEND, proto)
        else:
            self._check_pickling_with_opcode(obj, pickle.APPENDS, proto)

def test_setitems_on_non_dicts(self):
    # Dict-like objects (with __setitem__) use SETITEM/SETITEMS.
    obj = REX_seven({1: -1, 2: -2, 3: -3})
    for proto in protocols:
        if proto == 0:
            self._check_pickling_with_opcode(obj, pickle.SETITEM, proto)
        else:
            self._check_pickling_with_opcode(obj, pickle.SETITEMS, proto)
# Exercise framing (proto >= 4) for significant workloads

# Minimum payload for a FRAME opcode and the pickler's target frame size.
FRAME_SIZE_MIN = 4
FRAME_SIZE_TARGET = 64 * 1024

def check_frame_opcodes(self, pickled):
    """
    Check the arguments of FRAME opcodes in a protocol 4+ pickle.

    Note that binary objects that are larger than FRAME_SIZE_TARGET are not
    framed by default and are therefore considered a frame by themselves in
    the following consistency check.
    """
    # frame_end: byte offset where the current frame ends, or None when
    # outside any frame.  frameless_start: offset where the current run
    # of unframed opcodes began, or None.
    frame_end = frameless_start = None
    frameless_opcodes = {'BINBYTES', 'BINUNICODE', 'BINBYTES8',
                         'BINUNICODE8', 'BYTEARRAY8'}
    for op, arg, pos in pickletools.genops(pickled):
        if frame_end is not None:
            # Opcodes may not straddle a frame boundary.
            self.assertLessEqual(pos, frame_end)
            if pos == frame_end:
                frame_end = None

        if frame_end is not None:  # framed
            # Frames must not nest.
            self.assertNotEqual(op.name, 'FRAME')
            if op.name in frameless_opcodes:
                # Only short bytes and str objects should be written
                # in a frame
                self.assertLessEqual(len(arg), self.FRAME_SIZE_TARGET)

        else:  # not framed
            if (op.name == 'FRAME' or
                (op.name in frameless_opcodes and
                 len(arg) > self.FRAME_SIZE_TARGET)):
                # Frame or large bytes or str object
                if frameless_start is not None:
                    # Only short data should be written outside of a frame
                    self.assertLess(pos - frameless_start,
                                    self.FRAME_SIZE_MIN)
                    frameless_start = None
            elif frameless_start is None and op.name != 'PROTO':
                frameless_start = pos

        if op.name == 'FRAME':
            # FRAME's argument is the frame length; 1 opcode byte plus
            # an 8-byte length argument precede the frame contents.
            self.assertGreaterEqual(arg, self.FRAME_SIZE_MIN)
            frame_end = pos + 9 + arg

    pos = len(pickled)
    if frame_end is not None:
        # The last frame must end exactly at the end of the pickle.
        self.assertEqual(frame_end, pos)
    elif frameless_start is not None:
        self.assertLess(pos - frameless_start, self.FRAME_SIZE_MIN)
@support.skip_if_pgo_task
def test_framing_many_objects(self):
    # Many small items: average frame payload should be close to (but
    # never above) FRAME_SIZE_TARGET.
    obj = list(range(10**5))
    for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
        with self.subTest(proto=proto):
            pickled = self.dumps(obj, proto)
            unpickled = self.loads(pickled)
            self.assertEqual(obj, unpickled)
            bytes_per_frame = (len(pickled) /
                               count_opcode(pickle.FRAME, pickled))
            self.assertGreater(bytes_per_frame,
                               self.FRAME_SIZE_TARGET / 2)
            self.assertLessEqual(bytes_per_frame,
                                 self.FRAME_SIZE_TARGET * 1)
            self.check_frame_opcodes(pickled)

def test_framing_large_objects(self):
    # Large binary objects are emitted outside frames; the small items
    # between the first two large ones should share a single frame.
    N = 1024 * 1024
    small_items = [[i] for i in range(10)]
    obj = [b'x' * N, *small_items, b'y' * N, 'z' * N]
    for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
        for fast in [False, True]:
            with self.subTest(proto=proto, fast=fast):
                if not fast:
                    # fast=False by default.
                    # This covers in-memory pickling with pickle.dumps().
                    pickled = self.dumps(obj, proto)
                else:
                    # Pickler is required when fast=True.
                    if not hasattr(self, 'pickler'):
                        continue
                    buf = io.BytesIO()
                    pickler = self.pickler(buf, protocol=proto)
                    pickler.fast = fast
                    pickler.dump(obj)
                    pickled = buf.getvalue()
                unpickled = self.loads(pickled)
                # More informative error message in case of failure.
                self.assertEqual([len(x) for x in obj],
                                 [len(x) for x in unpickled])
                # Perform full equality check if the lengths match.
                self.assertEqual(obj, unpickled)
                n_frames = count_opcode(pickle.FRAME, pickled)
                # A single frame for small objects between
                # first two large objects.
                self.assertEqual(n_frames, 1)
                self.check_frame_opcodes(pickled)
def test_optional_frames(self):
    # FRAME opcodes are an optimization hint: a pickle with some or all
    # of them stripped must still load to an equal object.
    if pickle.HIGHEST_PROTOCOL < 4:
        return

    def remove_frames(pickled, keep_frame=None):
        """Remove frame opcodes from the given pickle."""
        frame_starts = []
        # 1 byte for the opcode and 8 for the argument
        frame_opcode_size = 9
        for opcode, _, pos in pickletools.genops(pickled):
            if opcode.name == 'FRAME':
                frame_starts.append(pos)

        newpickle = bytearray()
        last_frame_end = 0
        for i, pos in enumerate(frame_starts):
            # keep_frame(i) selects which frames to leave in place.
            if keep_frame and keep_frame(i):
                continue
            newpickle += pickled[last_frame_end:pos]
            last_frame_end = pos + frame_opcode_size
        newpickle += pickled[last_frame_end:]
        return newpickle

    frame_size = self.FRAME_SIZE_TARGET
    num_frames = 20
    # Large byte objects (dict values) intermittent with small objects
    # (dict keys)
    for bytes_type in (bytes, bytearray):
        obj = {i: bytes_type([i]) * frame_size for i in range(num_frames)}

        for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
            pickled = self.dumps(obj, proto)

            frameless_pickle = remove_frames(pickled)
            self.assertEqual(count_opcode(pickle.FRAME, frameless_pickle), 0)
            self.assertEqual(obj, self.loads(frameless_pickle))

            some_frames_pickle = remove_frames(pickled, lambda i: i % 2)
            self.assertLess(count_opcode(pickle.FRAME, some_frames_pickle),
                            count_opcode(pickle.FRAME, pickled))
            self.assertEqual(obj, self.loads(some_frames_pickle))
@support.skip_if_pgo_task
def test_framed_write_sizes_with_delayed_writer(self):
    class ChunkAccumulator:
        """Accumulate pickler output in a list of raw chunks."""
        def __init__(self):
            self.chunks = []
        def write(self, chunk):
            self.chunks.append(chunk)
        def concatenate_chunks(self):
            return b"".join(self.chunks)

    for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
        objects = [(str(i).encode('ascii'), i % 42, {'i': str(i)})
                   for i in range(int(1e4))]
        # Add a large unique ASCII string
        objects.append('0123456789abcdef' *
                       (self.FRAME_SIZE_TARGET // 16 + 1))

        # Protocol 4 packs groups of small objects into frames and issues
        # calls to write only once or twice per frame:
        # The C pickler issues one call to write per-frame (header and
        # contents) while Python pickler issues two calls to write: one for
        # the frame header and one for the frame binary contents.
        writer = ChunkAccumulator()
        self.pickler(writer, proto).dump(objects)

        # Actually read the binary content of the chunks after the end
        # of the call to dump: any memoryview passed to write should not
        # be released otherwise this delayed access would not be possible.
        pickled = writer.concatenate_chunks()
        reconstructed = self.loads(pickled)
        self.assertEqual(reconstructed, objects)
        self.assertGreater(len(writer.chunks), 1)

        # memoryviews should own the memory.
        del objects
        support.gc_collect()
        self.assertEqual(writer.concatenate_chunks(), pickled)

        n_frames = (len(pickled) - 1) // self.FRAME_SIZE_TARGET + 1
        # There should be at least one call to write per frame
        self.assertGreaterEqual(len(writer.chunks), n_frames)

        # but not too many either: there can be one for the proto,
        # one per-frame header, one per frame for the actual contents,
        # and two for the header.
        self.assertLessEqual(len(writer.chunks), 2 * n_frames + 3)

        chunk_sizes = [len(c) for c in writer.chunks]
        large_sizes = [s for s in chunk_sizes
                       if s >= self.FRAME_SIZE_TARGET]
        medium_sizes = [s for s in chunk_sizes
                        if 9 < s < self.FRAME_SIZE_TARGET]
        small_sizes = [s for s in chunk_sizes if s <= 9]

        # Large chunks should not be too large:
        for chunk_size in large_sizes:
            self.assertLess(chunk_size, 2 * self.FRAME_SIZE_TARGET,
                            chunk_sizes)
        # There shouldn't be too many small chunks: the protocol header,
        # the frame headers and the large string headers are written
        # in small chunks.
        self.assertLessEqual(len(small_sizes),
                             len(large_sizes) + len(medium_sizes) + 3,
                             chunk_sizes)
def test_nested_names(self):
    # Qualified (dotted) names require proto 4's STACK_GLOBAL, but the
    # Python pickler supports them on all protocols; each nested class
    # must unpickle to the identical class object.
    global Nested
    class Nested:
        class A:
            class B:
                class C:
                    pass
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        for obj in [Nested.A, Nested.A.B, Nested.A.B.C]:
            with self.subTest(proto=proto, obj=obj):
                unpickled = self.loads(self.dumps(obj, proto))
                self.assertIs(obj, unpickled)

def test_recursive_nested_names(self):
    # A __qualname__ path that passes through the class itself (via an
    # attribute pointing at its own module) must still resolve.
    global Recursive
    class Recursive:
        pass
    Recursive.mod = sys.modules[Recursive.__module__]
    Recursive.__qualname__ = 'Recursive.mod.Recursive'
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        with self.subTest(proto=proto):
            unpickled = self.loads(self.dumps(Recursive, proto))
            self.assertIs(unpickled, Recursive)

    del Recursive.mod # break reference loop
def test_py_methods(self):
    # Bound/unbound Python methods (static, class, instance; top-level
    # and nested) must unpickle to callables with identical behavior.
    global PyMethodsTest
    class PyMethodsTest:
        @staticmethod
        def cheese():
            return "cheese"
        @classmethod
        def wine(cls):
            assert cls is PyMethodsTest
            return "wine"
        def biscuits(self):
            assert isinstance(self, PyMethodsTest)
            return "biscuits"
        class Nested:
            "Nested class"
            @staticmethod
            def ketchup():
                return "ketchup"
            @classmethod
            def maple(cls):
                assert cls is PyMethodsTest.Nested
                return "maple"
            def pie(self):
                assert isinstance(self, PyMethodsTest.Nested)
                return "pie"

    py_methods = (
        PyMethodsTest.cheese,
        PyMethodsTest.wine,
        PyMethodsTest().biscuits,
        PyMethodsTest.Nested.ketchup,
        PyMethodsTest.Nested.maple,
        PyMethodsTest.Nested().pie
    )
    py_unbound_methods = (
        (PyMethodsTest.biscuits, PyMethodsTest),
        (PyMethodsTest.Nested.pie, PyMethodsTest.Nested)
    )
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        for method in py_methods:
            with self.subTest(proto=proto, method=method):
                unpickled = self.loads(self.dumps(method, proto))
                self.assertEqual(method(), unpickled())
        for method, cls in py_unbound_methods:
            obj = cls()
            with self.subTest(proto=proto, method=method):
                unpickled = self.loads(self.dumps(method, proto))
                self.assertEqual(method(obj), unpickled(obj))
def test_c_methods(self):
    """Built-in (C-implemented) methods round-trip under every protocol."""
    global Subclass
    class Subclass(tuple):
        class Nested(str):
            pass
    # Pairs of (callable, args) — calling before and after pickling must agree.
    c_methods = (
        # bound built-in method
        ("abcd".index, ("c",)),
        # unbound built-in method
        (str.index, ("abcd", "c")),
        # bound "slot" method
        ([1, 2, 3].__len__, ()),
        # unbound "slot" method
        (list.__len__, ([1, 2, 3],)),
        # bound "coexist" method
        ({1, 2}.__contains__, (2,)),
        # unbound "coexist" method
        (set.__contains__, ({1, 2}, 2)),
        # built-in class method
        (dict.fromkeys, (("a", 1), ("b", 2))),
        # built-in static method
        (bytearray.maketrans, (b"abc", b"xyz")),
        # subclass methods
        (Subclass([1,2,2]).count, (2,)),
        (Subclass.count, (Subclass([1,2,2]), 2)),
        (Subclass.Nested("sweet").count, ("e",)),
        (Subclass.Nested.count, (Subclass.Nested("sweet"), "e")),
    )
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        for method, args in c_methods:
            with self.subTest(proto=proto, method=method):
                unpickled = self.loads(self.dumps(method, proto))
                self.assertEqual(method(*args), unpickled(*args))
def test_compat_pickle(self):
    """Protocols 0-2 emit the legacy Python 2 module/name for renamed objects."""
    # (value, expected 2.x module, expected 2.x name)
    tests = [
        (range(1, 7), '__builtin__', 'xrange'),
        (map(int, '123'), 'itertools', 'imap'),
        (functools.reduce, '__builtin__', 'reduce'),
        (dbm.whichdb, 'whichdb', 'whichdb'),
        (Exception(), 'exceptions', 'Exception'),
        (collections.UserDict(), 'UserDict', 'IterableUserDict'),
        (collections.UserList(), 'UserList', 'UserList'),
        (collections.defaultdict(), 'collections', 'defaultdict'),
    ]
    for val, mod, name in tests:
        for proto in range(3):
            with self.subTest(type=type(val), proto=proto):
                pickled = self.dumps(val, proto)
                # The GLOBAL opcode ('c') must carry the legacy 2.x path.
                self.assertIn(('c%s\n%s' % (mod, name)).encode(), pickled)
                self.assertIs(type(self.loads(pickled)), type(val))
def test_local_lookup_error(self):
    # Test that whichmodule() errors out cleanly when looking up
    # an assumed globally-reachable object fails.
    def f():
        pass
    # Since the function is local, lookup will fail
    for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
        with self.assertRaises((AttributeError, pickle.PicklingError)):
            pickletools.dis(self.dumps(f, proto))
    # Same without a __module__ attribute (exercises a different path
    # in _pickle.c).
    del f.__module__
    for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
        with self.assertRaises((AttributeError, pickle.PicklingError)):
            pickletools.dis(self.dumps(f, proto))
    # Yet a different path.
    f.__name__ = f.__qualname__
    for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
        with self.assertRaises((AttributeError, pickle.PicklingError)):
            pickletools.dis(self.dumps(f, proto))
#
# PEP 574 tests below
#
def buffer_like_objects(self):
    """Yield buffer-like objects containing the bytestring b"abcdefgh".

    Produces zero-copy bytes/bytearray wrappers and, when _testbuffer is
    available, 1-D and 2-D ndarray views in both read-only and writable
    variants. (Fix: the old comment claimed "abcdef"; the actual payload is
    8 bytes. Also removed an unused local `value`.)
    """
    bytestring = b"abcdefgh"
    yield ZeroCopyBytes(bytestring)
    yield ZeroCopyBytearray(bytestring)
    if _testbuffer is not None:
        items = list(bytestring)
        for flags in (0, _testbuffer.ND_WRITABLE):
            # 1-D, contiguous
            yield PicklableNDArray(items, format='B', shape=(8,),
                                   flags=flags)
            # 2-D, C-contiguous
            yield PicklableNDArray(items, format='B', shape=(4, 2),
                                   strides=(2, 1), flags=flags)
            # 2-D, Fortran-contiguous
            yield PicklableNDArray(items, format='B',
                                   shape=(4, 2), strides=(1, 4),
                                   flags=flags)
def test_in_band_buffers(self):
    """In-band buffers (PEP 574): data is embedded in the pickle stream."""
    # Test in-band buffers (PEP 574)
    for obj in self.buffer_like_objects():
        for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
            data = self.dumps(obj, proto)
            if obj.c_contiguous and proto >= 5:
                # The raw memory bytes are serialized in physical order
                self.assertIn(b"abcdefgh", data)
            # Without a buffer_callback no out-of-band opcode may appear.
            self.assertEqual(count_opcode(pickle.NEXT_BUFFER, data), 0)
            if proto >= 5:
                # Read-only buffers pickle as bytes, writable ones as bytearray.
                self.assertEqual(count_opcode(pickle.SHORT_BINBYTES, data),
                                 1 if obj.readonly else 0)
                self.assertEqual(count_opcode(pickle.BYTEARRAY8, data),
                                 0 if obj.readonly else 1)
                # Return a true value from buffer_callback should have
                # the same effect
                def buffer_callback(obj):
                    return True
                data2 = self.dumps(obj, proto,
                                   buffer_callback=buffer_callback)
                self.assertEqual(data2, data)
            new = self.loads(data)
            # It's a copy
            self.assertIsNot(new, obj)
            self.assertIs(type(new), type(obj))
            self.assertEqual(new, obj)
    # XXX Unfortunately cannot test non-contiguous array
    # (see comment in PicklableNDArray.__reduce_ex__)
def test_oob_buffers(self):
    """Out-of-band buffers (PEP 574): payload goes through buffer_callback."""
    # Test out-of-band buffers (PEP 574)
    for obj in self.buffer_like_objects():
        for proto in range(0, 5):
            # Need protocol >= 5 for buffer_callback
            with self.assertRaises(ValueError):
                self.dumps(obj, proto,
                           buffer_callback=[].append)
        for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
            buffers = []
            buffer_callback = lambda pb: buffers.append(pb.raw())
            data = self.dumps(obj, proto,
                              buffer_callback=buffer_callback)
            # Payload must NOT be embedded in the stream any more.
            self.assertNotIn(b"abcdefgh", data)
            self.assertEqual(count_opcode(pickle.SHORT_BINBYTES, data), 0)
            self.assertEqual(count_opcode(pickle.BYTEARRAY8, data), 0)
            self.assertEqual(count_opcode(pickle.NEXT_BUFFER, data), 1)
            self.assertEqual(count_opcode(pickle.READONLY_BUFFER, data),
                             1 if obj.readonly else 0)
            if obj.c_contiguous:
                self.assertEqual(bytes(buffers[0]), b"abcdefgh")
            # Need buffers argument to unpickle properly
            with self.assertRaises(pickle.UnpicklingError):
                self.loads(data)
            new = self.loads(data, buffers=buffers)
            if obj.zero_copy_reconstruct:
                # Zero-copy achieved
                self.assertIs(new, obj)
            else:
                self.assertIs(type(new), type(obj))
                self.assertEqual(new, obj)
            # Non-sequence buffers accepted too
            new = self.loads(data, buffers=iter(buffers))
            if obj.zero_copy_reconstruct:
                # Zero-copy achieved
                self.assertIs(new, obj)
            else:
                self.assertIs(type(new), type(obj))
                self.assertEqual(new, obj)
def test_oob_buffers_writable_to_readonly(self):
    # Test reconstructing readonly object from writable buffer
    obj = ZeroCopyBytes(b"foobar")
    for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
        buffers = []
        buffer_callback = buffers.append
        data = self.dumps(obj, proto, buffer_callback=buffer_callback)
        # Hand back *writable* bytearray copies of the pickled buffers.
        buffers = map(bytearray, buffers)
        new = self.loads(data, buffers=buffers)
        self.assertIs(type(new), type(obj))
        self.assertEqual(new, obj)
def test_picklebuffer_error(self):
    """A PickleBuffer cannot be serialized with any protocol older than 5."""
    buf = pickle.PickleBuffer(b"foobar")
    for protocol in range(5):
        with self.assertRaises(pickle.PickleError):
            self.dumps(buf, protocol)
def test_buffer_callback_error(self):
    """An exception raised inside buffer_callback propagates out of dumps()."""
    def failing_callback(buffers):
        1/0
    buf = pickle.PickleBuffer(b"foobar")
    with self.assertRaises(ZeroDivisionError):
        self.dumps(buf, 5, buffer_callback=failing_callback)
def test_buffers_error(self):
    """Bad `buffers` arguments to loads() raise the documented errors."""
    pb = pickle.PickleBuffer(b"foobar")
    for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
        data = self.dumps(pb, proto, buffer_callback=[].append)
        # Non iterable buffers
        with self.assertRaises(TypeError):
            self.loads(data, buffers=object())
        # Buffer iterable exhausts too early
        with self.assertRaises(pickle.UnpicklingError):
            self.loads(data, buffers=[])
def test_inband_accept_default_buffers_argument(self):
    """Explicit buffer_callback=None / buffers=None must be accepted."""
    for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
        # Success criterion is simply that neither call raises.
        data_pickled = self.dumps(1, proto, buffer_callback=None)
        data = self.loads(data_pickled, buffers=None)
@unittest.skipIf(np is None, "Test needs Numpy")
def test_buffers_numpy(self):
    """NumPy arrays: in-band pickling copies; out-of-band contiguous arrays are zero-copy."""
    def check_no_copy(x, y):
        # Same data pointer => memory was shared, not copied.
        np.testing.assert_equal(x, y)
        self.assertEqual(x.ctypes.data, y.ctypes.data)
    def check_copy(x, y):
        np.testing.assert_equal(x, y)
        self.assertNotEqual(x.ctypes.data, y.ctypes.data)
    def check_array(arr):
        # In-band
        for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
            data = self.dumps(arr, proto)
            new = self.loads(data)
            check_copy(arr, new)
        for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
            # A truthy buffer_callback return keeps buffers in-band.
            buffer_callback = lambda _: True
            data = self.dumps(arr, proto, buffer_callback=buffer_callback)
            new = self.loads(data)
            check_copy(arr, new)
        # Out-of-band
        for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
            buffers = []
            buffer_callback = buffers.append
            data = self.dumps(arr, proto, buffer_callback=buffer_callback)
            new = self.loads(data, buffers=buffers)
            if arr.flags.c_contiguous or arr.flags.f_contiguous:
                check_no_copy(arr, new)
            else:
                check_copy(arr, new)
    # 1-D
    arr = np.arange(6)
    check_array(arr)
    # 1-D, non-contiguous
    check_array(arr[::2])
    # 2-D, C-contiguous
    arr = np.arange(12).reshape((3, 4))
    check_array(arr)
    # 2-D, F-contiguous
    check_array(arr.T)
    # 2-D, non-contiguous
    check_array(arr[::2])
class BigmemPickleTests:
    """Pickling of multi-GiB primitives; each test is gated by @bigmemtest."""

    # Binary protocols can serialize longs of up to 2 GiB-1

    @bigmemtest(size=_2G, memuse=3.6, dry_run=False)
    def test_huge_long_32b(self, size):
        # An int needing >= 2 GiB of digits must be rejected, not crash.
        data = 1 << (8 * size)
        try:
            for proto in protocols:
                if proto < 2:
                    continue
                with self.subTest(proto=proto):
                    with self.assertRaises((ValueError, OverflowError)):
                        self.dumps(data, protocol=proto)
        finally:
            data = None

    # Protocol 3 can serialize up to 4 GiB-1 as a bytes object
    # (older protocols don't have a dedicated opcode for bytes and are
    # too inefficient)

    @bigmemtest(size=_2G, memuse=2.5, dry_run=False)
    def test_huge_bytes_32b(self, size):
        data = b"abcd" * (size // 4)
        try:
            for proto in protocols:
                if proto < 3:
                    continue
                with self.subTest(proto=proto):
                    try:
                        pickled = self.dumps(data, protocol=proto)
                        # BINBYTES + little-endian 32-bit length prefix.
                        header = (pickle.BINBYTES +
                                  struct.pack("<I", len(data)))
                        data_start = pickled.index(data)
                        self.assertEqual(
                            header,
                            pickled[data_start-len(header):data_start])
                    finally:
                        pickled = None  # free ~2 GiB promptly
        finally:
            data = None

    @bigmemtest(size=_4G, memuse=2.5, dry_run=False)
    def test_huge_bytes_64b(self, size):
        data = b"acbd" * (size // 4)
        try:
            for proto in protocols:
                if proto < 3:
                    continue
                with self.subTest(proto=proto):
                    if proto == 3:
                        # Protocol 3 does not support large bytes objects.
                        # Verify that we do not crash when processing one.
                        with self.assertRaises((ValueError, OverflowError)):
                            self.dumps(data, protocol=proto)
                        continue
                    try:
                        pickled = self.dumps(data, protocol=proto)
                        # BINBYTES8 + little-endian 64-bit length prefix.
                        header = (pickle.BINBYTES8 +
                                  struct.pack("<Q", len(data)))
                        data_start = pickled.index(data)
                        self.assertEqual(
                            header,
                            pickled[data_start-len(header):data_start])
                    finally:
                        pickled = None
        finally:
            data = None

    # All protocols use 1-byte per printable ASCII character; we add another
    # byte because the encoded form has to be copied into the internal buffer.

    @bigmemtest(size=_2G, memuse=8, dry_run=False)
    def test_huge_str_32b(self, size):
        data = "abcd" * (size // 4)
        try:
            for proto in protocols:
                if proto == 0:
                    continue
                with self.subTest(proto=proto):
                    try:
                        pickled = self.dumps(data, protocol=proto)
                        header = (pickle.BINUNICODE +
                                  struct.pack("<I", len(data)))
                        data_start = pickled.index(b'abcd')
                        self.assertEqual(
                            header,
                            pickled[data_start-len(header):data_start])
                        # The whole payload must be present, uninterrupted.
                        self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
                                          pickled.index(b"abcd")), len(data))
                    finally:
                        pickled = None
        finally:
            data = None

    # BINUNICODE (protocols 1, 2 and 3) cannot carry more than 2**32 - 1 bytes
    # of utf-8 encoded unicode. BINUNICODE8 (protocol 4) supports these huge
    # unicode strings however.

    @bigmemtest(size=_4G, memuse=8, dry_run=False)
    def test_huge_str_64b(self, size):
        data = "abcd" * (size // 4)
        try:
            for proto in protocols:
                if proto == 0:
                    continue
                with self.subTest(proto=proto):
                    if proto < 4:
                        with self.assertRaises((ValueError, OverflowError)):
                            self.dumps(data, protocol=proto)
                        continue
                    try:
                        pickled = self.dumps(data, protocol=proto)
                        header = (pickle.BINUNICODE8 +
                                  struct.pack("<Q", len(data)))
                        data_start = pickled.index(b'abcd')
                        self.assertEqual(
                            header,
                            pickled[data_start-len(header):data_start])
                        self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
                                          pickled.index(b"abcd")), len(data))
                    finally:
                        pickled = None
        finally:
            data = None
# Test classes for reduce_ex
class REX_one(object):
    """No __reduce_ex__ here, but inheriting it from object"""
    # Flag set to prove whether __reduce__ was invoked by the pickler.
    _reduce_called = 0
    def __reduce__(self):
        self._reduce_called = 1
        return REX_one, ()
class REX_two(object):
    """No __reduce__ here, but inheriting it from object"""
    # Records the protocol number passed in by the pickler.
    _proto = None
    def __reduce_ex__(self, proto):
        self._proto = proto
        return REX_two, ()
class REX_three(object):
    # Defines both hooks: __reduce_ex__ takes precedence, so the
    # __reduce__ below must never be invoked by the pickler.
    _proto = None
    def __reduce_ex__(self, proto):
        self._proto = proto
        return REX_two, ()
    def __reduce__(self):
        raise TestFailed("This __reduce__ shouldn't be called")
class REX_four(object):
    """Calling base class method should succeed"""
    _proto = None
    def __reduce_ex__(self, proto):
        # Delegates to object.__reduce_ex__ after recording the protocol.
        self._proto = proto
        return object.__reduce_ex__(self, proto)
class REX_five(object):
    """This one used to fail with infinite recursion"""
    _reduce_called = 0
    def __reduce__(self):
        # Delegating to object.__reduce__ must not recurse back here.
        self._reduce_called = 1
        return object.__reduce__(self)
class REX_six(object):
    """Exercises the 4th item (the list-items iterator) of the reduce
    protocol: __reduce__ supplies items via an iterator and the unpickler
    replays them through append().
    """
    def __init__(self, items=None):
        if items is None:
            items = []
        self.items = items
    def __eq__(self, other):
        if type(self) is not type(other):
            return False
        return self.items == other.items
    def append(self, item):
        self.items.append(item)
    def __reduce__(self):
        return type(self), (), None, iter(self.items), None
class REX_seven(object):
    """Exercises the 5th item (the dict-items iterator) of the reduce
    protocol: __reduce__ supplies (key, value) pairs via an iterator and
    the unpickler replays them through __setitem__().
    """
    def __init__(self, table=None):
        if table is None:
            table = {}
        self.table = table
    def __eq__(self, other):
        if type(self) is not type(other):
            return False
        return self.table == other.table
    def __setitem__(self, key, value):
        self.table[key] = value
    def __reduce__(self):
        return type(self), (), None, None, iter(self.table.items())
class REX_state(object):
    """Exercises the 3rd item (the state object) of the reduce protocol:
    __reduce__ returns the state and the unpickler hands it back through
    __setstate__().
    """
    def __init__(self, state=None):
        self.state = state
    def __eq__(self, other):
        if type(self) is not type(other):
            return False
        return self.state == other.state
    def __setstate__(self, state):
        self.state = state
    def __reduce__(self):
        return type(self), (), self.state
# Test classes for newobj
# Each subclasses a builtin type and carries a class-level `sample`
# instance used by the NEWOBJ round-trip tests.
class MyInt(int):
    sample = 1
class MyFloat(float):
    sample = 1.0
class MyComplex(complex):
    sample = 1.0 + 0.0j
class MyStr(str):
    sample = "hello"
class MyUnicode(str):
    sample = "hello \u1234"
class MyTuple(tuple):
    sample = (1, 2, 3)
class MyList(list):
    sample = [1, 2, 3]
class MyDict(dict):
    sample = {"a": 1, "b": 2}
class MySet(set):
    sample = {"a", "b"}
class MyFrozenSet(frozenset):
    sample = frozenset({"a", "b"})
# Convenience list iterated by the newobj tests.
myclasses = [MyInt, MyFloat,
             MyComplex,
             MyStr, MyUnicode,
             MyTuple, MyList, MyDict, MySet, MyFrozenSet]
class MyIntWithNew(int):
    # __new__ always raises: proves the pickler never calls it directly.
    def __new__(cls, value):
        raise AssertionError
# Restores the inherited int.__new__, shadowing MyIntWithNew's raising one.
class MyIntWithNew2(MyIntWithNew):
    __new__ = int.__new__
# A list subclass with __slots__, to exercise slot-state pickling.
class SlotList(MyList):
    __slots__ = ["foo"]
class SimpleNewObj(int):
    def __init__(self, *args, **kwargs):
        # raise an error, to make sure this isn't called
        raise TypeError("SimpleNewObj.__init__() didn't expect to get called")
    def __eq__(self, other):
        # Compare both the int value and any attached instance dict.
        return int(self) == int(other) and self.__dict__ == other.__dict__
class ComplexNewObj(SimpleNewObj):
    # Reconstructs via positional __new__ args (hex string, base 16).
    def __getnewargs__(self):
        return ('%X' % self, 16)
class ComplexNewObjEx(SimpleNewObj):
    # Same as ComplexNewObj but via the ex-variant: (args, kwargs) pair.
    def __getnewargs_ex__(self):
        return ('%X' % self,), {'base': 16}
class BadGetattr:
    # Pathological: looking up any missing attribute re-triggers
    # __getattr__ on self.foo, recursing without bound.
    def __getattr__(self, key):
        self.foo
class AbstractPickleModuleTests:
    """Tests for the module-level dump/load/dumps/loads API."""

    def test_dump_closed_file(self):
        """dump() to a closed file must raise ValueError."""
        f = open(TESTFN, "wb")
        try:
            f.close()
            self.assertRaises(ValueError, self.dump, 123, f)
        finally:
            support.unlink(TESTFN)

    def test_load_closed_file(self):
        """load() from a closed file must raise ValueError.

        Bug fix: this test previously called self.dump again, duplicating
        test_dump_closed_file and never exercising load() at all.
        """
        f = open(TESTFN, "wb")
        try:
            f.close()
            self.assertRaises(ValueError, self.load, f)
        finally:
            support.unlink(TESTFN)

    def test_load_from_and_dump_to_file(self):
        """A round trip through a binary stream preserves the data."""
        stream = io.BytesIO()
        data = [123, {}, 124]
        self.dump(data, stream)
        stream.seek(0)
        unpickled = self.load(stream)
        self.assertEqual(unpickled, data)

    def test_highest_protocol(self):
        # Of course this needs to be changed when HIGHEST_PROTOCOL changes.
        self.assertEqual(pickle.HIGHEST_PROTOCOL, 5)

    def test_callapi(self):
        """dump/dumps/Pickler accept protocol both positionally and by keyword."""
        f = io.BytesIO()
        # With and without keyword arguments
        self.dump(123, f, -1)
        self.dump(123, file=f, protocol=-1)
        self.dumps(123, -1)
        self.dumps(123, protocol=-1)
        self.Pickler(f, -1)
        self.Pickler(f, protocol=-1)

    def test_dump_text_file(self):
        """dump() into a text-mode file raises TypeError for every protocol."""
        f = open(TESTFN, "w")
        try:
            for proto in protocols:
                self.assertRaises(TypeError, self.dump, 123, f, proto)
        finally:
            f.close()
            support.unlink(TESTFN)

    def test_incomplete_input(self):
        """A truncated stream raises one of the documented unpickling errors."""
        s = io.BytesIO(b"X''.")
        self.assertRaises((EOFError, struct.error, pickle.UnpicklingError), self.load, s)

    def test_bad_init(self):
        # Test issue3664 (pickle can segfault from a badly initialized Pickler).
        # Override initialization without calling __init__() of the superclass.
        class BadPickler(self.Pickler):
            def __init__(self): pass

        class BadUnpickler(self.Unpickler):
            def __init__(self): pass

        self.assertRaises(pickle.PicklingError, BadPickler().dump, 0)
        self.assertRaises(pickle.UnpicklingError, BadUnpickler().load)

    def check_dumps_loads_oob_buffers(self, dumps, loads):
        # No need to do the full gamut of tests here, just enough to
        # check that dumps() and loads() redirect their arguments
        # to the underlying Pickler and Unpickler, respectively.
        obj = ZeroCopyBytes(b"foo")

        for proto in range(0, 5):
            # Need protocol >= 5 for buffer_callback
            with self.assertRaises(ValueError):
                dumps(obj, protocol=proto,
                      buffer_callback=[].append)
        for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
            buffers = []
            buffer_callback = buffers.append
            data = dumps(obj, protocol=proto,
                         buffer_callback=buffer_callback)
            self.assertNotIn(b"foo", data)
            self.assertEqual(bytes(buffers[0]), b"foo")
            # Need buffers argument to unpickle properly
            with self.assertRaises(pickle.UnpicklingError):
                loads(data)
            new = loads(data, buffers=buffers)
            self.assertIs(new, obj)

    def test_dumps_loads_oob_buffers(self):
        # Test out-of-band buffers (PEP 574) with top-level dumps() and loads()
        self.check_dumps_loads_oob_buffers(self.dumps, self.loads)

    def test_dump_load_oob_buffers(self):
        # Test out-of-band buffers (PEP 574) with top-level dump() and load()
        def dumps(obj, **kwargs):
            f = io.BytesIO()
            self.dump(obj, f, **kwargs)
            return f.getvalue()

        def loads(data, **kwargs):
            f = io.BytesIO(data)
            return self.load(f, **kwargs)

        self.check_dumps_loads_oob_buffers(dumps, loads)
class AbstractPersistentPicklerTests:
    # This class defines persistent_id() and persistent_load()
    # functions that should be used by the pickler. All even integers
    # are pickled using persistent ids.

    def persistent_id(self, obj):
        """Return a persistent id for *obj*, or None to pickle it normally.

        Fix: the parameter and a local below previously shadowed the
        builtin `object`; renamed (positional call sites are unaffected).
        """
        if isinstance(obj, int) and obj % 2 == 0:
            self.id_count += 1
            return str(obj)
        elif obj == "test_false_value":
            # Deliberately returns a *falsy* id to test that path.
            self.false_count += 1
            return ""
        else:
            return None

    def persistent_load(self, oid):
        """Resolve a persistent id produced by persistent_id() above."""
        if not oid:
            self.load_false_count += 1
            return "test_false_value"
        else:
            self.load_count += 1
            value = int(oid)
            assert value % 2 == 0
            return value

    def test_persistence(self):
        """Round-trip a list and verify the id hooks fired the expected counts."""
        L = list(range(10)) + ["test_false_value"]
        for proto in protocols:
            self.id_count = 0
            self.false_count = 0
            self.load_false_count = 0
            self.load_count = 0
            self.assertEqual(self.loads(self.dumps(L, proto)), L)
            self.assertEqual(self.id_count, 5)
            self.assertEqual(self.false_count, 1)
            self.assertEqual(self.load_count, 5)
            self.assertEqual(self.load_false_count, 1)
class AbstractIdentityPersistentPicklerTests:
    """Mixin whose persistent hooks pass every object through untouched."""

    def persistent_id(self, obj):
        # Identity: each object serves as its own persistent id.
        return obj

    def persistent_load(self, pid):
        return pid

    def _check_return_correct_type(self, obj, proto):
        restored = self.loads(self.dumps(obj, proto))
        self.assertIsInstance(restored, type(obj))
        self.assertEqual(restored, obj)

    def test_return_correct_type(self):
        for proto in protocols:
            if proto == 0:
                # Protocol 0 supports only ASCII strings.
                self._check_return_correct_type("abc", 0)
                continue
            for obj in [b"abc\n", "abc\n", -1, -1.1 * 0.1, str]:
                self._check_return_correct_type(obj, proto)

    def test_protocol0_is_ascii_only(self):
        non_ascii_str = "\N{EMPTY SET}"
        self.assertRaises(pickle.PicklingError, self.dumps, non_ascii_str, 0)
        pickled = pickle.PERSID + non_ascii_str.encode('utf-8') + b'\n.'
        self.assertRaises(pickle.UnpicklingError, self.loads, pickled)
class AbstractPicklerUnpicklerObjectTests:
    """Tests exercising Pickler/Unpickler objects directly: memo handling,
    object reuse, and the unpickler's input buffering."""

    # Concrete subclasses must provide the classes under test.
    pickler_class = None
    unpickler_class = None

    def setUp(self):
        assert self.pickler_class
        assert self.unpickler_class

    def test_clear_pickler_memo(self):
        # To test whether clear_memo() has any effect, we pickle an object,
        # then pickle it again without clearing the memo; the two serialized
        # forms should be different. If we clear_memo() and then pickle the
        # object again, the third serialized form should be identical to the
        # first one we obtained.
        data = ["abcdefg", "abcdefg", 44]
        for proto in protocols:
            f = io.BytesIO()
            pickler = self.pickler_class(f, proto)

            pickler.dump(data)
            first_pickled = f.getvalue()

            # Reset BytesIO object.
            f.seek(0)
            f.truncate()

            pickler.dump(data)
            second_pickled = f.getvalue()

            # Reset the Pickler and BytesIO objects.
            pickler.clear_memo()
            f.seek(0)
            f.truncate()

            pickler.dump(data)
            third_pickled = f.getvalue()

            self.assertNotEqual(first_pickled, second_pickled)
            self.assertEqual(first_pickled, third_pickled)

    def test_priming_pickler_memo(self):
        # Verify that we can set the Pickler's memo attribute.
        data = ["abcdefg", "abcdefg", 44]
        f = io.BytesIO()
        pickler = self.pickler_class(f)

        pickler.dump(data)
        first_pickled = f.getvalue()

        f = io.BytesIO()
        primed = self.pickler_class(f)
        primed.memo = pickler.memo

        primed.dump(data)
        primed_pickled = f.getvalue()

        self.assertNotEqual(first_pickled, primed_pickled)

    def test_priming_unpickler_memo(self):
        # Verify that we can set the Unpickler's memo attribute.
        data = ["abcdefg", "abcdefg", 44]
        f = io.BytesIO()
        pickler = self.pickler_class(f)

        pickler.dump(data)
        first_pickled = f.getvalue()

        f = io.BytesIO()
        primed = self.pickler_class(f)
        primed.memo = pickler.memo

        primed.dump(data)
        primed_pickled = f.getvalue()

        unpickler = self.unpickler_class(io.BytesIO(first_pickled))
        unpickled_data1 = unpickler.load()

        self.assertEqual(unpickled_data1, data)

        primed = self.unpickler_class(io.BytesIO(primed_pickled))
        primed.memo = unpickler.memo
        unpickled_data2 = primed.load()

        primed.memo.clear()

        self.assertEqual(unpickled_data2, data)
        # Fixed: assertIs is the idiomatic identity check and produces a
        # clearer failure message than assertTrue(x is y).
        self.assertIs(unpickled_data2, unpickled_data1)

    def test_reusing_unpickler_objects(self):
        data1 = ["abcdefg", "abcdefg", 44]
        f = io.BytesIO()
        pickler = self.pickler_class(f)
        pickler.dump(data1)
        pickled1 = f.getvalue()

        data2 = ["abcdefg", 44, 44]
        f = io.BytesIO()
        pickler = self.pickler_class(f)
        pickler.dump(data2)
        pickled2 = f.getvalue()

        f = io.BytesIO()
        f.write(pickled1)
        f.seek(0)
        unpickler = self.unpickler_class(f)
        self.assertEqual(unpickler.load(), data1)

        # The same unpickler object can read a second, different stream.
        f.seek(0)
        f.truncate()
        f.write(pickled2)
        f.seek(0)
        self.assertEqual(unpickler.load(), data2)

    def _check_multiple_unpicklings(self, ioclass, *, seekable=True):
        for proto in protocols:
            with self.subTest(proto=proto):
                data1 = [(x, str(x)) for x in range(2000)] + [b"abcde", len]
                f = ioclass()
                pickler = self.pickler_class(f, protocol=proto)
                pickler.dump(data1)
                pickled = f.getvalue()

                N = 5
                f = ioclass(pickled * N)
                unpickler = self.unpickler_class(f)
                for i in range(N):
                    if seekable:
                        pos = f.tell()
                    self.assertEqual(unpickler.load(), data1)
                    if seekable:
                        # Each load must consume exactly one pickle's bytes.
                        self.assertEqual(f.tell(), pos + len(pickled))
                self.assertRaises(EOFError, unpickler.load)

    def test_multiple_unpicklings_seekable(self):
        self._check_multiple_unpicklings(io.BytesIO)

    def test_multiple_unpicklings_unseekable(self):
        self._check_multiple_unpicklings(UnseekableIO, seekable=False)

    def test_multiple_unpicklings_minimal(self):
        # File-like object that doesn't support peek() and readinto()
        # (bpo-39681)
        self._check_multiple_unpicklings(MinimalIO, seekable=False)

    def test_unpickling_buffering_readline(self):
        # Issue #12687: the unpickler's buffering logic could fail with
        # text mode opcodes.
        data = list(range(10))
        for proto in protocols:
            for buf_size in range(1, 11):
                f = io.BufferedRandom(io.BytesIO(), buffer_size=buf_size)
                pickler = self.pickler_class(f, protocol=proto)
                pickler.dump(data)
                f.seek(0)
                unpickler = self.unpickler_class(f)
                self.assertEqual(unpickler.load(), data)
# Tests for dispatch_table attribute
REDUCE_A = 'reduce_A'  # sentinel payload produced by AAA's custom reducer
class AAA(object):
    # Always reduces to the plain string REDUCE_A.
    def __reduce__(self):
        return str, (REDUCE_A,)
class BBB(object):
    def __init__(self):
        # Add an instance attribute to enable state-saving routines at pickling
        # time.
        self.a = "some attribute"
    def __setstate__(self, state):
        # Marker value proves whether this default state setter ran.
        self.a = "BBB.__setstate__"
def setstate_bbb(obj, state):
    """Custom state setter for BBB objects.

    Such a callable may be created by other persons than the ones who created
    the BBB class. If passed as the state_setter item of a custom reducer,
    this allows for custom state-setting behavior of BBB objects. Think of it
    as the analogue of list_setitems or dict_setitems, but for foreign
    classes/functions.
    """
    setattr(obj, "a", "custom state_setter")
class AbstractCustomPicklerClass:
    """Pickler mixin providing a reducer_override() hook for the hook tests."""

    def reducer_override(self, obj):
        name = getattr(obj, "__name__", None)
        if name == 'f':
            # asking the pickler to save f as 5
            return int, (5, )
        if name == 'MyClass':
            return str, ('some str',)
        if name == 'g':
            # an invalid result (not a 2-5 tuple or a string): the pickler
            # should raise a proper error for it
            return False
        if name == 'h':
            # simulate a failing reducer; the error must propagate to the
            # original ``dump`` call
            raise ValueError('The reducer just failed')
        # Anything else falls back to the default pickling machinery.
        return NotImplemented
class AbstractHookTests:
    """Tests for the Pickler.reducer_override() hook."""

    def test_pickler_hook(self):
        # test the ability of a custom, user-defined CPickler subclass to
        # override the default reducing routines of any type using the method
        # reducer_override
        def f():
            pass

        def g():
            pass

        def h():
            pass

        class MyClass:
            pass

        for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
            with self.subTest(proto=proto):
                bio = io.BytesIO()
                p = self.pickler_class(bio, proto)

                p.dump([f, MyClass, math.log])
                new_f, some_str, math_log = pickle.loads(bio.getvalue())

                self.assertEqual(new_f, 5)
                self.assertEqual(some_str, 'some str')
                # math.log does not have its usual reducer overridden, so the
                # custom reduction callback should silently direct the pickler
                # to the default pickling by attribute, by returning
                # NotImplemented
                self.assertIs(math_log, math.log)

                with self.assertRaises(pickle.PicklingError):
                    p.dump(g)

                with self.assertRaisesRegex(
                        ValueError, 'The reducer just failed'):
                    p.dump(h)

    @support.cpython_only
    def test_reducer_override_no_reference_cycle(self):
        # bpo-39492: reducer_override used to induce a spurious reference cycle
        # inside the Pickler object, that could prevent all serialized objects
        # from being garbage-collected without explicitly invoking gc.collect.
        for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
            with self.subTest(proto=proto):
                def f():
                    pass

                wr = weakref.ref(f)

                bio = io.BytesIO()
                p = self.pickler_class(bio, proto)
                p.dump(f)
                new_f = pickle.loads(bio.getvalue())
                # Fixed: was a bare `assert`, which is silently stripped when
                # Python runs with -O; assertEqual always executes.
                self.assertEqual(new_f, 5)

                del p
                del f

                self.assertIsNone(wr())
class AbstractDispatchTableTests:
    """Tests for per-pickler dispatch_table support (copyreg-style reducers).

    NOTE: _test_dispatch_table mutates the shared dispatch_table step by
    step; the assertions depend on that exact order.
    """

    def test_default_dispatch_table(self):
        # No dispatch_table attribute by default
        f = io.BytesIO()
        p = self.pickler_class(f, 0)
        with self.assertRaises(AttributeError):
            p.dispatch_table
        self.assertFalse(hasattr(p, 'dispatch_table'))

    def test_class_dispatch_table(self):
        # A dispatch_table attribute can be specified class-wide
        dt = self.get_dispatch_table()

        class MyPickler(self.pickler_class):
            dispatch_table = dt

        def dumps(obj, protocol=None):
            f = io.BytesIO()
            p = MyPickler(f, protocol)
            self.assertEqual(p.dispatch_table, dt)
            p.dump(obj)
            return f.getvalue()

        self._test_dispatch_table(dumps, dt)

    def test_instance_dispatch_table(self):
        # A dispatch_table attribute can also be specified instance-wide
        dt = self.get_dispatch_table()

        def dumps(obj, protocol=None):
            f = io.BytesIO()
            p = self.pickler_class(f, protocol)
            p.dispatch_table = dt
            self.assertEqual(p.dispatch_table, dt)
            p.dump(obj)
            return f.getvalue()

        self._test_dispatch_table(dumps, dt)

    def _test_dispatch_table(self, dumps, dispatch_table):
        def custom_load_dump(obj):
            return pickle.loads(dumps(obj, 0))

        def default_load_dump(obj):
            return pickle.loads(pickle.dumps(obj, 0))

        # pickling complex numbers using protocol 0 relies on copyreg
        # so check pickling a complex number still works
        z = 1 + 2j
        self.assertEqual(custom_load_dump(z), z)
        self.assertEqual(default_load_dump(z), z)

        # modify pickling of complex
        REDUCE_1 = 'reduce_1'
        def reduce_1(obj):
            return str, (REDUCE_1,)
        dispatch_table[complex] = reduce_1
        self.assertEqual(custom_load_dump(z), REDUCE_1)
        self.assertEqual(default_load_dump(z), z)

        # check picklability of AAA and BBB
        a = AAA()
        b = BBB()
        self.assertEqual(custom_load_dump(a), REDUCE_A)
        self.assertIsInstance(custom_load_dump(b), BBB)
        self.assertEqual(default_load_dump(a), REDUCE_A)
        self.assertIsInstance(default_load_dump(b), BBB)

        # modify pickling of BBB
        dispatch_table[BBB] = reduce_1
        self.assertEqual(custom_load_dump(a), REDUCE_A)
        self.assertEqual(custom_load_dump(b), REDUCE_1)
        self.assertEqual(default_load_dump(a), REDUCE_A)
        self.assertIsInstance(default_load_dump(b), BBB)

        # revert pickling of BBB and modify pickling of AAA
        REDUCE_2 = 'reduce_2'
        def reduce_2(obj):
            return str, (REDUCE_2,)
        dispatch_table[AAA] = reduce_2
        del dispatch_table[BBB]
        self.assertEqual(custom_load_dump(a), REDUCE_2)
        self.assertIsInstance(custom_load_dump(b), BBB)
        self.assertEqual(default_load_dump(a), REDUCE_A)
        self.assertIsInstance(default_load_dump(b), BBB)

        # End-to-end testing of save_reduce with the state_setter keyword
        # argument. This is a dispatch_table test as the primary goal of
        # state_setter is to tweak objects reduction behavior.
        # In particular, state_setter is useful when the default __setstate__
        # behavior is not flexible enough.

        # No custom reducer for b has been registered for now, so
        # BBB.__setstate__ should be used at unpickling time
        self.assertEqual(default_load_dump(b).a, "BBB.__setstate__")

        def reduce_bbb(obj):
            return BBB, (), obj.__dict__, None, None, setstate_bbb

        dispatch_table[BBB] = reduce_bbb

        # The custom reducer reduce_bbb includes a state setter, that should
        # have priority over BBB.__setstate__
        self.assertEqual(custom_load_dump(b).a, "custom state_setter")
if __name__ == "__main__":
    # Print some stuff that can be used to rewrite DATA{0,1,2}
    # Developer utility: regenerates the DATA{n} byte-literal constants and
    # their pickletools disassembly for every protocol.
    from pickletools import dis
    x = create_data()
    for i in range(pickle.HIGHEST_PROTOCOL+1):
        p = pickle.dumps(x, i)
        print("DATA{0} = (".format(i))
        for j in range(0, len(p), 20):
            b = bytes(p[j:j+20])
            print(" {0!r}".format(b))
        print(")")
        print()
        print("# Disassembly of DATA{0}".format(i))
        print("DATA{0}_DIS = \"\"\"\\".format(i))
        dis(p)
        print("\"\"\"")
        print()
|
fifo_queue_test.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.FIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import re
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
class FIFOQueueTest(tf.test.TestCase):
  def testConstructor(self):
    """A single-dtype FIFOQueue's node def records dtype, shapes, capacity."""
    with tf.Graph().as_default():
      q = tf.FIFOQueue(10, tf.float32, name="Q")
      self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
      # Compare against the exact expected NodeDef text proto.
      self.assertProtoEquals("""
        name:'Q' op:'FIFOQueue'
        attr { key: 'component_types' value { list { type: DT_FLOAT } } }
        attr { key: 'shapes' value { list {} } }
        attr { key: 'capacity' value { i: 10 } }
        attr { key: 'container' value { s: '' } }
        attr { key: 'shared_name' value { s: '' } }
        """, q.queue_ref.op.node_def)
  def testMultiQueueConstructor(self):
    """A multi-component queue records all dtypes and the shared name."""
    with tf.Graph().as_default():
      q = tf.FIFOQueue(5, (tf.int32, tf.float32), shared_name="foo", name="Q")
      self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
      self.assertProtoEquals("""
        name:'Q' op:'FIFOQueue'
        attr { key: 'component_types' value { list {
          type: DT_INT32 type : DT_FLOAT
        } } }
        attr { key: 'shapes' value { list {} } }
        attr { key: 'capacity' value { i: 5 } }
        attr { key: 'container' value { s: '' } }
        attr { key: 'shared_name' value { s: 'foo' } }
        """, q.queue_ref.op.node_def)
  def testConstructorWithShapes(self):
    """Explicit per-component shapes are serialized into the node def."""
    with tf.Graph().as_default():
      q = tf.FIFOQueue(5, (tf.int32, tf.float32),
                       shapes=(tf.TensorShape([1, 1, 2, 3]),
                               tf.TensorShape([5, 8])), name="Q")
      self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
      self.assertProtoEquals("""
        name:'Q' op:'FIFOQueue'
        attr { key: 'component_types' value { list {
          type: DT_INT32 type : DT_FLOAT
        } } }
        attr { key: 'shapes' value { list {
          shape { dim { size: 1 }
                  dim { size: 1 }
                  dim { size: 2 }
                  dim { size: 3 } }
          shape { dim { size: 5 }
                  dim { size: 8 } }
        } } }
        attr { key: 'capacity' value { i: 5 } }
        attr { key: 'container' value { s: '' } }
        attr { key: 'shared_name' value { s: '' } }
        """, q.queue_ref.op.node_def)
def testEnqueue(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueHalf(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float16)
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
  def testEnqueueWithShape(self):
    """Enqueue validates the element shape against the queue's shapes."""
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32, shapes=(3, 2))
      # A (3, 2) element is accepted...
      enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
      enqueue_correct_op.run()
      # ...but a (2, 3) element is rejected at graph-construction time.
      with self.assertRaises(ValueError):
        q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
      self.assertEqual(1, q.size().eval())
  def testEnqueueManyWithShape(self):
    """enqueue_many splits a leading batch dimension into four elements."""
    with self.test_session():
      q = tf.FIFOQueue(10, [tf.int32, tf.int32],
                       shapes=[(), (2,)])
      q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
      self.assertEqual(4, q.size().eval())
  def testEnqueueDictWithoutNames(self):
    """Dict-style enqueue requires the queue to be built with names."""
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      with self.assertRaisesRegexp(ValueError, "must have names"):
        q.enqueue({"a": 12.0})
      with self.assertRaisesRegexp(ValueError, "must have names"):
        q.enqueue_many({"a": [12.0, 13.0]})
  def testParallelEnqueue(self):
    """Elements enqueued from many threads are all seen by one dequeuer."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      enqueue_ops = [q.enqueue((x,)) for x in elems]
      dequeued_t = q.dequeue()

      # Run one producer thread for each element in elems.
      def enqueue(enqueue_op):
        sess.run(enqueue_op)
      threads = [self.checkedThread(target=enqueue, args=(e,))
                 for e in enqueue_ops]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()

      # Dequeue every element using a single thread.
      results = []
      for _ in xrange(len(elems)):
        results.append(dequeued_t.eval())
      # Thread scheduling makes the order unpredictable; compare as multisets.
      self.assertItemsEqual(elems, results)
  def testParallelDequeue(self):
    """Elements enqueued by one thread can be dequeued by many threads."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      enqueue_ops = [q.enqueue((x,)) for x in elems]
      dequeued_t = q.dequeue()

      # Enqueue every element using a single thread.
      for enqueue_op in enqueue_ops:
        enqueue_op.run()

      # Run one consumer thread for each element in elems.
      # NOTE: list.append is thread-safe in CPython, so `results` needs
      # no extra locking here.
      results = []
      def dequeue():
        results.append(sess.run(dequeued_t))
      threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()
      self.assertItemsEqual(elems, results)
  def testDequeue(self):
    """Elements come back one at a time, in FIFO order."""
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      elems = [10.0, 20.0, 30.0]
      enqueue_ops = [q.enqueue((x,)) for x in elems]
      dequeued_t = q.dequeue()

      for enqueue_op in enqueue_ops:
        enqueue_op.run()

      for i in xrange(len(elems)):
        vals = dequeued_t.eval()
        self.assertEqual([elems[i]], vals)
  def testDequeueHalf(self):
    """FIFO ordering also holds for a float16 queue."""
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float16)
      elems = [10.0, 20.0, 30.0]
      enqueue_ops = [q.enqueue((x,)) for x in elems]
      dequeued_t = q.dequeue()

      for enqueue_op in enqueue_ops:
        enqueue_op.run()

      for i in xrange(len(elems)):
        vals = dequeued_t.eval()
        self.assertEqual([elems[i]], vals)
  def testEnqueueAndBlockingDequeue(self):
    """A dequeue blocked on an empty queue is released by later enqueues."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(3, tf.float32)
      elems = [10.0, 20.0, 30.0]
      enqueue_ops = [q.enqueue((x,)) for x in elems]
      dequeued_t = q.dequeue()

      def enqueue():
        # The enqueue_ops should run after the dequeue op has blocked.
        # TODO(mrry): Figure out how to do this without sleeping.
        time.sleep(0.1)
        for enqueue_op in enqueue_ops:
          sess.run(enqueue_op)

      results = []
      def dequeue():
        for _ in xrange(len(elems)):
          results.append(sess.run(dequeued_t))

      enqueue_thread = self.checkedThread(target=enqueue)
      dequeue_thread = self.checkedThread(target=dequeue)
      enqueue_thread.start()
      dequeue_thread.start()
      enqueue_thread.join()
      dequeue_thread.join()

      # FIFO order must be preserved even across the blocking dequeue.
      for elem, result in zip(elems, results):
        self.assertEqual([elem], result)
  def testMultiEnqueueAndDequeue(self):
    """Tuple-valued elements keep their components paired through the queue."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, (tf.int32, tf.float32))
      elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
      enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
      dequeued_t = q.dequeue()

      for enqueue_op in enqueue_ops:
        enqueue_op.run()

      for i in xrange(len(elems)):
        x_val, y_val = sess.run(dequeued_t)
        x, y = elems[i]
        self.assertEqual([x], x_val)
        self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
with self.test_session():
q = tf.FIFOQueue(10, tf.float32)
self.assertEqual([0], q.size().eval())
  def testQueueSizeAfterEnqueueAndDequeue(self):
    """size() tracks enqueue/dequeue and is a scalar tensor."""
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      enqueue_op = q.enqueue((10.0,))
      dequeued_t = q.dequeue()
      size = q.size()
      # A scalar has the empty shape.
      self.assertEqual([], size.get_shape())

      enqueue_op.run()
      self.assertEqual(1, size.eval())
      dequeued_t.op.run()
      self.assertEqual(0, size.eval())
  def testEnqueueMany(self):
    """Running an enqueue_many op twice enqueues the batch twice."""
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue()
      enqueue_op.run()
      enqueue_op.run()

      # Expect elems repeated twice, in order.
      for i in range(8):
        vals = dequeued_t.eval()
        self.assertEqual([elems[i % 4]], vals)
  def testEmptyEnqueueMany(self):
    """enqueue_many of a zero-length batch is a no-op."""
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      # Batch dimension 0 means "no elements" regardless of inner shape.
      empty_t = tf.constant([], dtype=tf.float32,
                            shape=[0, 2, 3])
      enqueue_op = q.enqueue_many((empty_t,))
      size_t = q.size()

      self.assertEqual([0], size_t.eval())
      enqueue_op.run()
      self.assertEqual([0], size_t.eval())
  def testEmptyDequeueMany(self):
    """dequeue_many(0) returns an empty batch without blocking."""
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32, shapes=())
      enqueue_op = q.enqueue((10.0,))
      dequeued_t = q.dequeue_many(0)

      # Works whether the queue is empty or holds elements.
      self.assertEqual([], dequeued_t.eval().tolist())
      enqueue_op.run()
      self.assertEqual([], dequeued_t.eval().tolist())
  def testEmptyDequeueUpTo(self):
    """dequeue_up_to(0) returns an empty batch without blocking."""
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32, shapes=())
      enqueue_op = q.enqueue((10.0,))
      dequeued_t = q.dequeue_up_to(0)

      # Works whether the queue is empty or holds elements.
      self.assertEqual([], dequeued_t.eval().tolist())
      enqueue_op.run()
      self.assertEqual([], dequeued_t.eval().tolist())
  def testEmptyDequeueManyWithNoShape(self):
    """dequeue_many requires fully specified component shapes."""
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      # Expect the operation to fail due to the shape not being constrained.
      with self.assertRaisesOpError("specified shapes"):
        q.dequeue_many(0).eval()
  def testMultiEnqueueMany(self):
    """enqueue_many on a two-component queue pairs batches element-wise."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, (tf.float32, tf.int32))
      float_elems = [10.0, 20.0, 30.0, 40.0]
      int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
      enqueue_op = q.enqueue_many((float_elems, int_elems))
      dequeued_t = q.dequeue()

      enqueue_op.run()
      enqueue_op.run()

      for i in range(8):
        float_val, int_val = sess.run(dequeued_t)
        self.assertEqual(float_elems[i % 4], float_val)
        self.assertAllEqual(int_elems[i % 4], int_val)
  def testDequeueMany(self):
    """dequeue_many(4) returns consecutive 4-element batches in order."""
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(4)

      enqueue_op.run()

      self.assertAllEqual(elems[0:4], dequeued_t.eval())
      self.assertAllEqual(elems[4:8], dequeued_t.eval())
  def testDequeueUpToNoBlocking(self):
    """dequeue_up_to behaves like dequeue_many when enough elements exist."""
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_up_to(4)

      enqueue_op.run()

      self.assertAllEqual(elems[0:4], dequeued_t.eval())
      self.assertAllEqual(elems[4:8], dequeued_t.eval())
  def testMultiDequeueMany(self):
    """dequeue_many on a multi-component queue returns batched, fully
    shaped tensors; a plain dequeue then continues from the same position."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, (tf.float32, tf.int32),
                       shapes=((), (2,)))
      float_elems = [
          10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
                   [11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
      enqueue_op = q.enqueue_many((float_elems, int_elems))
      dequeued_t = q.dequeue_many(4)
      dequeued_single_t = q.dequeue()

      enqueue_op.run()

      float_val, int_val = sess.run(dequeued_t)
      self.assertAllEqual(float_elems[0:4], float_val)
      self.assertAllEqual(int_elems[0:4], int_val)
      # The static shape of dequeue_many is fully known: (4,) and (4, 2).
      self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
      self.assertEqual(int_val.shape, dequeued_t[1].get_shape())

      float_val, int_val = sess.run(dequeued_t)
      self.assertAllEqual(float_elems[4:8], float_val)
      self.assertAllEqual(int_elems[4:8], int_val)

      float_val, int_val = sess.run(dequeued_single_t)
      self.assertAllEqual(float_elems[8], float_val)
      self.assertAllEqual(int_elems[8], int_val)
      self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
      self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
  def testMultiDequeueUpToNoBlocking(self):
    """dequeue_up_to returns full batches but with unknown batch dimension."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, (tf.float32, tf.int32),
                       shapes=((), (2,)))
      float_elems = [
          10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
                   [11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
      enqueue_op = q.enqueue_many((float_elems, int_elems))
      dequeued_t = q.dequeue_up_to(4)

      enqueue_op.run()

      float_val, int_val = sess.run(dequeued_t)
      self.assertAllEqual(float_elems[0:4], float_val)
      self.assertAllEqual(int_elems[0:4], int_val)
      # Unlike dequeue_many, the leading (batch) dimension is statically
      # unknown because fewer elements might be returned.
      self.assertEqual([None], dequeued_t[0].get_shape().as_list())
      self.assertEqual([None, 2], dequeued_t[1].get_shape().as_list())

      float_val, int_val = sess.run(dequeued_t)
      self.assertAllEqual(float_elems[4:8], float_val)
      self.assertAllEqual(int_elems[4:8], int_val)
  def testHighDimension(self):
    """Rank-4 elements round-trip through enqueue_many/dequeue_many."""
    with self.test_session():
      q = tf.FIFOQueue(10, tf.int32, (4, 4, 4, 4))
      # Ten elements, each a 4x4x4x4 block filled with its index.
      elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(10)

      enqueue_op.run()
      self.assertAllEqual(dequeued_t.eval(), elems)
  def testEnqueueWrongShape(self):
    """Shape mismatches are rejected at graph-construction time."""
    q = tf.FIFOQueue(10, (tf.int32, tf.int32), ((), (2)))

    with self.assertRaises(ValueError):
      # First component must be a scalar.
      q.enqueue(([1, 2], [2, 2]))

    with self.assertRaises(ValueError):
      # Batch sizes of the two components disagree (1 vs 3).
      q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
  def testBatchSizeMismatch(self):
    """enqueue_many components must share a common batch size, even when
    one component's shape is only partially known (placeholder)."""
    q = tf.FIFOQueue(10, (tf.int32, tf.int32, tf.int32), ((), (), ()))

    with self.assertRaises(ValueError):
      q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))

    with self.assertRaises(ValueError):
      q.enqueue_many(([1, 2, 3], [1, 2], tf.placeholder(tf.int32)))

    with self.assertRaises(ValueError):
      q.enqueue_many((tf.placeholder(tf.int32), [1, 2], [1, 2, 3]))
  def testEnqueueManyEmptyTypeConversion(self):
    """Empty Python lists are converted to the queue's component dtypes."""
    q = tf.FIFOQueue(10, (tf.int32, tf.float32), ((), ()))
    enq = q.enqueue_many(([], []))
    # inputs[0] is the queue handle; 1 and 2 are the converted components.
    self.assertEqual(tf.int32, enq.inputs[1].dtype)
    self.assertEqual(tf.float32, enq.inputs[2].dtype)
  def testEnqueueWrongType(self):
    """Mismatched component dtypes are rejected at graph-construction time."""
    q = tf.FIFOQueue(10, (tf.int32, tf.float32), ((), ()))

    with self.assertRaises(ValueError):
      q.enqueue((tf.placeholder(tf.int32), tf.placeholder(tf.int32)))

    with self.assertRaises(ValueError):
      q.enqueue_many((tf.placeholder(tf.int32), tf.placeholder(tf.int32)))
  def testEnqueueWrongShapeAtRuntime(self):
    """A shape mismatch that is only known at run time (via a placeholder)
    fails with InvalidArgumentError when the op executes."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (3, 3)))
      elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
      # Unknown static shape; the bad shape is supplied through feed_dict.
      elems_bad = tf.placeholder(tf.int32)
      enqueue_op = q.enqueue((elems_ok, elems_bad))
      with self.assertRaisesRegexp(
          tf.errors.InvalidArgumentError, r"Expected \[3,3\], got \[3,4\]"):
        sess.run([enqueue_op],
                 feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
  def testEnqueueDequeueManyWrongShape(self):
    """A runtime shape mismatch in one component fails the whole
    enqueue_many with a message naming the offending component."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (3, 3)))
      elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
      elems_bad = tf.placeholder(tf.int32)
      enqueue_op = q.enqueue_many((elems_ok, elems_bad))
      dequeued_t = q.dequeue_many(2)
      with self.assertRaisesRegexp(
          tf.errors.InvalidArgumentError,
          "Shape mismatch in tuple component 1. "
          r"Expected \[2,3,3\], got \[2,3,4\]"):
        sess.run([enqueue_op],
                 feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
        # NOTE(review): this eval sits after a sess.run expected to raise, so
        # it appears unreachable inside the assertRaises block — confirm the
        # intended indentation against upstream history.
        dequeued_t.eval()
  def testParallelEnqueueMany(self):
    """Ten concurrent enqueue_many calls fill the queue without loss."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(1000, tf.float32, shapes=())
      elems = [10.0 * x for x in range(100)]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(1000)

      # Enqueue 100 items in parallel on 10 threads.
      def enqueue():
        sess.run(enqueue_op)
      threads = [self.checkedThread(target=enqueue) for _ in range(10)]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()

      self.assertItemsEqual(dequeued_t.eval(), elems * 10)
  def testParallelDequeueMany(self):
    """Ten concurrent dequeue_many(100) calls drain 1000 elements exactly."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(1000, tf.float32, shapes=())
      elems = [10.0 * x for x in range(1000)]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(100)

      enqueue_op.run()

      # Dequeue 100 items in parallel on 10 threads.
      dequeued_elems = []
      def dequeue():
        dequeued_elems.extend(sess.run(dequeued_t))
      threads = [self.checkedThread(target=dequeue) for _ in range(10)]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()
      self.assertItemsEqual(elems, dequeued_elems)
  def testParallelDequeueUpTo(self):
    """dequeue_up_to drains a closed queue across threads; the final batch
    may be smaller than requested but nothing is lost."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(1000, tf.float32, shapes=())
      elems = [10.0 * x for x in range(1000)]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      # 101 does not divide 1000 evenly, forcing a short final batch.
      dequeued_t = q.dequeue_up_to(101)

      enqueue_op.run()
      close_op.run()

      # Dequeue up to 101 items in parallel on 10 threads, from closed queue.
      dequeued_elems = []
      def dequeue():
        dequeued_elems.extend(sess.run(dequeued_t))
      threads = [self.checkedThread(target=dequeue) for _ in range(10)]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()
      self.assertItemsEqual(elems, dequeued_elems)
  def testParallelEnqueueAndDequeue(self):
    """Concurrent producers and consumers on a nearly full queue never see
    a value other than the two that were ever enqueued."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(50, tf.float32, shapes=())
      # Start one below capacity so enqueues must interleave with dequeues.
      initial_elements = [10.0] * 49
      q.enqueue_many((initial_elements,)).run()

      enqueue_op = q.enqueue((20.0,))
      dequeued_t = q.dequeue()

      def enqueue():
        for _ in xrange(100):
          sess.run(enqueue_op)
      def dequeue():
        for _ in xrange(100):
          self.assertTrue(sess.run(dequeued_t) in (10.0, 20.0))

      enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
      dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
      for enqueue_thread in enqueue_threads:
        enqueue_thread.start()
      for dequeue_thread in dequeue_threads:
        dequeue_thread.start()
      for enqueue_thread in enqueue_threads:
        enqueue_thread.join()
      for dequeue_thread in dequeue_threads:
        dequeue_thread.join()

      # Dequeue the initial count of elements to clean up.
      cleanup_elems = q.dequeue_many(49).eval()
      for elem in cleanup_elems:
        self.assertTrue(elem in (10.0, 20.0))
  def testMixtureOfEnqueueAndEnqueueMany(self):
    """Randomly interleaved enqueue / enqueue_many calls still deliver
    0..249 to a blocking consumer in strict FIFO order."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.int32, shapes=())
      enqueue_placeholder = tf.placeholder(tf.int32, shape=())
      enqueue_op = q.enqueue((enqueue_placeholder,))
      enqueuemany_placeholder = tf.placeholder(
          tf.int32, shape=(None,))
      enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))

      dequeued_t = q.dequeue()
      close_op = q.close()

      def dequeue():
        # Consumer asserts strict ordering of the 250 values.
        for i in xrange(250):
          self.assertEqual(i, sess.run(dequeued_t))
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()

      elements_enqueued = 0
      while elements_enqueued < 250:
        # With equal probability, run Enqueue or enqueue_many.
        if random.random() > 0.5:
          enqueue_op.run({enqueue_placeholder: elements_enqueued})
          elements_enqueued += 1
        else:
          count = random.randint(0, min(20, 250 - elements_enqueued))
          range_to_enqueue = np.arange(elements_enqueued,
                                       elements_enqueued + count,
                                       dtype=np.int32)
          enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
          elements_enqueued += count

      close_op.run()
      dequeue_thread.join()
      self.assertEqual(0, q.size().eval())
  def testMixtureOfDequeueAndDequeueMany(self):
    """Randomly interleaved dequeue / dequeue_many calls consume 0..249
    in strict FIFO order from a background producer."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.int32, shapes=())
      enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
      dequeued_t = q.dequeue()
      count_placeholder = tf.placeholder(tf.int32, shape=())
      dequeuemany_t = q.dequeue_many(count_placeholder)

      def enqueue():
        sess.run(enqueue_op)
      enqueue_thread = self.checkedThread(target=enqueue)
      enqueue_thread.start()

      elements_dequeued = 0
      while elements_dequeued < 250:
        # With equal probability, run Dequeue or dequeue_many.
        if random.random() > 0.5:
          self.assertEqual(elements_dequeued, dequeued_t.eval())
          elements_dequeued += 1
        else:
          count = random.randint(0, min(20, 250 - elements_dequeued))
          expected_range = np.arange(elements_dequeued,
                                     elements_dequeued + count,
                                     dtype=np.int32)
          self.assertAllEqual(
              expected_range, dequeuemany_t.eval({count_placeholder: count}))
          elements_dequeued += count

      q.close().run()
      enqueue_thread.join()
      self.assertEqual(0, q.size().eval())
  def testBlockingDequeueMany(self):
    """dequeue_many blocks on an empty queue until a batch is enqueued."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(4)

      dequeued_elems = []

      def enqueue():
        # The enqueue_op should run after the dequeue op has blocked.
        # TODO(mrry): Figure out how to do this without sleeping.
        time.sleep(0.1)
        sess.run(enqueue_op)

      def dequeue():
        dequeued_elems.extend(sess.run(dequeued_t).tolist())

      enqueue_thread = self.checkedThread(target=enqueue)
      dequeue_thread = self.checkedThread(target=dequeue)
      enqueue_thread.start()
      dequeue_thread.start()
      enqueue_thread.join()
      dequeue_thread.join()

      self.assertAllEqual(elems, dequeued_elems)
  def testBlockingDequeueUpTo(self):
    """dequeue_up_to blocks on an empty queue until a batch is enqueued."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_up_to(4)

      dequeued_elems = []

      def enqueue():
        # The enqueue_op should run after the dequeue op has blocked.
        # TODO(mrry): Figure out how to do this without sleeping.
        time.sleep(0.1)
        sess.run(enqueue_op)

      def dequeue():
        dequeued_elems.extend(sess.run(dequeued_t).tolist())

      enqueue_thread = self.checkedThread(target=enqueue)
      dequeue_thread = self.checkedThread(target=dequeue)
      enqueue_thread.start()
      dequeue_thread.start()
      enqueue_thread.join()
      dequeue_thread.join()

      self.assertAllEqual(elems, dequeued_elems)
  def testDequeueManyWithTensorParameter(self):
    """The batch size of dequeue_many may itself be a dequeued tensor."""
    with self.test_session():
      # Define a first queue that contains integer counts.
      dequeue_counts = [random.randint(1, 10) for _ in range(100)]
      count_q = tf.FIFOQueue(100, tf.int32, ())
      enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
      total_count = sum(dequeue_counts)

      # Define a second queue that contains total_count elements.
      elems = [random.randint(0, 100) for _ in range(total_count)]
      q = tf.FIFOQueue(total_count, tf.int32, ())
      enqueue_elems_op = q.enqueue_many((elems,))

      # Define a subgraph that first dequeues a count, then DequeuesMany
      # that number of elements.
      dequeued_t = q.dequeue_many(count_q.dequeue())

      enqueue_counts_op.run()
      enqueue_elems_op.run()

      dequeued_elems = []
      for _ in dequeue_counts:
        dequeued_elems.extend(dequeued_t.eval())
      self.assertEqual(elems, dequeued_elems)
  def testDequeueFromClosedQueue(self):
    """A closed queue serves its remaining elements, then raises."""
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue()

      enqueue_op.run()
      close_op.run()
      for elem in elems:
        self.assertEqual([elem], dequeued_t.eval())

      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                   "is closed and has insufficient"):
        dequeued_t.eval()
  def testBlockingDequeueFromClosedQueue(self):
    """Closing a queue unblocks a dequeuer once elements are exhausted."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue()

      enqueue_op.run()

      def dequeue():
        for elem in elems:
          self.assertEqual([elem], sess.run(dequeued_t))
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testBlockingDequeueFromClosedEmptyQueue(self):
    """Closing an empty queue fails a blocked dequeue with OutOfRange."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32)
      close_op = q.close()
      dequeued_t = q.dequeue()

      def dequeue():
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testBlockingDequeueManyFromClosedQueue(self):
    """A full dequeue_many batch succeeds; the next one fails after close."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_many(4)

      enqueue_op.run()

      def dequeue():
        self.assertAllEqual(elems, sess.run(dequeued_t))
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testBlockingDequeueManyButNotAllFromClosedQueue(self):
    """dequeue_many(3) leaves one element; the next batch request fails
    once the queue is closed with insufficient elements."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_many(3)

      enqueue_op.run()

      def dequeue():
        self.assertAllEqual(elems[:3], sess.run(dequeued_t))
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testDequeueUpToFromClosedQueueReturnsRemainder(self):
    """Unlike dequeue_many, dequeue_up_to returns the short remainder
    from a closed queue instead of raising."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_up_to(3)

      enqueue_op.run()

      def dequeue():
        self.assertAllEqual(elems[:3], sess.run(dequeued_t))
        self.assertAllEqual(elems[3:], sess.run(dequeued_t))

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
    """An enqueue_many equal to capacity interleaves with a concurrent
    dequeue_many; close() cancels the unsatisfiable second batch and the
    leftover element remains dequeueable."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(4, tf.float32, ())
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_many(3)
      cleanup_dequeue_t = q.dequeue()

      def enqueue():
        sess.run(enqueue_op)

      def dequeue():
        self.assertAllEqual(elems[0:3], sess.run(dequeued_t))
        with self.assertRaises(tf.errors.OutOfRangeError):
          sess.run(dequeued_t)
        self.assertEqual(elems[3], sess.run(cleanup_dequeue_t))

      def close():
        sess.run(close_op)

      enqueue_thread = self.checkedThread(target=enqueue)
      enqueue_thread.start()

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)

      close_thread = self.checkedThread(target=close)
      close_thread.start()

      enqueue_thread.join()
      dequeue_thread.join()
      close_thread.join()
  def testClosedBlockingDequeueManyRestoresPartialBatch(self):
    """When close() aborts a partially-assembled dequeue_many batch, the
    already-taken elements are put back in their original order."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(4, (tf.float32, tf.float32), ((), ()))
      elems_a = [1.0, 2.0, 3.0]
      elems_b = [10.0, 20.0, 30.0]
      enqueue_op = q.enqueue_many((elems_a, elems_b))
      # Request 4 elements when only 3 exist, so the op blocks partially.
      dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
      cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
      close_op = q.close()

      enqueue_op.run()

      def dequeue():
        with self.assertRaises(tf.errors.OutOfRangeError):
          sess.run([dequeued_a_t, dequeued_b_t])

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)

      close_op.run()
      dequeue_thread.join()

      # Test that the elements in the partially-dequeued batch are
      # restored in the correct order.
      for elem_a, elem_b in zip(elems_a, elems_b):
        val_a, val_b = sess.run([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
        self.assertEqual(elem_a, val_a)
        self.assertEqual(elem_b, val_b)
      self.assertEqual(0, q.size().eval())
  def testBlockingDequeueManyFromClosedEmptyQueue(self):
    """Closing an empty queue fails a blocked dequeue_many with OutOfRange."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32, ())
      close_op = q.close()
      dequeued_t = q.dequeue_many(4)

      def dequeue():
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testBlockingDequeueUpToFromClosedEmptyQueue(self):
    """Closing an empty queue fails a blocked dequeue_up_to with OutOfRange
    (there is no remainder to return)."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32, ())
      close_op = q.close()
      dequeued_t = q.dequeue_up_to(4)

      def dequeue():
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
  def testEnqueueToClosedQueue(self):
    """Enqueueing to a closed queue raises CancelledError."""
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      enqueue_op = q.enqueue((10.0,))
      close_op = q.close()

      enqueue_op.run()
      close_op.run()

      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(tf.errors.CancelledError, "is closed"):
        enqueue_op.run()
  def testEnqueueManyToClosedQueue(self):
    """enqueue_many to a closed queue raises CancelledError."""
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()

      enqueue_op.run()
      close_op.run()

      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(tf.errors.CancelledError, "is closed"):
        enqueue_op.run()
  def testBlockingEnqueueToFullQueue(self):
    """An enqueue blocked on a full queue completes once space is freed,
    and its element arrives after the pre-existing ones."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(4, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue((50.0,))
      dequeued_t = q.dequeue()

      enqueue_op.run()

      def blocking_enqueue():
        sess.run(blocking_enqueue_op)
      thread = self.checkedThread(target=blocking_enqueue)
      thread.start()
      # The dequeue ops should run after the blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      for elem in elems:
        self.assertEqual([elem], dequeued_t.eval())
      self.assertEqual([50.0], dequeued_t.eval())
      thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.test_session() as sess:
q = tf.FIFOQueue(4, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
time.sleep(0.01)
self.assertEqual([50.0], dequeued_t.eval())
self.assertEqual([60.0], dequeued_t.eval())
  def testBlockingEnqueueBeforeClose(self):
    """A blocked enqueue issued before close() still succeeds; the queue
    then serves all five elements before becoming empty."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(4, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue((50.0,))
      close_op = q.close()
      dequeued_t = q.dequeue()

      enqueue_op.run()

      def blocking_enqueue():
        # Expect the operation to succeed once the dequeue op runs.
        sess.run(blocking_enqueue_op)
      enqueue_thread = self.checkedThread(target=blocking_enqueue)
      enqueue_thread.start()

      # The close_op should run after the blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)

      def close():
        sess.run(close_op)
      close_thread = self.checkedThread(target=close)
      close_thread.start()

      # The dequeue will unblock both threads.
      self.assertEqual(10.0, dequeued_t.eval())
      enqueue_thread.join()
      close_thread.join()

      for elem in [20.0, 30.0, 40.0, 50.0]:
        self.assertEqual(elem, dequeued_t.eval())
      self.assertEqual(0, q.size().eval())
  def testBlockingEnqueueManyBeforeClose(self):
    """A blocked enqueue_many issued before close() still delivers both of
    its elements, in order, after the pre-existing ones."""
    with self.test_session() as sess:
      q = tf.FIFOQueue(4, tf.float32)
      elems = [10.0, 20.0, 30.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
      close_op = q.close()
      dequeued_t = q.dequeue()

      enqueue_op.run()

      def blocking_enqueue():
        sess.run(blocking_enqueue_op)
      enqueue_thread = self.checkedThread(target=blocking_enqueue)
      enqueue_thread.start()

      # The close_op should run after the blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)

      def close():
        sess.run(close_op)
      close_thread = self.checkedThread(target=close)
      close_thread.start()

      # The dequeue will unblock both threads.
      self.assertEqual(10.0, dequeued_t.eval())
      enqueue_thread.join()
      close_thread.join()
      for elem in [20.0, 30.0, 50.0, 60.0]:
        self.assertEqual(elem, dequeued_t.eval())
def testDoesNotLoseValue(self):
with self.test_session():
q = tf.FIFOQueue(1, tf.float32)
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
enqueue_op.run()
for _ in range(500):
self.assertEqual(size_t.eval(), [1])
  def testSharedQueueSameSession(self):
    """Two queue handles with the same shared_name alias one queue."""
    with self.test_session():
      q1 = tf.FIFOQueue(
          1, tf.float32, shared_name="shared_queue")
      q1.enqueue((10.0,)).run()

      # Second handle created AFTER q1 already holds an element.
      q2 = tf.FIFOQueue(
          1, tf.float32, shared_name="shared_queue")

      q1_size_t = q1.size()
      q2_size_t = q2.size()

      self.assertEqual(q1_size_t.eval(), [1])
      self.assertEqual(q2_size_t.eval(), [1])

      # Dequeue through one handle is observed through the other.
      self.assertEqual(q2.dequeue().eval(), [10.0])

      self.assertEqual(q1_size_t.eval(), [0])
      self.assertEqual(q2_size_t.eval(), [0])

      q2.enqueue((20.0,)).run()

      self.assertEqual(q1_size_t.eval(), [1])
      self.assertEqual(q2_size_t.eval(), [1])

      self.assertEqual(q1.dequeue().eval(), [20.0])

      self.assertEqual(q1_size_t.eval(), [0])
      self.assertEqual(q2_size_t.eval(), [0])
def testIncompatibleSharedQueueErrors(self):
    """Re-opening a shared queue with mismatched attributes must fail."""
    with self.test_session():
        def check_incompatible(first, second, message):
            # The first handle creates the shared queue successfully;
            # the second, incompatible handle must fail with `message`.
            first.queue_ref.eval()
            with self.assertRaisesOpError(message):
                second.queue_ref.eval()

        # Mismatched capacity.
        check_incompatible(
            tf.FIFOQueue(10, tf.float32, shared_name="q_a"),
            tf.FIFOQueue(15, tf.float32, shared_name="q_a"),
            "capacity")
        # Mismatched component dtype.
        check_incompatible(
            tf.FIFOQueue(10, tf.float32, shared_name="q_b"),
            tf.FIFOQueue(10, tf.int32, shared_name="q_b"),
            "component types")
        # Unshaped vs. shaped.
        check_incompatible(
            tf.FIFOQueue(10, tf.float32, shared_name="q_c"),
            tf.FIFOQueue(
                10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c"),
            "component shapes")
        # Shaped vs. unshaped.
        check_incompatible(
            tf.FIFOQueue(
                10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d"),
            tf.FIFOQueue(10, tf.float32, shared_name="q_d"),
            "component shapes")
        # Different shapes.
        check_incompatible(
            tf.FIFOQueue(
                10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e"),
            tf.FIFOQueue(
                10, tf.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e"),
            "component shapes")
        # Single vs. multiple component types.
        check_incompatible(
            tf.FIFOQueue(10, tf.float32, shared_name="q_f"),
            tf.FIFOQueue(
                10, (tf.float32, tf.int32), shared_name="q_f"),
            "component types")
def testSelectQueue(self):
    """Enqueue/dequeue through a queue chosen dynamically from a list."""
    with self.test_session():
        num_queues = 10
        qlist = [tf.FIFOQueue(10, tf.float32) for _ in xrange(num_queues)]
        # Enqueue/Dequeue into a dynamically selected queue.
        for _ in xrange(20):
            index = np.random.randint(num_queues)
            selected = tf.FIFOQueue.from_list(index, qlist)
            selected.enqueue((10.,)).run()
            self.assertEqual(selected.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
    """from_list with an index beyond the list must fail at run time."""
    with self.test_session():
        first = tf.FIFOQueue(10, tf.float32)
        second = tf.FIFOQueue(15, tf.float32)
        # Index 3 is out of range for a two-element list.
        selected = tf.FIFOQueue.from_list(3, [first, second])
        with self.assertRaisesOpError("Index must be in the range"):
            selected.dequeue().eval()
def _blockingDequeue(self, session, op):
    """Runs a dequeue op, expecting cancellation when the session closes."""
    with self.assertRaisesOpError("was cancelled"):
        session.run(op)
def _blockingDequeueMany(self, session, op):
    """Runs a dequeue_many op, expecting it to be cancelled."""
    with self.assertRaisesOpError("was cancelled"):
        session.run(op)
def _blockingEnqueue(self, session, op):
    """Runs an enqueue op, expecting it to be cancelled."""
    with self.assertRaisesOpError("was cancelled"):
        session.run(op)
def _blockingEnqueueMany(self, session, op):
    """Runs an enqueue_many op, expecting it to be cancelled."""
    with self.assertRaisesOpError("was cancelled"):
        session.run(op)
def testResetOfBlockingOperation(self):
    """Closing the session must cancel every blocked queue operation.

    Starts four threads blocked on dequeue/dequeue_many (empty queue) and
    enqueue/enqueue_many (full queue), then closes the session; each
    _blocking* helper asserts the op fails with "was cancelled".
    """
    with self.test_session() as sess:
        # Empty queue: dequeues on it will block.
        q_empty = tf.FIFOQueue(5, tf.float32, ())
        dequeue_op = q_empty.dequeue()
        dequeue_many_op = q_empty.dequeue_many(1)

        # Full queue: enqueues on it will block.
        q_full = tf.FIFOQueue(5, tf.float32)
        sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
        enqueue_op = q_full.enqueue((6.0,))
        enqueue_many_op = q_full.enqueue_many(([6.0],))

        threads = [
            self.checkedThread(self._blockingDequeue, args=(sess, dequeue_op)),
            self.checkedThread(self._blockingDequeueMany, args=(sess,
                                                                dequeue_many_op)),
            self.checkedThread(self._blockingEnqueue, args=(sess, enqueue_op)),
            self.checkedThread(self._blockingEnqueueMany, args=(sess,
                                                                enqueue_many_op))]
        for t in threads:
            t.start()
        # Give all four ops time to block before cancelling them.
        time.sleep(0.1)
        sess.close()  # Will cancel the blocked operations.
        for t in threads:
            t.join()
def testBigEnqueueMany(self):
    """An enqueue_many larger than capacity completes incrementally.

    Enqueues 10 elements into a capacity-5 queue; the op blocks until
    dequeues make room, and all 10 elements arrive in order.
    """
    with self.test_session() as sess:
        q = tf.FIFOQueue(5, tf.int32, ((),))
        elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        enq = q.enqueue_many((elem,))
        deq = q.dequeue()
        size_op = q.size()

        # enq_done records progress of the background enqueue: one entry
        # once it has started, a second once it has fully completed.
        enq_done = []

        def blocking_enqueue():
            enq_done.append(False)
            # This will fill the queue and then block until enough dequeues happen.
            sess.run(enq)
            enq_done.append(True)
        thread = self.checkedThread(target=blocking_enqueue)
        thread.start()

        # The enqueue should start and then block.
        results = []
        results.append(deq.eval())  # Will only complete after the enqueue starts.
        self.assertEqual(len(enq_done), 1)
        self.assertEqual(sess.run(size_op), 5)

        for _ in range(3):
            results.append(deq.eval())

        # The enqueue is still blocked: queue refills to 5 after each dequeue.
        time.sleep(0.1)
        self.assertEqual(len(enq_done), 1)
        self.assertEqual(sess.run(size_op), 5)

        # This dequeue will unblock the thread.
        results.append(deq.eval())
        time.sleep(0.1)
        self.assertEqual(len(enq_done), 2)
        thread.join()

        # Drain the remaining 5 elements, checking the size as we go.
        for i in range(5):
            self.assertEqual(size_op.eval(), 5 - i)
            results.append(deq.eval())
            self.assertEqual(size_op.eval(), 5 - i - 1)
        self.assertAllEqual(elem, results)
def testBigDequeueMany(self):
    """A dequeue_many larger than capacity blocks until enough enqueues.

    dequeue_many(4) on a capacity-2 queue must stay blocked until all four
    single-element enqueues have run, then return every element in order.
    """
    with self.test_session() as sess:
        q = tf.FIFOQueue(2, tf.int32, ((),))
        elem = np.arange(4, dtype=np.int32)
        enq_list = [q.enqueue((e,)) for e in elem]
        deq = q.dequeue_many(4)

        results = []

        def blocking_dequeue():
            # Will only complete after 4 enqueues complete.
            results.extend(sess.run(deq))
        thread = self.checkedThread(target=blocking_dequeue)
        thread.start()

        # The dequeue should start and then block.
        for enq in enq_list:
            # TODO(mrry): Figure out how to do this without sleeping.
            time.sleep(0.1)
            # Still blocked: no results until the last enqueue runs.
            self.assertEqual(len(results), 0)
            sess.run(enq)

        # Enough enqueued to unblock the dequeue
        thread.join()
        self.assertAllEqual(elem, results)
def testDtypes(self):
    """Round-trips enqueue_many/dequeue_many across all supported dtypes."""
    with self.test_session() as sess:
        dtypes = [tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8,
                  tf.int64, tf.bool, tf.complex64, tf.complex128]
        shape = (32, 4, 128)
        q = tf.FIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))

        def make_input(dtype):
            # Random integers, coerced into the target dtype's domain.
            raw = np.random.randint(-10, 10, shape)
            if dtype == tf.bool:
                return raw > 0
            if dtype in (tf.complex64, tf.complex128):
                # sqrt of negatives yields complex values.
                return np.sqrt(raw.astype(dtype.as_numpy_dtype))
            return raw.astype(dtype.as_numpy_dtype)

        input_tuple = [make_input(dtype) for dtype in dtypes]

        q.enqueue_many(input_tuple).run()
        output_tuple = sess.run(q.dequeue_many(32))

        for (input_elem, output_elem) in zip(input_tuple, output_tuple):
            self.assertAllEqual(input_elem, output_elem)
def testDeviceColocation(self):
    """A dequeue op is colocated with its queue's device, not the caller's."""
    with tf.device("/job:ps"):
        queue = tf.FIFOQueue(32, [tf.int32], name="q")
    with tf.device("/job:worker/task:7"):
        deq = queue.dequeue()
    # The dequeue follows the queue onto /job:ps.
    self.assertDeviceEqual("/job:ps", deq.device)
    self.assertEqual([b"loc:@q"], deq.op.colocation_groups())
class FIFOQueueDictTest(tf.test.TestCase):
    """Tests for FIFOQueue with named (dictionary-addressed) components."""

    def testConstructor(self):
        """Constructing with names records them and builds the right NodeDef."""
        with tf.Graph().as_default():
            q = tf.FIFOQueue(5, (tf.int32, tf.float32), names=("i", "j"),
                             shared_name="foo", name="Q")
            self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
            self.assertProtoEquals("""
              name:'Q' op:'FIFOQueue'
              attr { key: 'component_types' value { list {
                type: DT_INT32 type : DT_FLOAT
              } } }
              attr { key: 'shapes' value { list {} } }
              attr { key: 'capacity' value { i: 5 } }
              attr { key: 'container' value { s: '' } }
              attr { key: 'shared_name' value { s: 'foo' } }
              """, q.queue_ref.op.node_def)
            self.assertEqual(["i", "j"], q.names)

    def testConstructorWithShapes(self):
        """Names plus explicit shapes are reflected in the NodeDef."""
        with tf.Graph().as_default():
            q = tf.FIFOQueue(5, (tf.int32, tf.float32), names=("i", "f"),
                             shapes=(tf.TensorShape([1, 1, 2, 3]),
                                     tf.TensorShape([5, 8])), name="Q")
            self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
            self.assertProtoEquals("""
              name:'Q' op:'FIFOQueue'
              attr { key: 'component_types' value { list {
                type: DT_INT32 type : DT_FLOAT
              } } }
              attr { key: 'shapes' value { list {
                shape { dim { size: 1 }
                        dim { size: 1 }
                        dim { size: 2 }
                        dim { size: 3 } }
                shape { dim { size: 5 }
                        dim { size: 8 } }
              } } }
              attr { key: 'capacity' value { i: 5 } }
              attr { key: 'container' value { s: '' } }
              attr { key: 'shared_name' value { s: '' } }
              """, q.queue_ref.op.node_def)
            self.assertEqual(["i", "f"], q.names)

    def testEnqueueDequeueOneComponent(self):
        """A single named component must be enqueued/dequeued via a dict."""
        with self.test_session() as sess:
            q = tf.FIFOQueue(10, tf.float32, shapes=((),), names="f")
            # Verify that enqueue() checks that when using names we must
            # enqueue a dictionary.
            with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
                enqueue_op = q.enqueue(10.0)
            with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
                enqueue_op = q.enqueue((10.0,))
            # The dictionary keys must match the queue component names.
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op = q.enqueue({})
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op = q.enqueue({"x": 12})
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op = q.enqueue({"f": 10.0, "s": "aa"})
            enqueue_op = q.enqueue({"f": 10.0})
            enqueue_op2 = q.enqueue({"f": 20.0})
            enqueue_op3 = q.enqueue({"f": 30.0})
            # Verify that enqueue_many() checks that when using names we must
            # enqueue a dictionary.
            with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
                enqueue_op4 = q.enqueue_many([40.0, 50.0])
            # The dictionary keys must match the queue component names.
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op4 = q.enqueue_many({})
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op4 = q.enqueue_many({"x": 12})
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0],
                                              "s": ["aa", "bb"]})
            enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0]})
            dequeue = q.dequeue()
            dequeue_2 = q.dequeue_many(2)
            sess.run(enqueue_op)
            sess.run(enqueue_op2)
            sess.run(enqueue_op3)
            sess.run(enqueue_op4)
            f = sess.run(dequeue["f"])
            self.assertEqual(10.0, f)
            f = sess.run(dequeue_2["f"])
            self.assertEqual([20.0, 30.0], list(f))
            f = sess.run(dequeue_2["f"])
            self.assertEqual([40.0, 50.0], list(f))

    def testEnqueueDequeueMultipleComponent(self):
        """Multiple named components round-trip through dict enqueue/dequeue."""
        with self.test_session() as sess:
            q = tf.FIFOQueue(10, (tf.float32, tf.int32, tf.string),
                             shapes=((), (), ()), names=("f", "i", "s"))
            # Verify that enqueue() checks that when using names we must
            # enqueue a dictionary.
            with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
                enqueue_op = q.enqueue((10.0, 123, "aa"))
            # The dictionary keys must match the queue component names.
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op = q.enqueue({})
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op = q.enqueue({"x": 10.0})
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op = q.enqueue({"i": 12, "s": "aa"})
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0, "x": 10.0})
            enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0})
            enqueue_op2 = q.enqueue({"i": 124, "s": "bb", "f": 20.0})
            enqueue_op3 = q.enqueue({"i": 125, "s": "cc", "f": 30.0})
            # Verify that enqueue_many() checks that when using names we must
            # enqueue a dictionary.
            with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
                enqueue_op4 = q.enqueue_many(([40.0, 50.0], [126, 127],
                                              ["dd", "ee"]))
            # The dictionary keys must match the queue component names.
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op4 = q.enqueue_many({})
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op4 = q.enqueue_many({"x": [10.0, 20.0]})
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op4 = q.enqueue_many({"i": [12, 12], "s": ["aa", "bb"]})
            with self.assertRaisesRegexp(ValueError, "match names of Queue"):
                enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "i": [126, 127],
                                              "s": ["dd", "ee"], "x": [1, 2]})
            enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "i": [126, 127],
                                          "s": ["dd", "ee"]})
            dequeue = q.dequeue()
            dequeue_2 = q.dequeue_many(2)
            sess.run(enqueue_op)
            sess.run(enqueue_op2)
            sess.run(enqueue_op3)
            sess.run(enqueue_op4)
            i, f, s = sess.run([dequeue["i"], dequeue["f"], dequeue["s"]])
            self.assertEqual(123, i)
            self.assertEqual(10.0, f)
            self.assertEqual(tf.compat.as_bytes("aa"), s)
            i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
            self.assertEqual([124, 125], list(i))
            # BUG FIX: these two checks were previously `assertTrue(expected,
            # actual)`, which always passes because a non-empty list is truthy;
            # the dequeued values were never actually compared.
            self.assertEqual([20.0, 30.0], list(f))
            self.assertEqual([tf.compat.as_bytes("bb"), tf.compat.as_bytes("cc")],
                             list(s))
            i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
            self.assertEqual([126, 127], list(i))
            # BUG FIX: same assertTrue -> assertEqual correction as above.
            self.assertEqual([40.0, 50.0], list(f))
            self.assertEqual([tf.compat.as_bytes("dd"), tf.compat.as_bytes("ee")],
                             list(s))
class FIFOQueueWithTimeoutTest(tf.test.TestCase):
    """Dequeue operations must respect session and per-run timeouts."""

    def testDequeueWithTimeout(self):
        """A dequeue on an empty queue hits operation_timeout_in_ms."""
        config = tf.ConfigProto(operation_timeout_in_ms=20)
        with self.test_session(config=config) as sess:
            queue = tf.FIFOQueue(10, tf.float32)
            # The default container attribute is the empty string.
            self.assertEqual(tf.compat.as_bytes(""),
                             queue.queue_ref.op.get_attr("container"))
            deq = queue.dequeue()
            # Intentionally do not run any enqueue_ops so that dequeue will
            # block until operation_timeout_in_ms.
            with self.assertRaisesRegexp(tf.errors.DeadlineExceededError,
                                         "Timed out waiting for notification"):
                sess.run(deq)

    def testReusableAfterTimeout(self):
        """A queue remains usable after per-run timeouts have fired."""
        with self.test_session() as sess:
            queue = tf.FIFOQueue(10, tf.float32)
            deq = queue.dequeue()
            enq = queue.enqueue(37)
            # Two consecutive timed-out runs must not corrupt the queue.
            for _ in range(2):
                with self.assertRaisesRegexp(
                        tf.errors.DeadlineExceededError,
                        "Timed out waiting for notification"):
                    sess.run(deq, options=tf.RunOptions(timeout_in_ms=10))
            sess.run(enq)
            self.assertEqual(37, sess.run(deq))
class QueueContainerTest(tf.test.TestCase):
    """Queues created inside tf.container() pick up that container name."""

    def testContainer(self):
        with tf.Graph().as_default():
            with tf.container("test"):
                queue = tf.FIFOQueue(10, tf.float32)
        self.assertEqual(tf.compat.as_bytes("test"),
                         queue.queue_ref.op.get_attr("container"))
class FIFOQueueBenchmark(tf.test.Benchmark):
    """Benchmark FIFOQueue operations."""

    def _build_graph(self):
        """Builds a graph that enqueues and dequeues a single float.

        Returns:
          A tuple with the graph init tensor and graph output tensor.
        """
        queue = tf.FIFOQueue(1, "float")
        init = queue.enqueue(1.0)
        value = queue.dequeue()
        increment = queue.enqueue(value + 1)
        return init, increment

    # TODO(suharshs): Add benchmarks for:
    # - different capacities of the queue
    # - various sizes of tensors
    # - enqueue_many, dequeue_many
    def _run(self, num_iters):
        """Benchmarks enqueueing and dequeueing from a FIFOQueue.

        Args:
          num_iters: The number of iterations to run.

        Returns:
          The duration of the run in seconds.
        """
        graph = tf.Graph()
        with graph.as_default():
            init, output = self._build_graph()
            with tf.Session(graph=graph) as session:
                init.run()
                _ = session.run(output)  # warm up.
                start_time = time.time()
                for _ in range(num_iters):
                    _ = session.run(output)
                duration = time.time() - start_time
        print("%f secs per enqueue-dequeue" % (duration / num_iters))
        self.report_benchmark(
            name="fifo_queue", iters=num_iters, wall_time=duration / num_iters)
        return duration
if __name__ == "__main__":
    # Discover and run all test cases and benchmarks in this module.
    tf.test.main()
|
btcproxy.py | """ A groestlcoind proxy that allows instrumentation and canned responses
"""
from flask import Flask, request
from bitcoin.rpc import JSONRPCError
from bitcoin.rpc import RawProxy as BitcoinProxy
from cheroot.wsgi import Server
from cheroot.wsgi import PathInfoDispatcher
import decimal
import flask
import json
import logging
import os
import threading
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that knows how to serialize decimal.Decimal.

    json.dumps does not handle Decimal out of the box; Decimals are
    rendered as strings with exactly 8 decimal places.
    """

    def default(self, o):
        if not isinstance(o, decimal.Decimal):
            # Defer to the base class, which raises TypeError for
            # unsupported types.
            return super(DecimalEncoder, self).default(o)
        return "{:.8f}".format(float(o))
class BitcoinRpcProxy(object):
    """A Flask-based proxy in front of groestlcoind's JSON-RPC endpoint.

    Forwards JSON-RPC requests to the real daemon, but lets tests register
    canned responses ("mocks") per method name and counts how often each
    mock (and forwarded request) is served.
    """

    def __init__(self, bitcoind, rpcport=0):
        self.app = Flask("BitcoindProxy")
        self.app.add_url_rule("/", "API entrypoint", self.proxy, methods=['POST'])
        self.rpcport = rpcport          # 0 = let the OS pick a free port
        self.mocks = {}                 # method name -> dict reply or callable
        self.mock_counts = {}           # method name -> times the mock was served
        self.bitcoind = bitcoind
        self.request_count = 0          # requests actually forwarded

    def _handle_request(self, r):
        """Handle a single JSON-RPC request dict and return the reply dict."""
        conf_file = os.path.join(self.bitcoind.bitcoin_dir, 'groestlcoin.conf')
        brpc = BitcoinProxy(btc_conf_file=conf_file)
        method = r['method']

        # If we have set a mock for this method reply with that instead of
        # forwarding the request.
        # BUG FIX: this used to check `type(method) == dict`, but `method` is
        # the method *name* (a string), so dict-valued mocks were never
        # returned and the call silently fell through to the real daemon.
        if method in self.mocks and isinstance(self.mocks[method], dict):
            self.mock_counts[method] += 1
            return self.mocks[method]
        elif method in self.mocks and callable(self.mocks[method]):
            self.mock_counts[method] += 1
            return self.mocks[method](r)

        try:
            reply = {
                "result": brpc._call(r['method'], *r['params']),
                "error": None,
                "id": r['id']
            }
        except JSONRPCError as e:
            reply = {
                "error": e.error,
                "id": r['id']
            }
        self.request_count += 1
        return reply

    def proxy(self):
        """Flask view: decode the request (single or batch) and dispatch it."""
        r = json.loads(request.data.decode('ASCII'))

        if isinstance(r, list):
            # JSON-RPC batch: handle each sub-request independently.
            reply = [self._handle_request(subreq) for subreq in r]
        else:
            reply = self._handle_request(r)

        response = flask.Response(json.dumps(reply, cls=DecimalEncoder))
        response.headers['Content-Type'] = 'application/json'
        return response

    def start(self):
        """Start serving the proxy on a daemon thread and record the port."""
        d = PathInfoDispatcher({'/': self.app})
        self.server = Server(('0.0.0.0', self.rpcport), d)
        self.proxy_thread = threading.Thread(target=self.server.start)
        self.proxy_thread.daemon = True
        self.proxy_thread.start()

        # Now that groestlcoind is running on the real rpcport, let's tell all
        # future callers to talk to the proxyport. We use the bind_addr as a
        # signal that the port is bound and accepting connections.
        while self.server.bind_addr[1] == 0:
            pass
        self.rpcport = self.server.bind_addr[1]
        logging.debug("BitcoinRpcProxy proxying incoming port {} to {}".format(self.rpcport, self.bitcoind.rpcport))

    def stop(self):
        """Shut down the server and wait for the serving thread to exit."""
        self.server.stop()
        self.proxy_thread.join()
        logging.debug("BitcoinRpcProxy shut down after processing {} requests".format(self.request_count))

    def mock_rpc(self, method, response=None):
        """Mock the response to a future RPC call of @method

        The response can either be a dict with the full JSON-RPC response, or a
        function that returns such a response. If the response is None the mock
        is removed and future calls will be passed through to groestlcoind again.
        """
        if response is not None:
            self.mocks[method] = response
            self.mock_counts[method] = 0
        elif method in self.mocks:
            del self.mocks[method]
|
task.py | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from collections import deque
import socket
import sys
import threading
import time
from .buffers import ReadOnlyFileBasedBuffer
from .utilities import build_http_date, logger, queue_logger
# Header names that keep their CGI spelling as-is; presumably consumed when
# building the WSGI environ elsewhere in the package — TODO confirm against
# the request-parsing code.
rename_headers = {  # or keep them without the HTTP_ prefix added
    "CONTENT_LENGTH": "CONTENT_LENGTH",
    "CONTENT_TYPE": "CONTENT_TYPE",
}

# Hop-by-hop headers.  WSGI applications may not set these themselves; the
# start_response validation in WSGITask.execute rejects them (PEP 3333).
hop_by_hop = frozenset(
    (
        "connection",
        "keep-alive",
        "proxy-authenticate",
        "proxy-authorization",
        "te",
        "trailers",
        "transfer-encoding",
        "upgrade",
    )
)
class ThreadedTaskDispatcher:
    """A Task Dispatcher that creates a thread for each task.

    Worker threads loop in handler_thread(), pulling tasks off a shared
    deque.  All shared state (queue, counters, thread set) is protected by
    a single lock; two condition variables built on that same lock signal
    "work available" and "a worker exited".
    """

    stop_count = 0  # Number of threads that will stop soon.
    active_count = 0  # Number of currently active threads
    logger = logger
    queue_logger = queue_logger

    def __init__(self):
        self.threads = set()   # numbers of the worker threads currently alive
        self.queue = deque()   # pending tasks, serviced FIFO
        self.lock = threading.Lock()
        # Both condition variables share self.lock, so whoever holds the
        # lock may wait on / notify either of them.
        self.queue_cv = threading.Condition(self.lock)        # task queued
        self.thread_exit_cv = threading.Condition(self.lock)  # worker exited

    def start_new_thread(self, target, thread_no):
        # Daemon thread: an outstanding worker does not block interpreter
        # shutdown.
        t = threading.Thread(
            target=target, name="waitress-{}".format(thread_no), args=(thread_no,)
        )
        t.daemon = True
        t.start()

    def handler_thread(self, thread_no):
        """Worker loop: service queued tasks until asked to stop."""
        while True:
            with self.lock:
                while not self.queue and self.stop_count == 0:
                    # Mark ourselves as idle before waiting to be
                    # woken up, then we will once again be active
                    self.active_count -= 1
                    self.queue_cv.wait()
                    self.active_count += 1
                if self.stop_count > 0:
                    # Asked to shut down: consume one stop token, leave the
                    # pool, and tell shutdown() we exited.
                    self.active_count -= 1
                    self.stop_count -= 1
                    self.threads.discard(thread_no)
                    self.thread_exit_cv.notify()
                    break
                task = self.queue.popleft()
            try:
                # Service outside the lock so other workers can proceed.
                task.service()
            except BaseException:
                self.logger.exception("Exception when servicing %r", task)

    def set_thread_count(self, count):
        """Grow or shrink the worker pool to exactly `count` threads."""
        with self.lock:
            threads = self.threads
            thread_no = 0
            running = len(threads) - self.stop_count
            while running < count:
                # Start threads.
                while thread_no in threads:
                    # Find the lowest unused thread number.
                    thread_no = thread_no + 1
                threads.add(thread_no)
                running += 1
                self.start_new_thread(self.handler_thread, thread_no)
                self.active_count += 1
                thread_no = thread_no + 1
            if running > count:
                # Stop threads.
                self.stop_count += running - count
                self.queue_cv.notify_all()

    def add_task(self, task):
        """Queue `task` for servicing and wake one idle worker."""
        with self.lock:
            self.queue.append(task)
            self.queue_cv.notify()
            queue_size = len(self.queue)
            idle_threads = len(self.threads) - self.stop_count - self.active_count
            if queue_size > idle_threads:
                # More queued work than idle workers: report the backlog.
                self.queue_logger.warning(
                    "Task queue depth is %d", queue_size - idle_threads
                )

    def shutdown(self, cancel_pending=True, timeout=5):
        """Stop all workers, optionally cancelling still-queued tasks.

        Returns True if pending tasks were cancelled, False otherwise.
        """
        self.set_thread_count(0)
        # Ensure the threads shut down.
        threads = self.threads
        expiration = time.time() + timeout
        with self.lock:
            while threads:
                if time.time() >= expiration:
                    self.logger.warning("%d thread(s) still running", len(threads))
                    break
                # Poll in 0.1s slices so the timeout is honored.
                self.thread_exit_cv.wait(0.1)
            if cancel_pending:
                # Cancel remaining tasks.
                queue = self.queue
                if len(queue) > 0:
                    self.logger.warning("Canceling %d pending task(s)", len(queue))
                while queue:
                    task = queue.popleft()
                    task.cancel()
                self.queue_cv.notify_all()
                return True
        return False
class Task:
    """One HTTP response being produced for a channel.

    service() drives the start/execute/finish lifecycle; subclasses
    provide execute().  build_response_header() renders the status line
    and headers according to the negotiated HTTP version, and write()
    streams body bytes (chunking or clamping to Content-Length as needed).
    """

    close_on_finish = False      # close the connection after this response?
    status = "200 OK"
    wrote_header = False         # status line + headers already sent?
    start_time = 0               # set by start(); feeds the Date header
    content_length = None        # declared Content-Length, if any
    content_bytes_written = 0    # body bytes written so far
    logged_write_excess = False  # each warning is emitted at most once
    logged_write_no_body = False
    complete = False             # response metadata ready; write() allowed
    chunked_response = False     # using Transfer-Encoding: chunked?
    logger = logger

    def __init__(self, channel, request):
        self.channel = channel
        self.request = request
        self.response_headers = []
        version = request.version
        if version not in ("1.0", "1.1"):
            # fall back to a version we support.
            version = "1.0"
        self.version = version

    def service(self):
        """Run the full lifecycle; mark the channel for close on socket errors."""
        try:
            self.start()
            self.execute()
            self.finish()
        except OSError:
            self.close_on_finish = True
            if self.channel.adj.log_socket_errors:
                raise

    @property
    def has_body(self):
        # 1xx, 204 and 304 responses must not carry a message body.
        return not (
            self.status.startswith("1")
            or self.status.startswith("204")
            or self.status.startswith("304")
        )

    def build_response_header(self):
        """Render the status line and headers as latin-1 encoded bytes.

        Normalizes header capitalization, fills in Content-Length /
        Transfer-Encoding, Connection, Server/Via and Date headers as
        required by the HTTP version, and may set self.close_on_finish
        and self.chunked_response as side effects.
        """
        version = self.version
        # Figure out whether the connection should be closed.
        connection = self.request.headers.get("CONNECTION", "").lower()
        response_headers = []
        content_length_header = None
        date_header = None
        server_header = None
        connection_close_header = None

        for (headername, headerval) in self.response_headers:
            headername = "-".join([x.capitalize() for x in headername.split("-")])

            if headername == "Content-Length":
                if self.has_body:
                    content_length_header = headerval
                else:
                    # Drop Content-Length for bodyless responses.
                    continue  # pragma: no cover

            if headername == "Date":
                date_header = headerval

            if headername == "Server":
                server_header = headerval

            if headername == "Connection":
                connection_close_header = headerval.lower()
            # replace with properly capitalized version
            response_headers.append((headername, headerval))

        if (
            content_length_header is None
            and self.content_length is not None
            and self.has_body
        ):
            content_length_header = str(self.content_length)
            response_headers.append(("Content-Length", content_length_header))

        def close_on_finish():
            # Ensure exactly one Connection: close header and flag the task.
            if connection_close_header is None:
                response_headers.append(("Connection", "close"))
            self.close_on_finish = True

        if version == "1.0":
            if connection == "keep-alive":
                # HTTP/1.0 keep-alive requires a known Content-Length.
                if not content_length_header:
                    close_on_finish()
                else:
                    response_headers.append(("Connection", "Keep-Alive"))
            else:
                close_on_finish()
        elif version == "1.1":
            if connection == "close":
                close_on_finish()

            if not content_length_header:
                # RFC 7230: MUST NOT send Transfer-Encoding or Content-Length
                # for any response with a status code of 1xx, 204 or 304.
                if self.has_body:
                    response_headers.append(("Transfer-Encoding", "chunked"))
                    self.chunked_response = True

                if not self.close_on_finish:
                    close_on_finish()

            # under HTTP 1.1 keep-alive is default, no need to set the header
        else:
            raise AssertionError("neither HTTP/1.0 or HTTP/1.1")

        # Set the Server and Date field, if not yet specified. This is needed
        # if the server is used as a proxy.
        ident = self.channel.server.adj.ident

        if not server_header:
            if ident:
                response_headers.append(("Server", ident))
        else:
            response_headers.append(("Via", ident or "waitress"))

        if not date_header:
            response_headers.append(("Date", build_http_date(self.start_time)))

        self.response_headers = response_headers

        first_line = "HTTP/%s %s" % (self.version, self.status)
        # NB: sorting headers needs to preserve same-named-header order
        # as per RFC 2616 section 4.2; thus the key=lambda x: x[0] here;
        # rely on stable sort to keep relative position of same-named headers
        next_lines = [
            "%s: %s" % hv for hv in sorted(self.response_headers, key=lambda x: x[0])
        ]
        lines = [first_line] + next_lines
        res = "%s\r\n\r\n" % "\r\n".join(lines)

        return res.encode("latin-1")

    def remove_content_length_header(self):
        """Strip any Content-Length header from the pending response headers."""
        response_headers = []

        for header_name, header_value in self.response_headers:
            if header_name.lower() == "content-length":
                continue  # pragma: nocover
            response_headers.append((header_name, header_value))

        self.response_headers = response_headers

    def start(self):
        # Record when servicing began (used for the Date header).
        self.start_time = time.time()

    def finish(self):
        """Flush headers if unsent and terminate a chunked body."""
        if not self.wrote_header:
            self.write(b"")
        if self.chunked_response:
            # not self.write, it will chunk it!
            self.channel.write_soon(b"0\r\n\r\n")

    def write(self, data):
        """Queue body bytes on the channel, sending headers first if needed.

        Chunk-encodes when chunked_response is set; clamps writes that would
        exceed a declared Content-Length; ignores (but counts) body bytes for
        responses that must not have a body.  Raises RuntimeError if called
        before the response is marked complete.
        """
        if not self.complete:
            raise RuntimeError("start_response was not called before body written")
        channel = self.channel
        if not self.wrote_header:
            rh = self.build_response_header()
            channel.write_soon(rh)
            self.wrote_header = True

        if data and self.has_body:
            towrite = data
            cl = self.content_length
            if self.chunked_response:
                # use chunked encoding response
                towrite = hex(len(data))[2:].upper().encode("latin-1") + b"\r\n"
                towrite += data + b"\r\n"
            elif cl is not None:
                # Never write more than the declared Content-Length.
                towrite = data[: cl - self.content_bytes_written]
                self.content_bytes_written += len(towrite)
                if towrite != data and not self.logged_write_excess:
                    self.logger.warning(
                        "application-written content exceeded the number of "
                        "bytes specified by Content-Length header (%s)" % cl
                    )
                    self.logged_write_excess = True
            if towrite:
                channel.write_soon(towrite)
        elif data:
            # Cheat, and tell the application we have written all of the bytes,
            # even though the response shouldn't have a body and we are
            # ignoring it entirely.
            self.content_bytes_written += len(data)

            if not self.logged_write_no_body:
                self.logger.warning(
                    "application-written content was ignored due to HTTP "
                    "response that may not contain a message-body: (%s)" % self.status
                )
                self.logged_write_no_body = True
class ErrorTask(Task):
    """An error task produces an error response"""

    # The error response is fully determined before any write, so the task
    # is born complete and write() may run immediately.
    complete = True

    def execute(self):
        """Render self.request.error as the HTTP error response."""
        error = self.request.error
        status, headers, body = error.to_response()
        self.status = status
        self.content_length = len(body)
        self.response_headers.extend(headers)
        # We need to explicitly tell the remote client we are closing the
        # connection, because self.close_on_finish is set, and we are going to
        # slam the door in the clients face.
        self.response_headers.append(("Connection", "close"))
        self.close_on_finish = True
        self.write(body.encode("latin-1"))
class WSGITask(Task):
"""A WSGI task produces a response from a WSGI application."""
environ = None
def execute(self):
environ = self.get_environment()
def start_response(status, headers, exc_info=None):
if self.complete and not exc_info:
raise AssertionError(
"start_response called a second time without providing exc_info."
)
if exc_info:
try:
if self.wrote_header:
# higher levels will catch and handle raised exception:
# 1. "service" method in task.py
# 2. "service" method in channel.py
# 3. "handler_thread" method in task.py
raise exc_info[1]
else:
# As per WSGI spec existing headers must be cleared
self.response_headers = []
finally:
exc_info = None
self.complete = True
if not status.__class__ is str:
raise AssertionError("status %s is not a string" % status)
if "\n" in status or "\r" in status:
raise ValueError(
"carriage return/line feed character present in status"
)
self.status = status
# Prepare the headers for output
for k, v in headers:
if not k.__class__ is str:
raise AssertionError(
"Header name %r is not a string in %r" % (k, (k, v))
)
if not v.__class__ is str:
raise AssertionError(
"Header value %r is not a string in %r" % (v, (k, v))
)
if "\n" in v or "\r" in v:
raise ValueError(
"carriage return/line feed character present in header value"
)
if "\n" in k or "\r" in k:
raise ValueError(
"carriage return/line feed character present in header name"
)
kl = k.lower()
if kl == "content-length":
self.content_length = int(v)
elif kl in hop_by_hop:
raise AssertionError(
'%s is a "hop-by-hop" header; it cannot be used by '
"a WSGI application (see PEP 3333)" % k
)
self.response_headers.extend(headers)
# Return a method used to write the response data.
return self.write
# Call the application to handle the request and write a response
app_iter = self.channel.server.application(environ, start_response)
can_close_app_iter = True
try:
if app_iter.__class__ is ReadOnlyFileBasedBuffer:
cl = self.content_length
size = app_iter.prepare(cl)
if size:
if cl != size:
if cl is not None:
self.remove_content_length_header()
self.content_length = size
self.write(b"") # generate headers
# if the write_soon below succeeds then the channel will
# take over closing the underlying file via the channel's
# _flush_some or handle_close so we intentionally avoid
# calling close in the finally block
self.channel.write_soon(app_iter)
can_close_app_iter = False
return
first_chunk_len = None
for chunk in app_iter:
if first_chunk_len is None:
first_chunk_len = len(chunk)
# Set a Content-Length header if one is not supplied.
# start_response may not have been called until first
# iteration as per PEP, so we must reinterrogate
# self.content_length here
if self.content_length is None:
app_iter_len = None
if hasattr(app_iter, "__len__"):
app_iter_len = len(app_iter)
if app_iter_len == 1:
self.content_length = first_chunk_len
# transmit headers only after first iteration of the iterable
# that returns a non-empty bytestring (PEP 3333)
if chunk:
self.write(chunk)
cl = self.content_length
if cl is not None:
if self.content_bytes_written != cl:
# close the connection so the client isn't sitting around
# waiting for more data when there are too few bytes
# to service content-length
self.close_on_finish = True
if self.request.command != "HEAD":
self.logger.warning(
"application returned too few bytes (%s) "
"for specified Content-Length (%s) via app_iter"
% (self.content_bytes_written, cl),
)
finally:
if can_close_app_iter and hasattr(app_iter, "close"):
app_iter.close()
def get_environment(self):
"""Returns a WSGI environment."""
environ = self.environ
if environ is not None:
# Return the cached copy.
return environ
request = self.request
path = request.path
channel = self.channel
server = channel.server
url_prefix = server.adj.url_prefix
if path.startswith("/"):
# strip extra slashes at the beginning of a path that starts
# with any number of slashes
path = "/" + path.lstrip("/")
if url_prefix:
# NB: url_prefix is guaranteed by the configuration machinery to
# be either the empty string or a string that starts with a single
# slash and ends without any slashes
if path == url_prefix:
# if the path is the same as the url prefix, the SCRIPT_NAME
# should be the url_prefix and PATH_INFO should be empty
path = ""
else:
# if the path starts with the url prefix plus a slash,
# the SCRIPT_NAME should be the url_prefix and PATH_INFO should
# the value of path from the slash until its end
url_prefix_with_trailing_slash = url_prefix + "/"
if path.startswith(url_prefix_with_trailing_slash):
path = path[len(url_prefix) :]
environ = {
"REMOTE_ADDR": channel.addr[0],
# Nah, we aren't actually going to look up the reverse DNS for
# REMOTE_ADDR, but we will happily set this environment variable
# for the WSGI application. Spec says we can just set this to
# REMOTE_ADDR, so we do.
"REMOTE_HOST": channel.addr[0],
# try and set the REMOTE_PORT to something useful, but maybe None
"REMOTE_PORT": str(channel.addr[1]),
"REQUEST_METHOD": request.command.upper(),
"SERVER_PORT": str(server.effective_port),
"SERVER_NAME": server.server_name,
"SERVER_SOFTWARE": server.adj.ident,
"SERVER_PROTOCOL": "HTTP/%s" % self.version,
"SCRIPT_NAME": url_prefix,
"PATH_INFO": path,
"REQUEST_URI": request.request_uri,
"QUERY_STRING": request.query,
"wsgi.url_scheme": request.url_scheme,
# the following environment variables are required by the WSGI spec
"wsgi.version": (1, 0),
# apps should use the logging module
"wsgi.errors": sys.stderr,
"wsgi.multithread": True,
"wsgi.multiprocess": False,
"wsgi.run_once": False,
"wsgi.input": request.get_body_stream(),
"wsgi.file_wrapper": ReadOnlyFileBasedBuffer,
"wsgi.input_terminated": True, # wsgi.input is EOF terminated
}
for key, value in dict(request.headers).items():
value = value.strip()
mykey = rename_headers.get(key, None)
if mykey is None:
mykey = "HTTP_" + key
if mykey not in environ:
environ[mykey] = value
# Insert a callable into the environment that allows the application to
# check if the client disconnected. Only works with
# channel_request_lookahead larger than 0.
environ["waitress.client_disconnected"] = self.channel.check_client_disconnected
# cache the environ for this request
self.environ = environ
return environ
|
teos.py | #!/usr/bin/python3
import os
import subprocess
import threading
import time
import re
import pathlib
import shutil
import pprint
import json
import sys
import math
import psutil
import eosfactory.core.errors as errors
import eosfactory.core.logger as logger
import eosfactory.core.utils as utils
import eosfactory.core.setup as setup
import eosfactory.core.config as config
import eosfactory.core.vscode as vscode
TEMPLATE_NAME = "CONTRACT_NAME"
TEMPLATE_HOME = "${HOME}"
C_CPP_PROP = "${c_cpp_prop}"
TASK_JSON = "${tasks}"
CONFIGURATIONS = "configurations"
BROWSE = "browse"
WORKSPACE_FOLDER = "${workspaceFolder}"
# The root directory of the Windows WSL, or empty string if not Windows:
ROOT = config.wsl_root()
HOME = ROOT + os.environ["HOME"] # Linux ~home<user name>
PROJECT_0_DIR = os.path.join(config.template_dir(), config.PROJECT_0)
ERR_MSG_IS_STUCK = "The process of 'nodeos' is stuck."
def resolve_home(string):
    """Substitute the ``${HOME}`` placeholder with the user's home path."""
    resolved = string.replace(TEMPLATE_HOME, HOME)
    return resolved
def naturalize_path(path):
    """Expand ``${HOME}``, prepend the WSL root when needed, and map the
    result to the host (Windows) form via utils.wslMapLinuxWindows."""
    expanded = path.replace(TEMPLATE_HOME, HOME)
    if not expanded.startswith("/mnt/"):
        # Paths already under /mnt/ are host-mounted; others live inside WSL.
        expanded = ROOT + expanded
    return utils.wslMapLinuxWindows(expanded, back_slash=False)
def linuxize_path(path):
    """Strip the WSL root and map a Windows-style path back to Linux form."""
    without_root = path.replace(ROOT, "")
    return utils.wslMapWindowsLinux(without_root)
def get_c_cpp_properties(contract_dir=None, c_cpp_properties_path=None):
    """Load the project's ``.vscode/c_cpp_properties.json`` as a dict.

    Falls back to the default template from ``vscode.c_cpp_properties()``
    when no file can be found on disk.  Raises errors.Error when the file
    exists but cannot be read or parsed.
    """
    if not contract_dir:
        contract_dir = os.getcwd()

    default_path = os.path.join(
        contract_dir, ".vscode/c_cpp_properties.json")
    if c_cpp_properties_path:
        c_cpp_properties_path = linuxize_path(c_cpp_properties_path)
        if not os.path.exists(c_cpp_properties_path):
            c_cpp_properties_path = default_path
    else:
        c_cpp_properties_path = default_path

    if not os.path.exists(c_cpp_properties_path):
        # No file on disk: use the built-in template with ${HOME} expanded.
        return json.loads(resolve_home(vscode.c_cpp_properties()))
    try:
        with open(c_cpp_properties_path, "r") as f:
            return json.loads(f.read())
    except Exception as e:
        raise errors.Error(str(e))
def build(
        contract_dir_hint, c_cpp_properties_path=None,
        compile_only=False, is_test_mode=False, is_execute=False,
        verbosity=None):
    '''Produce ABI and WASM files.

    Compiler options come with the argument 'c_cpp_properties_path', as
    components of 'compilerOptions' list. Option can be any of the 'eosio-cpp'
    options, plus the following ones:

    * --src - list of the source files, absolute or relative to 'src' or
        project directories, for example:
        --src hello.cpp tests/hello_test.cpp
    * -o - the same as the corresponding eosio-cpp option, but may be relative
        to 'build' directory

    Without any option set, the only source file is determined as a result of
    the function :func:`.core.config.contract_source_files`, if the result is
    a single file. If it is not, an error is thrown, stating that the source
    file has to be specified with the '--src' option.

    The ABI and WASM targets are named after the contract source file.

    Args:
        contract_dir_hint (str): Path, may be partial, to the project directory.
        c_cpp_properties_path (str): If set, the path to a c_cpp_properties json
            file in '.vscode' folder in the project directory.
        compile_only (bool): If set, do not link.
        is_test_mode (bool): If set, use 'testOptions' and compile natively.
        is_execute (bool): If set, run the already-built target, do not compile.
        verbosity (([.core.logger.Verbosity])): Verbosity parameter, used in
            loggers.
    '''
    contract_dir = config.contract_dir(contract_dir_hint)
    # contract_source_files[0] is directory, contract_source_files[1] is contents:
    contract_source_files = config.contract_source_files(contract_dir)
    c_cpp_properties = get_c_cpp_properties(
                                        contract_dir, c_cpp_properties_path)
    build_dir = get_target_dir(contract_dir)
    target_path = None
    compile_options = []
    source_files = []
    ############################################################################
    # begin compiler option logics
    ############################################################################
    recardian_dir = "-R=" + get_recardian_dir(contract_source_files[0])

    # Pick the option set matching the build mode, when present.
    if is_test_mode \
            and vscode.TEST_OPTIONS in c_cpp_properties[CONFIGURATIONS][0]:
        compile_options_ = c_cpp_properties[CONFIGURATIONS][0]\
                                                        [vscode.TEST_OPTIONS]
    elif not is_test_mode \
            and vscode.CODE_OPTIONS in c_cpp_properties[CONFIGURATIONS][0]:
        compile_options_ = c_cpp_properties[CONFIGURATIONS][0]\
                                                        [vscode.CODE_OPTIONS]
    else:
        compile_options_ = []

    contract_src_name = None
    is_verbose = False

    if not "-abigen" in compile_options_:
        compile_options.append("-abigen")
    if is_test_mode and not "-fnative" in compile_options_:
        compile_options.append("-fnative")

    # Indices of entries already consumed as the *value* of a preceding
    # option ('-o' or '--src'); without this bookkeeping those values were
    # re-visited by the loop and appended as stray compiler options.
    consumed = set()
    for i in range(0, len(compile_options_)):
        if i in consumed:
            continue
        entry = compile_options_[i]
        if "-R=" in entry:
            recardian_dir = entry
        elif "-contract=" in entry:
            contract_src_name = entry.replace("-contract=", "").strip()
            compile_options.append(entry)
        elif "--verbose" in entry:
            is_verbose = True
        elif "-o" in entry:
            target_path = utils.wslMapWindowsLinux(
                                            entry.replace("-o", "").strip())
            if not target_path:
                if i + 1 < len(compile_options_):
                    # The value comes as the next, separate entry.
                    target_path = compile_options_[i + 1]
                    consumed.add(i + 1)
                else:
                    raise errors.Error('''
The option '-o' does not has its value set:
{}
                    '''.format(compile_options_))
            if not os.path.isabs(target_path):
                target_path = os.path.join(build_dir, target_path)
            target_dir = os.path.dirname(target_path)
            if not os.path.exists(target_dir):
                try:
                    os.makedirs(target_dir)
                except Exception as e:
                    raise errors.Error('''
Cannot make directory set with the option '-o'.
{}
                    '''.format(str(e)))
        elif "-abigen_output" in entry:
            abigen_path = utils.wslMapWindowsLinux(
                                entry.replace("-abigen_output=", "").strip())
            if not os.path.isabs(abigen_path):
                abigen_path = os.path.join(build_dir, abigen_path)
            abigen_dir = os.path.dirname(abigen_path)
            if not os.path.exists(abigen_dir):
                try:
                    os.makedirs(abigen_dir)
                except Exception as e:
                    raise errors.Error('''
Cannot make directory set with the option '-abigen_output'.
{}
                    '''.format(str(e)))
            compile_options.append("-abigen_output={}".format(abigen_path))
        elif "--src" in entry:
            input_files_ = utils.wslMapWindowsLinux(
                                            entry.replace("--src", "").strip())
            if not input_files_:
                # Collect the following entries, up to the next option, as
                # the source-file list.  Bug fix: the original loop never
                # advanced 'next_index', hanging forever on a bare '--src'
                # followed by separate file entries.
                next_index = i + 1
                while next_index < len(compile_options_):
                    next_item = compile_options_[next_index]
                    if "-" in next_item:
                        break
                    input_files_ = input_files_ + " " + next_item
                    consumed.add(next_index)
                    next_index += 1
            if not input_files_:
                raise errors.Error('''
The option '--src' does not has its value set:
{}
                '''.format(compile_options_))
            # strip() guards against the empty first token produced by the
            # leading space when values were gathered entry by entry.
            for input_file in input_files_.strip().split(" "):
                temp = input_file
                if not os.path.isabs(temp):
                    temp = os.path.join(contract_source_files[0], input_file)
                if not contract_src_name:
                    contract_src_name = os.path.splitext(
                                                os.path.basename(temp))[0]
                if not os.path.exists(temp):
                    temp = os.path.join(contract_dir, input_file)
                if not os.path.exists(temp):
                    raise errors.Error('''
The source file
{}
cannot be found. It is neither absolute nor relative to the contract directory
or relative to the 'src' directory.
                    '''.format(input_file))
                temp = os.path.normpath(temp)
                if not temp in source_files:
                    source_files.append(temp)
        else:
            compile_options.append(entry)
    compile_options.append(recardian_dir)

    if not source_files:
        source_files = contract_source_files[1]
    if not source_files:
        raise errors.Error('''
Cannot find any source file (".c", ".cpp",".cxx", ".c++") in the contract folder.
        ''')
    if not is_test_mode and len(source_files) > 1:
        raise errors.Error('''
Cannot determine the source file of the contract. There is many files in
the 'src' directory, namely:
{}
Specify the file with the compiler option '--src', for
example:
--src src_dir/hello.cpp
The file path is to be absolute or relative to the project directory.
        '''.format("\n".join(source_files)))
    # Default the contract name to the first source file's base name.
    if not contract_src_name:
        contract_src_name = os.path.splitext(
                                    os.path.basename(source_files[0]))[0]
    if not contract_src_name and len(source_files) == 1:
        contract_src_name = os.path.splitext(
                                    os.path.basename(source_files[0]))[0]
    ############################################################################
    # end compiler option logics
    ############################################################################
    if not target_path:
        target_path = os.path.normpath(
                        os.path.join(build_dir, contract_src_name + ".wasm"))
        abigen_path = os.path.normpath(
                        os.path.join(build_dir, contract_src_name + ".abi"))
    if is_execute:
        # Run the (natively compiled) target instead of compiling.
        logger.TRACE('''
Executing target
    {}
        '''.format(target_path))
        command_line = [target_path]

        if setup.is_print_command_lines and setup.is_save_command_lines:
            setup.add_to__command_line_file(" ".join(command_line))
        if setup.is_print_command_lines or is_verbose:
            logger.DEBUG('''
######## command line:
{}
            '''.format(" ".join(command_line)), [logger.Verbosity.DEBUG])
        utils.long_process(command_line, build_dir, is_verbose=True,
                        prompt=target_path)
        return

    command_line = [config.eosio_cpp()]
    if compile_only:
        command_line.append("-c")
    else:
        command_line.extend(["-o", target_path])

    for entry in c_cpp_properties[CONFIGURATIONS][0][vscode.INCLUDE_PATH]:
        if WORKSPACE_FOLDER in entry:
            entry = entry.replace(WORKSPACE_FOLDER, contract_dir)
            command_line.append("-I=" + linuxize_path(entry))
        else:
            path = linuxize_path(entry)
            # Skip includes eosio-cpp already searches by default.
            if not path in config.eosio_cpp_includes():
                command_line.append(
                    "-I=" + path)

    for entry in c_cpp_properties[CONFIGURATIONS][0][vscode.LIBS]:
        command_line.append(
            "-l=" + linuxize_path(entry))

    for entry in compile_options:
        command_line.append(entry)

    for input_file in source_files:
        command_line.append(input_file)

    if setup.is_print_command_lines and setup.is_save_command_lines:
        setup.add_to__command_line_file(" ".join(command_line))
    if setup.is_print_command_lines or is_verbose:
        logger.DEBUG('''
######## command line:
{}
        '''.format(" ".join(command_line)), [logger.Verbosity.DEBUG])
    utils.long_process(command_line, build_dir, is_verbose=True,
                    prompt="eosio-cpp")

    if not compile_only:
        if "wasm" in target_path:
            logger.TRACE('''
ABI file writen to file:
    {}
            '''.format(os.path.normpath(abigen_path)), verbosity)
            logger.TRACE('''
WASM file writen to file:
    {}
            '''.format(os.path.normpath(target_path)), verbosity)
        else:
            logger.TRACE('''
terget writen to file:
    {}
            '''.format(os.path.normpath(target_path)), verbosity)
    print("eosio-cpp: OK")
def project_from_template(
        project_name, template=None, workspace_dir=None,
        c_cpp_prop_path=None,
        includes=None,
        libs=None,
        remove_existing=False,
        open_vscode=False, throw_exists=False,
        verbosity=None):
    '''Given the project name and template name, create a smart contract project.

    - **parameters**::

        project_name: The name of the project, or an existing path to
            a directory.
        template: The name of the template used; defaults to the base
            project template when not given.
        workspace_dir: If set, the folder for the work-space. Defaults to the
            value returned by the config.contract_workspace_dir() function.
        c_cpp_prop_path: If set, a c_cpp_properties.json file to seed the
            project's VSCode configuration.
        includes: If set, comma-separated list of include folders.
        libs: If set, comma-separated list of libraries.
        remove_existing: If set, overwrite any existing project.
        open_vscode: If set, open the ``VSCode``, if available.
        throw_exists: If set, raise an error if the project exists.
        verbosity: The logging configuration.
    '''
    project_name = linuxize_path(project_name.strip())
    # Bug fix: 'template' defaults to None and the original code called
    # template.strip() unconditionally (AttributeError).  Fall back to the
    # base template used for every project anyway.
    if not template:
        template = config.PROJECT_0
    template = linuxize_path(template.strip())
    template_dir = template if os.path.isdir(template) else \
        os.path.join(config.template_dir(), template)
    if not os.path.isdir(template_dir):
        raise errors.Error('''
The contract project template '{}' does not exist.
        '''.format(template_dir))

    # Seed the VSCode c_cpp_properties from the given file, falling back to
    # the built-in template when the file is absent or unreadable.
    # Bug fix: when 'c_cpp_prop_path' was set but the file did not exist,
    # 'c_cpp_properties' was left unbound and json.loads raised NameError.
    c_cpp_properties = None
    if c_cpp_prop_path:
        c_cpp_prop_path = linuxize_path(c_cpp_prop_path)
        if os.path.exists(c_cpp_prop_path):
            try:
                with open(c_cpp_prop_path, "r") as f:
                    c_cpp_properties = f.read()
            except Exception:
                c_cpp_properties = None
    if c_cpp_properties is None:
        c_cpp_properties = vscode.c_cpp_properties()
    c_cpp_properties_json = json.loads(c_cpp_properties)

    if includes:
        temp = includes.split(", ")
        temp_ = []
        for entry in temp:
            path = naturalize_path(entry)
            if not path in c_cpp_properties_json[CONFIGURATIONS][0]\
                                                        [vscode.INCLUDE_PATH]:
                temp_.append(path)
        c_cpp_properties_json[CONFIGURATIONS][0][vscode.INCLUDE_PATH]\
                                                            .extend(temp_)
        c_cpp_properties_json[CONFIGURATIONS][0][BROWSE]["path"].extend(temp_)
    path = config.eoside_includes_dir()
    if path:
        path = naturalize_path(path)
        if not path in c_cpp_properties_json[CONFIGURATIONS][0]\
                                                        [vscode.INCLUDE_PATH]:
            c_cpp_properties_json[CONFIGURATIONS][0]\
                                        [vscode.INCLUDE_PATH].append(path)
            c_cpp_properties_json[CONFIGURATIONS][0][BROWSE]["path"]\
                                                            .append(path)
    if libs:
        temp = libs.split(", ")
        temp_ = []
        # Bug fix: the original iterated over the *string* 'libs', processing
        # single characters instead of the split entries.
        for entry in temp:
            path = naturalize_path(entry)
            if not path in c_cpp_properties_json[CONFIGURATIONS][0]\
                                                            [vscode.LIBS]:
                temp_.append(path)
        c_cpp_properties_json[CONFIGURATIONS][0][vscode.LIBS].extend(temp_)
    eoside_libs = config.eoside_libs_dir()
    if eoside_libs:
        eoside_libs = os.listdir(config.eoside_libs_dir())
        for lib in eoside_libs:
            path = naturalize_path(lib)
            if not path in c_cpp_properties_json[CONFIGURATIONS][0]\
                                                            [vscode.LIBS]:
                c_cpp_properties_json[CONFIGURATIONS][0]\
                                                [vscode.LIBS].append(path)

    c_cpp_properties = json.dumps(c_cpp_properties_json, indent=4)
    c_cpp_properties = resolve_home(c_cpp_properties)

    split = os.path.split(project_name)
    if os.path.isdir(split[0]):
        # An existing directory path was given: use it as the project dir.
        project_dir = project_name
        project_name = split[1]
    else:
        if not workspace_dir \
                or not os.path.isabs(workspace_dir) \
                or not os.path.exists(workspace_dir):
            workspace_dir = config.contract_workspace_dir()
        workspace_dir = workspace_dir.strip()
        project_dir = os.path.join(workspace_dir, project_name)
    if os.path.isdir(project_dir) and os.listdir(project_dir):
        if remove_existing:
            try:
                shutil.rmtree(project_dir)
            except Exception as e:
                raise errors.Error('''
Cannot remove the directory {}.
error message:
==============
{}
                '''.format(project_dir, str(e)))
        else:
            msg = '''
NOTE:
Contract workspace
'{}'
already exists. Cannot overwrite it.
            '''.format(project_dir)
            # NOTE(review): both branches raise identically, so
            # 'throw_exists' currently has no effect.  Behavior kept;
            # confirm the intended non-throwing path before changing.
            if throw_exists:
                raise errors.Error(msg)
            else:
                raise errors.Error(msg)

    try:
        os.makedirs(os.path.join(project_dir, "build"))
        os.makedirs(os.path.join(project_dir, "tests"))
        os.makedirs(os.path.join(project_dir, "include"))
    except Exception as e:
        raise errors.Error(str(e))

    def copy_dir_contents(
            project_dir, template_dir, directory, project_name):
        # Recursively copy one template directory into the project,
        # renaming CONTRACT_NAME placeholders in file names on the way.
        contents = os.listdir(os.path.join(template_dir, directory))
        for item in contents:
            path = os.path.join(directory, item)
            template_path = os.path.join(template_dir, path)
            contract_path = os.path.join(
                project_dir, path.replace(
                                        TEMPLATE_NAME, project_name))
            if os.path.isdir(template_path) \
                    and not "__pycache__" in template_path:
                if not os.path.exists(contract_path):
                    os.mkdir(contract_path)
                copy_dir_contents(
                            project_dir, template_dir, path, project_name)
            elif os.path.isfile(template_path):
                copy(template_path, contract_path, project_name)

    def copy(template_path, contract_path, project_name):
        # Instantiate a single template file, substituting placeholders.
        with open(template_path, "r") as f:
            template = f.read()
        if TEMPLATE_HOME in template:
            # Bug fix: the original discarded the return value of
            # resolve_home(), leaving '${HOME}' unexpanded in the output.
            template = resolve_home(template)
        template = template.replace(C_CPP_PROP, c_cpp_properties)
        template = template.replace(TASK_JSON, vscode.TASKS)
        template = template.replace("${" + TEMPLATE_NAME + "}", project_name)
        with open(contract_path, "w") as output:
            output.write(template)

    copy_dir_contents(project_dir, PROJECT_0_DIR, "", project_name)
    if not template_dir == PROJECT_0_DIR:
        copy_dir_contents(project_dir, template_dir, "", project_name)

    if open_vscode:
        if utils.is_windows_ubuntu():
            command_line = "cmd.exe /C code {}".format(
                utils.wslMapLinuxWindows(project_dir))
        elif utils.os_version() == utils.DARWIN:
            command_line = "open -n -b com.microsoft.VSCode --args {}".format(
                project_dir)
        else:
            command_line = "code {}".format(project_dir)
        os.system(command_line)

    logger.INFO('''
######## Created contract project '{}',
    originated from template
    '{}'.
    '''.format(project_dir, template_dir), verbosity)
    return project_dir
def get_pid(name=None):
    """Return process ids found by name.

    Defaults to the base name of the configured 'nodeos' executable.
    """
    if not name:
        name = os.path.splitext(os.path.basename(config.node_exe()))[0]
    matching = [
        p.info for p in psutil.process_iter(attrs=["pid", "name"])
        if p.info["name"] and name in p.info["name"]
    ]
    return [info["pid"] for info in matching]
def get_target_dir(contract_dir):
    """Return the project's ``build`` directory, creating it when absent."""
    build_path = os.path.join(contract_dir, "build")
    if not os.path.exists(build_path):
        try:
            os.mkdir(build_path)
        except Exception as e:
            raise errors.Error(str(e))
    return build_path
def get_recardian_dir(source_dir):
    """Return the 'ricardian' directory: the sibling of *source_dir* when it
    exists, otherwise a subdirectory of *source_dir* (created on demand)."""
    sibling = os.path.join(source_dir, "..", "ricardian")
    if os.path.exists(sibling):
        return sibling
    nested = os.path.join(source_dir, "ricardian")
    if not os.path.exists(nested):
        try:
            os.mkdir(nested)
        except Exception as e:
            raise errors.Error(str(e))
    return nested
def get_include_dir(source_dir):
    """Return the 'include' directory: the sibling of *source_dir* when it
    exists, otherwise a subdirectory of *source_dir* (created on demand)."""
    sibling = os.path.join(source_dir, "..", "include")
    if os.path.exists(sibling):
        return sibling
    nested = os.path.join(source_dir, "include")
    if not os.path.exists(nested):
        try:
            os.mkdir(nested)
        except Exception as e:
            raise errors.Error(str(e))
    return nested
def args(clear=False):
    """Assemble the command-line argument list for the local 'nodeos'.

    Args:
        clear (bool): If set, stop any running node and add the
            '--delete-all-blocks' flag so the blockchain is re-created.

    Returns:
        list: nodeos arguments (without the executable itself).
    """
    args_ = [
        "--http-server-address", config.http_server_address(),
        "--access-control-allow-origin=*",
        "--chain-state-db-size-mb", config.chain_state_db_size_mb(),
        "--contracts-console",
        "--verbose-http-errors",
        "--enable-stale-production",
        "--producer-name eosio",
        "--signature-provider " + config.eosio_key_public() + "=KEY:"
            + config.eosio_key_private(),
        "--plugin eosio::producer_plugin",
        "--plugin eosio::chain_api_plugin",
        "--plugin eosio::http_plugin",
    ]
    if config.nodeos_config_dir():
        args_.extend(["--config-dir", config.nodeos_config_dir()])
    if config.nodeos_data_dir():
        args_.extend(["--data-dir", config.nodeos_data_dir()])
    if config.nodeos_options():
        # Bug fix: was 'nodeos_options()' -- an undefined name (NameError);
        # the extra options come from the config module.
        args_.extend(config.nodeos_options())
    if clear:
        node_stop()
        args_.extend(["--delete-all-blocks"])
    if config.genesis_json():
        args_.extend(["--genesis-json", config.genesis_json()])
    return args_
def keosd_start():
    """Launch 'keosd' when no wallet directory exists yet, then block until
    the wallet directory appears."""
    if not config.keosd_wallet_dir(raise_error=False):
        utils.spawn([config.keosd_exe()])
        wallet_ready = False
        while not wallet_ready:
            # Poll once a second until keosd has created its wallet dir.
            time.sleep(1)
            wallet_ready = bool(config.keosd_wallet_dir(raise_error=False))
def on_nodeos_error(clear=False):
    """Diagnose a local 'nodeos' that repeatedly failed to start.

    Stops any running node, re-runs nodeos once in a background thread with
    stderr captured, prints either the captured error text or a note that the
    failure matches a known benign condition, then stops the node and exits
    the interpreter.

    Args:
        clear (bool): Passed through to args(); deletes the blockchain.
    """
    ERROR_WAIT_TIME = 5
    # stderr fragments that indicate a benign condition, not a real error.
    NOT_ERROR = [
        "exit shutdown",
        "configuration items in the config.ini file are redundantly",
    ]

    node_stop()
    args_ = args(clear)
    args_.insert(0, config.node_exe())
    command_line = " ".join(args_)

    logger.ERROR('''
The local 'nodeos' failed to start few times in sequence. Perhaps, something is
wrong with configuration of the system. See the command line issued:
    ''')
    print("\n{}\n".format(command_line))
    print('''
Now, see the result of execution of the command line:
    ''')

    def runInThread():
        # Re-run nodeos once, capturing stderr for diagnosis.
        proc = subprocess.Popen(
            " ".join(args_),
            stdin=subprocess.DEVNULL, stdout=std_out_handle,
            stderr=subprocess.PIPE, shell=True)
        _, err = proc.communicate()

        err_msg = err.decode("ISO-8859-1")
        not_error = False
        if err_msg:
            for item in NOT_ERROR:
                if item in err_msg:
                    not_error = True
                    break
        if not_error:
            print(
                '''
Just another hang incident of the 'nodeos' executable.''')
            if clear:
                print(
                '''
Rerun the script.
                ''')
            else:
                print(
                '''
Rerun the script with 'nodeos' restarted.
                ''')
        else:
            print(err_msg)

    thread = threading.Thread(target=runInThread)
    thread.start()

    # Wait for the nodeos process to crash, printing one progress dot per
    # second.  Bug fix: the original iterated over the tuple
    # (0, ERROR_WAIT_TIME) instead of a range, yielding exactly two fixed
    # passes regardless of the configured wait time.
    for _ in range(int(ERROR_WAIT_TIME)):
        print(".", end="", flush=True)
        time.sleep(1)
    print()

    # Kill the process: it is stuck, or it is running well.
    node_stop()
    exit()
# Stream receiving the local node's stdout; set by node_start() to
# subprocess.DEVNULL or to an open log file, closed again on node exit.
std_out_handle = None
def node_start(clear=False, nodeos_stdout=None):
    '''Start the local EOSIO node.

    Args:
        clear (bool): If set, the blockchain is deleted and then re-created.
        nodeos_stdout (str): If set, a file where *stdout* stream of
            the local *nodeos* is send. Note that the file can be included to
            the configuration of EOSFactory, see :func:`.core.config.nodeos_stdout`.
            If the file is set with the configuration, and in the same time
            it is set with this argument, the argument setting prevails.
    '''
    args_ = args(clear)

    if not nodeos_stdout:
        nodeos_stdout = config.nodeos_stdout()

    # Where the node's stdout goes: DEVNULL unless a log file is configured.
    global std_out_handle
    std_out_handle = subprocess.DEVNULL
    if nodeos_stdout:
        try:
            std_out_handle = open(nodeos_stdout, 'w')
        except Exception as e:
            raise errors.Error('''
Error when preparing to start the local EOS node,
opening the given stdout log file that is
{}
Error message is
{}
            '''.format(nodeos_stdout, str(e)))

    def onExit():
        # Close the stdout log file, if one was opened; ignore close errors.
        global std_out_handle
        if not std_out_handle == subprocess.DEVNULL:
            try:
                std_out_handle.close()
            except:
                pass

    if setup.is_save_command_lines:
        setup.add_to__command_line_file(
            config.node_exe() + " " + " ".join(args_))
    if setup.is_print_command_lines:
        print("######## nodeos command line:")
        print(config.node_exe() + " " + " ".join(args_))

    args_.insert(0, config.node_exe())
    def runInThread():
        # Run nodeos in a thread and block until it exits, then clean up.
        # NOTE(review): the log text says "Block number is" but it is
        # formatted with the command line -- likely a copy-paste slip.
        logger.INFO('''
        Run the node: Block number is {}
        '''.format(" ".join(args_)))
        proc = subprocess.Popen(
            " ".join(args_),
            stdin=subprocess.DEVNULL, stdout=std_out_handle,
            stderr=subprocess.DEVNULL, shell=True)
        proc.wait()
        onExit()
        return
    thread = threading.Thread(target=runInThread)
    thread.start()
def node_probe():
    """Poll the freshly started 'nodeos' until it produces blocks.

    Watches the process's CPU load and the head-block number; raises
    errors.Error when the process dies, looks stuck, or stops responding.
    """
    # Seconds of warm-up CPU sampling before block polling starts.
    DELAY_TIME = 4
    # Interval (seconds) of each cpu_percent() sample / get-info call.
    WAIT_TIME = 1

    # Declare success after this many blocks past the first one observed.
    NUMBER_BLOCKS_ADDED = 3
    # Maximum number of get-info polls before giving up.
    NUMBER_GET_INFO_CALLS = 7
    # At this remaining count, apply the "is it stuck?" CPU heuristic.
    CHECK_COUNT = 2
    # A healthy node's CPU load falls off from start-up by this factor.
    RATIO_THRESHOLD = 2.5
    NODEOS = "nodeos"
    count = NUMBER_GET_INFO_CALLS
    block_num = None
    pid = None

    # Give the process up to ~2.5s to appear in the process table.
    for i in range(0, 5):
        pids = [p.info for p in psutil.process_iter(attrs=["pid", "name"]) \
            if p.info["name"] and NODEOS in p.info["name"]]
        if pids and pids[0]["name"] == NODEOS:
            pid = pids[0]["pid"]
            break
        time.sleep(0.5)
    if not pid:
        raise errors.Error('''
Local node has failed to start.
        ''')

    proc = psutil.Process(pid)
    cpu_percent_start = proc.cpu_percent(interval=WAIT_TIME)
    print("Starting nodeos, cpu percent: ", end="", flush=True)
    # Warm-up phase: report CPU load while the node initializes.
    for i in range(0, int(DELAY_TIME / WAIT_TIME)):
        cpu_percent = proc.cpu_percent(interval=WAIT_TIME)
        print("{:.0f}, ".format(cpu_percent), end="", flush=True)

    while True:
        if not proc.is_running():
            raise errors.Error('''
Local node has stopped.
''')
        count = count - 1
        cpu_percent = proc.cpu_percent(interval=WAIT_TIME)
        try:
            # Imported lazily to avoid a circular import at module load.
            import eosfactory.core.cleos_get as cleos_get
            head_block_num = cleos_get.GetInfo(is_verbose=0).head_block
            if block_num is None:
                # First successful get-info: rebase the countdown on the
                # expected block production rate (~0.5s per block).
                block_num = head_block_num
                count = int(NUMBER_BLOCKS_ADDED * 0.5/WAIT_TIME) + 1
        except:
            # NOTE(review): bare except silently swallows every error from
            # the get-info call -- consider narrowing it.
            head_block_num = 0
            pass

        # '*' marks samples taken after the first block was seen.
        if block_num:
            print("{:.0f}* ".format(cpu_percent), end="", flush=True)
        else:
            print("{:.0f}; ".format(cpu_percent), end="", flush=True)

        # Stuck heuristic: still no blocks and the CPU load has not fallen
        # off from its start-up level.
        # NOTE(review): divides by cpu_percent -- a 0.0 sample would raise
        # ZeroDivisionError; confirm that cannot happen here.
        if count == CHECK_COUNT and not block_num and \
                cpu_percent_start / cpu_percent < RATIO_THRESHOLD:
            print(" stuck.")
            raise errors.Error(ERR_MSG_IS_STUCK)

        if block_num and head_block_num - block_num >= NUMBER_BLOCKS_ADDED:
            print()
            logger.INFO('''
Local node is running. Block number is {}
            '''.format(head_block_num))
            break

        if count <= 0:
            print()
            raise errors.Error('''
The local node does not respond.
            ''')
def is_local_node_process_running():
    """Tell whether at least one local 'nodeos' process is alive."""
    return bool(get_pid())
def kill(name):
    """Terminate every process whose name contains *name*.

    Returns the list of pids that were signalled.  Raises errors.Error when
    a process survives the grace period.
    """
    pids = get_pid(name)
    # Overall grace budget (seconds), shared by ALL matched pids -- it is
    # deliberately not reset per process.
    count = 10
    for pid in pids:
        p = psutil.Process(pid)
        p.terminate()
        # Wait for this pid to disappear, burning down the shared budget.
        while count > 0:
            time.sleep(1)
            if not psutil.pid_exists(pid):
                break
            count = count -1
    if count <= 0:
        # NOTE(review): the message always names the nodeos executable even
        # when 'name' targeted a different process (e.g. keosd) -- confirm.
        raise errors.Error('''
Failed to kill {}. Pid is {}.
        '''.format(
            os.path.splitext(os.path.basename(config.node_exe()))[0], str(pids))
        )
    return pids
def kill_keosd():
    """Terminate the local 'keosd' wallet-manager process, if any."""
    keosd_name = os.path.splitext(os.path.basename(config.keosd_exe()))[0]
    kill(keosd_name)
def node_stop(verbose=True):
    """Stop the local 'keosd' and 'nodeos' processes.

    You can see if the process is a zombie by using top or
    the following command:
    ps aux | awk '$8=="Z" {print $2}'
    """
    kill_keosd()
    nodeos_name = os.path.splitext(os.path.basename(config.node_exe()))[0]
    pids = kill(nodeos_name)

    if verbose:
        logger.INFO('''
        Local node is stopped {}.
        '''.format(str(pids)))
def node_is_running():
    """Tell whether the local 'nodeos' process is running.

    Bug fix: previously returned ``not get_pid()`` -- True when the node was
    NOT running -- inverting the meaning of the function name and
    contradicting is_local_node_process_running().
    """
    return bool(get_pid())
|
dumping_callback_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for tfdbg v2 dumping callback."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import shutil
import socket
import tempfile
import threading
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.debug.lib import debug_events_reader
from tensorflow.python.debug.lib import dumping_callback
from tensorflow.python.debug.lib import dumping_callback_test_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import models
from tensorflow.python.keras.applications import mobilenet_v2
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import recurrent_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
def _create_simple_recurrent_keras_model(input_shape):
  """Create a simple tf.keras model containing a recurrent layer for testing."""
  lstm_layer = recurrent_v2.LSTM(
      10,
      input_shape=input_shape,
      kernel_initializer="zeros",
      recurrent_initializer="zeros")
  output_layer = core.Dense(1, kernel_initializer="zeros")
  model = models.Sequential([lstm_layer, output_layer])
  model.compile(loss="mse", optimizer="sgd")
  return model
# Host name and absolute path of this test file; the stack-trace assertions
# below compare the dumped execution records against these.
_host_name = socket.gethostname()
_current_file_full_path = os.path.abspath(__file__)
class TracingCallbackTest(
dumping_callback_test_lib.DumpingCallbackTestBase, parameterized.TestCase):
  def setUp(self):
    super(TracingCallbackTest, self).setUp()
    # Fresh dump directory per test; removed again in tearDown().
    self.dump_root = tempfile.mkdtemp()
  def tearDown(self):
    # Remove the per-test dump directory and make sure the dumping callback
    # is disabled so state does not leak into other tests.
    if os.path.isdir(self.dump_root):
      shutil.rmtree(self.dump_root, ignore_errors=True)
    dumping_callback.disable_dump_debug_info()
    super(TracingCallbackTest, self).tearDown()
def _verifyStackFrames(self, stack_frames):
"""Verify the correctness of the stack frames.
Currently, it simply asserts that the current file is found in the stack
frames.
TODO(cais): Perhaps implement a stricter check later.
Args:
stack_frames: The stack frames to verify.
"""
self.assertTrue([
frame for frame in stack_frames if frame[0] == _current_file_full_path])
def _expectedDefaultDeviceName(self):
gpu_name = test_util.gpu_device_name()
if gpu_name:
return "/job:localhost/replica:0/task:0" + gpu_name
else:
return "/job:localhost/replica:0/task:0/device:CPU:0"
def testInvalidTensorDebugModeCausesError(self):
with self.assertRaisesRegexp(
ValueError,
r"Invalid value in tensor_debug_mode \(\'NONSENSICAL\'\).*"
r"Valid options.*NO_TENSOR.*"):
dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NONSENSICAL")
  def testDisablingTracingCallbackWithoutEnablingFirstIsTolerated(self):
    # Disabling without a prior enable_dump_debug_info() call must be a
    # silent no-op, not an error.
    dumping_callback.disable_dump_debug_info()
  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("CurtHealth", "CURT_HEALTH"),
      ("ConciseHealth", "CONCISE_HEALTH"),
      ("Shape", "SHAPE"),
      ("FullTensor", "FULL_TENSOR"),
  )
  def testPureEagerOpExecution(self, tensor_debug_mode):
    """Test dumping data from eager op execution: float32."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)

    x = constant_op.constant(10.0)
    zero = constant_op.constant(0.0)
    one = constant_op.constant(1.0)
    two = constant_op.constant(2.0)
    three = constant_op.constant(3.0)
    # Use Collatz conjecture as a test case: 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1.
    while x > one:
      if math_ops.equal(x % two, zero):
        x = x / two
      else:
        x = x * three + one

    writer.FlushNonExecutionFiles()
    self._readAndCheckMetadataFile()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      # Before FlushExecutionFiles() is called, the .execution file should be
      # empty.
      self.assertFalse(reader.executions())

      # After the flushing, the .execution file should hold the appropriate
      # contents.
      writer.FlushExecutionFiles()
      reader.update()
      executions = reader.executions()
      prev_wall_time = 1
      executed_op_types = []
      tensor_values = collections.defaultdict(lambda: [])
      for execution in executions:
        # Wall times of the recorded executions must be non-decreasing.
        self.assertGreaterEqual(execution.wall_time, prev_wall_time)
        prev_wall_time = execution.wall_time
        executed_op_types.append(execution.op_type)
        # Check the device name.
        if execution.op_type in ("AddV2", "Mul", "RealDiv"):
          self.assertLen(execution.output_tensor_device_ids, 1)
          self.assertEqual(
              reader.device_name_by_id(execution.output_tensor_device_ids[0]),
              self._expectedDefaultDeviceName(),
              "Unexpected device name from eager op %s" % execution.op_type)

        # No graph IDs should have been logged for eager op executions.
        self.assertFalse(execution.graph_id)
        self.assertTrue(execution.input_tensor_ids)
        self.assertTrue(execution.output_tensor_ids)
        self.assertEqual(
            debug_event_pb2.TensorDebugMode.keys()[execution.tensor_debug_mode],
            tensor_debug_mode)
        if tensor_debug_mode == "NO_TENSOR":
          # Due to the NO_TENSOR tensor debug mode, tensor_protos ought to
          # be empty.
          self.assertFalse(execution.debug_tensor_values)
        elif tensor_debug_mode == "CURT_HEALTH":
          self.assertLen(execution.debug_tensor_values, 1)
          if execution.op_type in ("AddV2", "Mul", "RealDiv"):
            # 1st element: -1 is the unset tensor_id for eager op execution.
            # 2nd element: 0 means there is no inf or nan.
            self.assertAllClose(execution.debug_tensor_values, [[-1.0, 0.0]])
        elif tensor_debug_mode == "CONCISE_HEALTH":
          if execution.op_type in ("AddV2", "Mul", "RealDiv"):
            # 1st element: -1 is the unset tensor_id for eager op execution.
            # 2nd element: each scalar tensor has 1 element.
            # Remaining elements: no -inf, inf or nan in these
            self.assertAllClose(
                execution.debug_tensor_values, [[-1, 1, 0, 0, 0]])
        elif tensor_debug_mode == "SHAPE":
          if execution.op_type in ("AddV2", "Mul", "RealDiv"):
            # 1st element: -1 is the unset tensor_id for eager op execution.
            # 2nd element: dtype enum value (float32).
            # 3rd element: rank (scalar).
            # 4th element: element count (4).
            # Remaining elements: shape at fixed length (6).
            self.assertAllClose(execution.debug_tensor_values,
                                [[-1, 1, 0, 1, 0, 0, 0, 0, 0, 0]])
        elif tensor_debug_mode == "FULL_TENSOR":
          # Collect concrete tensor values per op type for the checks below.
          tensor_values[execution.op_type].append(
              reader.execution_to_tensor_values(execution)[0])

        host_name, stack_frames = reader.read_execution_stack_trace(execution)
        self.assertEqual(host_name, _host_name)
        self._verifyStackFrames(stack_frames)

      if tensor_debug_mode == "FULL_TENSOR":
        self.assertAllClose(tensor_values["Greater"], [1, 1, 1, 1, 1, 1, 0])
        self.assertAllClose(tensor_values["RealDiv"], [5, 8, 4, 2, 1])
        self.assertAllClose(tensor_values["Mul"], [15])
        self.assertAllClose(tensor_values["AddV2"], [16])

      # The op sequence mirrors the Collatz loop iterations above.
      self.assertEqual(
          executed_op_types,
          [
              "Greater",
              "FloorMod",
              "Equal",
              "RealDiv",  # 10 --> 5
              "Greater",
              "FloorMod",
              "Equal",
              "Mul",
              "AddV2",  # 5 --> 16
              "Greater",
              "FloorMod",
              "Equal",
              "RealDiv",  # 16 --> 8
              "Greater",
              "FloorMod",
              "Equal",
              "RealDiv",  # 8 --> 4
              "Greater",
              "FloorMod",
              "Equal",
              "RealDiv",  # 4 --> 2
              "Greater",
              "FloorMod",
              "Equal",
              "RealDiv",  # 2 --> 1
              "Greater"
          ])

      # Due to the pure eager op execution, the .graph file and the
      # .graph_execution_traces file ought to be empty.
      self.assertFalse(reader.outermost_graphs())
      self.assertEqual(reader.num_graph_execution_traces(), 0)
  @parameterized.named_parameters(
      ("CurtHealth", "CURT_HEALTH"),
      ("ConciseHealth", "CONCISE_HEALTH"),
      ("Shape", "SHAPE"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testModesSummarizingBadNumericalValue(self, tensor_debug_mode):
    """Checks summary-based debug modes on tensors containing inf and nan.

    Runs (x + y) / (x - y) on float16 inputs crafted so that only the
    RealDiv output contains -inf, inf and nan, then verifies that the
    per-tensor debug summaries recorded for each op reflect the presence
    (or absence) of those bad numerical values.
    """
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)
    @def_function.function
    def func(x, y):
      return (x + y) / (x - y)
    x = np.array([-3, -1, 0, 0, 1, 1, 1, 2], dtype=np.float16)
    y = np.array([2, -1, 0, 0, 1, 1, 1, 3], dtype=np.float16)
    # (x + y) / (x - y) = [0.2, -inf, nan, nan, inf, inf, inf, -5].
    self.evaluate(func(x, y))
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()
    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      graph_exec_traces = reader.graph_execution_traces()
      executed_op_types = [trace.op_type for trace in graph_exec_traces]
      # The traced function executes exactly one AddV2, one Sub and one
      # RealDiv op; only the RealDiv output holds inf/nan values.
      self.assertCountEqual(executed_op_types, ["AddV2", "Sub", "RealDiv"])
      if tensor_debug_mode == "CURT_HEALTH":
        for trace in graph_exec_traces:
          # 1st element: tensor_id, should be >= 0.
          # 2nd element: indicates if there is any inf or nan.
          tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
          self.assertGreaterEqual(tensor_id, 0)
          if trace.op_type == "RealDiv":
            self.assertAllClose(trace.debug_tensor_value, [tensor_id, 1])
          else:
            self.assertAllClose(trace.debug_tensor_value, [tensor_id, 0])
      elif tensor_debug_mode == "CONCISE_HEALTH":
        for trace in graph_exec_traces:
          # 1st element: tensor_id, should be >= 0.
          # 2nd element: element count (8).
          # Remaining 3 elements: The counts of -inf, inf and nan.
          tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
          self.assertGreaterEqual(tensor_id, 0)
          if trace.op_type == "RealDiv":
            # Per the expected-output comment above: one -inf, three infs,
            # two nans.
            self.assertAllClose(trace.debug_tensor_value,
                                [tensor_id, 8, 1, 3, 2])
          else:
            self.assertAllClose(trace.debug_tensor_value,
                                [tensor_id, 8, 0, 0, 0])
      else:  # SHAPE.
        for trace in graph_exec_traces:
          # 1st element: tensor_id, should be >= 0.
          # 2nd element: dtype enum value (float16 = 19).
          # 3rd element: rank (1)
          # 4th element: element count (8).
          # Remaining elements: shape at fixed length (6).
          tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
          self.assertGreaterEqual(tensor_id, 0)
          self.assertAllClose(trace.debug_tensor_value,
                              [tensor_id, 19, 1, 8, 8, 0, 0, 0, 0, 0])
@parameterized.named_parameters(
("Shape", "SHAPE"),
)
@test_util.run_in_graph_and_eager_modes
def testBooleanTensors(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
@def_function.function
def func(x, y):
return math_ops.logical_not(math_ops.logical_and(x, y))
x = np.array([[False, False], [True, True]], dtype=np.bool)
y = np.array([[False, True], [False, True]], dtype=np.bool)
self.assertAllEqual(
self.evaluate(func(x, y)), [[True, True], [True, False]])
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
graph_exec_traces = reader.graph_execution_traces()
executed_op_types = [trace.op_type for trace in graph_exec_traces]
self.assertEqual(executed_op_types, ["LogicalAnd", "LogicalNot"])
for trace in graph_exec_traces:
tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
self.assertGreaterEqual(tensor_id, 0)
# 1st element: tensor_id, should be >= 0.
# 2nd element: dtype enum value (bool).
# 3rd element: rank (2).
# 4th element: element count (4).
# Remaining elements: shape at fixed length.
self.assertAllClose(
trace.debug_tensor_value, [tensor_id, 10, 2, 4, 2, 2, 0, 0, 0, 0])
def testReadingSourceLines(self):
writer = dumping_callback.enable_dump_debug_info(self.dump_root)
# Run a simple eager execution event, so that the source-file contents are
# dumped.
self.assertAllClose(math_ops.truediv(7.0, 1.0 / 6.0), 42.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
with open(_current_file_full_path, "rt") as f:
file_lines = f.read().split("\n")
self.assertEqual(
reader.source_lines(_host_name, _current_file_full_path), file_lines)
  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("CurtHealth", "CURT_HEALTH"),
      ("ConciseHealth", "CONCISE_HEALTH"),
      ("Shape", "SHAPE"),
      ("FullTensor", "FULL_TENSOR"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testNestedFunctionExecutionWithoutControlFlow(self, tensor_debug_mode):
    """Checks dumps from a tf.function that calls another tf.function.

    Computes sin(1 + log(x + y)) via two nested tf.functions and verifies
    the recorded graph structure (outer graph, inner graph, device names),
    the graph-building history, and the per-mode debug summaries of the
    four executed ops (AddV2, Log, AddV2, Sin).
    """
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)
    @def_function.function
    def log_sum(x, y):
      return math_ops.log(x + y)
    @def_function.function
    def sin1p_log_sum(x, y):
      # Calls the inner tf.function `log_sum`, creating a nested FuncGraph.
      return math_ops.sin(1.0 + log_sum(x, y))
    x = constant_op.constant(2.0)
    y = constant_op.constant(3.0)
    self.assertAllClose(sin1p_log_sum(x, y), np.sin(1.0 + np.log(5.0)))
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()
    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      outermost_graphs = reader.outermost_graphs()
      self.assertLen(outermost_graphs, 1)
      if context.executing_eagerly():
        # NOTE(b/142486213): Execution of the TF function happens with
        # Session.run() in v1 graph mode, so doesn't get logged to the
        # .execution file.
        executions = reader.executions()
        self.assertLen(executions, 1)
        self.assertIn("sin1p_log_sum", executions[0].op_type)
        # Get the executed graph and verify its identity and inner graph.
        graph = reader.graph_by_id(executions[0].graph_id)
        self.assertEqual(graph.name, "sin1p_log_sum")
        self.assertLen(graph.inner_graph_ids, 1)
        inner_graph = reader.graph_by_id(graph.inner_graph_ids[0])
        self.assertEqual(inner_graph.name, "log_sum")
        # Check device names.
        self.assertLen(executions[0].output_tensor_device_ids, 1)
        self.assertEqual(
            reader.device_name_by_id(executions[0].output_tensor_device_ids[0]),
            self._expectedDefaultDeviceName())
        self.assertIn(self._expectedDefaultDeviceName(), reader.device_names())
      # Verify the recorded graph-building history.
      # Two AddV2 ops were built: `x + y` in log_sum and `1.0 + ...` in
      # sin1p_log_sum.
      add_op_digests = reader.graph_op_digests(op_type="AddV2")
      self.assertLen(add_op_digests, 2)
      self.assertEqual(
          reader.graph_by_id(add_op_digests[0].graph_id).name, "log_sum")
      self.assertEqual(
          reader.graph_by_id(add_op_digests[1].graph_id).name, "sin1p_log_sum")
      log_op_digests = reader.graph_op_digests(op_type="Log")
      self.assertLen(log_op_digests, 1)
      self.assertEqual(
          reader.graph_by_id(log_op_digests[0].graph_id).name, "log_sum")
      sin_op_digests = reader.graph_op_digests(op_type="Sin")
      self.assertLen(sin_op_digests, 1)
      self.assertEqual(
          reader.graph_by_id(sin_op_digests[0].graph_id).name, "sin1p_log_sum")
      # Verify the output tensor IDs and the stack traces.
      for op_digest in add_op_digests + log_op_digests + sin_op_digests:
        # These are all single-output ops.
        self.assertLen(op_digest.output_tensor_ids, 1)
        self.assertGreaterEqual(op_digest.output_tensor_ids[0], 0)
        _, stack_frames = reader.read_graph_op_creation_stack_trace(op_digest)
        self._verifyStackFrames(stack_frames)
      graph_exec_traces = reader.graph_execution_traces()
      executed_op_types = [digest.op_type for digest in graph_exec_traces]
      self.assertEqual(executed_op_types, ["AddV2", "Log", "AddV2", "Sin"])
      # Verify the graph ID stack of each op.
      # 1st AddV2 op.
      self.assertEqual(
          reader.graph_by_id(graph_exec_traces[0].graph_ids[-1]).name,
          "log_sum")
      self.assertEqual(
          reader.graph_by_id(graph_exec_traces[0].graph_ids[-2]).name,
          "sin1p_log_sum")
      # Log op.
      self.assertEqual(
          reader.graph_by_id(graph_exec_traces[1].graph_ids[-1]).name,
          "log_sum")
      self.assertEqual(
          reader.graph_by_id(graph_exec_traces[1].graph_ids[-2]).name,
          "sin1p_log_sum")
      # 2nd AddV2 op.
      self.assertEqual(
          reader.graph_by_id(graph_exec_traces[2].graph_ids[-1]).name,
          "sin1p_log_sum")
      # Sin op.
      self.assertEqual(
          reader.graph_by_id(graph_exec_traces[3].graph_ids[-1]).name,
          "sin1p_log_sum")
      if tensor_debug_mode == "NO_TENSOR":
        # Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought
        # to be an empty float32 tensor.
        for trace in graph_exec_traces:
          self.assertEqual(trace.debug_tensor_value, [])
      elif tensor_debug_mode == "CURT_HEALTH":
        # Test the association between graph exec and prior graph building.
        # In each case, the 1st element of debug_tensor_value is the ID of the
        # symbolic tensor and the 2nd element is a zero indicating there is no
        # inf or nan.
        self.assertAllClose(
            graph_exec_traces[0].debug_tensor_value,
            [add_op_digests[0].output_tensor_ids[0], 0.0])  # 1st AddV2 op.
        self.assertAllClose(
            graph_exec_traces[1].debug_tensor_value,
            [log_op_digests[0].output_tensor_ids[0], 0.0])  # Log op.
        self.assertAllClose(
            graph_exec_traces[2].debug_tensor_value,
            [add_op_digests[1].output_tensor_ids[0], 0.0])  # 2nd AddV2 op.
        self.assertAllClose(
            graph_exec_traces[3].debug_tensor_value,
            [sin_op_digests[0].output_tensor_ids[0], 0.0])  # Sin op.
      elif tensor_debug_mode == "CONCISE_HEALTH":
        # 1st element: tensor_id, should be >= 0.
        # 2nd element: element count. Remaining elements: all zero because there
        # is no -inf, inf or nan.
        # 1st AddV2 op.
        self.assertAllClose(
            graph_exec_traces[0].debug_tensor_value,
            [add_op_digests[0].output_tensor_ids[0], 1.0, 0.0, 0.0, 0.0])
        # Log op.
        self.assertAllClose(
            graph_exec_traces[1].debug_tensor_value,
            [log_op_digests[0].output_tensor_ids[0], 1.0, 0.0, 0.0, 0.0])
        # 2nd AddV2 op.
        self.assertAllClose(
            graph_exec_traces[2].debug_tensor_value,
            [add_op_digests[1].output_tensor_ids[0], 1.0, 0.0, 0.0, 0.0])
        # Sin op.
        self.assertAllClose(
            graph_exec_traces[3].debug_tensor_value,
            [sin_op_digests[0].output_tensor_ids[0], 1.0, 0.0, 0.0, 0.0])
      elif tensor_debug_mode == "SHAPE":
        # 1st element: tensor_id.
        # 2nd element: dtype (float32).
        # 3rd element: rank (scalar).
        # 4th element: element count (1).
        # Remaining elements: shape padded to fixed length (6).
        # 1st AddV2 op.
        self.assertAllClose(
            graph_exec_traces[0].debug_tensor_value,
            [add_op_digests[0].output_tensor_ids[0], 1, 0, 1, 0, 0, 0, 0, 0, 0])
        # Log op.
        self.assertAllClose(
            graph_exec_traces[1].debug_tensor_value,
            [log_op_digests[0].output_tensor_ids[0], 1, 0, 1, 0, 0, 0, 0, 0, 0])
        # 2nd AddV2 op.
        self.assertAllClose(
            graph_exec_traces[2].debug_tensor_value,
            [add_op_digests[1].output_tensor_ids[0], 1, 0, 1, 0, 0, 0, 0, 0, 0])
        # Sin op.
        self.assertAllClose(
            graph_exec_traces[3].debug_tensor_value,
            [sin_op_digests[0].output_tensor_ids[0], 1, 0, 1, 0, 0, 0, 0, 0, 0])
      else:  # FULL_TENSOR.
        full_tensor_values = [
            reader.graph_execution_trace_to_tensor_value(trace)
            for trace in graph_exec_traces]
        self.assertAllClose(full_tensor_values[0], 5.0)  # 1st AddV2 op.
        self.assertAllClose(full_tensor_values[1], np.log(5.0))  # Log op.
        self.assertAllClose(
            full_tensor_values[2], np.log(5.0) + 1.0)  # 2nd AddV2 op.
        self.assertAllClose(
            full_tensor_values[3], np.sin(np.log(5.0) + 1.0))  # Sin op.
def testCapturingExecutedGraphIdsOfTwoCompilationsOfSameFunction(self):
"""Test correct executed IDs of two FuncGraphs from the same Py function."""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NO_TENSOR")
@def_function.function
def ceil_times_two(x):
return math_ops.ceil(x) * 2.0
x_float32 = np.array(3.5, dtype=np.float32)
x_float64 = np.array(4.5, dtype=np.float64)
# Four executions, with two different FuncGraphs, which should lead
# to two unique executed graph IDs (see assertion below).
self.assertAllClose(ceil_times_two(x_float32), 8.0)
self.assertAllClose(ceil_times_two(x_float64), 10.0)
self.assertAllClose(ceil_times_two(x_float32), 8.0)
self.assertAllClose(ceil_times_two(x_float64), 10.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
executions = reader.executions()
self.assertLen(executions, 4)
for execution in executions:
self.assertStartsWith(execution.op_type, "__inference_ceil_times_two_")
executed_graph_ids = [execution.graph_id for execution in executions]
self.assertEqual(executed_graph_ids[0], executed_graph_ids[2])
self.assertEqual(executed_graph_ids[1], executed_graph_ids[3])
self.assertNotEqual(executed_graph_ids[0], executed_graph_ids[1])
self.assertNotEqual(executed_graph_ids[2], executed_graph_ids[3])
for executed_graph_id in executed_graph_ids:
self.assertEqual(
reader.graph_by_id(executed_graph_id).name, "ceil_times_two")
def testCapturingExecutedGraphIdsOfDuplicateFunctionNames(self):
"""Two FuncGraphs compiled from Python functions with identical names."""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NO_TENSOR")
class TestClass(object):
@def_function.function
def ceil_times_two(self, x):
return math_ops.ceil(x) * 2.0
# The `ceil_times_two` method of the two objects will be compiled
# into separate FuncGraphs.
test_object_1 = TestClass()
test_object_2 = TestClass()
x = np.array(3.5, dtype=np.float32)
# Four executions, with two different FuncGraphs, which should lead
# to two unique executed graph IDs (see assertion below).
self.assertAllClose(test_object_1.ceil_times_two(x), 8.0)
self.assertAllClose(test_object_2.ceil_times_two(x), 8.0)
self.assertAllClose(test_object_1.ceil_times_two(x), 8.0)
self.assertAllClose(test_object_2.ceil_times_two(x), 8.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
executions = reader.executions()
self.assertLen(executions, 4)
for execution in executions:
self.assertStartsWith(execution.op_type, "__inference_ceil_times_two_")
executed_graph_ids = [execution.graph_id for execution in executions]
self.assertEqual(executed_graph_ids[0], executed_graph_ids[2])
self.assertEqual(executed_graph_ids[1], executed_graph_ids[3])
self.assertNotEqual(executed_graph_ids[0], executed_graph_ids[1])
self.assertNotEqual(executed_graph_ids[2], executed_graph_ids[3])
for executed_graph_id in executed_graph_ids:
self.assertEqual(
reader.graph_by_id(executed_graph_id).name, "ceil_times_two")
  @parameterized.named_parameters(
      ("AddV2", "AddV2"),
      ("Log", "Log"),
      ("AddV2AndLog", "(AddV2|Log)"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testOpRegex(self, op_regex):
    """Checks that `op_regex` filters which ops have their tensors dumped.

    Graph-building history still records all ops; only the execution traces
    are restricted to op types matching `op_regex`.
    """
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode="FULL_TENSOR",
        op_regex=op_regex)
    @def_function.function
    def log_sum(x, y):
      return math_ops.log(x + y)
    @def_function.function
    def sin1p_log_sum(x, y):
      return math_ops.sin(1.0 + log_sum(x, y))
    x = constant_op.constant(2.0)
    y = constant_op.constant(3.0)
    self.assertAllClose(
        self.evaluate(sin1p_log_sum(x, y)), np.sin(1.0 + np.log(5.0)))
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()
    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      graph_op_digests = reader.graph_op_digests()
      op_types = [digest.op_type for digest in graph_op_digests]
      # The regex affects execution dumps only: the graph-building history
      # contains all ops, including the never-matched Sin.
      self.assertIn("AddV2", op_types)
      self.assertIn("Log", op_types)
      self.assertIn("Sin", op_types)
      graph_exec_digests = reader.graph_execution_traces(digest=True)
      executed_op_types = [digest.op_type for digest in graph_exec_digests]
      tensor_values = [reader.graph_execution_trace_to_tensor_value(digest)
                       for digest in graph_exec_digests]
      if op_regex == "AddV2":
        self.assertEqual(executed_op_types, ["AddV2", "AddV2"])
        self.assertLen(tensor_values, 2)
        self.assertAllClose(tensor_values[0], 5.0)  # 1st AddV2 op.
        self.assertAllClose(
            tensor_values[1], np.log(5.0) + 1.0)  # 2nd AddV2 op.
      elif op_regex == "Log":
        self.assertEqual(executed_op_types, ["Log"])
        self.assertLen(tensor_values, 1)
        self.assertAllClose(tensor_values[0], np.log(5.0))  # Log op.
      else:  # "(AddV2|Log)"
        self.assertEqual(executed_op_types, ["AddV2", "Log", "AddV2"])
        self.assertLen(tensor_values, 3)
        self.assertAllClose(tensor_values[0], 5.0)  # 1st AddV2 op.
        self.assertAllClose(tensor_values[1], np.log(5.0))  # Log op.
        self.assertAllClose(
            tensor_values[2], np.log(5.0) + 1.0)  # 2nd AddV2 op.
def testIncorrectTensorDTypeArgFormatLeadsToError(self):
with self.assertRaisesRegexp(
ValueError,
r".*expected.*list.*tuple.*callable.*but received.*\{\}"):
dumping_callback.enable_dump_debug_info(self.dump_root,
tensor_dtypes=dict())
with self.assertRaisesRegexp(
ValueError,
r".*expected.*list.*tuple.*callable.*but received.*"):
dumping_callback.enable_dump_debug_info(self.dump_root,
tensor_dtypes="float32")
with self.assertRaisesRegexp(
ValueError,
r".*expected.*list.*tuple.*callable.*but received.*"):
dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_dtypes=dtypes.float32)
with self.assertRaises(TypeError):
dumping_callback.enable_dump_debug_info(self.dump_root, tensor_dtypes=[
lambda dtype: dtype.is_floating, lambda dtype: dtype.is_integer])
  @parameterized.named_parameters(
      ("float", [dtypes.float32], None),
      ("float_only_sum", ["float32"], "Sum"),
      ("float_no_sum", (dtypes.float32,), "(?!Sum)"),
      ("int", [dtypes.int32], None),
      ("int_via_lambda", lambda dtype: dtype.is_integer, None),
      ("exclude_Sum", None, "(?!Sum)"),
      ("All", None, None),
  )
  @test_util.run_in_graph_and_eager_modes
  def testTensorDTypesAndOpRegexFilters(self,
                                        tensor_dtypes,
                                        op_regex):
    """Checks combined filtering by `tensor_dtypes` and `op_regex`.

    The traced function produces a float32 Unique-values tensor, an int32
    Unique-indices tensor and a float32 Sum; each parameterization asserts
    that exactly the tensors passing both filters are dumped.
    """
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode="FULL_TENSOR",
        tensor_dtypes=tensor_dtypes,
        op_regex=op_regex)
    @def_function.function
    def unique_sum(xs):
      """Sum over the unique values, for testing."""
      unique_xs, indices = array_ops.unique(xs)
      return math_ops.reduce_sum(unique_xs), indices
    xs = constant_op.constant([2., 6., 8., 1., 2.], dtype=dtypes.float32)
    y, indices = self.evaluate(unique_sum(xs))
    self.assertAllClose(y, 17.)
    self.assertAllEqual(indices, [0, 1, 2, 3, 0])
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()
    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      graph_exec_digests = reader.graph_execution_traces(digest=True)
      executed_op_types = [digest.op_type for digest in graph_exec_digests]
      tensor_values = [reader.graph_execution_trace_to_tensor_value(digest)
                       for digest in graph_exec_digests]
      # NOTE: "Unique" can appear under either dtype filter because the op
      # outputs both float32 unique values and int32 indices.
      if tensor_dtypes == [dtypes.float32] and not op_regex:
        self.assertEqual(executed_op_types, ["Unique", "Sum"])
        self.assertLen(tensor_values, 2)
        self.assertAllClose(tensor_values[0], [2, 6, 8, 1])  # Unique values.
        self.assertAllClose(tensor_values[1], 17.)  # Sum.
      elif tensor_dtypes == ["float32"] and op_regex == "Sum":
        self.assertEqual(executed_op_types, ["Sum"])
        self.assertLen(tensor_values, 1)
        self.assertAllClose(tensor_values[0], 17.)  # Sum.
      elif tensor_dtypes == (dtypes.float32,) and op_regex == "(?!Sum)":
        self.assertEqual(executed_op_types, ["Unique"])
        self.assertLen(tensor_values, 1)
        self.assertAllClose(tensor_values[0], [2, 6, 8, 1])  # Unique values.
      elif tensor_dtypes == [dtypes.int32] and not op_regex:
        self.assertEqual(executed_op_types, ["Unique"])
        self.assertLen(tensor_values, 1)
        self.assertAllEqual(
            tensor_values[0], [0, 1, 2, 3, 0])  # Unique indices.
      elif callable(tensor_dtypes) and not op_regex:
        self.assertEqual(executed_op_types, ["Unique"])
        self.assertLen(tensor_values, 1)
        self.assertAllEqual(
            tensor_values[0], [0, 1, 2, 3, 0])  # Unique indices.
      elif not tensor_dtypes and op_regex == "(?!Sum)":
        self.assertEqual(executed_op_types, ["Unique", "Unique"])
        self.assertLen(tensor_values, 2)
        self.assertAllClose(tensor_values[0], [2, 6, 8, 1])  # Unique values.
        self.assertAllEqual(
            tensor_values[1], [0, 1, 2, 3, 0])  # Unique indices.
      else:  # "All".
        self.assertEqual(executed_op_types, ["Unique", "Unique", "Sum"])
        self.assertLen(tensor_values, 3)
        self.assertAllClose(tensor_values[0], [2, 6, 8, 1])  # Unique values.
        self.assertAllEqual(
            tensor_values[1], [0, 1, 2, 3, 0])  # Unique indices.
        self.assertAllClose(tensor_values[2], 17)  # Sum.
  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("CurtHealth", "CURT_HEALTH"),
      ("FullTensor", "FULL_TENSOR"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testFunctionExecutionWithControlFlow(self, tensor_debug_mode):
    """Checks dumps from a tf.function containing a while loop.

    Also verifies flushing semantics: execution data must not appear in the
    dump until FlushExecutionFiles() is called.
    """
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)
    @def_function.function
    def iterative_doubling(x, times):
      i = constant_op.constant(0, dtype=dtypes.int32)
      while i < times:
        x = x * 2.0
        i += 1
      return x
    x = constant_op.constant(0.5, dtype=dtypes.float32)
    times = constant_op.constant(4, dtype=dtypes.int32)
    # 0.5 doubled 4 times: 0.5 --> 1 --> 2 --> 4 --> 8.
    self.assertAllClose(self.evaluate(iterative_doubling(x, times)), 8.0)
    writer.FlushNonExecutionFiles()
    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      graph_op_digests = reader.graph_op_digests()
      op_types = [digest.op_type for digest in graph_op_digests]
      self.assertIn("Less", op_types)
      self.assertIn("Mul", op_types)
      self.assertIn("AddV2", op_types)
      # Before FlushExecutionFiles() is called, the .execution and
      # .graph_execution_traces files should be both empty.
      self.assertEqual(reader.num_executions(), 0)
      self.assertEqual(reader.num_graph_execution_traces(), 0)
      # TODO(cais): Backport execution instrumentation to tf.Session.
      writer.FlushExecutionFiles()
      # After the flushing, the .execution file should hold the appropriate
      # contents.
      reader.update()
      if context.executing_eagerly():
        # NOTE(b/142486213): Execution of the TF function happens with
        # Session.run() in v1 graph mode, hence it doesn't get logged to the
        # .execution file.
        executions = reader.executions()
        self.assertLen(executions, 1)
        executed_op_types = [execution.op_type for execution in executions]
        self.assertIn("iterative_doubling", executions[0].op_type)
        execution = executions[0]
        self.assertLen(execution.input_tensor_ids, 2)
        self.assertLen(execution.output_tensor_ids, 1)
        self.assertEqual(
            debug_event_pb2.TensorDebugMode.keys()[execution.tensor_debug_mode],
            tensor_debug_mode)
        if tensor_debug_mode == "FULL_TENSOR":
          tensor_values = reader.execution_to_tensor_values(execution)
          self.assertAllClose(tensor_values, [8.0])
      graph_exec_traces = reader.graph_execution_traces()
      executed_op_types = [trace.op_type for trace in graph_exec_traces]
      if tensor_debug_mode != "CURT_HEALTH":
        # Less outputs a boolean tensor, which is not tracked under CURT_HEALTH.
        # The Less op should have been executed 5 times.
        self.assertEqual(executed_op_types.count("Less"), 5)
        # The last executed op should be Less.
        self.assertEqual(executed_op_types[-1], "Less")
        # AddV2 produces an int tensor, which is not tracked under CURT_HEALTH.
        # The AddV2 op should have been run, but we refrain from asserting on
        # how many times it's executed.
        self.assertIn("AddV2", executed_op_types)
        for trace in graph_exec_traces:
          self.assertEqual(trace.output_slot, 0)
      # The Mul op should have been executed 4 times.
      self.assertEqual(executed_op_types.count("Mul"), 4)
      tensor_values = [reader.graph_execution_trace_to_tensor_value(trace)
                       for trace in graph_exec_traces]
      if tensor_debug_mode == "NO_TENSOR":
        # Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought
        # to be an empty float32 tensor.
        for tensor_value in tensor_values:
          self.assertAllEqual(tensor_value, [])
      elif tensor_debug_mode == "CURT_HEALTH":
        for trace in graph_exec_traces:
          tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
          # 1st element: tensor_id; 2nd element: 0 indicating no inf or nan.
          self.assertAllClose(trace.debug_tensor_value, [tensor_id, 0.0])
      elif tensor_debug_mode == "FULL_TENSOR":
        less_values = [
            reader.graph_execution_trace_to_tensor_value(trace)
            for trace in graph_exec_traces if trace.op_type == "Less"]
        self.assertAllEqual(less_values, [True, True, True, True, False])
        mul_values = [
            reader.graph_execution_trace_to_tensor_value(trace)
            for trace in graph_exec_traces if trace.op_type == "Mul"]
        self.assertAllClose(mul_values, [1.0, 2.0, 4.0, 8.0])
def testCallingEnableTracingTwiceWithTheSameDumpRootIsIdempotent(self):
dumping_callback.enable_dump_debug_info(self.dump_root)
writer = dumping_callback.enable_dump_debug_info(self.dump_root)
x = constant_op.constant([10.0, 12.0, 10.0])
for _ in range(2):
array_ops.unique(x)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
executions = reader.executions()
self.assertLen(executions, 2)
for execution in executions:
self.assertGreater(execution.wall_time, 0)
self.assertEqual(execution.op_type, "Unique")
self.assertEqual(execution.num_outputs, 2)
_, stack_frames = reader.read_execution_stack_trace(execution)
self._verifyStackFrames(stack_frames)
def testCallingEnableTracingTwiceWithDifferentDumpRootsOverwrites(self):
dumping_callback.enable_dump_debug_info(self.dump_root)
new_dump_root = self.dump_root + "_new_dump_root"
writer = dumping_callback.enable_dump_debug_info(new_dump_root)
x = constant_op.constant([10.0, 12.0, 10.0])
for _ in range(2):
array_ops.unique(x)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(new_dump_root) as reader:
reader.update()
executions = reader.executions()
self.assertLen(executions, 2)
for execution in executions:
self.assertGreater(execution.wall_time, 0)
self.assertEqual(execution.op_type, "Unique")
self.assertEqual(execution.num_outputs, 2)
_, stack_frames = reader.read_execution_stack_trace(execution)
self._verifyStackFrames(stack_frames)
with debug_events_reader.DebugDataReader(
self.dump_root) as old_dump_root_reader:
old_dump_root_reader.update()
# The old dump root shouldn't have been written to.
self.assertEqual(old_dump_root_reader.num_executions(), 0)
self.assertFalse(old_dump_root_reader.outermost_graphs())
def testCallingEnableRepeatedlyWithDifferentTensorDebugMode(self):
"""Assert calling enable_dump_debug_info() with two tensor-debug modes.
It should lead to overwriting of the previously-configured mode.
"""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NO_TENSOR")
@def_function.function
def add_1_divide_by_2(x):
return (x + 1.0) / 2.0
self.assertAllClose(add_1_divide_by_2(constant_op.constant(4.0)), 2.5)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
graph_exec_digests = reader.graph_execution_traces(digest=True)
tensor_values = [reader.graph_execution_trace_to_tensor_value(digest)
for digest in graph_exec_digests]
for tensor_value in tensor_values:
# Under NO_TENSOR mode, each tensor is summarized as an empty float32
# array.
self.assertAllEqual(tensor_value, [])
with self.assertRaisesRegexp(
ValueError, r"already.*NO_TENSOR.*FULL_TENSOR.*not be honored"):
dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="FULL_TENSOR")
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("FullTensor", "FULL_TENSOR"),
)
def testDisableTracingWorks(self, tensor_debug_mode):
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
dumping_callback.disable_dump_debug_info()
x = constant_op.constant([10.0, 12.0, 10.0])
for _ in range(2):
array_ops.unique(x)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
self.assertEqual(reader.num_executions(), 0)
self.assertEqual(reader.num_graph_execution_traces(), 0)
self.assertFalse(reader.outermost_graphs())
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("CurtHealth", "CURT_HEALTH"),
("ConciseHealth", "CONCISE_HEALTH"),
("Shape", "SHAPE"),
("FullTensor", "FULL_TENSOR"),
)
def testMultiThreadedExecutionWithSameSetting(self, tensor_debug_mode):
"""Dumping from multiple threads using the same setting."""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
x = variables.Variable(10.0, dtype=dtypes.float32)
y = variables.Variable(3.0, dtype=dtypes.float32)
@def_function.function
def increase_x():
return x.assign_add(y * 2.0)
increase_x()
num_threads = 3
threads = []
for _ in range(num_threads):
threads.append(threading.Thread(target=increase_x))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# 10 --> 16 --> 22 --> 28 --> 34.
self.assertAllClose(x.read_value(), 34.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
exec_digests = reader.executions(digest=True)
prev_wall_time = 1
for exec_digest in exec_digests:
self.assertGreaterEqual(exec_digest.wall_time, prev_wall_time)
prev_wall_time = exec_digest.wall_time
graph_exec_traces = reader.graph_execution_traces()
executed_op_types = [trace.op_type for trace in graph_exec_traces]
self.assertEqual(executed_op_types.count("Mul"), 1 + num_threads)
self.assertEqual(
executed_op_types.count("ReadVariableOp"), 2 * (1 + num_threads))
for trace in graph_exec_traces:
# These are all single-output tensors.
self.assertEqual(trace.output_slot, 0)
tensor_values = [reader.graph_execution_trace_to_tensor_value(trace)
for trace in graph_exec_traces]
if tensor_debug_mode == "NO_TENSOR":
for tensor_value in tensor_values:
self.assertAllEqual(tensor_value, [])
elif tensor_debug_mode == "CURT_HEALTH":
for trace in graph_exec_traces:
tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
# 1st element: tensor ID; 2nd element: 0 indicating no inf or nan.
self.assertAllClose(trace.debug_tensor_value, [tensor_id, 0])
elif tensor_debug_mode == "CONCISE_HEALTH":
for tensor_value in tensor_values:
tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
# 1st element: tensor ID.
# 2nd element: element count. Remaining elements: all zero because there
# is no -inf, inf or nan.
self.assertAllClose(trace.debug_tensor_value, [tensor_id, 1, 0, 0, 0])
elif tensor_debug_mode == "SHAPE":
for trace in graph_exec_traces:
if trace.op_type == "Mul":
tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
mul_value = reader.graph_execution_trace_to_tensor_value(trace)
# 1st element: tensor_id, should be >= 0.
# 2nd element: dtype enum value (float32).
# 3rd element: rank.
# 4th element: element count.
self.assertAllClose(mul_value, [tensor_id, 1, 0, 1, 0, 0, 0, 0, 0, 0])
elif tensor_debug_mode == "FULL_TENSOR":
mul_values = [
reader.graph_execution_trace_to_tensor_value(trace)
for trace in graph_exec_traces if trace.op_type == "Mul"]
self.assertAllClose(mul_values, [6.0, 6.0, 6.0, 6.0])
  def testMultiThreadedDumpingWithDifferentSettings(self):
    """Checks two threads dumping to different roots with different settings.

    One thread dumps to dump_root_1 with dumping enabled throughout; the
    other dumps to dump_root_2 but disables dumping midway. Disabling on
    one thread must not affect the dumping already underway on the other.
    """
    dump_root_1 = os.path.join(self.dump_root, "dump_root_1")
    dump_root_2 = os.path.join(self.dump_root, "dump_root_2")
    v1 = variables.Variable(10.0, dtype=dtypes.float32)
    v2 = variables.Variable(3.0, dtype=dtypes.float32)
    def add_negative_v1_squared_to_itself():
      writer = dumping_callback.enable_dump_debug_info(
          dump_root_1, tensor_debug_mode="FULL_TENSOR")
      # Run in a loop to facilitate interleaving between threads.
      for _ in range(3):
        v1.assign_add(-(v1 ** 2.0))
      writer.FlushNonExecutionFiles()
      writer.FlushExecutionFiles()
    def add_negative_v2_squared_to_itself():
      writer = dumping_callback.enable_dump_debug_info(
          dump_root_2, tensor_debug_mode="FULL_TENSOR")
      v2_squared = v2 ** 2.0
      # Since dumping is disabled before the Neg op is called, no tensor data
      # should be dumped from the op, but this shouldn't affect the dumping of
      # the tensor data from the Neg op in `add_negative_v1_squared_to_itself`.
      # Both behaviors are checked below.
      dumping_callback.disable_dump_debug_info()
      negative_v2_squared = -v2_squared
      v2.assign_add(negative_v2_squared)
      writer.FlushNonExecutionFiles()
      writer.FlushExecutionFiles()
    # v2 is mutated on a sub-thread.
    sub_thread = threading.Thread(target=add_negative_v2_squared_to_itself)
    sub_thread.start()
    add_negative_v1_squared_to_itself()  # v1 is mutated on the main thread.
    sub_thread.join()
    # 10 - 10 * 10 = -90.
    # -90 - (-90 * -90) = -8190.
    # -8190 - (-8190 * -8190) = -67084290.
    self.assertAllClose(v1.read_value(), -67084290.0)
    # 3 - 3 * 3 = -6.
    self.assertAllClose(v2.read_value(), -6.0)
    with debug_events_reader.DebugDataReader(dump_root_1) as reader:
      reader.update()
      exec_digests = reader.executions(digest=True)
      v1_squared_values = [
          reader.execution_to_tensor_values(digest)
          for digest in exec_digests if digest.op_type == "Pow"]
      negative_v1_squared_values = [
          reader.execution_to_tensor_values(digest)
          for digest in exec_digests if digest.op_type == "Neg"]
      self.assertAllClose(v1_squared_values, [[100.0], [8100.0], [67076100.0]])
      self.assertAllClose(
          negative_v1_squared_values, [[-100.0], [-8100.0], [-67076100.0]])
    with debug_events_reader.DebugDataReader(dump_root_2) as reader:
      reader.update()
      exec_digests = reader.executions(digest=True)
      executed_op_types = [digest.op_type for digest in exec_digests]
      # Dumping was disabled on this thread before the Neg op executed.
      self.assertNotIn("Neg", executed_op_types)
      v2_squared_values = [
          reader.execution_to_tensor_values(digest)
          for digest in exec_digests if digest.op_type == "Pow"]
      self.assertAllClose(v2_squared_values, [[9.0]])
  @test_util.run_in_graph_and_eager_modes
  def testNestedContextIsCapturedByGraphOpCreationHistory(self):
    """Ops in while-loop cond vs. body contexts get distinct graph IDs."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode="NO_TENSOR")

    @def_function.function
    def iterative_doubling(x, times):
      # The while condition (Less op) lives in the cond context; the
      # Mul and Sub ops live in the body context.
      i = constant_op.constant(0, dtype=dtypes.int32)
      while i < times:
        x = x * 2.0 - 1.0
        i += 1
      return x

    x = constant_op.constant(2.0, dtype=dtypes.float32)
    times = constant_op.constant(4, dtype=dtypes.int32)
    # 2 * 2 - 1 = 3; 3 * 2 - 1 = 5; 5 * 2 - 1 = 9; 9 * 2 - 1 = 17.
    self.assertAllClose(self.evaluate(iterative_doubling(x, times)), 17.0)
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      less_op_digest = reader.graph_op_digests(op_type="Less")[-1]
      mul_op_digest = reader.graph_op_digests(op_type="Mul")[-1]
      sub_op_digest = reader.graph_op_digests(op_type="Sub")[-1]
      # The Less op is from the while-loop cond context and hence should have
      # a different innermost context ID from the mul and sub ops, which are
      # both from the while-loop body context.
      self.assertNotEqual(less_op_digest.graph_id, mul_op_digest.graph_id)
      self.assertNotEqual(less_op_digest.graph_id, sub_op_digest.graph_id)
      # The Mul and Sub ops are from the same innermost context.
      self.assertEqual(mul_op_digest.graph_id, sub_op_digest.graph_id)
  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("FullTensor", "FULL_TENSOR"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testSimpleKerasRecurrentModelPredict(self, tensor_debug_mode):
    """predict() on a small recurrent model is captured by the dump writer."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)
    model = _create_simple_recurrent_keras_model([3, 4])
    batch_size = 5
    xs = np.ones([batch_size, 3, 4])
    self.assertAllClose(model.predict(xs), np.zeros([batch_size, 1]))
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      if context.executing_eagerly():
        # NOTE(b/142486213): Execution of the TF function happens with
        # Session.run() in v1 graph mode, hence it doesn't get logged to the
        # .execution file.
        self.assertTrue(reader.executions(digest=True))

      graph_exec_digests = reader.graph_execution_traces(digest=True)
      executed_op_types = [digest.op_type for digest in graph_exec_digests]
      # These are the ops that we can safely assume to have been executed during
      # the model prediction.
      self.assertIn("MatMul", executed_op_types)
      self.assertIn("BiasAdd", executed_op_types)
      # On the GPU, CudnnRNN is used in lieu of the default op-by-op
      # implementation.
      self.assertTrue(
          ("Sigmoid" in executed_op_types and "Tanh" in executed_op_types or
           "CudnnRNN" in executed_op_types))

      # Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought to
      # be an empty float32 tensor.
      tensor_values = [reader.graph_execution_trace_to_tensor_value(digest)
                       for digest in graph_exec_digests]
      if tensor_debug_mode == "NO_TENSOR":
        for tensor_value in tensor_values:
          self.assertAllEqual(tensor_value, [])
      else:
        # Refrain from asserting the internal implementation details of the LSTM
        # layer.
        self.assertTrue(any(
            bool(tensor_value.size) for tensor_value in tensor_values))
  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("FullTensor", "FULL_TENSOR"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testSimpleKerasRecurrentModelFit(self, tensor_debug_mode):
    """fit() on a small recurrent model is captured by the dump writer.

    Checks that forward ops and their gradient counterparts appear in the
    graph-execution traces, and that NO_TENSOR mode stores empty values.
    """
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)
    model = _create_simple_recurrent_keras_model([3, 4])
    xs = np.ones([5, 3, 4])
    ys = np.ones([5, 1])

    history = model.fit(xs, ys, epochs=3, verbose=0)
    # NOTE(review): the exact loss sequence presumably relies on deterministic
    # model initialization — confirm against _create_simple_recurrent_keras_model.
    self.assertAllClose(
        history.history["loss"], [1.0, 0.9603999853134155, 0.9223681688308716])

    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      if context.executing_eagerly():
        exec_digests = reader.executions(digest=True)
        self.assertTrue(exec_digests)
        if tensor_debug_mode == "NO_TENSOR":
          for digest in exec_digests:
            tensor_values = reader.execution_to_tensor_values(digest)
            for tensor_value in tensor_values:
              self.assertEqual(tensor_value, [])

      graph_exec_digests = reader.graph_execution_traces(digest=True)
      executed_op_types = [digest.op_type for digest in graph_exec_digests]
      # These are the ops that we can safely assume to have been executed during
      # the recurrent model's fit() call.
      self.assertIn("MatMul", executed_op_types)
      self.assertIn("BiasAdd", executed_op_types)
      # On the GPU, CudnnRNN is used in lieu of the default op-by-op
      # implementation.
      self.assertTrue(
          ("Sigmoid" in executed_op_types and "Tanh" in executed_op_types or
           "CudnnRNN" in executed_op_types))
      self.assertTrue(
          ("SigmoidGrad" in executed_op_types and
           "TanhGrad" in executed_op_types or
           "CudnnRNNBackprop" in executed_op_types))
      if tensor_debug_mode == "NO_TENSOR":
        for digest in graph_exec_digests:
          tensor_values = reader.graph_execution_trace_to_tensor_value(digest)
          for tensor_value in tensor_values:
            self.assertEqual(tensor_value, [])
  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("FullTensor", "FULL_TENSOR"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testMobiletNetV2Fit(self, tensor_debug_mode):
    """Test training Keras MobileNetV2 works with dumping."""
    # Use a large circular-buffer to make sure we capture all the executed ops.
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root,
        tensor_debug_mode=tensor_debug_mode,
        circular_buffer_size=100000)
    # Truncate MobileNetV2 at layer 22 and add a tiny dense head: keeps the
    # test fast while still exercising Conv2D/Relu6 forward and backward ops.
    model = mobilenet_v2.MobileNetV2(
        input_shape=(32, 32, 3), alpha=0.1, weights=None)
    y = model.layers[22].output
    y = core.Flatten()(y)
    y = core.Dense(1)(y)
    model = models.Model(inputs=model.inputs, outputs=y)

    batch_size = 2
    xs = np.zeros([batch_size] + list(model.input_shape[1:]))
    ys = np.zeros([batch_size] + list(model.output_shape[1:]))
    model.compile(optimizer="sgd", loss="mse")
    epochs = 1
    history = model.fit(xs, ys, epochs=epochs, verbose=0)
    self.assertLen(history.history["loss"], epochs)

    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      if context.executing_eagerly():
        # NOTE(b/142486213): Execution of the TF function happens with
        # Session.run() in v1 graph mode, hence it doesn't get logged to the
        # .execution file.
        exec_digests = reader.executions(digest=True)
        self.assertTrue(exec_digests)

      graph_exec_digests = reader.graph_execution_traces()
      executed_op_types = [digest.op_type for digest in graph_exec_digests]
      # These are the ops that we can safely assume to have been executed during
      # the model's fit() call.
      self.assertIn("Conv2D", executed_op_types)
      self.assertIn("Relu6", executed_op_types)
      self.assertIn("Conv2DBackpropFilter", executed_op_types)
      self.assertIn("Relu6Grad", executed_op_types)
      if tensor_debug_mode == "NO_TENSOR":
        # Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought
        # to be an empty float32 tensor.
        tensor_values = [
            reader.graph_execution_trace_to_tensor_value(digest)
            for digest in graph_exec_digests]
        for tensor_value in tensor_values:
          self.assertAllEqual(tensor_value, [])
      elif tensor_debug_mode == "FULL_TENSOR":
        # In FULL_TENSOR mode, spot-check the shapes of captured forward and
        # backward tensors (batch dimension for the forward ops).
        conv2d_values = [
            reader.graph_execution_trace_to_tensor_value(digest)
            for digest in graph_exec_digests if digest.op_type == "Conv2D"]
        self.assertTrue(conv2d_values)
        for conv2d_value in conv2d_values:
          self.assertGreater(len(conv2d_value.shape), 1)
          self.assertEqual(conv2d_value.shape[0], batch_size)
        relu6_values = [
            reader.graph_execution_trace_to_tensor_value(digest)
            for digest in graph_exec_digests if digest.op_type == "Relu6"]
        self.assertTrue(relu6_values)
        for relu6_value in relu6_values:
          self.assertGreater(len(relu6_value.shape), 1)
          self.assertEqual(relu6_value.shape[0], batch_size)
        conv2d_bp_filter_values = [
            reader.graph_execution_trace_to_tensor_value(digest)
            for digest in graph_exec_digests
            if digest.op_type == "Conv2DBackpropFilter"]
        self.assertTrue(conv2d_bp_filter_values)
        for conv2d_bp_filter_value in conv2d_bp_filter_values:
          self.assertGreater(len(conv2d_bp_filter_value.shape), 1)
        relu6_grad_values = [
            reader.graph_execution_trace_to_tensor_value(digest)
            for digest in graph_exec_digests if digest.op_type == "Relu6Grad"]
        self.assertTrue(relu6_grad_values)
        for relu6_grad_value in relu6_grad_values:
          self.assertGreater(len(relu6_grad_value.shape), 1)
if __name__ == "__main__":
  # The dumping-callback tests exercise TF 2.x behavior; force eager mode
  # before handing control to the test runner.
  ops.enable_eager_execution()
  googletest.main()
|
foreman.py | # vim:ts=4:sts=4:sw=4:expandtab
import copy
import datetime
import dateutil.parser
import glob
import json
import logging
import math
from multiprocessing import Process
import os
import random
import shutil
import subprocess
import sys
import tempfile
import traceback
from threading import Thread
import time
import uuid
from kolejka.common import kolejka_config, foreman_config
from kolejka.common import KolejkaTask, KolejkaResult, KolejkaLimits
from kolejka.common import MemoryAction, TimeAction, parse_memory
from kolejka.client import KolejkaClient
from kolejka.common.images import (
pull_docker_image,
get_docker_image_size,
check_docker_image_existance,
list_docker_images,
remove_docker_image
)
from kolejka.worker.stage0 import stage0
from kolejka.worker.volume import check_python_volume
def manage_images(pull, size, necessary_images, priority_images):
    """Trim the local docker image cache to the configured size budget.

    Args:
        pull: when True, always pull necessary images even if present.
        size: total bytes allowed for cached docker images.
        necessary_images: dict of image -> declared size limit; these are
            always kept and pulled on demand.
        priority_images: images preferred for keeping within the leftover
            budget.

    Fix: the original shadowed the `size` parameter with two different loop
    variables; locals are renamed for clarity — behavior is unchanged.
    """
    necessary_size = sum(necessary_images.values(), 0)
    free_size = size - necessary_size
    assert free_size >= 0
    docker_images = list_docker_images()
    # Restrict the priority list to images that are actually present locally.
    present_priority = dict()
    for image in priority_images:
        if image in docker_images:
            present_priority[image] = docker_images[image]
    priority_images = present_priority
    # Necessary images are always kept.
    keep_images = set()
    for image in necessary_images:
        keep_images.add(image)
    # Candidate order: shuffled priority images first, then all local images
    # shuffled; greedily keep whatever still fits into the free budget.
    candidates = list(priority_images.items())
    random.shuffle(candidates)
    local_candidates = list(docker_images.items())
    random.shuffle(local_candidates)
    candidates += local_candidates
    for image, image_size in candidates:
        if image in keep_images:
            continue
        if image_size <= free_size:
            free_size -= image_size
            keep_images.add(image)
    # Evict every local image that was not selected above.
    for image in docker_images:
        if image not in keep_images:
            remove_docker_image(image)
    # Ensure necessary images are present (pull when missing or forced) and
    # verify each stays within its declared per-image limit.
    for image, declared_size in necessary_images.items():
        pull_image = pull
        if not pull_image:
            if not check_docker_image_existance(image):
                pull_image = True
        if pull_image:
            pull_docker_image(image)
        image_size = get_docker_image_size(image)
        assert image_size <= declared_size
def foreman_single(temp_path, task):
    """Execute one task inside an isolated (jailed) temporary directory.

    Runs in a child process (see foreman()): downloads the task from the
    server, runs stage0 on it, and uploads the result.  When the task
    declares a workspace limit, a size-capped tmpfs is mounted over the
    jail directory for the duration of the run.

    Args:
        temp_path: parent directory for the temporary jail (may be None).
        task: KolejkaTask to execute.
    """
    config = foreman_config()
    # BUG FIX: temp_path was previously passed positionally, which made it
    # the *suffix* argument of TemporaryDirectory; it is meant to be the
    # parent directory (dir=).
    with tempfile.TemporaryDirectory(dir=temp_path) as jailed_path:
        if task.limits.workspace is not None:
            subprocess.run(['mount', '-t', 'tmpfs', '-o', 'size='+str(task.limits.workspace), 'none', jailed_path], check=True)
        try:
            task_path = os.path.join(jailed_path, 'task')
            result_path = os.path.join(jailed_path, 'result')
            temp_path = os.path.join(jailed_path, 'temp')
            os.makedirs(task_path, exist_ok=True)
            os.makedirs(result_path, exist_ok=True)
            os.makedirs(temp_path, exist_ok=True)
            task.path = task_path
            client = KolejkaClient()
            client.task_get(task.id, task_path)
            # Rebind each task file to its path relative to the task folder
            # before committing the task spec.
            for k, f in task.files.items():
                f.path = k
            task.commit()
            stage0(task.path, result_path, temp_path=temp_path, consume_task_folder=True)
            result = KolejkaResult(result_path)
            result.tags = config.tags
            client.result_put(result)
        except:
            # Best-effort execution: report the failure and let the foreman
            # continue with other tasks.
            traceback.print_exc()
        finally:
            # BUG FIX: the tmpfs is mounted when limits.workspace is set, but
            # the unmount used to be guarded by limits.storage — leaking the
            # mount whenever workspace was set without storage.
            if task.limits.workspace is not None:
                subprocess.run(['umount', '-l', jailed_path])
def foreman():
    """Main worker loop: dequeue tasks from the kolejka server and run them.

    Forever: fetch up to `concurency` tasks matching this machine's limits
    and tags, greedily pack a batch that fits the available resources,
    prefetch docker images, run each task of the batch in its own process,
    and wait for the whole batch before packing the next one.  Any error
    other than KeyboardInterrupt is logged and the loop resumes after
    `interval` seconds.
    """
    config = foreman_config()
    # Advertise this machine's resource limits to the server.
    limits = KolejkaLimits()
    limits.cpus = config.cpus
    limits.memory = config.memory
    limits.swap = config.swap
    limits.pids = config.pids
    limits.storage = config.storage
    limits.image = config.image
    limits.workspace = config.workspace
    limits.time = config.time
    limits.network = config.network
    limits.gpus = config.gpus
    client = KolejkaClient()
    while True:
        try:
            tasks = client.dequeue(config.concurency, limits, config.tags)
            if len(tasks) == 0:
                time.sleep(config.interval)
            else:
                check_python_volume()
                while len(tasks) > 0:
                    # `resources` tracks what remains available while the
                    # batch below is being packed.
                    resources = KolejkaLimits()
                    resources.update(limits)
                    image_usage = dict()
                    processes = list()
                    cpus_offset = 0
                    gpus_offset = 0
                    for task in tasks:
                        if len(processes) >= config.concurency:
                            break
                        # An exclusive task may only run alone.
                        if task.exclusive and len(processes) > 0:
                            break
                        task.limits.update(limits)
                        task.limits.cpus_offset = cpus_offset
                        task.limits.gpus_offset = gpus_offset
                        # Admission check: the task joins the batch only if
                        # every resource dimension still fits.
                        ok = True
                        if resources.cpus is not None and task.limits.cpus > resources.cpus:
                            ok = False
                        if task.limits.gpus is not None and task.limits.gpus > 0:
                            if resources.gpus is None or task.limits.gpus > resources.gpus:
                                ok = False
                        if resources.memory is not None and task.limits.memory > resources.memory:
                            ok = False
                        # NOTE(review): this gpus check duplicates the one a
                        # few lines above; harmless but redundant.
                        if resources.gpus is not None:
                            if task.limits.gpus > resources.gpus:
                                ok = False
                        if resources.swap is not None and task.limits.swap > resources.swap:
                            ok = False
                        if resources.pids is not None and task.limits.pids > resources.pids:
                            ok = False
                        if resources.storage is not None and task.limits.storage > resources.storage:
                            ok = False
                        if resources.image is not None:
                            # Image budget is charged once per distinct image:
                            # only the increase over what is already reserved.
                            image_usage_add = max(image_usage.get(task.image, 0), task.limits.image) - image_usage.get(task.image, 0)
                            if image_usage_add > resources.image:
                                ok = False
                        if resources.workspace is not None and task.limits.workspace > resources.workspace:
                            ok = False
                        if ok:
                            proc = Process(target=foreman_single, args=(config.temp_path, task))
                            processes.append(proc)
                            # Reserve the task's share of every resource.
                            cpus_offset += task.limits.cpus
                            if resources.cpus is not None:
                                resources.cpus -= task.limits.cpus
                            gpus_offset += task.limits.gpus
                            if resources.gpus is not None:
                                resources.gpus -= task.limits.gpus
                            if resources.memory is not None:
                                resources.memory -= task.limits.memory
                            if resources.swap is not None:
                                resources.swap -= task.limits.swap
                            if resources.pids is not None:
                                resources.pids -= task.limits.pids
                            if resources.storage is not None:
                                resources.storage -= task.limits.storage
                            if resources.image is not None:
                                resources.image -= image_usage_add
                                image_usage[task.image] = max(image_usage.get(task.image, 0), task.limits.image)
                            if resources.workspace is not None:
                                resources.workspace -= task.limits.workspace
                            # Tasks are consumed strictly in order, so
                            # dropping the head matches the scheduled task.
                            tasks = tasks[1:]
                            if task.exclusive:
                                break
                        else:
                            # First task that does not fit ends the batch.
                            break
                    if config.image is not None:
                        # Make room in the image cache: keep the batch's
                        # images, prefer images of still-queued tasks.
                        manage_images(
                            config.pull,
                            config.image,
                            image_usage,
                            [task.image for task in tasks]
                        )
                    for proc in processes:
                        proc.start()
                    for proc in processes:
                        proc.join()
        except KeyboardInterrupt:
            raise
        except:
            # Keep the worker alive on transient errors (network, server).
            traceback.print_exc()
            time.sleep(config.interval)
def str2bool(value):
    """argparse-friendly boolean parser.

    Fixes the classic `type=bool` pitfall: bool('False') is True because any
    non-empty string is truthy.  Maps the usual textual spellings to real
    booleans; anything else raises ValueError, which argparse reports as a
    usage error.
    """
    text = value.strip().lower()
    if text in ('1', 'true', 'yes', 'on'):
        return True
    if text in ('0', 'false', 'no', 'off'):
        return False
    raise ValueError('invalid boolean value: %r' % value)


def config_parser(parser):
    """Register the foreman's command line options on *parser*.

    Size/time limits use the MemoryAction/TimeAction argparse actions so
    values like '1G' parse into numbers; boolean flags go through str2bool
    (the original `type=bool` accepted any non-empty string as True).  The
    final set_defaults wires the `execute` entry point used by the kolejka
    command dispatcher.
    """
    parser.add_argument('--auto-tags', type=str2bool, help='add automatically generated machine tags', default=True)
    parser.add_argument('--pull', action='store_true', help='always pull images, even if local version is present', default=False)
    parser.add_argument('--tags', type=str, help='comma separated list of machine tags')
    parser.add_argument('--temp', type=str, help='temp folder')
    parser.add_argument('--interval', type=float, help='dequeue interval (in seconds)')
    # NOTE: '--concurency' is misspelled but kept for backward compatibility
    # with existing configuration files and scripts.
    parser.add_argument('--concurency', type=int, help='number of simultaneous tasks')
    parser.add_argument('--cpus', type=int, help='cpus limit')
    parser.add_argument('--memory', action=MemoryAction, help='memory limit')
    parser.add_argument('--swap', action=MemoryAction, help='swap limit')
    parser.add_argument('--pids', type=int, help='pids limit')
    parser.add_argument('--storage', action=MemoryAction, help='storage limit')
    parser.add_argument('--image', action=MemoryAction, help='image size limit')
    parser.add_argument('--workspace', action=MemoryAction, help='workspace size limit')
    parser.add_argument('--time', action=TimeAction, help='time limit')
    parser.add_argument('--network', type=str2bool, help='allow networking')
    parser.add_argument('--gpus', type=int, help='gpus limit')

    def execute(args):
        # Load configuration from the parsed arguments, then run the loop.
        kolejka_config(args=args)
        foreman()
    parser.set_defaults(execute=execute)
|
client_active.py | # encoding: utf-8
"""
__auth__: 焦淑鹏
__require__: 模拟客户端,与它端进行交互
__version__: 无要求
"""
import socket
import ssl
import threading
import msgpack
import time
import sys
import datetime
from convert import Convert
from ..message_type import Message
from ..json_formatter import JsonFormatter
from ..setting import path
# from ..logger import Log
class ClientIM:
    """Simulated IM chat client (Python 2).

    Connects to the chat server over TCP (or SSL), performs a binary
    authentication handshake, then exchanges bit-string framed packets on
    separate send/receive threads.  Packets are handled as lists of 8-bit
    binary strings ('01001000', ...); payload bodies are msgpack-encoded.
    Optionally acts as a chat robot replying via an HTTP backend.
    """
    def __init__(self, **kwargs):
        # Chat host and port configuration.
        self.config = kwargs.get("parameter")
        self.uid = self.config['user_info']['uid']
        self.message = kwargs.get('message')
        self.chat_host = self.config['host']
        self.chat_port = 8080
        self.classCon = Convert(config=self.config)
        self.num = 10
        self.protocol = self.message['protocol']
        # self.log = Log()
        if '--verbose' in sys.argv:
            self.verbose = True
        else:
            self.verbose = False
        # NOTE(review): hosts starting with 'h' (presumably http...) get an
        # SSL-wrapped socket; this single-character check looks fragile —
        # confirm intended host formats.
        if self.chat_host[0] != 'h':
            self.is_online = 1
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        else:
            self.is_online = 0
            self.sock = ssl.wrap_socket(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
        self.sock.connect((self.chat_host, self.chat_port))

    def socket_im(self):
        """Authenticate and start the send/receive message threads."""
        try:
            send_data = self.classCon.arrays_comb(0b00011001)
            if send_data == 0:
                return
            # Build the authentication packet.
            self.sock.send(bytearray(send_data))
            # Send the authentication packet and read the 16-byte reply.
            reply = self.sock.recv(16)
            get_header = self.header_byte(reply)
            # Extract the authentication response header.
            if get_header == '00010010':  # check whether authentication succeeded
                print '鉴权成功'
                threads = []
                # Thread for sending messages.
                send_msg = threading.Thread(target=self.send_msg, args=())
                threads.append(send_msg)
                # Thread for receiving messages.
                # TODO: needs to be released/cleaned up later ----****----
                rec_msg = threading.Thread(target=self.rec_msg, args=())
                threads.append(rec_msg)
                # self.sync()
                # NOTE(review): join() inside the loop means the receive
                # thread only starts after the send thread finishes — confirm
                # this sequencing is intended.
                for thread in threads:
                    thread.setDaemon(False)
                    thread.start()
                    thread.join()
            else:
                print '连接断开'
                return False
        finally:
            self.sock.close()

    @staticmethod
    def header_byte(reply):  # parse the packet header
        """Return the first byte of *reply* as an 8-char binary string."""
        get_reply = bytearray(reply)
        head = bin(get_reply[0]).replace('0b', '').zfill(8)
        return head

    def ping_byte(self, reply):
        """Dispatch a received packet (bit-string list) by its header byte."""
        header = reply[0]
        if header == '00101001':  # heartbeat packet contains only a header
            try:
                data_byte = []
                for num in range(8):
                    data = bin(reply[num]).replace('0b', '').zfill(8)
                    data_byte.append(data)
                version = self.montage(data_byte[1])
                message_len = self.montage(data_byte[2:6])
                ping_time = self.montage(data_byte[6:8])
                print '接收到服务器发送的心跳包,心跳包版本为%d,消息体长度为%d,间隔时间为%d秒' % (version, message_len, ping_time)
            except:
                # pass
                print '接收到服务器发送的心跳包,心跳包仅包含header'
        elif header == '00111010':  # message delivery acknowledgement
            if len(reply) == 23:
                header = reply[0]
                version = int(reply[1], 2)
                msg_lenth = int(''.join(reply[2:6]), 2)
                res = int(reply[6], 2)
                local_id = int(''.join(reply[7:11]), 2)
                if res == 0:
                    print '消息成功送达,header为%s, 版本为%s, 消息长度为%s, local ID为%s' % (header, version, msg_lenth, local_id)
                else:
                    print '消息送达失败, header为%s, 版本为%s, 消息长度为%s' % (header, version, msg_lenth)
            else:
                error_code = int(reply[6], 2)
                print Message().send_error_code(error_code)
        elif header == '00100001':
            # Server PING without payload: answer with a pong.
            # print '接收到服务器不带有payloads PING'
            ping_data = bytearray(self.classCon.ping(1))
            self.sock.send(ping_data)
        elif header == '01111010':
            # REQ response.
            self.return_req(reply)
        else:
            # Anything else is treated as a pushed message bundle.
            self.msg_type_push(reply)

    @staticmethod
    def montage(values):
        """Join bit-strings (or accept one bit-string) and parse as base-2 int."""
        if type(values) == str:
            result = values
        else:
            result = ''.join(values)
        res = int(result, 2)
        return res

    def send_msg(self, robot=False):
        """Send messages: robot mode sends self.message once; interactive
        mode loops on raw_input for 'send', or sends a single 'req'."""
        if "is_robot" in self.message.keys():
            if robot is True:
                msg_data = bytearray(self.classCon.send_protocol(self.message))
                self.sock.send(msg_data)
        else:
            while True:
                if self.protocol == 'send':
                    contents = raw_input("请输入发送内容:\n")
                    self.message['contents'] = contents
                    msg_data = bytearray(self.classCon.send_protocol(self.message))
                elif self.protocol == 'req':
                    msg_data = bytearray(self.classCon.req_protocol(self.message))
                else:
                    print "消息发送失败"
                    msg_data = None
                if msg_data is not None:
                    self.sock.send(msg_data)
                    if self.protocol == 'req':
                        break
                else:
                    break

    # Forced socket shutdown (abnormal exit).
    def sign_out(self):
        """Shut down and close the chat socket."""
        self.sock.shutdown(2)
        self.sock.close()

    def rec_msg(self):
        """Receive loop: read framed packets and dispatch via ping_byte."""
        while True:
            try:
                recv_data = self.recv_basic()
            except Exception, e:
                # errno 9 == bad file descriptor: the socket was closed.
                if e.args[0] == 9:
                    print "关闭socket"
                break
            try:
                header = recv_data[0]
            except:
                # Empty/invalid packet ends the loop.
                # print recv_data
                break
            if header == '00101001':
                # Heartbeat: log it and answer with a pong.
                self.ping_byte(recv_data)
                ping_data = bytearray(self.classCon.ping(1))
                self.sock.send(ping_data)
            elif header == '11101000':
                # Connection ACK with status code: report and stop receiving.
                value = int(recv_data[6])
                print Message().connect_ack(value)
                return False
            else:
                self.ping_byte(recv_data)

    def sync(self):
        """Send a sync request packet."""
        msg_data = bytearray(self.classCon.sync())
        self.sock.send(msg_data)

    # def sync_only(self):
    #     # end = 83508
    #     # latest message ID
    #     end = 82168
    #     for i in range(200):
    #         start = end - 10
    #         msg_data = bytearray(self.classCon.sync_only(start, end))
    #         self.sock.send(msg_data)
    #         end -= 10
    #         time.sleep(10)

    def json_formt(self, data):
        """Pretty-print *data* as JSON in red; fall back to a raw print."""
        try:
            data = str(data)
            jf = JsonFormatter(json_data=data).render()[1:]
            print "\033[1;31m %s" % jf
        except:
            print data

    def recv_basic(self):
        """Read from the socket until a complete framed packet is assembled.

        Returns the packet as a list of 8-character bit-strings.  The frame
        length is parsed from bytes 2-5 (+6 bytes of header overhead).
        """
        total_data = []
        while True:
            data = self.sock.recv(2048)
            if not len(data):
                break
            data = bytearray(data)
            for num in range(0, len(data)):
                data_msg = bin(data[num]).replace('0b', '').zfill(8)
                total_data.append(data_msg)
            if len(total_data) > 2:
                index = 2
                # A leading PING byte may be glued before the message frame;
                # drop it so the length field lines up.
                if total_data[0] == '00100001' and total_data[1] == '01001000':
                    total_data = total_data[1:]
                msg_len = int(''.join(total_data[index:index + 4]), 2) + 6
                if len(total_data) == msg_len:
                    return total_data
                if total_data[0] == '01001000' and total_data[1] == '00000011':
                    return total_data
            else:
                return total_data

    def msg_type_push(self, reply):
        """Split a pushed packet bundle into frames and handle each one."""
        index = 0
        header = reply[0]
        body = reply
        if header == '01011110':
            # Bundle header: the first 16 bytes are envelope, skip them.
            body = reply[16:]
        for i in range(0, len(body)):
            # Frame length is bytes 2-5 of each frame, plus 6 header bytes.
            msg_body = int("".join(body[index + 2:index + 6]), 2) + 6
            push_data = body[index:index + msg_body]
            self.return_push(push_data)
            index += msg_body
            if index == len(body):
                break

    def return_push(self, push_data):
        """Handle one pushed message frame: decode, display and, in robot
        mode, produce an automatic reply through the HTTP robot backend."""
        header = push_data[0]
        if header == '01001000' and len(push_data) == 20:
            print "对方查看了你的消息"
            return
        session = int(push_data[7], 2)
        s_type = Message().session_list(session)  # session type
        # messageId = int(''.join(push_data[12:20]), 2)  # message ID
        # messageTime = int(''.join(push_data[20:24]), 2)  # message timestamp
        other_uid = int(''.join(push_data[8:12]), 2)
        # Payload starts at byte 28 and is msgpack-encoded.
        bin_msg = ''.join(push_data[28:])
        str_msg = msgpack.unpackb(Convert().restore(bin_msg))
        print '当前时间:'+str(datetime.datetime.now())
        print 'str_msg:'+str(str_msg)
        name = ""
        if 'profile' in str_msg.keys() and len(str_msg['profile']) != 0:
            if 'name' in str_msg['profile'].keys():
                name = str_msg['profile']['name']
        elif 'from' in str_msg.keys():
            name = str_msg['from']
        if 'contents' in str_msg.keys():
            contents=str_msg['contents']
        else:
            print 'contents失败'
        # contents = str_msg['contents']
        # print 'aaaa:'+contents
        # Red-packet handling: if a streamer red packet is received, record
        # the red-packet id.
        # if str_msg['type'] == 92:
        #
        #     file = open('%s/req_http/hongbao/logs/hongbao_id.txt' % path, 'w')
        #     file.write(str_msg['extra']['hongbao_id'])
        #     file.close()
        # self.sock.shutdown(2)
        # self.sock.close()
        m_type = Message().message_list(str_msg['type'])  # message type
        if header == '01001100':
            # Sync (offline/history) messages.
            if str_msg['type'] == 9:
                print '\033[1;32m 同步群组%s 会话类型为%s 群名称为%s 群简介为%s' % (
                    other_uid, s_type, str_msg['extra']['groups_name'], str_msg['extra']['groups_description'])
            elif contents == '':
                print '\033[1;32m 同步用户%s 会话类型为%s,消息类型为:%s' % (other_uid, s_type, m_type)
            else:
                print '\033[1;32m 同步用户 %s 会话类型为%s,消息类型为:%s 返回详情为:' % (
                    str_msg['profile']['name'], s_type, m_type)
                self.json_formt(str_msg)
        else:
            print '\033[1;32m %s 给您发来%s,消息类型为:%s 返回详情为:' % (name, s_type, m_type)
            self.json_formt(str_msg)
        if "is_robot" in self.message.keys():
            # Robot mode: decide whether this message addresses the robot.
            call_me = 0
            robot_name = self.message['robot_name']
            if s_type == "群消息":
                if "@" + robot_name in contents:
                    call_me = 1
                    contents = contents.replace("@" + robot_name, "")
                else:
                    import re
                    re_list = re.findall("@\((.*?)\)", contents)
                    if len(re_list) != 0 and robot_name in re_list[0]:
                        contents = contents.replace("@(" + re_list[0] + ")", "")
                        call_me = 1
            message_type = str_msg['type']
            try:
                # Ignore messages from this blacklisted uid.
                if other_uid in [5265956]:
                    return
                if message_type == 9:
                    # Group invitation: let the robot decide how to respond.
                    from robot_send_message import AutoReply
                    self.message = AutoReply(config=self.config, msg=str_msg,
                                             message=self.message).group_invitation()
                    if "message_send" in self.message.keys():
                        self.send_msg(robot=True)
                elif s_type == "私人消息" or call_me == 1:
                    # Ask the HTTP robot backend for a reply.
                    from ..setting import ROBOT_URL
                    import json
                    import requests
                    data = {
                        'contents': contents,
                        "uid": other_uid,
                        "profile": str_msg['profile'],
                        "type": message_type,
                        "session_type": session
                    }
                    post_data = json.dumps(data)
                    response = json.loads(requests.post(url=ROBOT_URL, data=post_data).content)
                    response_data = response['data']
                    text = response_data['contents']
                    if session == 3:
                        # Group session: prefix an @-mention of the sender.
                        from ..hashid import get_hashid
                        hashuid = get_hashid(int(str_msg['from']))
                        name = str_msg["profile"]["name"].decode('utf-8')
                        response_data['contents'] = u"@(name:%s,id:%s) " % (name, hashuid) + text
                    self.message['session_type'] = session
                    self.message = dict(self.message, **response_data)
                    keys = response_data.keys()
                    if 'special' not in keys and message_type in [1, 6]:
                        # Simulate typing latency proportional to reply length.
                        time.sleep(len(text) * 0.08)
                    self.send_msg(robot=True)
                    if 'image' in keys:
                        # Follow-up image message from the robot reply.
                        self.message['message_type'] = response_data['image']['message_type']
                        self.message['contents'] = response_data['image']['contents']
                        time.sleep(0.8)
                        self.send_msg(robot=True)
            except:
                # Best-effort robot behavior: never crash the receive loop.
                pass

    def return_req(self, push_data):
        """Handle a REQ response frame; on success optionally issue follow-up
        requests, then close the connection."""
        error_code = int(push_data[6], 2)
        if error_code == 0:
            bin_msg = ''.join(push_data[11:])
            str_msg = msgpack.unpackb(Convert().restore(bin_msg))
            if 'req_type' in str_msg.keys():
                req_type = Message().req_type_list(str_msg['req_type'])
                print '当前时间为:'+str(datetime.datetime.now())
                print "\033[1;32m REQ类型为%s 请求成功,body为:" % req_type
                if str_msg['req_type'] == 6:
                    # Fetch live-room information.
                    self.message['req_type'] = 8
                    self.send_msg()
                elif str_msg['req_type'] == 3:
                    self.message['req_type'] = 8
                    self.message['session_id'] = str_msg['session_id']
                    self.message['session_type'] = 4
                    self.send_msg()
                self.json_formt(str_msg)
        else:
            print "请求失败,失败原因为: %s" % Message().req_error_code(error_code)
        self.sign_out()
|
tello_video.py | import socket
import threading
import time
import numpy as np
import libh264decoder
class Tello:
"""Wrapper class to interact with the Tello drone."""
    def __init__(self, local_ip, local_port, imperial=False, command_timeout=.3, tello_ip='192.168.10.1',tello_port=8889):
        """
        Binds to the local IP/port and puts the Tello into command mode.

        Opens two UDP sockets (command and video), starts daemon threads for
        receiving command acks and video frames, and sends the 'command' /
        'streamon' handshake.

        :param local_ip (str): Local IP address to bind.
        :param local_port (int): Local port to bind.
        :param imperial (bool): If True, speed is MPH and distance is feet.
                                If False, speed is KPH and distance is meters.
        :param command_timeout (int|float): Number of seconds to wait for a response to a command.
        :param tello_ip (str): Tello IP.
        :param tello_port (int): Tello port.
        """
        self.abort_flag = False  # set by the command-timeout timer
        self.decoder = libh264decoder.H264Decoder()
        self.command_timeout = command_timeout
        self.imperial = imperial
        self.response = None  # last raw command response from the drone
        self.frame = None  # numpy array BGR -- current camera output frame
        self.is_freeze = False  # freeze current camera output
        self.last_frame = None
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # socket for sending cmd
        self.socket_video = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # socket for receiving video stream
        self.tello_address = (tello_ip, tello_port)
        self.local_video_port = 11111  # port for receiving video stream
        self.last_height = 0
        self.socket.bind((local_ip, local_port))

        # thread for receiving cmd ack
        self.receive_thread = threading.Thread(target=self._receive_thread)
        self.receive_thread.daemon = True
        self.receive_thread.start()

        # to receive video -- send cmd: command, streamon
        self.socket.sendto(b'command', self.tello_address)
        print ('sent: command')
        self.socket.sendto(b'streamon', self.tello_address)
        print ('sent: streamon')

        self.socket_video.bind((local_ip, self.local_video_port))

        # thread for receiving video
        self.receive_video_thread = threading.Thread(target=self._receive_video_thread)
        self.receive_video_thread.daemon = True
        self.receive_video_thread.start()
def __del__(self):
"""Closes the local socket."""
self.socket.close()
self.socket_video.close()
def read(self):
"""Return the last frame from camera."""
if self.is_freeze:
return self.last_frame
else:
return self.frame
def video_freeze(self, is_freeze=True):
"""Pause video output -- set is_freeze to True"""
self.is_freeze = is_freeze
if is_freeze:
self.last_frame = self.frame
    def _receive_thread(self):
        """Listen to responses from the Tello.

        Runs as a daemon thread; sets self.response to whatever the Tello
        last returned.  Socket errors are logged and the loop continues.
        """
        while True:
            try:
                self.response, ip = self.socket.recvfrom(3000)
                #print(self.response)
            except socket.error as exc:
                print ("Caught exception socket.error : %s" % exc)
    def _receive_video_thread(self):
        """
        Listens for video streaming (raw h264) from the Tello.

        Runs as a daemon thread; accumulates UDP packets into a buffer and,
        at the end of each frame, decodes it and stores the most recent
        frame in self.frame.
        """
        # NOTE(review): Python 2 semantics — recvfrom() returns a str here;
        # under Python 3 it returns bytes and this str concatenation would
        # raise.  Confirm the target interpreter.
        packet_data = ""
        while True:
            try:
                res_string, ip = self.socket_video.recvfrom(2048)
                packet_data += res_string
                # end of frame: a packet shorter than the full 1460-byte
                # payload marks the last chunk of a frame.
                if len(res_string) != 1460:
                    for frame in self._h264_decode(packet_data):
                        self.frame = frame
                    packet_data = ""
            except socket.error as exc:
                print ("Caught exception socket.error : %s" % exc)
    def _h264_decode(self, packet_data):
        """
        decode raw h264 format data from Tello

        :param packet_data: raw h264 data array
        :return: a list of decoded frames; each frame is a numpy array of
            shape (h, w, 3), cropped from the decoder's padded linesize.
        """
        res_frame_list = []
        frames = self.decoder.decode(packet_data)
        for framedata in frames:
            (frame, w, h, ls) = framedata
            if frame is not None:
                # print 'frame size %i bytes, w %i, h %i, linesize %i' % (len(frame), w, h, ls)
                # NOTE(review): np.fromstring is deprecated, and `ls / 3` is
                # integer division only under Python 2 — confirm interpreter.
                frame = np.fromstring(frame, dtype=np.ubyte, count=len(frame), sep='')
                frame = (frame.reshape((h, ls / 3, 3)))
                # Crop the per-line padding beyond the visible width.
                frame = frame[:, :w, :]
                res_frame_list.append(frame)
        return res_frame_list
    def send_command(self, command):
        """
        Send a command to the Tello and wait for a response.

        Busy-waits until the receive thread populates self.response or the
        command_timeout timer fires (via set_abort_flag).

        :param command: Command to send.
        :return (str): Response from Tello, or 'none_response' on timeout.
        """
        print (">> send cmd: {}".format(command))
        self.abort_flag = False
        timer = threading.Timer(self.command_timeout, self.set_abort_flag)
        self.socket.sendto(command.encode('utf-8'), self.tello_address)
        timer.start()
        # Busy-wait for the receive thread; abort_flag breaks on timeout.
        while self.response is None:
            if self.abort_flag is True:
                break
        timer.cancel()
        if self.response is None:
            response = 'none_response'
        else:
            response = self.response.decode('utf-8')
        # Clear the slot for the next command.
        self.response = None
        return response
    def set_abort_flag(self):
        """
        Sets self.abort_flag to True.

        Fired by the threading.Timer in Tello.send_command() to indicate
        that a response timeout has occurred.
        """
        self.abort_flag = True
    def takeoff(self):
        """
        Initiates take-off.

        Returns:
            str: Response from Tello, 'OK' or 'FALSE'.
        """
        return self.send_command('takeoff')
def set_speed(self, speed):
"""
Sets speed.
This method expects KPH or MPH. The Tello API expects speeds from
1 to 100 centimeters/second.
Metric: .1 to 3.6 KPH
Imperial: .1 to 2.2 MPH
Args:
speed (int|float): Speed.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
speed = float(speed)
if self.imperial is True:
speed = int(round(speed * 44.704))
else:
speed = int(round(speed * 27.7778))
return self.send_command('speed %s' % speed)
def rotate_cw(self, degrees):
"""
Rotates clockwise.
Args:
degrees (int): Degrees to rotate, 1 to 360.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command('cw %s' % degrees)
def rotate_ccw(self, degrees):
"""
Rotates counter-clockwise.
Args:
degrees (int): Degrees to rotate, 1 to 360.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command('ccw %s' % degrees)
def flip(self, direction):
"""
Flips.
Args:
direction (str): Direction to flip, 'l', 'r', 'f', 'b'.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command('flip %s' % direction)
def get_response(self):
"""
Returns response of tello.
Returns:
int: response of tello.
"""
response = self.response
return response
    def get_height(self):
        """Returns height(dm) of tello.

        Falls back to the last successfully read height when the response
        cannot be parsed.

        Returns:
            int: Height(dm) of tello.
        """
        height = self.send_command('height?')
        height = str(height)
        # NOTE(review): Python 2 semantics — filter() on a str returns the
        # digit characters as a str.  Under Python 3 this returns an iterator
        # and int() below always raises, so last_height is always returned;
        # confirm the target interpreter.
        height = filter(str.isdigit, height)
        try:
            height = int(height)
            self.last_height = height
        except:
            height = self.last_height
            pass
        return height
def get_battery(self):
"""Returns percent battery life remaining.
Returns:
int: Percent battery life remaining.
"""
battery = self.send_command('battery?')
try:
battery = int(battery)
except:
pass
return battery
def get_flight_time(self):
"""Returns the number of seconds elapsed during flight.
Returns:
int: Seconds elapsed during flight.
"""
flight_time = self.send_command('time?')
try:
flight_time = int(flight_time)
except:
pass
return flight_time
def get_speed(self):
"""Returns the current speed.
Returns:
int: Current speed in KPH or MPH.
"""
speed = self.send_command('speed?')
try:
speed = float(speed)
if self.imperial is True:
speed = round((speed / 44.704), 1)
else:
speed = round((speed / 27.7778), 1)
except:
pass
return speed
def land(self):
"""Initiates landing.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.send_command('land')
def move(self, direction, distance):
"""Moves in a direction for a distance.
This method expects meters or feet. The Tello API expects distances
from 20 to 500 centimeters.
Metric: .02 to 5 meters
Imperial: .7 to 16.4 feet
Args:
direction (str): Direction to move, 'forward', 'back', 'right' or 'left'.
distance (int|float): Distance to move.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
distance = float(distance)
if self.imperial is True:
distance = int(round(distance * 30.48))
else:
distance = int(round(distance * 100))
return self.send_command('%s %s' % (direction, distance))
def move_backward(self, distance):
"""Moves backward for a distance.
See comments for Tello.move().
Args:
distance (int): Distance to move.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('back', distance)
def move_down(self, distance):
"""Moves down for a distance.
See comments for Tello.move().
Args:
distance (int): Distance to move.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('down', distance)
def move_forward(self, distance):
"""Moves forward for a distance.
See comments for Tello.move().
Args:
distance (int): Distance to move.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('forward', distance)
def move_left(self, distance):
"""Moves left for a distance.
See comments for Tello.move().
Args:
distance (int): Distance to move.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('left', distance)
def move_right(self, distance):
"""Moves right for a distance.
See comments for Tello.move().
Args:
distance (int): Distance to move.
"""
return self.move('right', distance)
def move_up(self, distance):
"""Moves up for a distance.
See comments for Tello.move().
Args:
distance (int): Distance to move.
Returns:
str: Response from Tello, 'OK' or 'FALSE'.
"""
return self.move('up', distance)
|
codec.py | #
# Copyright (c) 2016-2017, The OpenThread Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Module providing a Spienl coder / decoder class.
"""
import binascii
import time
import logging
import threading
import traceback
import queue
import importlib
from struct import pack
from struct import unpack
from collections import namedtuple
from collections import defaultdict
import ipaddress
import spinel.util as util
import spinel.config as CONFIG
from spinel.const import kThread
from spinel.const import SPINEL
from spinel.const import SPINEL_LAST_STATUS_MAP
from spinel.hdlc import Hdlc
FEATURE_USE_HDLC = 1  # Default: wrap Spinel frames in HDLC-lite framing.
TIMEOUT_PROP = 5  # Default seconds to wait for a property response.
#=========================================
# SpinelCodec
#=========================================
# 0: DATATYPE_NULL
#'.': DATATYPE_VOID: Empty data type. Used internally.
#'b': DATATYPE_BOOL: Boolean value. Encoded in 8-bits as either 0x00 or 0x01.
# All other values are illegal.
#'C': DATATYPE_UINT8: Unsigned 8-bit integer.
#'c': DATATYPE_INT8: Signed 8-bit integer.
#'S': DATATYPE_UINT16: Unsigned 16-bit integer. (Little-endian)
#'s': DATATYPE_INT16: Signed 16-bit integer. (Little-endian)
#'L': DATATYPE_UINT32: Unsigned 32-bit integer. (Little-endian)
#'l': DATATYPE_INT32: Signed 32-bit integer. (Little-endian)
#'i': DATATYPE_UINT_PACKED: Packed Unsigned Integer. (See section 7.2)
#'6': DATATYPE_IPv6ADDR: IPv6 Address. (Big-endian)
#'E': DATATYPE_EUI64: EUI-64 Address. (Big-endian)
#'e': DATATYPE_EUI48: EUI-48 Address. (Big-endian)
#'D': DATATYPE_DATA: Arbitrary Data. (See section 7.3)
#'d': DATATYPE_DATA_WLEN: Arbitrary Data with Prepended Length. (See section 7.3)
#'U': DATATYPE_UTF8: Zero-terminated UTF8-encoded string.
#'t': DATATYPE_STRUCT: Structured datatype. Compound type. Length prepended. (See section 7.4)
#'A': DATATYPE_ARRAY: Array of datatypes. Compound type. (See section 7.5)
class SpinelCodec(object):
    """ A general coder / decoder class for Spinel protocol.

    parse_<code> classmethods decode the head of a payload for the
    corresponding Spinel datatype (see the legend above); encode_<code>
    classmethods produce the wire bytes for a single value.
    """

    @classmethod
    def parse_b(cls, payload):
        """Parse a boolean (one byte, 0x00/0x01) as an int."""
        return unpack("<B", payload[:1])[0]

    @classmethod
    def parse_c(cls, payload):
        """Parse a signed 8-bit integer."""
        return unpack("<b", payload[:1])[0]

    @classmethod
    def parse_C(cls, payload):
        """Parse an unsigned 8-bit integer."""
        return unpack("<B", payload[:1])[0]

    @classmethod
    def parse_s(cls, payload):
        """Parse a signed 16-bit little-endian integer."""
        return unpack("<h", payload[:2])[0]

    @classmethod
    def parse_S(cls, payload):
        """Parse an unsigned 16-bit little-endian integer."""
        return unpack("<H", payload[:2])[0]

    @classmethod
    def parse_l(cls, payload):
        """Parse a signed 32-bit little-endian integer."""
        return unpack("<l", payload[:4])[0]

    @classmethod
    def parse_L(cls, payload):
        """Parse an unsigned 32-bit little-endian integer."""
        return unpack("<L", payload[:4])[0]

    @classmethod
    def parse_X(cls, payload):
        """Parse an unsigned 64-bit little-endian integer."""
        return unpack("<Q", payload[:8])[0]

    @classmethod
    def parse_6(cls, payload):
        """Parse an IPv6 address (16 raw bytes)."""
        return payload[:16]

    @classmethod
    def parse_E(cls, payload):
        """Parse an EUI-64 address (8 raw bytes)."""
        return payload[:8]

    @classmethod
    def parse_e(cls, payload):
        """Parse an EUI-48 address (6 raw bytes)."""
        return payload[:6]

    @classmethod
    def parse_U(cls, payload):
        """Parse a zero-terminated UTF-8 string (terminator stripped).

        NOTE(review): decodes the *entire* payload before locating the
        terminator, so trailing non-UTF-8 bytes would raise — confirm
        callers always pass a text-only payload tail.
        """
        payload = payload.decode('utf-8')
        nullchar = '\0'
        if payload.find(nullchar) >= 0:
            return payload[:payload.index(nullchar)]  # strip null
        else:
            return payload

    @classmethod
    def parse_D(cls, payload):
        """Parse arbitrary data: the rest of the payload, as-is."""
        return payload

    @classmethod
    def parse_d(cls, payload):
        """Parse arbitrary data prefixed with a 16-bit length word."""
        return payload[2:2 + unpack("<H", payload[:2])[0]]

    @classmethod
    def parse_i(cls, payload):
        """ Decode EXI integer format.

        Returns:
            tuple: (value, number of bytes consumed).
        """
        value = 0
        value_len = 0
        value_mul = 1
        while value_len < 4:
            byte = payload[value_len]
            value += (byte & 0x7F) * value_mul
            if byte < 0x80:
                break
            value_mul *= 0x80
            value_len += 1
        return (value, value_len + 1)

    @classmethod
    def parse_i_len(cls, payload):
        """ Decode length of EXI integer format. """
        return cls.parse_i(payload)[1]

    @classmethod
    def index_of_ending_brace(cls, spinel_format, idx):
        """ Determines the index of the matching closing brace.

        Raises:
            ValueError: if the parentheses are unbalanced.
        """
        count = 1
        while count > 0 and idx < len(spinel_format) - 1:
            idx += 1
            if spinel_format[idx] == ')':
                count -= 1
            elif spinel_format[idx] == '(':
                count += 1
        if count != 0:
            # Fix: idx is an int; concatenating it to a str raised
            # TypeError instead of the intended ValueError.
            raise ValueError('Unbalanced parenthesis in format string "' +
                             spinel_format + '", idx=' + str(idx))
        return idx

    @classmethod
    def parse_field(cls, payload, spinel_format):
        """Parse a single field; returns None for an unknown format code."""
        map_decode = {
            'b': cls.parse_b,
            'c': cls.parse_c,
            'C': cls.parse_C,
            's': cls.parse_s,
            'S': cls.parse_S,
            'L': cls.parse_L,
            'l': cls.parse_l,
            '6': cls.parse_6,
            'X': cls.parse_X,
            'E': cls.parse_E,
            'e': cls.parse_e,
            'U': cls.parse_U,
            'D': cls.parse_D,
            'd': cls.parse_d,
            'i': cls.parse_i,
        }
        try:
            return map_decode[spinel_format[0]](payload)
        except KeyError:
            print(traceback.format_exc())
            return None

    @classmethod
    def get_payload_size(cls, payload, spinel_format):
        """Return the number of bytes `spinel_format` consumes from payload."""
        map_lengths = {
            'b': 1,
            'c': 1,
            'C': 1,
            's': 2,
            'S': 2,
            'l': 4,
            'L': 4,
            '6': 16,
            'X': 8,
            'E': 8,
            'e': 6,
        }
        result = 0
        idx = 0
        while idx < len(spinel_format):
            fmt = spinel_format[idx]  # renamed from `format` (shadowed builtin)
            if fmt == 't':
                if spinel_format[idx + 1] != '(':
                    raise ValueError('Invalid structure format')
                struct_end = cls.index_of_ending_brace(spinel_format, idx + 1)
                # Structures carry their own 16-bit length prefix.
                result += 2 + cls.parse_S(payload[result:])
                idx = struct_end + 1
            elif fmt == 'd':
                result += 2 + cls.parse_S(payload[result:])
                idx += 1
            elif fmt == 'D' or fmt == 'A':
                if idx != len(spinel_format) - 1:
                    raise ValueError('Invalid type syntax for "' + fmt +
                                     '", must go at end of format string')
                result = len(payload)
                idx += 1
            elif fmt == 'U':
                # Zero-terminated string: consume up to and including NUL.
                result += payload[result:].index(0) + 1
                idx += 1
            elif fmt == 'i':
                result += cls.parse_i_len(payload[result:])
                idx += 1
            else:
                result += map_lengths[fmt]
                idx += 1
        return result

    @classmethod
    def parse_fields(cls, payload, spinel_format):
        """Parse a whole format string; returns the decoded values as a tuple."""
        result = []
        idx = 0
        while idx < len(spinel_format):
            fmt = spinel_format[idx]  # renamed from `format` (shadowed builtin)
            if fmt == 'A':
                if spinel_format[idx + 1] != '(':
                    raise ValueError('Invalid structure format')
                array_end = cls.index_of_ending_brace(spinel_format, idx + 1)
                array_format = spinel_format[idx + 2:array_end]
                # Arrays repeat their element format until payload runs out.
                array = []
                while len(payload):
                    array.append(cls.parse_fields(payload, array_format))
                    payload = payload[cls.
                                      get_payload_size(payload, array_format):]
                result.append(tuple(array))
                idx = array_end + 1
            elif fmt == 't':
                if spinel_format[idx + 1] != '(':
                    raise ValueError('Invalid structure format')
                struct_end = cls.index_of_ending_brace(spinel_format, idx + 1)
                struct_format = spinel_format[idx + 2:struct_end]
                struct_len = cls.parse_S(payload)
                result.append(
                    cls.parse_fields(payload[2:struct_len + 2], struct_format))
                payload = payload[struct_len + 2:]
                idx = struct_end + 1
            else:
                result.append(cls.parse_field(payload, fmt))
                payload = payload[cls.get_payload_size(payload, fmt):]
                idx += 1
        return tuple(result)

    @classmethod
    def encode_i(cls, data):
        """ Encode EXI integer format (7 bits per byte, MSB = continuation). """
        result = bytes()
        while data:
            value = data & 0x7F
            data >>= 7
            if data:
                value |= 0x80
            result = result + pack("<B", value)
        return result

    @classmethod
    def encode_b(cls, value):
        """Encode a boolean as one byte."""
        return pack('B', value)

    @classmethod
    def encode_c(cls, value):
        # NOTE(review): uses unsigned 'B' for a *signed* int8 field, so
        # negative values raise struct.error — confirm intended.
        return pack('B', value)

    @classmethod
    def encode_C(cls, value):
        """Encode an unsigned 8-bit integer."""
        return pack('B', value)

    @classmethod
    def encode_s(cls, value):
        """Encode a signed 16-bit little-endian integer."""
        return pack('<h', value)

    @classmethod
    def encode_S(cls, value):
        """Encode an unsigned 16-bit little-endian integer."""
        return pack('<H', value)

    @classmethod
    def encode_l(cls, value):
        """Encode a signed 32-bit little-endian integer."""
        return pack('<l', value)

    @classmethod
    def encode_L(cls, value):
        """Encode an unsigned 32-bit little-endian integer."""
        return pack('<L', value)

    @classmethod
    def encode_6(cls, value):
        """Encode an IPv6 address (first 16 bytes)."""
        return value[:16]

    @classmethod
    def encode_E(cls, value):
        """Encode an EUI-64 address (first 8 bytes)."""
        return value[:8]

    @classmethod
    def encode_e(cls, value):
        """Encode an EUI-48 address (first 6 bytes)."""
        return value[:6]

    @classmethod
    def encode_U(cls, value):
        """Encode a zero-terminated UTF-8 string.

        Fix: return bytes, not str — encode_fields() concatenates results
        onto a bytes buffer, so the original str return raised TypeError.
        """
        return value.encode('utf-8') + b'\0'

    @classmethod
    def encode_D(cls, value):
        """Encode arbitrary data as-is."""
        return value

    @classmethod
    def encode_d(cls, value):
        """Encode arbitrary data with a 16-bit length prefix."""
        return cls.encode_S(len(value)) + value

    @classmethod
    def encode_field(cls, code, value):
        """Encode a single field; returns None for an unknown format code."""
        map_encode = {
            'b': cls.encode_b,
            'c': cls.encode_c,
            'C': cls.encode_C,
            's': cls.encode_s,
            'S': cls.encode_S,
            'L': cls.encode_L,
            'l': cls.encode_l,
            '6': cls.encode_6,
            'E': cls.encode_E,
            'e': cls.encode_e,
            'U': cls.encode_U,
            'D': cls.encode_D,
            'd': cls.encode_d,
            'i': cls.encode_i,
        }
        try:
            return map_encode[code](value)
        except KeyError:
            print(traceback.format_exc())
            return None

    def next_code(self, spinel_format):
        """Pop the next format code off a format string."""
        code = spinel_format[0]
        spinel_format = spinel_format[1:]
        # TODO: Handle T() and A()
        return code, spinel_format

    def encode_fields(self, spinel_format, *fields):
        """Encode the given values per the format string, concatenated."""
        packed = bytes()
        for field in fields:
            code, spinel_format = self.next_code(spinel_format)
            if not code:
                break
            packed += self.encode_field(code, field)
        return packed

    def encode_packet(self,
                      command_id,
                      payload=bytes(),
                      tid=SPINEL.HEADER_DEFAULT):
        """ Encode the given payload as a Spinel frame. """
        header = pack(">B", tid)
        cmd = self.encode_i(command_id)
        pkt = header + cmd + payload
        return pkt
#=========================================
class SpinelPropertyHandler(SpinelCodec):
    """Per-property decoders: each method decodes one Spinel property payload.

    Method names match the entries in SPINEL_PROP_DISPATCH; they are looked
    up by property id and invoked as handler(wpan_api, payload).
    """

    def LAST_STATUS(self, _, payload):
        return self.parse_i(payload)[0]

    def PROTOCOL_VERSION(self, _wpan_api, payload):
        return self.parse_U(payload)

    def NCP_VERSION(self, _, payload):
        return self.parse_U(payload)

    def INTERFACE_TYPE(self, _, payload):
        return self.parse_i(payload)[0]

    def HWADDR(self, _, payload):
        return self.parse_E(payload)

    def PHY_CCA_THRESHOLD(self, _, payload):
        return self.parse_c(payload)

    def PHY_TX_POWER(self, _, payload):
        return self.parse_c(payload)

    def MAC_15_4_PANID(self, _, payload):
        return self.parse_S(payload)

    def NET_IF_UP(self, _, payload):
        return self.parse_b(payload)

    def NET_STACK_UP(self, _, payload):
        return self.parse_C(payload)

    def NET_ROLE(self, _, payload):
        return self.parse_C(payload)

    def NET_NETWORK_NAME(self, _, payload):
        return self.parse_U(payload)

    def PROP_PHY_REGION(self, _, payload):
        return self.parse_C(payload)

    def PROP_PHY_MODE_ID(self, _, payload):
        return self.parse_C(payload)

    def PROP_PHY_UNICAST_CHANNEL_LIST(self, _, payload):
        return self.parse_D(payload)

    def PROP_PHY_BROADCAST_CHANNEL_LIST(self, _, payload):
        return self.parse_D(payload)

    def PROP_PHY_ASYNC_CHANNEL_LIST(self, _, payload):
        return self.parse_D(payload)

    def PROP_NET_STATE(self, _, payload):
        return self.parse_C(payload)

    def PROP_PARENT_LIST(self, _, payload):
        return self.parse_D(payload)

    def PROP_ROUTING_COST(self, _, payload):
        return self.parse_C(payload)

    def PROP_ROUTING_TABLE_UPDATE(self, _, payload):
        return self.parse_D(payload)

    def PROP_DODAG_ROUTE(self, _, payload):
        return self.parse_D(payload)

    def PROP_PHY_CH_SPACING(self, _, payload):
        return self.parse_D(payload)

    def PROP_PHY_CHO_CENTER_FREQ(self, _, payload):
        return self.parse_D(payload)

    def PROP_MAC_UC_DWELL_INTERVAL(self, _, payload):
        return self.parse_D(payload)

    def PROP_MAC_BC_DWELL_INTERVAL(self, _, payload):
        return self.parse_D(payload)

    def PROP_MAC_BC_INTERVAL(self, _, payload):
        return self.parse_D(payload)

    def PROP_MAC_UC_CHANNEL_FUNCTION(self, _, payload):
        return self.parse_D(payload)

    def PROP_MAC_BC_CHANNEL_FUNCTION(self, _, payload):
        return self.parse_D(payload)

    def PROP_MAC_MAC_FILTER_LIST(self, _, payload):
        return self.parse_D(payload)

    def PROP_MAC_FILTER_MODE(self, _, payload):
        return self.parse_C(payload)

    def PROP_REVOKE_GTK_HWADDR(self, _, payload):
        return self.parse_E(payload)

    def __init__(self):
        self.autoAddresses = set()
        self.wpan_api = None
        # Prefix-change events are handled off the reader thread via a queue.
        self.__queue_prefix = queue.Queue()
        self.prefix_thread = threading.Thread(target=self.__run_prefix_handler)
        # Fix: Thread.setDaemon() is deprecated; assign the attribute.
        self.prefix_thread.daemon = True
        self.prefix_thread.start()

    def handle_prefix_change(self, payload):
        """ Automatically ipaddr add / remove addresses for each new prefix. """
        # As done by cli.cpp Interpreter::HandleNetifStateChanged
        # First parse payload and extract slaac prefix information.
        pay = payload
        Prefix = namedtuple("Prefix", "prefix prefixlen stable flags is_local")
        prefixes = []
        slaacPrefixSet = set()
        while len(pay) >= 22:
            # Fix: the original `(_structlen) = unpack(...)` parens did not
            # make a tuple; unpack the single value directly.
            (struct_len,) = unpack('<H', pay[:2])
            pay = pay[2:]
            prefix = Prefix(*unpack('16sBBBB', pay[:20]))
            if prefix.flags & kThread.PrefixSlaacFlag:
                net6 = ipaddress.IPv6Network(prefix.prefix)
                net6 = net6.supernet(new_prefix=prefix.prefixlen)
                slaacPrefixSet.add(net6)
            # NOTE(review): `prefixes` is collected but only slaacPrefixSet
            # is reported below — confirm whether prefixes is still needed.
            prefixes.append(prefix)
            pay = pay[struct_len:]
        if CONFIG.DEBUG_LOG_PROP:
            print("\n========= PREFIX ============")
            print("ipaddrs: " + str(self.autoAddresses))
            print("slaac prefix set: " + str(slaacPrefixSet))
            print("==============================\n")

    def __run_prefix_handler(self):
        # Daemon worker: drain the prefix queue forever.
        while True:
            (wpan_api, payload) = self.__queue_prefix.get(True)
            self.wpan_api = wpan_api
            self.handle_prefix_change(payload)
            self.__queue_prefix.task_done()

    def DODAG_ROUTE_DEST(self, _, payload):
        return self.parse_D(payload)

    def DODAG_ROUTE(self, _, payload):
        return self.parse_D(payload)

    def NUM_CONNECTED_DEVICES(self, _, payload):
        return self.parse_S(payload)

    def CONNECTED_DEVICES(self, _, payload):
        return self.parse_D(payload)

    def IPV6_ADDRESS_TABLE(self, _, payload):
        return self.parse_D(payload)

    def MULTICAST_LIST(self, _, payload):
        return self.parse_D(payload)

    def STREAM_NET(self, _, payload):
        return self.parse_d(payload)
#=========================================
class SpinelCommandHandler(SpinelCodec):
    """Dispatches PROP_VALUE_* response frames to per-property handlers."""

    def handle_prop(self, wpan_api, name, payload, tid):
        """Decode the property id, run its handler, queue the decoded value."""
        (prop_id, prop_len) = self.parse_i(payload)
        if prop_id not in SPINEL_PROP_DISPATCH:
            # Unknown property: optionally log and bail out.
            if CONFIG.DEBUG_LOG_PROP:
                prop_name = "Property Unknown"
                CONFIG.LOGGER.info("\n%s (%i): ", prop_name, prop_id)
            return
        handler = SPINEL_PROP_DISPATCH[prop_id]
        prop_name = handler.__name__
        prop_value = handler(wpan_api, payload[prop_len:])
        if CONFIG.DEBUG_LOG_PROP:
            # Generic output: hexify string values, str() everything else.
            if isinstance(prop_value, str):
                prop_value_str = util.hexify_str(prop_value)
            else:
                prop_value_str = str(prop_value)
            CONFIG.LOGGER.debug("PROP_VALUE_%s [tid=%d]: %s = %s", name,
                                (tid & 0xF), prop_name, prop_value_str)
            # Extend output for certain properties.
            if prop_id == SPINEL.PROP_LAST_STATUS:
                CONFIG.LOGGER.debug(SPINEL_LAST_STATUS_MAP[prop_value])
        if CONFIG.DEBUG_LOG_PKT:
            if prop_id in (SPINEL.PROP_STREAM_NET,
                           SPINEL.PROP_STREAM_NET_INSECURE):
                CONFIG.LOGGER.debug("PROP_VALUE_" + name + ": " + prop_name)
            elif prop_id == SPINEL.PROP_STREAM_DEBUG:
                CONFIG.LOGGER.debug("DEBUG: " + prop_value)
        if wpan_api:
            wpan_api.queue_add(prop_id, prop_value, tid)
        else:
            print("no wpan_api")

    def PROP_VALUE_IS(self, wpan_api, payload, tid):
        self.handle_prop(wpan_api, "IS", payload, tid)

    def PROP_VALUE_INSERTED(self, wpan_api, payload, tid):
        self.handle_prop(wpan_api, "INSERTED", payload, tid)

    def PROP_VALUE_REMOVED(self, wpan_api, payload, tid):
        self.handle_prop(wpan_api, "REMOVED", payload, tid)
# Singleton handlers shared by all WpanApi instances.
WPAN_CMD_HANDLER = SpinelCommandHandler()

# Maps response command id -> bound handler method.
SPINEL_COMMAND_DISPATCH = {
    SPINEL.RSP_PROP_VALUE_IS: WPAN_CMD_HANDLER.PROP_VALUE_IS,
    SPINEL.RSP_PROP_VALUE_INSERTED: WPAN_CMD_HANDLER.PROP_VALUE_INSERTED,
    SPINEL.RSP_PROP_VALUE_REMOVED: WPAN_CMD_HANDLER.PROP_VALUE_REMOVED,
}
WPAN_PROP_HANDLER = SpinelPropertyHandler()

# Maps property id -> bound decoder method on WPAN_PROP_HANDLER.
SPINEL_PROP_DISPATCH = {
    SPINEL.PROP_LAST_STATUS:
        WPAN_PROP_HANDLER.LAST_STATUS,
    SPINEL.PROP_PROTOCOL_VERSION:
        WPAN_PROP_HANDLER.PROTOCOL_VERSION,
    SPINEL.PROP_NCP_VERSION:
        WPAN_PROP_HANDLER.NCP_VERSION,
    SPINEL.PROP_INTERFACE_TYPE:
        WPAN_PROP_HANDLER.INTERFACE_TYPE,
    SPINEL.PROP_HWADDR:
        WPAN_PROP_HANDLER.HWADDR,
    SPINEL.PROP_PHY_CCA_THRESHOLD:
        WPAN_PROP_HANDLER.PHY_CCA_THRESHOLD,
    SPINEL.PROP_PHY_TX_POWER:
        WPAN_PROP_HANDLER.PHY_TX_POWER,
    SPINEL.PROP_MAC_15_4_PANID:
        WPAN_PROP_HANDLER.MAC_15_4_PANID,
    SPINEL.PROP_NET_IF_UP:
        WPAN_PROP_HANDLER.NET_IF_UP,
    SPINEL.PROP_NET_STACK_UP:
        WPAN_PROP_HANDLER.NET_STACK_UP,
    SPINEL.PROP_NET_ROLE:
        WPAN_PROP_HANDLER.NET_ROLE,
    SPINEL.PROP_NET_NETWORK_NAME:
        WPAN_PROP_HANDLER.NET_NETWORK_NAME,
    SPINEL.PROP_PHY_REGION:
        WPAN_PROP_HANDLER.PROP_PHY_REGION,
    SPINEL.PROP_PHY_MODE_ID:
        WPAN_PROP_HANDLER.PROP_PHY_MODE_ID,
    SPINEL.PROP_PHY_UNICAST_CHANNEL_LIST:
        WPAN_PROP_HANDLER.PROP_PHY_UNICAST_CHANNEL_LIST,
    SPINEL.PROP_PHY_BROADCAST_CHANNEL_LIST:
        WPAN_PROP_HANDLER.PROP_PHY_BROADCAST_CHANNEL_LIST,
    SPINEL.PROP_PHY_ASYNC_CHANNEL_LIST:
        WPAN_PROP_HANDLER.PROP_PHY_ASYNC_CHANNEL_LIST,
    SPINEL.PROP_NET_STATE:
        WPAN_PROP_HANDLER.PROP_NET_STATE,
    SPINEL.PROP_PARENT_LIST:
        WPAN_PROP_HANDLER.PROP_PARENT_LIST,
    SPINEL.PROP_ROUTING_COST:
        WPAN_PROP_HANDLER.PROP_ROUTING_COST,
    SPINEL.PROP_ROUTING_TABLE_UPDATE:
        WPAN_PROP_HANDLER.PROP_ROUTING_TABLE_UPDATE,
    # NOTE(review): SPINEL.PROP_DODAG_ROUTE appears twice in this literal
    # (here and below); the later DODAG_ROUTE entry silently wins.
    SPINEL.PROP_DODAG_ROUTE:
        WPAN_PROP_HANDLER.PROP_DODAG_ROUTE,
    SPINEL.PROP_PHY_CH_SPACING:
        WPAN_PROP_HANDLER.PROP_PHY_CH_SPACING,
    SPINEL.PROP_PHY_CHO_CENTER_FREQ:
        WPAN_PROP_HANDLER.PROP_PHY_CHO_CENTER_FREQ,
    SPINEL.PROP_MAC_UC_DWELL_INTERVAL:
        WPAN_PROP_HANDLER.PROP_MAC_UC_DWELL_INTERVAL,
    SPINEL.PROP_MAC_BC_DWELL_INTERVAL:
        WPAN_PROP_HANDLER.PROP_MAC_BC_DWELL_INTERVAL,
    SPINEL.PROP_MAC_BC_INTERVAL:
        WPAN_PROP_HANDLER.PROP_MAC_BC_INTERVAL,
    SPINEL.PROP_MAC_UC_CHANNEL_FUNCTION:
        WPAN_PROP_HANDLER.PROP_MAC_UC_CHANNEL_FUNCTION,
    SPINEL.PROP_MAC_BC_CHANNEL_FUNCTION:
        WPAN_PROP_HANDLER.PROP_MAC_BC_CHANNEL_FUNCTION,
    SPINEL.PROP_MAC_MAC_FILTER_LIST:
        WPAN_PROP_HANDLER.PROP_MAC_MAC_FILTER_LIST,
    SPINEL.PROP_MAC_FILTER_MODE:
        WPAN_PROP_HANDLER.PROP_MAC_FILTER_MODE,
    SPINEL.PROP_DODAG_ROUTE_DEST:
        WPAN_PROP_HANDLER.DODAG_ROUTE_DEST,
    SPINEL.PROP_DODAG_ROUTE:
        WPAN_PROP_HANDLER.DODAG_ROUTE,
    SPINEL.PROP_NUM_CONNECTED_DEVICES:
        WPAN_PROP_HANDLER.NUM_CONNECTED_DEVICES,
    SPINEL.PROP_CONNECTED_DEVICES:
        WPAN_PROP_HANDLER.CONNECTED_DEVICES,
    SPINEL.PROP_REVOKE_GTK_HWADDR:
        WPAN_PROP_HANDLER.PROP_REVOKE_GTK_HWADDR,
    SPINEL.PROP_IPV6_ADDRESS_TABLE:
        WPAN_PROP_HANDLER.IPV6_ADDRESS_TABLE,
    SPINEL.PROP_MULTICAST_LIST:
        WPAN_PROP_HANDLER.MULTICAST_LIST,
    SPINEL.PROP_STREAM_NET:
        WPAN_PROP_HANDLER.STREAM_NET,
}
class WpanApi(SpinelCodec):
    """ Helper class to format wpan command packets.

    Owns the stream to the NCP, a background reader thread that parses
    incoming Spinel frames, and per-tid queues of decoded property values.
    """

    def __init__(self,
                 stream,
                 nodeid,
                 use_hdlc=FEATURE_USE_HDLC,
                 timeout=TIMEOUT_PROP,
                 vendor_module=None):
        self.stream = stream
        self.nodeid = nodeid
        self.timeout = timeout  # default seconds for queue_wait_for_prop
        self.use_hdlc = use_hdlc
        if self.use_hdlc:
            self.hdlc = Hdlc(self.stream)

        if vendor_module:
            # Hook vendor properties into the shared dispatch table.
            try:
                codec = importlib.import_module(vendor_module + '.codec')
                SPINEL_PROP_DISPATCH.update(codec.VENDOR_SPINEL_PROP_DISPATCH)
            except ImportError:
                # Vendor package has no codec submodule: nothing to hook.
                pass

        # PARSER state
        self.rx_pkt = []
        self.callback = defaultdict(list)  # Map prop_id to list of callbacks.

        # Fire up threads
        self._reader_alive = True
        self.tid_filter = set()  # tids whose responses we keep.
        self.__queue_prop = defaultdict(queue.Queue)  # Map tid to Queue.
        self.queue_register()
        self.__start_reader()

    def __del__(self):
        self._reader_alive = False

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._reader_alive = False

    def __start_reader(self):
        """Start reader thread"""
        self._reader_alive = True
        # start serial->console thread
        self.receiver_thread = threading.Thread(target=self.stream_rx)
        # Fix: Thread.setDaemon() is deprecated; assign the attribute.
        self.receiver_thread.daemon = True
        self.receiver_thread.start()

    def transact(self, command_id, payload=bytes(), tid=SPINEL.HEADER_DEFAULT):
        """Encode and send one Spinel command frame (optionally HDLC-framed)."""
        pkt = self.encode_packet(command_id, payload, tid)
        if CONFIG.DEBUG_LOG_SERIAL:
            msg = "TX Pay: (%i) %s " % (len(pkt),
                                        binascii.hexlify(pkt).decode('utf-8'))
            CONFIG.LOGGER.debug(msg)
        if self.use_hdlc:
            pkt = self.hdlc.encode(pkt)
        self.stream_tx(pkt)

    def parse_rx(self, pkt):
        """Parse one received frame and dispatch it to the command handler."""
        if not pkt:
            return
        if CONFIG.DEBUG_LOG_SERIAL:
            msg = "RX Pay: (%i) %s " % (len(pkt),
                                        binascii.hexlify(pkt).decode('utf-8'))
            CONFIG.LOGGER.debug(msg)

        # A frame needs at least a header byte and a command byte.
        length = len(pkt) - 2
        if length < 0:
            return

        spkt = pkt
        tid = self.parse_C(spkt[:1])
        (cmd_id, cmd_length) = self.parse_i(spkt[1:])
        pay_start = cmd_length + 1
        payload = spkt[pay_start:]
        try:
            handler = SPINEL_COMMAND_DISPATCH[cmd_id]
            cmd_name = handler.__name__
            handler(self, payload, tid)
        except Exception as _ex:
            # Unknown command id or a handler failure: log and keep reading.
            print(traceback.format_exc())
            cmd_name = "CB_Unknown"
            CONFIG.LOGGER.info("\n%s (%i): ", cmd_name, cmd_id)
        if CONFIG.DEBUG_CMD_RESPONSE:
            CONFIG.LOGGER.info("\n%s (%i): ", cmd_name, cmd_id)
            CONFIG.LOGGER.info("===> %s",
                               binascii.hexlify(payload).decode('utf-8'))

    def stream_tx(self, pkt):
        # Encapsulate lagging and Framer support in self.stream class.
        self.stream.write(pkt)

    def stream_rx(self):
        """ Receive thread and parser. """
        try:
            while self._reader_alive:
                if self.use_hdlc:
                    self.rx_pkt = self.hdlc.collect()
                else:
                    # size=None: Assume stream will always deliver packets
                    pkt = self.stream.read(None)
                    self.rx_pkt = util.packed_to_array(pkt)
                self.parse_rx(self.rx_pkt)
        except:
            # Bare except is deliberate: any stream error during shutdown
            # (reader no longer alive) is silently ignored; otherwise re-raise.
            if self._reader_alive:
                raise
            else:
                # Ignore the error since we are exiting
                pass

    class PropertyItem(object):
        """ Queue item for NCP response to property commands. """

        def __init__(self, prop, value, tid):
            self.prop = prop
            self.value = value
            self.tid = tid

    def callback_register(self, prop, cb):
        """Register an async consumer for a property (may swallow items)."""
        self.callback[prop].append(cb)

    def queue_register(self, tid=SPINEL.HEADER_DEFAULT):
        """Start keeping responses for `tid`; returns the tid's queue."""
        self.tid_filter.add(tid)
        return self.__queue_prop[tid]

    def queue_wait_prepare(self, _prop_id, tid=SPINEL.HEADER_DEFAULT):
        """Drop any stale responses before issuing a new blocking command."""
        self.queue_clear(tid)

    def queue_add(self, prop, value, tid):
        """Deliver a decoded property to callbacks and/or the tid queue."""
        cb_list = self.callback[prop]
        # Asynchronous handlers can consume message and not add to queue.
        if len(cb_list) > 0:
            consumed = cb_list[0](prop, value, tid)
            if consumed:
                return
        if tid not in self.tid_filter:
            return
        item = self.PropertyItem(prop, value, tid)
        self.__queue_prop[tid].put_nowait(item)

    def queue_clear(self, tid):
        # Reaches into Queue internals under its mutex to flush atomically.
        with self.__queue_prop[tid].mutex:
            self.__queue_prop[tid].queue.clear()

    def queue_get(self, tid, timeout=None):
        """Pop one response for `tid`; returns None when empty / timed out."""
        try:
            if timeout:
                item = self.__queue_prop[tid].get(True, timeout)
            else:
                item = self.__queue_prop[tid].get_nowait()
        except queue.Empty:
            item = None
        return item

    def queue_wait_for_prop(self,
                            _prop,
                            tid=SPINEL.HEADER_DEFAULT,
                            timeout=None):
        """Block until a response for `_prop` arrives (or timeout elapses).

        Non-matching responses received meanwhile are preserved in order.
        Returns the matching PropertyItem or None.
        """
        if _prop is None:
            return None

        if timeout is None:
            timeout = self.timeout

        processed_queue = queue.Queue()
        timeout_time = time.time() + timeout

        while time.time() < timeout_time:
            item = self.queue_get(tid, timeout_time - time.time())
            if item is None:
                continue
            if item.prop == _prop:
                break
            # Not ours: set aside and keep waiting.
            processed_queue.put_nowait(item)
        else:
            item = None

        # To make sure that all received properties will be processed in the same order.
        # Uses the queue's private _get/_put under its mutex so no reader can
        # interleave while we splice the set-aside items back to the front.
        with self.__queue_prop[tid].mutex:
            while self.__queue_prop[tid]._qsize() > 0:
                processed_queue.put(self.__queue_prop[tid]._get())
            while not processed_queue.empty():
                self.__queue_prop[tid]._put(processed_queue.get_nowait())

        return item

    def ip_send(self, pkt):
        """Send one IPv6 packet to the NCP via PROP_STREAM_NET (async)."""
        pay = self.encode_i(SPINEL.PROP_STREAM_NET)

        pkt_len = len(pkt)
        pay += pack("<H", pkt_len)  # Start with length of IPv6 packet

        pkt_len += 2  # Increment to include length word
        pay += pkt  # Append packet after length

        self.transact(SPINEL.CMD_PROP_VALUE_SET, pay, SPINEL.HEADER_ASYNC)

    def chlist_send(self, pkt, chlist):
        """Send a length-prefixed channel-list payload for property `chlist`."""
        pay = self.encode_i(chlist)

        pkt_len = len(pkt)
        pay += pack("<H", pkt_len)  # Start with length of chlist

        pkt_len += 2  # Increment to include length word
        pay += pkt  # Append packet after length

        self.transact(SPINEL.CMD_PROP_VALUE_SET, pay, SPINEL.HEADER_ASYNC)

    def cmd_reset(self):
        """Reset the NCP; returns True when the reset status is confirmed."""
        self.queue_wait_prepare(None, SPINEL.HEADER_ASYNC)
        self.transact(SPINEL.CMD_RESET)
        result = self.queue_wait_for_prop(SPINEL.PROP_LAST_STATUS,
                                          SPINEL.HEADER_ASYNC)
        # 114: SPINEL software-reset status code — see SPINEL_LAST_STATUS_MAP.
        return (result is not None and result.value == 114)

    def cmd_nverase(self):
        """Erase the NCP's non-volatile storage (fire-and-forget)."""
        self.queue_wait_prepare(None, SPINEL.HEADER_ASYNC)
        self.transact(SPINEL.CMD_NVERASE)

    def cmd_send(self, command_id, payload=bytes(), tid=SPINEL.HEADER_DEFAULT):
        """Send a command and drain the response queue for its tid."""
        self.queue_wait_prepare(None, tid)
        self.transact(command_id, payload, tid)
        self.queue_wait_for_prop(None, tid)

    def prop_change_async(self,
                          cmd,
                          prop_id,
                          value,
                          py_format='B',
                          tid=SPINEL.HEADER_DEFAULT):
        """Issue a property change without waiting for the response."""
        pay = self.encode_i(prop_id)
        if py_format is not None:
            pay += pack(py_format, value)
        self.transact(cmd, pay, tid)

    def prop_insert_async(self,
                          prop_id,
                          value,
                          py_format='B',
                          tid=SPINEL.HEADER_DEFAULT):
        """Async PROP_VALUE_INSERT."""
        self.prop_change_async(SPINEL.CMD_PROP_VALUE_INSERT, prop_id, value,
                               py_format, tid)

    def prop_remove_async(self,
                          prop_id,
                          value,
                          py_format='B',
                          tid=SPINEL.HEADER_DEFAULT):
        """Async PROP_VALUE_REMOVE."""
        self.prop_change_async(SPINEL.CMD_PROP_VALUE_REMOVE, prop_id, value,
                               py_format, tid)

    def __prop_change_value(self,
                            cmd,
                            prop_id,
                            value,
                            py_format='B',
                            tid=SPINEL.HEADER_DEFAULT):
        """ Utility routine to change a property value over SPINEL. """
        self.queue_wait_prepare(prop_id, tid)

        pay = self.encode_i(prop_id)
        if py_format is not None:
            pay += pack(py_format, value)
        self.transact(cmd, pay, tid)

        result = self.queue_wait_for_prop(prop_id, tid)
        if result:
            return result.value
        else:
            return None

    def prop_get_value(self, prop_id, tid=SPINEL.HEADER_DEFAULT):
        """ Blocking routine to get a property value over SPINEL. """
        if CONFIG.DEBUG_LOG_PROP:
            handler = SPINEL_PROP_DISPATCH[prop_id]
            prop_name = handler.__name__
            print("PROP_VALUE_GET [tid=%d]: %s" % (tid & 0xF, prop_name))
        return self.__prop_change_value(SPINEL.CMD_PROP_VALUE_GET, prop_id,
                                        None, None, tid)

    def prop_set_value(self,
                       prop_id,
                       value,
                       py_format='B',
                       tid=SPINEL.HEADER_DEFAULT):
        """ Blocking routine to set a property value over SPINEL. """
        if CONFIG.DEBUG_LOG_PROP:
            handler = SPINEL_PROP_DISPATCH[prop_id]
            prop_name = handler.__name__
            print("PROP_VALUE_SET [tid=%d]: %s" % (tid & 0xF, prop_name))
        return self.__prop_change_value(SPINEL.CMD_PROP_VALUE_SET, prop_id,
                                        value, py_format, tid)

    def prop_insert_value(self,
                          prop_id,
                          value,
                          py_format='B',
                          tid=SPINEL.HEADER_DEFAULT):
        """ Blocking routine to insert a property value over SPINEL. """
        if CONFIG.DEBUG_LOG_PROP:
            handler = SPINEL_PROP_DISPATCH[prop_id]
            prop_name = handler.__name__
            print("PROP_VALUE_INSERT [tid=%d]: %s" % (tid & 0xF, prop_name))
        return self.__prop_change_value(SPINEL.CMD_PROP_VALUE_INSERT, prop_id,
                                        value, py_format, tid)

    def prop_remove_value(self,
                          prop_id,
                          value,
                          py_format='B',
                          tid=SPINEL.HEADER_DEFAULT):
        """ Blocking routine to remove a property value over SPINEL. """
        if CONFIG.DEBUG_LOG_PROP:
            handler = SPINEL_PROP_DISPATCH[prop_id]
            prop_name = handler.__name__
            print("PROP_VALUE_REMOVE [tid=%d]: %s" % (tid & 0xF, prop_name))
        return self.__prop_change_value(SPINEL.CMD_PROP_VALUE_REMOVE, prop_id,
                                        value, py_format, tid)

    def get_ipaddrs(self, tid=SPINEL.HEADER_DEFAULT):
        """
        Return current list of ip addresses for the device.
        """
        value = self.prop_get_value(SPINEL.PROP_IPV6_ADDRESS_TABLE, tid)
        # TODO: clean up table parsing to be less hard-coded magic.
        if value is None:
            return None
        # Each table entry is 0x1B (27) bytes; the address is bytes 2..17.
        size = 0x1B
        addrs = [value[i:i + size] for i in range(0, len(value), size)]
        ipaddrs = []
        for addr in addrs:
            addr = addr[2:18]
            ipaddrs.append(ipaddress.IPv6Address(addr))
        return ipaddrs
|
pfp.py | '''
Refer to https://docs.python.org/3.8/library/multiprocessing.html
'''
import multiprocessing
class MultiProcessor:
    """Run registered functions in parallel, one process per function.

    Each function is invoked as f(q, context, f_args) and is expected to put
    exactly one result on the shared queue *q*.
    Refer to https://docs.python.org/3.8/library/multiprocessing.html
    """

    def __init__(self, context: dict = None):
        self.functions = list()      # registered callables, in order
        self.context = context       # shared context passed to every function
        self.results = list()        # one result per function, completion order
        self.function_args = dict()  # f.__name__ -> per-function args dict

    def register_function(self, f, f_args: dict = None):
        '''
        Register *f* for parallel execution together with its args dict.

        Fix: the original default `f_args=dict()` was a mutable default
        argument shared across every call; None is used as the sentinel.
        Note: functions are keyed by __name__, so a later registration with
        the same name overwrites the earlier args.
        '''
        self.functions.append(f)
        self.function_args[f.__name__] = f_args if f_args is not None else {}

    def execute_parallel(self):
        '''
        Start every registered function in its own process, then collect one
        result per process into self.results.

        Fixes: the original called q.get() immediately after each start(),
        which blocked until that process finished and serialised the whole
        run; set_start_method() raised RuntimeError on a second call; and
        processes were never joined.  Results now arrive in completion
        order (the original sequential version preserved registration order).
        '''
        try:
            multiprocessing.set_start_method('spawn')
        except RuntimeError:
            # Start method already chosen for this interpreter; keep it.
            pass
        q = multiprocessing.Queue()
        processes = []
        for function in self.functions:
            f_args = self.function_args[function.__name__]
            p = multiprocessing.Process(target=function, args=(q, self.context, f_args, ))
            p.start()
            processes.append(p)
        # Drain the queue before joining to avoid feeder-thread deadlocks.
        for _ in processes:
            self.results.append(q.get())
        for p in processes:
            p.join()
# EOF
|
recordings.py | import logging
import sqlalchemy
import shortuuid
import json
import os
import time
import asyncio
import threading
import shutil
from datetime import datetime, timedelta, timezone
from rx.subject import Subject
from db import get_db, metadata
from config import LOCAL_STORAGE_PATH, REMOTE_STORAGE_PATH
from controllers.controller import Controller
from controllers.participants import participant_controller
from controllers.cameras import camera_controller
from controllers.studies import studies_controller
from gpiozero import LED
#trigger for TMSi Porti, pin1 = 3.3V (GND = pin6)
# gpiozero LED objects drive raw GPIO lines; the "BOARD" comments map each
# BCM pin name to its physical header pin.
trigger = LED("GPIO21") #LED("BOARD40")
led_red = LED("GPIO5") #LED("BOARD29")
led_yellow = LED("GPIO6") #LED("BOARD31")
led_green = LED("GPIO13") #LED("BOARD33")
led_blue = LED("GPIO19") #LED("BOARD35")
led_white = LED("GPIO26") #LED("BOARD")
# Root logger: thread-aware format for debugging the recording pipeline.
logging.basicConfig(level=logging.DEBUG,
                    format='%(relativeCreated)6d %(threadName)s %(message)s')
logger = logging.getLogger()
database = get_db()
# Schema for recording sessions; the cameras_* columns hold JSON-encoded
# lists (see RecordingController.parse_row).
recordings = sqlalchemy.Table('recordings', metadata,
                              sqlalchemy.Column(
                                  'id', sqlalchemy.String, primary_key=True),
                              sqlalchemy.Column(
                                  'participant_id', sqlalchemy.ForeignKey('participants.id')),
                              sqlalchemy.Column('name', sqlalchemy.String),
                              sqlalchemy.Column(
                                  'file_path', sqlalchemy.String),
                              sqlalchemy.Column(
                                  'start_time', sqlalchemy.String),
                              sqlalchemy.Column('end_time', sqlalchemy.String),
                              sqlalchemy.Column('state', sqlalchemy.String),
                              sqlalchemy.Column(
                                  'cameras_recorded', sqlalchemy.String),
                              sqlalchemy.Column(
                                  'cameras_processing', sqlalchemy.String),
                              sqlalchemy.Column(
                                  'cameras_processed', sqlalchemy.String),
                              )
class RecordingController(Controller):
    """CRUD and lifecycle controller for recordings.

    Lifecycle states: empty -> recording -> unprocessed -> processing ->
    processed. Camera interaction goes through camera_controller's
    send_command(); rows live in the `recordings` table, whose cameras_*
    list columns are JSON-encoded strings.
    """

    # in-flight sessions keyed by recording id (class-level, shared)
    sessions = {}
    # Rx subject used to publish controller events to subscribers
    observable = Subject()

    def parse_row(self, row):
        """Convert a DB row into a plain dict, decoding the JSON list columns."""
        return {
            'id': row['id'],
            'participant_id': row['participant_id'],
            'name': row['name'],
            'file_path': row['file_path'],
            'start_time': row['start_time'],
            'end_time': row['end_time'],
            'state': row['state'],
            'cameras_recorded': json.loads(row['cameras_recorded']),
            'cameras_processing': json.loads(row['cameras_processing']),
            'cameras_processed': json.loads(row['cameras_processed']),
        }

    async def get_recording_metadata(self, recording):
        """Assemble the metadata dict written next to processed videos."""
        participant = await participant_controller.get(recording.get('participant_id'))
        study = await studies_controller.get(participant.get('study_id'))
        return {
            'recording_name': recording.get('name'),
            'start_time': recording.get('start_time'),
            'end_time': recording.get('end_time'),
            'participant_number': participant.get('number'),
            'study_name': study.get('name'),
            'researcher': study.get('researcher'),
        }

    async def get_recording_path(self, recording):
        """Return a filesystem-safe '<study>_<participant>_<name>' slug."""
        participant = await participant_controller.get(recording.get('participant_id'))
        study = await studies_controller.get(participant.get('study_id'))
        path = '{}_{}_{}'.format(study.get('name'),
                                 participant.get('number'),
                                 recording.get('name'))
        return path.replace(' ', '_').replace(':', '_').replace('-', '_')

    async def get_local_storage_path(self, recording):
        """Path prefix under local storage for this recording's files."""
        recording_path = await self.get_recording_path(recording)
        return '{}/{}'.format(LOCAL_STORAGE_PATH, recording_path)

    async def get_remote_storage_path(self, recording):
        """Directory on remote storage where processed files are uploaded."""
        recording_path = await self.get_recording_path(recording)
        return '{}/{}'.format(REMOTE_STORAGE_PATH, recording_path)

    async def get_camera_file_path(self, recording, camera_id):
        """Local .mp4 path for one camera's footage of this recording."""
        local_path = await self.get_local_storage_path(recording)
        camera = await camera_controller.get(camera_id)
        return '{}_{}.mp4'.format(local_path, camera.get('name', camera_id).replace(' ', '_'))

    # new start
    def sendtrigger(self, recording):
        """Busy-wait until the recording's scheduled start, then pulse the
        TMSi Porti trigger output high for 2 seconds.

        Runs in a daemon thread (see start()). It is a plain function on
        purpose: the original `async def` handed to threading.Thread only
        created a never-awaited coroutine, so the pulse never fired.
        """
        # start_time is stored as an ISO-8601 string; parse it back so the
        # loop compares datetime to datetime instead of datetime to str.
        start_time = datetime.fromisoformat(recording.get('start_time'))
        current_time = datetime.now().astimezone()
        # NOTE(review): the original consulted an undefined `self.recording`
        # flag; default to True so the wait proceeds, while leaving room for
        # a future cancellation flag. Confirm intended semantics.
        while current_time < start_time and getattr(self, 'recording', True):
            time.sleep(0.001)
            current_time = datetime.now().astimezone()
        trigger.on()
        time.sleep(2)
        trigger.off()
    # new end

    async def start(self, recording_id):
        """Schedule the start 3 s out, command the cameras to record, and
        spawn the hardware-trigger thread. Returns the updated recording."""
        async with self.db.transaction():
            recording = await self.get(recording_id)
            if not recording:
                return
            recording['start_time'] = (
                datetime.now() + timedelta(seconds=3)).astimezone().isoformat()
            recording['end_time'] = None
            recording['state'] = 'recording'
            recording['cameras_processed'] = []
            recording['cameras_processing'] = []
            cameras_recorded = await camera_controller.send_command({
                'event': 'start_recording',
                'data': recording,
            })
            recording['cameras_recorded'] = cameras_recorded
            recording = await self.update(recording)
            # send trigger to TMSi Porti; args must be a 1-tuple — the
            # original `args=(recording)` was just a parenthesised dict.
            thread = threading.Thread(
                target=self.sendtrigger, args=(recording,), daemon=True)
            thread.start()
            return recording

    async def stop(self, recording_id):
        """Command cameras to stop and mark the recording unprocessed."""
        async with self.db.transaction():
            recording = await self.get(recording_id)
            if not recording:
                return
            await camera_controller.send_command({
                'event': 'stop_recording',
                'data': recording,
            })
            recording['end_time'] = datetime.now().astimezone().isoformat()
            recording['state'] = 'unprocessed'
            recording = await self.update(recording)
            return recording

    async def discard(self, recording_id):
        """Throw away recorded footage and reset the recording to 'empty'."""
        async with self.db.transaction():
            recording = await self.get(recording_id)
            if not recording:
                return
            await camera_controller.send_command({
                'event': 'discard_recording',
                'data': recording,
            })
            recording['start_time'] = None
            recording['end_time'] = None
            recording['cameras_recorded'] = []
            recording['state'] = 'empty'
            recording = await self.update(recording)
            return recording

    async def process(self, recording_id):
        """Write metadata to remote storage and kick off camera processing.

        Takes a table lock because processed() callbacks mutate the same
        cameras_* lists concurrently.
        """
        async with self.db.transaction():
            await self.db.execute('LOCK TABLE recordings IN SHARE ROW EXCLUSIVE MODE')
            recording = await self.get(recording_id)
            if not recording:
                return
            local_storage_path = await self.get_local_storage_path(recording)
            remote_storage_path = await self.get_remote_storage_path(recording)
            if not os.path.exists(remote_storage_path):
                os.makedirs(remote_storage_path)
            recording_metadata = await self.get_recording_metadata(recording)
            metadata_path = '{}/metadata.json'.format(remote_storage_path)
            with open(metadata_path, 'w') as file:
                file.write(json.dumps(recording_metadata, indent=2))
            cameras_processing = await camera_controller.send_command({
                'event': 'process_recording',
                'data': recording,
            })
            recording['cameras_processing'] = cameras_processing
            recording['cameras_processed'] = []
            recording['state'] = 'processing'
            recording = await self.update(recording)
            return recording

    async def processed(self, recording_id, camera_id):
        """Callback when one camera finished processing: upload its file in a
        background thread and, once all cameras are done, mark 'processed'."""
        recording = await self.get(recording_id)
        camera = await camera_controller.get(camera_id)
        source = await self.get_camera_file_path(recording, camera_id)
        base_path = await self.get_remote_storage_path(recording)
        dest = '{}/{}.mp4'.format(base_path, camera.get('name', camera_id))
        thread = threading.Thread(
            target=self.upload, args=(source, dest), daemon=True)
        thread.start()
        async with self.db.transaction():
            await self.db.execute('LOCK TABLE recordings IN SHARE ROW EXCLUSIVE MODE')
            recording = await self.get(recording.get('id'))
            if not recording:
                return
            recording['cameras_processing'].remove(camera_id)
            recording['cameras_processed'].append(camera_id)
            if len(recording['cameras_processing']) == 0:
                recording['state'] = 'processed'
            recording = await self.update(recording)
            return recording

    def upload(self, source, dest):
        """Move one processed file to remote storage (runs in a thread)."""
        logger.info('Upload {} to {}'.format(source, dest))
        shutil.move(source, dest)
        logger.info('Done uploading {}'.format(dest))

    async def get_all(self):
        """Return every recording as a parsed dict."""
        query = recordings.select()
        results = await self.db.fetch_all(query)
        return [self.parse_row(result) for result in results]

    async def create(self, recording):
        """Insert a new 'empty' recording and broadcast the create event."""
        recording_id = shortuuid.uuid()
        query = recordings.insert().values(
            id=recording_id,
            participant_id=recording.get('participant_id'),
            name=recording.get('name'),
            file_path=recording.get('file_path'),
            start_time=recording.get('start_time'),
            end_time=recording.get('end_time'),
            cameras_recorded=json.dumps([]),
            cameras_processing=json.dumps([]),
            cameras_processed=json.dumps([]),
            state='empty',
        )
        await self.db.execute(query)
        recording = await self.get(recording_id)
        await self.broadcast({
            'event': 'create',
            'entity': recording
        })
        return recording

    async def get(self, recording_id):
        """Fetch one recording by id; returns None when not found."""
        query = recordings.select().where(recordings.c.id == recording_id)
        result = await self.db.fetch_one(query)
        if result:
            return self.parse_row(result)

    async def update(self, recording):
        """Persist all fields of the recording and broadcast the update."""
        query = recordings.update().where(recordings.c.id == recording.get('id')).values(
            id=recording.get('id'),
            participant_id=recording.get('participant_id'),
            name=recording.get('name'),
            file_path=recording.get('file_path'),
            start_time=recording.get('start_time'),
            end_time=recording.get('end_time'),
            cameras_recorded=json.dumps(recording.get('cameras_recorded')),
            cameras_processing=json.dumps(recording.get('cameras_processing')),
            cameras_processed=json.dumps(recording.get('cameras_processed')),
            state=recording.get('state'),
        )
        await self.db.execute(query)
        recording = await self.get(recording.get('id'))
        await self.broadcast({
            'event': 'update',
            'entity': recording
        })
        return recording

    async def delete(self, recording_id):
        """Delete one recording and broadcast the delete event."""
        recording = await self.get(recording_id)
        query = recordings.delete().where(recordings.c.id == recording_id)
        await self.db.execute(query)
        await self.broadcast({
            'event': 'delete',
            'entity': recording
        })
        return recording
# Module-level singleton used by the rest of the application.
recording_controller = RecordingController()
|
tcp3server.py | import socket
import threading
import CompressAndDecompress
from CompressAndDecompress import Compressed
from ToSendFromServer import ToFindData
from EncyrptAndDecrypt import Encrypt
class TCPserver():
    """Minimal threaded TCP server.

    Protocol: the client sends b"<ciphertext>,<key>" (both decimal ints).
    The payload is decrypted, decompressed, looked up in the database, and
    the answer is compressed + encrypted and sent back in the same
    "<ciphertext>,<key>" wire format.
    """

    def __init__(self):
        # Listen address; change to 0.0.0.0 to accept remote clients.
        self.server_ip = 'localhost'
        self.server_port = 9999

    def main(self):
        """Bind, listen and spawn one daemon-less handler thread per client."""
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server.bind((self.server_ip, self.server_port))
        server.listen(1)
        print(f'[*] Listening on {self.server_ip}:{self.server_port}')
        while True:
            client, address = server.accept()
            print(f'[*] Accepted connection from {address[0]}:{address[1]}')
            client_handler = threading.Thread(target=self.handle_client, args=(client,))
            client_handler.start()

    def handle_client(self, client_socket):
        """Serve one request: decrypt -> decompress -> DB lookup ->
        compress -> encrypt -> reply. The socket is closed by `with`."""
        with client_socket as sock:
            request = sock.recv(1024)
            print("####################################################")
            print(f'[*] Received: {request.decode("utf-8")}')
            # Wire format is "<ciphertext>,<key>"
            testString = request.decode("utf-8").split(',')
            print(f'CypherText{testString[0]} : Key:{testString[1]}')
            cypherText: int = int(testString[0])
            cypherKey: int = int(testString[1])
            # Decrypt the integer payload
            toEcrypt = Encrypt()
            DecryptedData: int = int(toEcrypt.decrypt(cypherText, cypherKey))
            print("Decrypted Data:", DecryptedData)
            print("Decrypted Data: Type", type(DecryptedData))
            # Decompress to recover the client's query string
            toCompressed: Compressed = Compressed()
            DecompressedData = toCompressed.decompress(DecryptedData)
            print("Received Data From Clients>>:#####", DecompressedData)
            # Look the query up in the database
            backDataFromDB = self.toSendclient(DecompressedData)
            print("BackDataFromDB", backDataFromDB)
            # Compress the DB answer back to an int
            DB_Compress: Compressed = Compressed()
            CompressededDataFromDB: int = DB_Compress.compress(backDataFromDB)
            print("Compressed Data From DB", bin(CompressededDataFromDB))
            # Encrypt — returns (ciphertext, key) as ints
            objEncrypt = Encrypt()
            CypherText, Key = objEncrypt.encrypt(str(CompressededDataFromDB))
            print("CyphterText is:::::: and Key ", CypherText, Key)
            # Reply as b"<ciphertext>,<key>"
            ClientMessage = bytes(str(CypherText), 'utf-8')
            key = bytes(str(Key), 'utf-8')
            iden = bytes(',', 'utf-8')
            SendSmsToCleint = ClientMessage + iden + key
            print("Success data to send", SendSmsToCleint)
            sock.send(SendSmsToCleint)

    def toSendclient(self, receivedData):
        """Resolve the decompressed query against the database."""
        ToFind = ToFindData(receivedData)
        return ToFind.dbFinder()
if __name__ == '__main__':
    # Run the server directly: bind and serve forever.
    Myserver = TCPserver()
    Myserver.main()
|
test_qftpd.py | __author__ = 'mbott'
import os
import json
import unittest
from time import sleep
from qumulo.rest_client import RestClient
from qumulo.lib.request import RequestError
# Modules under test
import qftpd
# Qumulo RESTful API address/port and credentials
# Requires a running cluster and admin creds
# Update values in qftpd.py for now
API_HOST=qftpd.API_HOST
API_PORT=qftpd.API_PORT
API_USER=qftpd.API_USER
API_PASS=qftpd.API_PASS
FILE_QSTAT = """{
"change_time": "2015-03-05T02:01:53.498584694Z",
"mode": "0644",
"file_number": "4",
"group": "17179871184",
"id": "4",
"path": "/file.txt",
"name": "file.txt",
"num_links": 1,
"child_count": 0,
"blocks": "1",
"type": "FS_FILE_TYPE_FILE",
"owner": "12884903978",
"size": "5",
"modification_time": "2015-03-05T02:01:58.412045121Z",
"creation_time": "2015-03-05T02:01:53.498584694Z"
}"""
#FILE_PSTAT = dict(
# st_mode=33188,
# st_ino=4,
# st_dev=436207686L,
# st_nlink=1,
# st_uid=2090,
# st_gid=2000,
# st_size=5,
# st_atime=1425520913,
# st_mtime=1425520918,
# st_ctime=1425520913)
DIR_QSTAT = """{
"change_time": "2015-03-05T02:01:25.282477271Z",
"mode": "0755",
"file_number": "3",
"group": "17179871184",
"id": "3",
"path": "/directory/",
"name": "directory",
"num_links": 2,
"child_count": 0,
"blocks": "0",
"type": "FS_FILE_TYPE_DIRECTORY",
"owner": "12884903978",
"size": "0",
"modification_time": "2015-03-05T02:01:21.433750274Z",
"creation_time": "2015-03-05T02:01:21.433750274Z"
}"""
def get_rc():
    """Return a RestClient that is already logged in to the test cluster."""
    client = RestClient(API_HOST, API_PORT)
    client.login(API_USER, API_PASS)
    return client
def create_data(cleanup=False):
    """Create — or, with cleanup=True, remove — a small test dir structure.

    Creation is dirs-then-files; cleanup reverses the order so directories
    are empty before they are deleted.
    """
    dirs = ['/directory']
    files = ['/file.txt', '/directory/nestedfile.txt']
    if not cleanup:  # create dirs, then files
        create_dirs(cleanup, dirs)
        create_files(cleanup, files)
    else:  # delete files, then dirs (redundant `elif cleanup` removed)
        create_files(cleanup, files)
        create_dirs(cleanup, dirs)
def create_dirs(cleanup, dirs):
    """Create (or delete, when cleanup is truthy) directories via the REST API."""
    rc = get_rc()
    for d in dirs:
        path, name = os.path.split(d)
        if not cleanup:
            rc.fs.create_directory(name=name, dir_path=path)
        else:  # redundant `elif cleanup` removed
            rc.fs.delete(d + '/')  # delete wants a trailing slash if isdir()
def create_files(cleanup, files):
    """Create (or delete, when cleanup is truthy) files via the REST API."""
    rc = get_rc()
    for f in files:
        path, name = os.path.split(f)
        if not cleanup:
            rc.fs.create_file(name=name, dir_path=path)
        else:  # redundant `elif cleanup` removed
            rc.fs.delete(f)
class TestQftpdStat(unittest.TestCase):
    """Unit tests for AbstractedQSFS's qstat -> os.stat-field conversions,
    driven by the canned FILE_QSTAT / DIR_QSTAT fixtures above."""

    def setUp(self):
        # fresh abstracted filesystem rooted at '/' with a live RestClient
        self.qsfs = qftpd.AbstractedQSFS(u'/',None)
        self.qsfs.rc = get_rc()

    def test_time_conversion_timestamp_to_epoch(self):
        timestamp = "2015-03-05T02:01:53.498584694Z"
        target_epoch_time = 1425520913.498584
        self.assertEqual(
            target_epoch_time,
            self.qsfs.convert_timestamp_to_epoch_seconds(timestamp))

    def test_qstat_file_conversion_to_st_mode(self):
        # 33188 == 0o100644 (regular file, rw-r--r--)
        target_st_mode = 33188
        self.assertEqual(
            target_st_mode,
            self.qsfs.get_st_mode(json.loads(FILE_QSTAT)))

    def test_qstat_dir_conversion_to_st_mode(self):
        # 16877 == 0o40755 (directory, rwxr-xr-x)
        target_st_mode = 16877
        self.assertEqual(
            target_st_mode,
            self.qsfs.get_st_mode(json.loads(DIR_QSTAT)))

    def test_qstat_file_conversion_to_st_ino(self):
        target_st_ino = 4
        self.assertEqual(
            target_st_ino,
            self.qsfs.get_st_ino(json.loads(FILE_QSTAT)))

    def test_qstat_file_conversion_to_st_dev(self):
        target_st_dev = 0
        self.assertEqual(
            target_st_dev,
            # don't need to pass a FILE_QSTAT to get_st_dev()
            self.qsfs.get_st_dev())

    def test_qstat_file_conversion_to_st_nlink(self):
        target_st_nlink = 1
        self.assertEqual(
            target_st_nlink,
            self.qsfs.get_st_nlink(json.loads(FILE_QSTAT)))

    def test_qstat_file_conversion_to_st_uid(self):
        target_uid = 12884903978
        self.assertEqual(
            target_uid,
            self.qsfs.get_st_uid(json.loads(FILE_QSTAT)))

    def test_qstat_file_conversion_to_st_gid(self):
        target_gid = 17179871184
        self.assertEqual(
            target_gid,
            self.qsfs.get_st_gid(json.loads(FILE_QSTAT)))

    def test_qstat_file_to_st_size(self):
        target_size = 5
        self.assertEqual(
            target_size,
            self.qsfs.get_st_size(json.loads(FILE_QSTAT)))

    def test_qstat_file_to_st_atime(self):
        target_atime = 1425520913
        self.assertEqual(
            target_atime,
            self.qsfs.get_st_atime(json.loads(FILE_QSTAT)))

    def test_qstat_file_to_st_mtime(self):
        target_mtime = 1425520918
        self.assertEqual(
            target_mtime,
            self.qsfs.get_st_mtime(json.loads(FILE_QSTAT)))

    def test_qstat_file_to_st_ctime(self):
        target_ctime = 1425520913
        self.assertEqual(
            target_ctime,
            self.qsfs.get_st_ctime(json.loads(FILE_QSTAT)))

    def test_get_user(self):
        # known cluster uid resolves to its name
        target_user = 'admin'
        uid = 500
        self.assertEqual(target_user, self.qsfs.get_user_by_uid(uid))

    def test_get_group(self):
        target_group = 'Users'
        gid = 513
        self.assertEqual(target_group, self.qsfs.get_group_by_gid(gid))

    def test_get_nonexistent_user(self):
        # unknown uid falls back to the stringified number
        uid = 12884903978
        self.assertEqual(str(uid), self.qsfs.get_user_by_uid(uid))

    def test_get_nonexistent_group(self):
        gid = 17179871184
        self.assertEqual(str(gid), self.qsfs.get_group_by_gid(gid))
class TestQftpdFS(unittest.TestCase):
    """Filesystem predicate tests against live test data on the cluster.
    (Python 2 file — note the print statement below.)"""

    def setUp(self):
        self.qsfs = qftpd.AbstractedQSFS(u'/', None)
        self.qsfs.rc = get_rc()
        self.dirs = ['/directory']
        self.files = ['/file.txt', '/directory/nestedfile.txt']
        create_data()

    def tearDown(self):
        # remove the fixture files/dirs created in setUp
        create_data(cleanup=True)

    def test_isfile(self):
        for f in self.files:
            print "testing %s" % f
            self.assertTrue(self.qsfs.isfile(f))
class TestQftpdClients(unittest.TestCase):
    """End-to-end check: start the FTP server in a daemon thread and log in
    with a real ftplib client."""

    def test_ftp_login(self):
        import threading
        T = threading.Thread(target=qftpd.main)
        T.daemon = True  # don't keep the test process alive
        T.start()
        from ftplib import FTP
        ftp = FTP('localhost')
        response = ftp.login(API_USER, API_PASS)
        ftp.quit()
        # changed the login message!
        #self.assertEqual("230 Login successful.", response)
        self.assertIn('Welcome to', response)
class TestTestData(unittest.TestCase):
    """Sanity checks that the create_data() fixture helper really creates
    the expected dirs and files on the cluster."""

    def setUp(self):
        self.rc = get_rc()
        self.dirs = ['/directory']
        self.files = ['/file.txt', '/directory/nestedfile.txt']
        create_data()

    def tearDown(self):
        create_data(cleanup=True)

    def test_testdir_creation(self):
        for d in self.dirs:
            response = self.rc.fs.get_attr(d)
            self.assertEqual('FS_FILE_TYPE_DIRECTORY', response['type'])

    def test_testfile_creation(self):
        for f in self.files:
            response = self.rc.fs.get_attr(f)
            self.assertEqual('FS_FILE_TYPE_FILE', response['type'])
class TestWriteBuffer(unittest.TestCase):
    """Tests for qftpd.WriteBuffer: buffered writes that flush to the
    Qumulo filesystem on close. (Python 2 file — print statements.)"""

    def setUp(self):
        """Make an abstractedFS"""
        self.fs = qftpd.AbstractedQSFS(u'/', None)
        self.rc = get_rc()
        self.fs.rc = self.rc
        print str(self.fs)
        print str(self.fs.rc)

    def tearDown(self):
        """Get rid of the test_1234.txt file that gets written if
        test_write_buffer_close_writes_qsfs() fails
        """
        test_file_names = ['test_1234.txt', 'test_foo.txt']
        local_rc = get_rc()
        for filename in test_file_names:
            print "target_name: %s" % filename
            try:
                local_rc.fs.delete(filename)
            # file didn't get created but we're not testing for that here
            except RequestError:
                pass

    def test_write_buffer_holds_stuff(self):
        # data written to the buffer can be read back before close()
        test_file_contents = "test " * 100
        write_buffer = qftpd.WriteBuffer('/', 'test_foo.txt', self.fs)
        write_buffer.write(test_file_contents)
        write_buffer.seek(0)
        result = write_buffer.read()
        self.assertEqual(test_file_contents, result)

    def test_write_buffer_close_writes_qsfs(self):
        # close() should persist the buffered data to the filesystem
        test_file_name = 'test_1234.txt'
        test_file_contents = "test"
        write_buffer = qftpd.WriteBuffer('/', test_file_name, self.fs)
        write_buffer.write(test_file_contents)
        write_buffer.close()
        # sleep < 5sec results in intermittent failures, something about qsfs
        # and (very) newly created files
        sleep(5)
        # verify there is a file on the fs
        local_rc = get_rc()
        full_path = os.path.join('/', test_file_name)
        tempfile = qftpd.SpooledTemporaryFile()
        local_rc.fs.read_file(tempfile, path=full_path)
        tempfile.seek(0)
        file_contents = tempfile.read()
        self.assertEqual(test_file_contents, file_contents)
class TestQSFSAuthorizer(unittest.TestCase):
    """Tests for qftpd.QSFSAuthorizer user lookup and impersonation."""

    def test_has_user(self):
        target_user = 'admin'
        a = qftpd.QSFSAuthorizer()
        self.assertTrue(a.has_user(target_user))

    def test_doesnt_have_user(self):
        target_user = 'foobar'
        a = qftpd.QSFSAuthorizer()
        self.assertFalse(a.has_user(target_user))

    def test_authorizer_returns_restclient(self):
        # impersonate_user must hand back a logged-in RestClient instance.
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.
        a = qftpd.QSFSAuthorizer()
        target_class = RestClient
        self.assertEqual(
            type(a.impersonate_user(API_USER, API_PASS)),
            target_class)
class TestQFTPAuthentication(unittest.TestCase):
    """Authentication behaviour: the bare filesystem has no REST client
    until the authorizer impersonates a user."""

    def test_abstractedqsfs_has_no_restclient(self):
        fs = qftpd.AbstractedQSFS(u'/', None)
        self.assertIsNone(fs.rc)

    def test_impersonate_user_returns_restclient(self):
        authorizer = qftpd.QSFSAuthorizer()
        session = authorizer.impersonate_user(API_USER, API_PASS)
        self.assertIs(type(session), RestClient)
|
exersize6_1.py | import toolbox
import numpy as np
np.seterr(invalid='raise')
import pylab
import multiprocessing as mp
import time
from exersize1 import initialise
def format_coord(x, y):
    """Matplotlib coordinate formatter: append the surface-normal value z
    when the cursor's (x, y) maps to a cell inside the model grid."""
    col = int(x + 0.5)
    row = int(y + 0.5)
    # Chained comparison short-circuits, so numcols/numrows are only
    # consulted when the index is non-negative (same as the original).
    if 0 <= col < numcols and 0 <= row < numrows:
        z = upwardNormals.T[row, col]
        return 'x=%1.4f, y=%1.4f, z=%1.4f' % (x, y, z)
    return 'x=%1.4f, y=%1.4f' % (x, y)
# Developer note kept as a module-level string.
todo = '''
need to re-do angle calculation so it's always to the normal.
normal is easy if we assume every reflection interface is horizontal.
the angle of incidence will then always be %90. save the
'''
# NOTE(review): numpy is already imported and seterr already configured at
# the top of this file — this repetition is harmless but redundant.
import numpy as np
np.seterr(invalid="raise")
def worker(*args):
    """Trace one ray step-by-step through the shared velocity/density grids.

    args[0]: parms dict (x, z, angle, gen, maxgen, nx, nz, ...).
    args[1]: managed list collecting the traced [x, z] path for plotting.

    Runs as a multiprocessing.Process target; the model grids live in the
    module-level shared arrays (vArray, pArray, upNormals, downNormals).
    At an interface, Snell's law gives the transmission angle and a child
    (reflected) ray is queued on the global `newworkers` queue.
    Python 2 file (print statements).
    """
    parms = args[0]
    # Reshape the flat shared buffers back into (nx, nz) model grids.
    vmodel = np.frombuffer(vArray.get_obj(), dtype=np.float32)
    vmodel = vmodel.reshape(parms['nx'], parms['nz'])
    pmodel = np.frombuffer(pArray.get_obj(), dtype=np.float32)
    pmodel = pmodel.reshape(parms['nx'], parms['nz'])
    ups = np.frombuffer(upNormals.get_obj(), dtype=np.float32)
    ups = ups.reshape(parms['nx'], parms['nz'])
    downs = np.frombuffer(downNormals.get_obj(), dtype=np.float32)
    downs = downs.reshape(parms['nx'], parms['nz'])
    # Compass bearings (deg) snapped to the 8 grid directions.
    # NOTE(review): cell_list omits 315 while cell_lookup includes it; the
    # remainder correction below can still produce step==315 — confirm.
    cell_list = np.array([0, 45, 90, 135, 180, 225, 270, 360])
    cell_lookup={ 0:[0,-1],
                  45:[1,-1],
                  90:[1,0],
                  135:[1,1],
                  180:[0,1],
                  225:[-1,1],
                  270:[-1,0],
                  315:[-1,-1],
                  360:[0,-1],
                  }#angle to array index adjustment
    alive=1
    arr = args[1]
    output = []
    '''the actual iterator'''
    # `remainder` accumulates the angular error from snapping to 45-deg
    # steps so the traced path stays close to the true bearing.
    remainder = 0
    while alive:
        angle = parms['angle']%360
        step = cell_list[np.abs(cell_list-angle).argmin()]
        remainder += angle - step
        if remainder > 45:
            step += 45
            remainder -= 45
        if remainder < -45:
            step -= 45
            remainder += 45
        try:
            new_cell = cell_lookup[step]
        except KeyError:
            print 'cell key error!'
            break
        tx, tz = parms['x'],parms['z']
        try:
            # sample the medium before and after the step
            p1 = pmodel[parms['x'],parms['z']]
            v1 = vmodel[parms['x'],parms['z']]
            parms['x']+=new_cell[0]
            parms['z']+=new_cell[1]
            if (parms['x'] < 0) or(parms['z'] < 0): break
            p2 = pmodel[parms['x'],parms['z']]
            v2 = vmodel[parms['x'],parms['z']]
        except IndexError:
            # ray left the model grid
            #~ print 'step error!', parms['x'], parms['z']
            break
        if ((p1 != p2) or (v1 != v2)):
            # interface crossed: pick the normal facing the ray direction
            if 90 <= angle < 270:
                normal = downs[tx, tz]
                incidence = normal - angle #rotated
                reflection = normal - 180 + incidence #compass
                try:
                    # critical angle from Snell's law
                    ic = np.degrees(np.arcsin(v1/v2))
                except FloatingPointError:
                    ic = 180
                #transmissions
                if np.abs(incidence) < ic:
                    transmission = np.degrees(np.arcsin(v2*np.sin(np.radians(incidence))/v1))
                    #~
                    #~ print ''
                    parms['angle'] = normal - transmission
                else:
                    # total internal reflection: stop the transmitted ray
                    #~ print angle, normal, incidence, v1, v2, reflection, ic, transmission, tx, tz
                    break
            else:
                normal = ups[tx, tz]
                incidence = normal - angle #rotated
                reflection = normal - 180 + incidence #compass
                try:
                    ic = np.degrees(np.arcsin(v1/v2))
                except FloatingPointError:
                    ic = 999
                #transmissions
                if np.abs(incidence) < ic:
                    transmission = np.degrees(np.arcsin(v2*np.sin(np.radians(incidence))/v1))
                    #~
                    #~ print ''
                    parms['angle'] = normal - transmission
                else:
                    #~ print angle, normal, incidence, v1, v2, reflection, ic, transmission, tx, tz
                    break
            #~ print normal, angle
            #~ break
            # spawn the reflected child ray (one step back, next generation)
            new_kwargs = parms.copy()
            new_kwargs['angle'] = reflection
            new_kwargs['z'] -= new_cell[1]
            new_kwargs['x'] -= new_cell[0]
            new_kwargs['gen'] += 1
            if new_kwargs['gen'] < new_kwargs['maxgen']: newworkers.put(new_kwargs)
        output.append([parms['x'],parms['z']])
        alive += 1
        if alive > 4000:break  # hard cap on path length
    arr.append(output)
    return
if __name__ == "__main__":
    # Build the model, estimate interface normals, then spawn ray-tracing
    # worker processes and plot the traced paths. Python 2 file.
    workspace, parameters = initialise(filename='model2.png')
    #holding array
    manager = mp.Manager()
    arr = manager.list()
    cross = parameters['model']['vp']
    nz = cross.shape[-1]
    nx = cross.shape[-2]
    # Gradient magnitude marks interface cells (im == 1 at any property jump).
    dz = np.zeros_like(cross)
    dx = np.zeros_like(cross)
    dz[:,1:] = np.diff(cross, axis=-1)
    dx[1:,:] = np.diff(cross, axis=-2)
    im = np.hypot(dx, dz)
    im[im != 0.0] = 1
    inds = zip(*np.nonzero(im))
    downwardNormals = np.zeros_like(cross)
    upwardNormals = np.zeros_like(cross)
    # Least-squares line fit in a 6x6 stamp around each interface cell gives
    # the local slope, hence the up/down normal bearings (degrees, compass).
    for x, z in inds:
        stamp = im[x-3:x+3,z-3:z+3]
        if stamp.shape == (6,6):
            locs = np.nonzero(stamp)
            xi = locs[0]
            A = np.array([xi, np.ones_like(xi)])
            # linearly generated sequence
            yi = locs[1]
            w = np.linalg.lstsq(A.T,yi)[0][0]
            #~ if 520 < x < 600 and 1200 < z < 1300:
            #~ print x, z, np.degrees(np.arctan2(w, -1) ) %360
            #~ pylab.pcolor(stamp.T)
            #~ pylab.show()
            downwardNormals[x-1:x+1,z-1:z+1] = np.degrees(np.arctan2(w, 1) + np.pi) %360
            upwardNormals[x-1:x+1,z-1:z+1] = np.degrees(np.arctan2(w, 1) ) %360
    # Shared-memory copies of the models for the worker processes.
    vArray = mp.Array('f', cross.flatten())
    pArray = mp.Array('f', cross.flatten())
    upNormals = mp.Array('f', upwardNormals.flatten())
    downNormals = mp.Array('f', downwardNormals.flatten())
    workers = mp.Queue()
    newworkers = mp.Queue()
    nprocesses = 4
    jobs = []
    print "gonna spawn some rays!"
    # Seed one downward-fan of rays from the middle of the model top.
    angles = np.arange(135,226,3)
    jobs = []
    tmp = []
    for angle in angles:
        kwargs={'x':int(nx/2.),
                'z':1,
                't':0,
                'angle':angle,
                'amplitude':1,
                'nx':nx,
                'nz':nz,
                'gen':0,
                'maxgen':1,
                }
        workers.put(kwargs)
    print "starting queue built!"
    time.sleep(0.1)
    #~ print workers.qsize()
    # Process the queue in batches of nprocesses; reflected child rays land
    # in newworkers and are fed back in until the queue drains.
    while True:
        for _ in range(min(nprocesses, workers.qsize())):
            args = workers.get()
            p = mp.Process(target=worker, args=(args,arr))
            jobs.append(p)
            tmp.append(args)
            p.start()
        time.sleep(0.1)
        while newworkers.qsize(): workers.put(newworkers.get())
        for i, j in enumerate(jobs):
            j.join()
        jobs = []
        tmp = []
        print workers.qsize()
        if workers.qsize() ==0: break
    print 'plotting!'
    # Draw every traced ray path over the velocity model.
    for item in arr:
        try:
            x,y = zip(*item)
            pylab.plot(x, y, 'k')
            pylab.xlim([0,nx])
            pylab.ylim([nz,0])
        except ValueError:
            # empty path (ray died immediately)
            pass
    pylab.imshow(cross.T)
    pylab.plot(parameters['gx'][::40], np.ones_like(parameters['gx'][::40]), 'gD')
    pylab.show()
|
diskover.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""diskover - Elasticsearch file system crawler
diskover is a file system crawler that index's
your file metadata into Elasticsearch.
See README.md or https://github.com/shirosaidev/diskover
for more information.
Copyright (C) Chris Park 2017-2019
diskover is released under the Apache 2.0 license. See
LICENSE for the full license text.
"""
from scandir import scandir
from rq import SimpleWorker, Queue
from rq.registry import StartedJobRegistry
from datetime import datetime
from random import randint
try:
import configparser as ConfigParser
except ImportError:
import ConfigParser
from multiprocessing import cpu_count
from threading import Thread, Lock
try:
from queue import Queue as PyQueue
except ImportError:
from Queue import Queue as PyQueue
import progressbar
import argparse
import logging
import importlib
import time
import math
import re
import os
import sys
import json
# Package version, exported under the conventional dunder name too.
version = '1.5.0.7'
__version__ = version
# True when running under Python 3 (diskover supports both 2 and 3).
IS_PY3 = sys.version_info >= (3, 0)
def print_banner(version):
    """Print one of four ASCII-art startup banners in a random ANSI color.

    `c` picks the color escape code, `b` picks the banner art; both are
    randint(1, 4) so every branch below is reachable.
    """
    c = randint(1, 4)
    if c == 1:
        color = '31m'
    elif c == 2:
        color = '32m'
    elif c == 3:
        color = '33m'
    elif c == 4:
        color = '35m'
    b = randint(1, 4)
    if b == 1:
        banner = """\033[%s
 ________ .__ __
 \______ \ |__| _____| | _________ __ ___________
 | | \| |/ ___/ |/ / _ \ \/ // __ \_ __ \\ /)___(\\
 | ` \ |\___ \| < <_> ) /\ ___/| | \/ (='.'=)
 /_______ /__/____ >__|_ \____/ \_/ \___ >__| (\\")_(\\")
 \/ \/ \/ \/
 v%s
 https://shirosaidev.github.io/diskover
 Crawling all your stuff.
 Support diskover on Patreon or PayPal :)\033[0m
""" % (color, version)
    elif b == 2:
        banner = """\033[%s
 ___ ___ ___ ___ ___ ___ ___ ___
 /\ \ /\ \ /\ \ /\__\ /\ \ /\__\ /\ \ /\ \\
 /::\ \ _\:\ \ /::\ \ /:/ _/_ /::\ \ /:/ _/_ /::\ \ /::\ \\
 /:/\:\__\ /\/::\__\ /\:\:\__\ /::-"\__\ /:/\:\__\ |::L/\__\ /::\:\__\ /::\:\__\\
 \:\/:/ / \::/\/__/ \:\:\/__/ \;:;-",-" \:\/:/ / |::::/ / \:\:\/ / \;:::/ /
 \::/ / \:\__\ \::/ / |:| | \::/ / L;;/__/ \:\/ / |:\/__/
 \/__/ \/__/ \/__/ \|__| \/__/ \/__/ \|__|
 v%s
 https://shirosaidev.github.io/diskover
 Bringing light to the darkness.
 Support diskover on Patreon or PayPal :)\033[0m
""" % (color, version)
    elif b == 3:
        banner = """\033[%s
 _/_/_/ _/ _/
 _/ _/ _/_/_/ _/ _/ _/_/ _/ _/ _/_/ _/ _/_/
 _/ _/ _/ _/_/ _/_/ _/ _/ _/ _/ _/_/_/_/ _/_/
 _/ _/ _/ _/_/ _/ _/ _/ _/ _/ _/ _/ _/
 _/_/_/ _/ _/_/_/ _/ _/ _/_/ _/ _/_/_/ _/
 v%s
 https://shirosaidev.github.io/diskover
 "I didn't even know that was there."
 Support diskover on Patreon or PayPal :)\033[0m
""" % (color, version)
    elif b == 4:
        banner = """\033[%s
 __ __
 /\ \ __ /\ \\
 \_\ \/\_\ ____\ \ \/'\\ ___ __ __ __ _ __ //
 /'_` \/\ \ /',__\\\ \ , < / __`\/\ \/\ \ /'__`\/\`'__\\ ('>
 /\ \L\ \ \ \/\__, `\\\ \ \\\`\ /\ \L\ \ \ \_/ |/\ __/\ \ \/ /rr
 \ \___,_\ \_\/\____/ \ \_\ \_\ \____/\ \___/ \ \____\\\ \\_\\ *\))_
 \/__,_ /\/_/\/___/ \/_/\/_/\/___/ \/__/ \/____/ \\/_/
 v%s
 https://shirosaidev.github.io/diskover
 "Holy s*i# there are so many temp files."
 Support diskover on Patreon or PayPal :)\033[0m
""" % (color, version)
    sys.stdout.write(banner)
    sys.stdout.write('\n')
    sys.stdout.flush()
def load_config():
"""This is the load config function.
It checks for config file and loads in
the config settings.
"""
configsettings = {}
config = ConfigParser.ConfigParser()
dir_path = os.path.dirname(os.path.realpath(__file__))
# check if env var for config file and use that
try:
configfile = os.environ['DISKOVER_CONFIG']
except KeyError:
configfile = '%s/diskover.cfg' % dir_path
pass
# Check for config file
if not os.path.isfile(configfile):
print('Config file %s not found, exiting.' % configfile)
sys.exit(1)
config.read(configfile)
# Check if any sections missing from config and exit if there is
try:
try:
d = config.get('excludes', 'dirs')
dirs = d.split(',')
configsettings['excluded_dirs'] = set(dirs)
except ConfigParser.NoOptionError:
configsettings['excluded_dirs'] = set([])
try:
f = config.get('excludes', 'files')
files = f.split(',')
configsettings['excluded_files'] = set(files)
except ConfigParser.NoOptionError:
configsettings['excluded_files'] = set([])
try:
d = config.get('includes', 'dirs')
dirs = d.split(',')
configsettings['included_dirs'] = set(dirs)
except (ConfigParser.NoOptionError):
configsettings['included_dirs'] = set([])
try:
f = config.get('includes', 'files')
files = f.split(',')
configsettings['included_files'] = set(files)
except ConfigParser.NoOptionError:
configsettings['included_files'] = set([])
try:
configsettings['ownersgroups_uidgidonly'] = config.get('ownersgroups', 'uidgidonly').lower()
except ConfigParser.NoOptionError:
configsettings['ownersgroups_uidgidonly'] = "false"
try:
configsettings['ownersgroups_domain'] = config.get('ownersgroups', 'domain').lower()
except ConfigParser.NoOptionError:
configsettings['ownersgroups_domain'] = "false"
try:
configsettings['ownersgroups_domainsep'] = config.get('ownersgroups', 'domainsep')
except ConfigParser.NoOptionError:
configsettings['ownersgroups_domainsep'] = "\\"
try:
configsettings['ownersgroups_keepdomain'] = config.get('ownersgroups', 'keepdomain').lower()
except ConfigParser.NoOptionError:
configsettings['ownersgroups_keepdomain'] = "false"
try:
t = config.get('autotag', 'files')
if os.path.isfile("%s/%s" % (os.getcwd(),t)):
atf = json.loads(open("%s/%s" % (os.getcwd(),t)).read())
else:
atf = json.loads(t)
configsettings['autotag_files'] = atf
except ValueError as e:
raise ValueError("Error in config autotag files: %s" % e)
except ConfigParser.NoOptionError:
configsettings['autotag_files'] = []
try:
t = config.get('autotag', 'dirs')
if os.path.isfile("%s/%s" % (os.getcwd(),t)):
atd = json.loads(open("%s/%s" % (os.getcwd(),t)).read())
else:
atd = json.loads(t)
configsettings['autotag_dirs'] = atd
except ValueError as e:
raise ValueError("Error in config autotag dirs: %s" % e)
except ConfigParser.NoOptionError:
configsettings['autotag_dirs'] = []
try:
configsettings['aws'] = config.get('elasticsearch', 'aws').lower()
except ConfigParser.NoOptionError:
configsettings['aws'] = "false"
try:
h = config.get('elasticsearch', 'host')
hosts = h.split(',')
configsettings['es_host'] = hosts
except ConfigParser.NoOptionError:
configsettings['es_host'] = ['localhost']
try:
configsettings['es_port'] = int(config.get('elasticsearch', 'port'))
except ConfigParser.NoOptionError:
configsettings['es_port'] = 9200
try:
configsettings['es_user'] = config.get('elasticsearch', 'user')
except ConfigParser.NoOptionError:
configsettings['es_user'] = ""
try:
configsettings['es_password'] = config.get('elasticsearch', 'password')
except ConfigParser.NoOptionError:
configsettings['es_password'] = ""
try:
configsettings['index'] = config.get('elasticsearch', 'indexname')
except ConfigParser.NoOptionError:
configsettings['index'] = ""
try:
configsettings['es_timeout'] = int(config.get('elasticsearch', 'timeout'))
except ConfigParser.NoOptionError:
configsettings['es_timeout'] = 10
try:
configsettings['es_maxsize'] = int(config.get('elasticsearch', 'maxsize'))
except ConfigParser.NoOptionError:
configsettings['es_maxsize'] = 10
try:
configsettings['es_max_retries'] = int(config.get('elasticsearch', 'maxretries'))
except ConfigParser.NoOptionError:
configsettings['es_max_retries'] = 0
try:
configsettings['es_wait_status_yellow'] = config.get('elasticsearch', 'wait').lower()
except ConfigParser.NoOptionError:
configsettings['es_wait_status_yellow'] = "false"
try:
configsettings['es_chunksize'] = int(config.get('elasticsearch', 'chunksize'))
except ConfigParser.NoOptionError:
configsettings['es_chunksize'] = 500
try:
configsettings['index_shards'] = int(config.get('elasticsearch', 'shards'))
except ConfigParser.NoOptionError:
configsettings['index_shards'] = 5
try:
configsettings['index_replicas'] = int(config.get('elasticsearch', 'replicas'))
except ConfigParser.NoOptionError:
configsettings['index_replicas'] = 1
try:
configsettings['index_refresh'] = config.get('elasticsearch', 'indexrefresh')
except ConfigParser.NoOptionError:
configsettings['index_refresh'] = "1s"
try:
configsettings['disable_replicas'] = config.get('elasticsearch', 'disablereplicas').lower()
except ConfigParser.NoOptionError:
configsettings['disable_replicas'] = "false"
try:
configsettings['index_translog_size'] = config.get('elasticsearch', 'translogsize')
except ConfigParser.NoOptionError:
configsettings['index_translog_size'] = "512mb"
try:
configsettings['es_scrollsize'] = int(config.get('elasticsearch', 'scrollsize'))
except ConfigParser.NoOptionError:
configsettings['es_scrollsize'] = 100
try:
configsettings['redis_host'] = config.get('redis', 'host')
except ConfigParser.NoOptionError:
configsettings['redis_host'] = "localhost"
try:
configsettings['redis_port'] = int(config.get('redis', 'port'))
except ConfigParser.NoOptionError:
configsettings['redis_port'] = 6379
try:
configsettings['redis_socket'] = config.get('redis', 'socket')
except ConfigParser.NoOptionError:
configsettings['redis_socket'] = ""
try:
configsettings['redis_password'] = config.get('redis', 'password')
except ConfigParser.NoOptionError:
configsettings['redis_password'] = ""
try:
configsettings['redis_db'] = int(config.get('redis', 'db'))
except ConfigParser.NoOptionError:
configsettings['redis_db'] = 0
try:
configsettings['redis_rq_timeout'] = int(config.get('redis', 'timeout'))
except ConfigParser.NoOptionError:
configsettings['redis_rq_timeout'] = 180
try:
configsettings['redis_ttl'] = int(config.get('redis', 'ttl'))
except ConfigParser.NoOptionError:
configsettings['redis_ttl'] = 500
try:
configsettings['redis_queue'] = config.get('redis', 'queue')
except ConfigParser.NoOptionError:
configsettings['redis_queue'] = "diskover"
try:
configsettings['redis_queue_crawl'] = config.get('redis', 'queuecrawl')
except ConfigParser.NoOptionError:
configsettings['redis_queue_crawl'] = "diskover_crawl"
try:
configsettings['redis_queue_calcdir'] = config.get('redis', 'queuecalcdir')
except ConfigParser.NoOptionError:
configsettings['redis_queue_calcdir'] = "diskover_calcdir"
try:
configsettings['adaptivebatch_startsize'] = int(config.get('adaptivebatch', 'startsize'))
except ConfigParser.NoOptionError:
configsettings['adaptivebatch_startsize'] = 50
try:
configsettings['adaptivebatch_maxsize'] = int(config.get('adaptivebatch', 'maxsize'))
except ConfigParser.NoOptionError:
configsettings['autobatch_maxsize'] = 500
try:
configsettings['adaptivebatch_stepsize'] = int(config.get('adaptivebatch', 'stepsize'))
except ConfigParser.NoOptionError:
configsettings['adaptivebatch_stepsize'] = 10
try:
configsettings['adaptivebatch_maxfiles'] = int(config.get('adaptivebatch', 'maxfiles'))
except ConfigParser.NoOptionError:
configsettings['adaptivebatch_maxfiles'] = 50000
try:
configsettings['listener_host'] = config.get('socketlistener', 'host')
except ConfigParser.NoOptionError:
configsettings['listener_host'] = "localhost"
try:
configsettings['listener_port'] = int(config.get('socketlistener', 'port'))
except ConfigParser.NoOptionError:
configsettings['listener_port'] = 9999
try:
configsettings['listener_maxconnections'] = int(config.get('socketlistener', 'maxconnections'))
except ConfigParser.NoOptionError:
configsettings['listener_maxconnections'] = 5
try:
configsettings['listener_twcport'] = int(config.get('socketlistener', 'twcport'))
except ConfigParser.NoOptionError:
configsettings['listener_twcport'] = 9998
try:
configsettings['diskover_path'] = config.get('paths', 'diskoverpath')
except ConfigParser.NoOptionError:
configsettings['diskover_path'] = "./diskover.py"
try:
configsettings['python_path'] = config.get('paths', 'pythonpath')
except ConfigParser.NoOptionError:
configsettings['python_path'] = "python"
try:
configsettings['md5_readsize'] = int(config.get('dupescheck', 'readsize'))
except ConfigParser.NoOptionError:
configsettings['md5_readsize'] = 65536
try:
configsettings['dupes_maxsize'] = int(config.get('dupescheck', 'maxsize'))
except ConfigParser.NoOptionError:
configsettings['dupes_maxsize'] = 1073741824
try:
configsettings['dupes_checkbytes'] = int(config.get('dupescheck', 'checkbytes'))
except ConfigParser.NoOptionError:
configsettings['dupes_checkbytes'] = 64
try:
configsettings['dupes_restoretimes'] = config.get('dupescheck', 'restoretimes').lower()
except ConfigParser.NoOptionError:
configsettings['dupes_restoretimes'] = "false"
try:
configsettings['dupes_threads'] = int(config.get('dupescheck', 'threads'))
except ConfigParser.NoOptionError:
configsettings['dupes_threads'] = 8
try:
configsettings['gource_maxfilelag'] = float(config.get('gource', 'maxfilelag'))
except ConfigParser.NoOptionError:
configsettings['gource_maxfilelag'] = 5
try:
configsettings['api_url'] = config.get('crawlapi', 'url')
except ConfigParser.NoOptionError:
configsettings['api_url'] = ""
try:
configsettings['api_user'] = config.get('crawlapi', 'user')
except ConfigParser.NoOptionError:
configsettings['api_user'] = ""
try:
configsettings['api_password'] = config.get('crawlapi', 'password')
except ConfigParser.NoOptionError:
configsettings['api_password'] = ""
try:
configsettings['api_pagesize'] = config.get('crawlapi', 'pagesize')
except ConfigParser.NoOptionError:
configsettings['api_pagesize'] = ""
except ConfigParser.NoSectionError as e:
print('Missing section from diskover.cfg, check diskover.cfg.sample and copy over, exiting. (%s)' % e)
sys.exit(1)
return configsettings, configfile
def get_plugins_info():
    """Discover available plugins.

    Scans the ``plugins`` directory that sits next to this file and
    returns a list of dicts, one per plugin package, each holding the
    plugin ``name`` and the import ``spec`` of its ``__init__`` module.
    """
    plugin_dir = os.path.dirname(os.path.realpath(__file__)) + "/plugins"
    main_module = "__init__"
    discovered = []
    for entry in os.listdir(plugin_dir):
        location = os.path.join(plugin_dir, entry)
        # a plugin is a subdirectory that contains an __init__.py
        if not os.path.isdir(location):
            continue
        if main_module + ".py" not in os.listdir(location):
            continue
        spec = importlib.machinery.PathFinder().find_spec(main_module, [location])
        discovered.append({"name": entry, "spec": spec})
    return discovered
def load_plugins():
    """Dynamically import every discovered plugin.

    Returns the imported plugin modules as a list.
    """
    modules = []
    for info in get_plugins_info():
        # materialise and execute the module from its import spec
        module = importlib.util.module_from_spec(info["spec"])
        info["spec"].loader.exec_module(module)
        modules.append(module)
    return modules
def list_plugins():
    """Print the name of every available plugin, one per line."""
    for info in get_plugins_info():
        print(info["name"])
def user_prompt(question):
    """Prompt the yes/no *question* to the user.

    Returns 1 for yes-like answers and 0 for no-like answers (the same
    values ``distutils.util.strtobool`` returned, so callers relying on
    truthiness are unaffected).  Re-prompts on unrecognised input and
    exits cleanly on Ctrl-c.

    Fixed: the previous implementation imported
    ``distutils.util.strtobool``; distutils was deprecated by PEP 632
    and removed entirely in Python 3.12, so the tokens are now parsed
    locally.
    """
    # token sets mirror distutils.util.strtobool for backward compatibility
    truthy = ('y', 'yes', 't', 'true', 'on', '1')
    falsy = ('n', 'no', 'f', 'false', 'off', '0')
    while True:
        try:
            if IS_PY3:
                user_input = input(question + " [y/n]: ").lower()
            else:
                user_input = raw_input(question + " [y/n]: ").lower()
            if user_input in truthy:
                return 1
            if user_input in falsy:
                return 0
            # unrecognised answer -> same ValueError path strtobool used
            raise ValueError
        except ValueError:
            print("Please use y/n or yes/no.\n")
        except KeyboardInterrupt:
            print("Ctrl-c keyboard interrupt, shutting down...")
            sys.exit(0)
def index_create(indexname):
    """This is the es index create function.
    It checks for existing index and deletes if
    there is one with same name. It also creates
    the new index and sets up mappings.

    Uses module globals: es (Elasticsearch client), logger, cliargs,
    config and plugins.  Exits the process (sys.exit(1)) if an index
    exists and the user declines to drop it.
    """
    logger.info('Checking es index: %s', indexname)
    # check for existing es index
    if es.indices.exists(index=indexname):
        # check if reindex cli argument and don't delete existing index
        if cliargs['reindex']:
            logger.info('Reindexing (non-recursive, preserving tags)')
            return
        elif cliargs['reindexrecurs']:
            logger.info('Reindexing (recursive, preserving tags)')
            return
        # delete existing index
        else:
            if cliargs['forcedropexisting']:
                # -F flag: drop the existing index without prompting
                logger.warning('es index exists, deleting')
                # ignore 400/404 so an already-gone index doesn't raise
                es.indices.delete(index=indexname, ignore=[400, 404])
            else:
                if user_prompt("Drop existing index?"):
                    logger.warning('es index exists, deleting')
                    es.indices.delete(index=indexname, ignore=[400, 404])
                else:
                    logger.info("Cannot continue with index. Exiting.")
                    sys.exit(1)
    # set up es index mappings and create new index
    # NOTE(review): one mapping type per doc kind (diskspace, crawlstat,
    # worker, directory, file) -- this requires an es version that still
    # supports multiple mapping types per index (5.x/6.x era); confirm
    # against the targeted es version.
    mappings = {
        "settings": {
            "index" : {
                # shard/replica counts come from diskover.cfg
                "number_of_shards": config['index_shards'],
                "number_of_replicas": config['index_replicas']
            }
        },
        "mappings": {
            # filesystem capacity snapshot docs (see add_diskspace)
            "diskspace": {
                "properties": {
                    "path": {"type": "keyword"},
                    "total": {"type": "long"},
                    "used": {"type": "long"},
                    "free": {"type": "long"},
                    "available": {"type": "long"},
                    "indexing_date": {"type": "date"}
                }
            },
            # crawl state/progress docs (see add_crawl_stats)
            "crawlstat": {
                "properties": {
                    "path": {"type": "keyword"},
                    "state": {"type": "text"},
                    "crawl_time": {"type": "float"},
                    "indexing_date": {"type": "date"}
                }
            },
            # per-worker-bot crawl statistics
            "worker": {
                "properties": {
                    "worker_name": {"type": "keyword"},
                    "dir_count": {"type": "integer"},
                    "file_count": {"type": "integer"},
                    "bulk_time": {"type": "float"},
                    "crawl_time": {"type": "float"},
                    "indexing_date": {"type": "date"}
                }
            },
            # directory docs incl. rolled-up size/item counts; the
            # change_percent_* fields are filled by the hotdirs feature
            "directory": {
                "properties": {
                    "filename": {"type": "keyword"},
                    "path_parent": {"type": "keyword"},
                    "filesize": {"type": "long"},
                    "items": {"type": "long"},
                    "items_files": {"type": "long"},
                    "items_subdirs": {"type": "long"},
                    "owner": {"type": "keyword"},
                    "group": {"type": "keyword"},
                    "last_modified": {"type": "date"},
                    "last_access": {"type": "date"},
                    "last_change": {"type": "date"},
                    "hardlinks": {"type": "integer"},
                    "inode": {"type": "keyword"},
                    "tag": {"type": "keyword"},
                    "tag_custom": {"type": "keyword"},
                    "crawl_time": {"type": "float"},
                    "change_percent_filesize": {"type": "float"},
                    "change_percent_items": {"type": "float"},
                    "change_percent_items_files": {"type": "float"},
                    "change_percent_items_subdirs": {"type": "float"},
                    "worker_name": {"type": "keyword"},
                    "indexing_date": {"type": "date"}
                }
            },
            # file docs; dupe_md5 is filled in later by the dupes finder
            "file": {
                "properties": {
                    "filename": {"type": "keyword"},
                    "extension": {"type": "keyword"},
                    "path_parent": {"type": "keyword"},
                    "filesize": {"type": "long"},
                    "owner": {"type": "keyword"},
                    "group": {"type": "keyword"},
                    "last_modified": {"type": "date"},
                    "last_access": {"type": "date"},
                    "last_change": {"type": "date"},
                    "hardlinks": {"type": "integer"},
                    "inode": {"type": "keyword"},
                    "filehash": {"type": "keyword"},
                    "tag": {"type": "keyword"},
                    "tag_custom": {"type": "keyword"},
                    "dupe_md5": {"type": "keyword"},
                    "worker_name": {"type": "keyword"},
                    "indexing_date": {"type": "date"}
                }
            }
        }
    }
    # check plugins for additional mappings
    for plugin in plugins:
        mappings = (plugin.add_mappings(mappings))
    logger.info('Creating es index')
    es.indices.create(index=indexname, body=mappings)
    # brief pause to let the freshly created index settle
    time.sleep(.5)
def index_bulk_add(es, doclist, config, cliargs):
    """Bulk add/update/delete docs in the es index.

    Sends worker crawl-result doc lists to Elasticsearch via the bulk
    helper, optionally blocking first until the cluster reports at
    least yellow health (config option 'wait').
    """
    timeout = config['es_timeout']
    # optionally wait for es health to be at least yellow
    if config['es_wait_status_yellow'] == "true":
        es.cluster.health(wait_for_status='yellow',
                          request_timeout=timeout)
    # bulk load data to Elasticsearch index
    diskover_connections.helpers.bulk(
        es, doclist, index=cliargs['index'],
        chunk_size=config['es_chunksize'], request_timeout=timeout)
def index_delete_path(path, cliargs, logger, reindex_dict, recursive=False):
    """This is the es delete path bulk function.
    It finds all file and directory docs in path and deletes them from es
    including the directory (path).
    Recursive will also find and delete all docs in subdirs of path.
    Stores any existing tags in reindex_dict.
    Returns reindex_dict.

    Uses module globals: es (Elasticsearch client) and config.
    reindex_dict must have 'file' and 'directory' list entries.
    """
    file_id_list = []
    dir_id_list = []
    file_delete_list = []
    dir_delete_list = []
    # refresh index so the searches below see the latest docs
    es.indices.refresh(index=cliargs['index'])
    # escape special characters for the es query_string syntax
    newpath = escape_chars(path)
    # create wildcard string and check for / (root) path
    if newpath == '\/':
        newpathwildcard = '\/*'
    else:
        newpathwildcard = newpath + '\/*'
    # file doc search
    if recursive:
        # match files directly in path and in any subdir of path
        data = {
            "query": {
                "query_string": {
                    "query": "path_parent: " + newpath + " OR "
                             "path_parent: " + newpathwildcard,
                    "analyze_wildcard": "true"
                }
            }
        }
    else:
        # match only files directly in path
        data = {
            "query": {
                "query_string": {
                    "query": "path_parent: " + newpath
                }
            }
        }
    logger.info('Searching for all files in %s' % path)
    # search es and start scroll
    res = es.search(index=cliargs['index'], doc_type='file', scroll='1m',
                    size=config['es_scrollsize'], body=data,
                    request_timeout=config['es_timeout'])
    while res['hits']['hits'] and len(res['hits']['hits']) > 0:
        for hit in res['hits']['hits']:
            # add doc id to file_id_list
            file_id_list.append(hit['_id'])
            # remember full path + tags so a later reindex can restore them
            reindex_dict['file'].append((hit['_source']['path_parent'] +
                                         '/' + hit['_source']['filename'],
                                         hit['_source']['tag'],
                                         hit['_source']['tag_custom']))
        # get es scroll id
        scroll_id = res['_scroll_id']
        # use es scroll api
        res = es.scroll(scroll_id=scroll_id, scroll='1m',
                        request_timeout=config['es_timeout'])
    logger.info('Found %s files for %s' % (len(file_id_list), path))
    # add file id's to delete_list
    for i in file_id_list:
        d = {
            '_op_type': 'delete',
            '_index': cliargs['index'],
            '_type': 'file',
            '_id': i
        }
        file_delete_list.append(d)
    if len(file_delete_list) > 0:
        # bulk delete files in es
        logger.info('Bulk deleting files in es index')
        index_bulk_add(es, file_delete_list, config, cliargs)
    # directory doc search
    if recursive:
        # match dirs in/under path, plus the doc for path itself
        # (filename + parent-of-path clause)
        data = {
            'query': {
                'query_string': {
                    'query': '(path_parent: ' + newpath + ') OR '
                             '(path_parent: ' + newpathwildcard + ') OR (filename: "'
                             + os.path.basename(path) + '" AND path_parent: "'
                             + os.path.abspath(os.path.join(path, os.pardir)) + '")',
                    'analyze_wildcard': 'true'
                }
            }
        }
    else:
        data = {
            'query': {
                'query_string': {
                    'query': '(path_parent: ' + newpath + ') OR (filename: "'
                             + os.path.basename(path) + '" AND path_parent: "'
                             + os.path.abspath(os.path.join(path, os.pardir)) + '")'
                }
            }
        }
    logger.info('Searching for all directories in %s' % path)
    # search es and start scroll
    res = es.search(index=cliargs['index'], doc_type='directory', scroll='1m',
                    size=config['es_scrollsize'], body=data, request_timeout=config['es_timeout'])
    while res['hits']['hits'] and len(res['hits']['hits']) > 0:
        for hit in res['hits']['hits']:
            # add directory doc id to dir_id_list
            dir_id_list.append(hit['_id'])
            # add directory path info inc. tags to reindex_dict
            reindex_dict['directory'].append((hit['_source']['path_parent'] +
                                              '/' + hit['_source']['filename'],
                                              hit['_source']['tag'],
                                              hit['_source']['tag_custom']))
        # get es scroll id
        scroll_id = res['_scroll_id']
        # use es scroll api
        res = es.scroll(scroll_id=scroll_id, scroll='1m',
                        request_timeout=config['es_timeout'])
    logger.info('Found %s directories for %s' % (len(dir_id_list), path))
    # add dir id's to delete_list
    for i in dir_id_list:
        d = {
            '_op_type': 'delete',
            '_index': cliargs['index'],
            '_type': 'directory',
            '_id': i
        }
        dir_delete_list.append(d)
    if len(dir_delete_list) > 0:
        # bulk delete directories in es
        logger.info('Bulk deleting directories in es index')
        index_bulk_add(es, dir_delete_list, config, cliargs)
    return reindex_dict
def index_get_docs(cliargs, logger, doctype='directory', copytags=False, hotdirs=False,
                   index=None, path=None, sort=False, maxdepth=None, pathid=False):
    """This is the es get docs function.
    It finds all docs (by doctype) in es and returns doclist
    which contains doc id, fullpath and mtime for all docs.
    If copytags is True will return tags from previous index.
    If path is specified will return just documents in and under directory path.
    If sort is True, will return paths in asc path order.
    if pathid is True, will return dict with path and their id.

    Uses module globals: es (Elasticsearch client), config and (for
    pathid) rootdir_path.  The copytags/hotdirs/pathid modes are
    mutually exclusive in practice -- the first truthy one wins.
    """
    # build the search body for the requested mode
    data = _index_get_docs_data(index, cliargs, logger, doctype=doctype, path=path,
                                maxdepth=maxdepth, sort=sort)
    # refresh index so the search sees the latest docs
    es.indices.refresh(index)
    # search es and start scroll
    res = es.search(index=index, doc_type=doctype, scroll='1m',
                    size=config['es_scrollsize'], body=data, request_timeout=config['es_timeout'])
    doclist = []
    pathdict = {}
    doccount = 0
    while res['hits']['hits'] and len(res['hits']['hits']) > 0:
        for hit in res['hits']['hits']:
            fullpath = os.path.abspath(os.path.join(hit['_source']['path_parent'], hit['_source']['filename']))
            if copytags:
                # (fullpath, tag, tag_custom, doctype)
                doclist.append((fullpath, hit['_source']['tag'], hit['_source']['tag_custom'], doctype))
            elif hotdirs:
                # (id, fullpath, filesize, items, items_files, items_subdirs)
                doclist.append((hit['_id'], fullpath, hit['_source']['filesize'], hit['_source']['items'],
                                hit['_source']['items_files'], hit['_source']['items_subdirs']))
            elif pathid:
                # map path relative to the crawl root onto the es doc id
                rel_path = fullpath.replace(rootdir_path, ".")
                pathdict[rel_path] = hit['_id']
            else:
                # convert es time to unix time format
                mtime = time.mktime(datetime.strptime(
                    hit['_source']['last_modified'],
                    '%Y-%m-%dT%H:%M:%S').timetuple())
                # (id, fullpath, mtime, doctype)
                doclist.append((hit['_id'], fullpath, mtime, doctype))
            doccount += 1
        # use es scroll api
        res = es.scroll(scroll_id=res['_scroll_id'], scroll='1m',
                        request_timeout=config['es_timeout'])
    logger.info('Found %s %s docs' % (str(doccount), doctype))
    if pathid:
        return pathdict
    else:
        return doclist
def _index_get_docs_data(index, cliargs, logger, doctype='directory', path=None, maxdepth=None, sort=False):
if cliargs['copytags']:
logger.info('Searching for all %s docs with tags in %s...', doctype, index)
data = {
'_source': ['path_parent', 'filename', 'tag', 'tag_custom'],
'query': {
'query_string': {
'query': 'tag:(NOT "") OR tag_custom:(NOT "")'
}
}
}
elif cliargs['hotdirs']:
logger.info('Searching for all %s docs in %s...', doctype, index)
data = {
'_source': ['path_parent', 'filename', 'filesize', 'items', 'items_files', 'items_subdirs'],
'query': {
'match_all': {}
}
}
else:
if path is None:
if maxdepth is None:
logger.info('Searching for all %s docs in %s...', doctype, index)
data = {
'_source': ['path_parent', 'filename', 'last_modified', 'last_access', 'last_change'],
'query': {
'match_all': {}
}
}
else:
# depth at rootdir
num_sep = cliargs['rootdir'].count(os.path.sep)
n = num_sep + maxdepth - 1
regexp = '(/[^/]+){1,' + str(n) + '}|/?'
logger.info('Searching for all %s docs in %s (maxdepth %s)...', doctype, index, maxdepth)
data = {
'_source': ['path_parent', 'filename', 'last_modified', 'last_access', 'last_change'],
'query': {
'regexp': {'path_parent': regexp}
}
}
else:
# escape special characters
newpath = escape_chars(path)
# create wildcard string and check for / (root) path
if newpath == '\/':
newpathwildcard = '\/*'
else:
newpathwildcard = newpath + '\/*'
logger.info('Searching for all %s docs in %s for path %s...', doctype, index, path)
data = {
'_source': ['path_parent', 'filename', 'last_modified', 'last_access', 'last_change'],
'query': {
'query_string': {
'query': '(path_parent: ' + newpath + ') OR '
'(path_parent: ' + newpathwildcard + ') OR (filename: "'
+ os.path.basename(path) + '" AND path_parent: "'
+ os.path.abspath(os.path.join(path, os.pardir)) + '")',
}
}
}
if sort:
data['sort'] = [{'path_parent': {'order': 'desc'}}]
return data
def replace_path(path):
    """Translate *path* using the --replacepath from/to pair.

    Substitutes the configured source prefix with the target prefix and
    converts Windows backslash separators to forward slashes (for bots
    running in linux).
    """
    frompath = cliargs['replacepath'][0]
    topath = cliargs['replacepath'][1]
    return path.replace(frompath, topath).replace('\\', '/')
def split_list(a, n):
    """Generator that splits list a evenly into n pieces.

    The first ``len(a) % n`` pieces receive one extra element, so piece
    sizes differ by at most one.  Empty pieces are yielded when
    ``n > len(a)``.

    Fixed: the old conditional ``if IS_PY3: xrange = range`` shim made
    ``xrange`` a local name that was never bound on Python 2, so the
    generator expression raised a NameError there.  Plain ``range``
    works on both interpreters (n is small, so a Py2 list is fine).
    """
    k, m = divmod(len(a), n)
    return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))
def add_diskspace(index, logger, path):
    """This is the add disk space function.
    It adds total, used, free and available
    disk space for a path to es.

    Uses module globals: es (Elasticsearch client) and cliargs.
    Platform detection is EAFP: os.statvfs exists on posix; on windows
    the AttributeError branch falls back to the win32 API via ctypes.
    """
    try:  # linux
        statvfs = os.statvfs(path)
        # Size of filesystem in bytes
        total = statvfs.f_frsize * statvfs.f_blocks
        # Actual number of free bytes
        free = statvfs.f_frsize * statvfs.f_bfree
        # Number of free bytes that ordinary users are allowed
        # to use (excl. reserved space)
        available = statvfs.f_frsize * statvfs.f_bavail
    except AttributeError:  # windows
        import ctypes
        total_bytes = ctypes.c_ulonglong(0)
        free_bytes = ctypes.c_ulonglong(0)
        available_bytes = ctypes.c_ulonglong(0)
        # out params are (lpFreeBytesAvailableToCaller,
        # lpTotalNumberOfBytes, lpTotalNumberOfFreeBytes)
        ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(path),
                                                   ctypes.pointer(available_bytes),
                                                   ctypes.pointer(total_bytes),
                                                   ctypes.pointer(free_bytes))
        total = total_bytes.value
        free = free_bytes.value
        available = available_bytes.value
    # store the translated path when --replacepath is in use
    if cliargs['replacepath']:
        path = replace_path(path)
    used = total - free
    indextime_utc = datetime.utcnow().isoformat()
    data = {
        "path": path,
        "total": total,
        "used": used,
        "free": free,
        "available": available,
        "indexing_date": indextime_utc
    }
    # add to es
    logger.info('Adding disk space info to es index')
    es.index(index=index, doc_type='diskspace', body=data)
def add_crawl_stats(es, index, path, crawltime, state):
    """Index a crawlstat doc recording crawl state and elapsed time.

    state is one of: running, finished_crawl, finished_dircalc.
    Written when a crawl starts and when it finishes.
    """
    doc = {
        "path": path,
        "state": state,
        # round to microsecond-ish precision for the float field
        "crawl_time": round(crawltime, 6),
        "indexing_date": datetime.utcnow().isoformat()
    }
    es.index(index=index, doc_type='crawlstat', body=doc)
def dir_excluded(path, config, cliargs):
    """Return True if path should be skipped, False if not.

    A directory is skipped when its basename or full path matches an
    entry in config['excluded_dirs']: exact match, the '.*' dot-dir
    rule, or a wildcard pattern ('*foo*' substring, '*foo' suffix,
    'foo*' prefix, other entries exact).  Entries in
    config['included_dirs'] always win (whitelist).

    Fixed wildcard handling: '*foo' previously became the regex
    '*foo$', which is invalid ("nothing to repeat") and raised
    re.error; 'foo*' became '^foo*', where the trailing '*' acted as a
    regex quantifier on the last character instead of a glob.  The '*'
    glob markers are now stripped before anchoring.
    """
    name = os.path.basename(path)
    # return if directory in included list (whitelist)
    if name in config['included_dirs'] or path in config['included_dirs']:
        return False
    # skip any dirs in excluded_dirs
    if name in config['excluded_dirs'] or path in config['excluded_dirs']:
        if cliargs['verbose']:
            logger.info('Skipping (excluded dir) %s', path)
        return True
    # skip any dirs which start with . (dot) and in excluded_dirs
    if name.startswith('.') and u'.*' in config['excluded_dirs']:
        if cliargs['verbose']:
            logger.info('Skipping (.* dir) %s', path)
        return True
    # check wildcard/regex patterns against the dir name and full path
    found_dir = False
    found_path = False
    for d in config['excluded_dirs']:
        if d == '.*':
            # handled by the dot-dir rule above
            continue
        if d.startswith('*') and d.endswith('*'):
            # '*foo*' -> substring match
            d = d.replace('*', '')
        elif d.startswith('*'):
            # '*foo' -> suffix match (strip glob marker, anchor at end)
            d = d.lstrip('*') + '$'
        elif d.endswith('*'):
            # 'foo*' -> prefix match (strip glob marker, anchor at start)
            d = '^' + d.rstrip('*')
        else:
            # no wildcard: exact name/path equality only
            if d == name:
                found_dir = True
                break
            elif d == path:
                found_path = True
                break
            continue
        if re.search(d, name):
            found_dir = True
            break
        elif re.search(d, path):
            found_path = True
            break
    if found_dir or found_path:
        if cliargs['verbose']:
            logger.info('Skipping (excluded dir) %s', path)
        return True
    return False
def escape_chars(text):
    """Return *text* with es query-string special characters escaped."""
    # escape backslashes first so later escapes aren't double-escaped
    text = text.replace('\\', '\\\\')
    # characters that must be backslash-escaped in es query strings
    chr_dict = {'\n': '\\n', '\t': '\\t',
                '/': '\\/', '(': '\\(', ')': '\\)', '[': '\\[', ']': '\\]', '$': '\\$',
                ' ': '\\ ', '&': '\\&', '<': '\\<', '>': '\\>', '+': '\\+', '-': '\\-',
                '|': '\\|', '!': '\\!', '{': '\\{', '}': '\\}', '^': '\\^', '~': '\\~',
                '?': '\\?', ':': '\\:', '=': '\\=', '\'': '\\\'', '"': '\\"', '@': '\\@',
                '.': '\\.', '#': '\\#', '*': '\\*'}
    if IS_PY3:
        # single C-level pass via str.translate
        return text.translate(str.maketrans(chr_dict))
    # python 2 fallback: sequential replace per mapping entry
    for key, value in chr_dict.items():
        text = text.replace(key, value)
    return text
def get_time(seconds):
    """Return *seconds* as a human readable 'Nd:Nh:NNm:NNs' string."""
    minutes, secs = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    return "%dd:%dh:%02dm:%02ds" % (days, hours, minutes, secs)
def convert_size(size_bytes):
    """Return a human readable string for a byte count."""
    if size_bytes == 0:
        return "0B"
    units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    # pick the largest unit whose value is >= 1
    exponent = int(math.floor(math.log(size_bytes, 1024)))
    scaled = round(size_bytes / math.pow(1024, exponent), 2)
    return "%s %s" % (scaled, units[exponent])
def parse_cli_args(indexname):
    """This is the parse CLI arguments function.
    It parses command line arguments.

    indexname -- default es index name (from config), used when -i is
    not given on the command line.  Returns the parsed argparse
    namespace with the index name lower-cased (es requires lowercase
    index names).
    """
    parser = argparse.ArgumentParser()
    # crawl scope and filters
    parser.add_argument("-d", "--rootdir", metavar='ROOTDIR', default=".",
                        help="Directory to start crawling from (default: .)")
    parser.add_argument("-m", "--mtime", metavar='DAYS', default=0, type=int,
                        help="Minimum (+num) / maximum (-num) days ago for file modified time (default: 0)")
    parser.add_argument("-s", "--minsize", metavar='BYTES', default=1, type=int,
                        help="Minimum file size in Bytes (default: 1 Bytes)")
    parser.add_argument("-e", "--indexemptydirs", action="store_true",
                        help="Index empty directories (default: don't index)")
    parser.add_argument("-i", "--index", default=indexname,
                        help="Elasticsearch index name (default: from config)")
    parser.add_argument("-M", "--maxdepth", type=int, default=None,
                        help="Maximum directory depth to crawl (default: None)")
    parser.add_argument("-c", "--maxdcdepth", type=int, default=None,
                        help="Maximum directory depth to calculate directory sizes/items (default: None)")
    # batching and worker tuning
    parser.add_argument("-b", "--batchsize", type=int, default=50,
                        help="Batch size (dir count) for sending to worker bots (default: 50)")
    parser.add_argument("-a", "--adaptivebatch", action="store_true",
                        help="Adaptive batch size for sending to worker bots (intelligent crawl)")
    parser.add_argument("-T", "--walkthreads", type=int, default=cpu_count()*2,
                        help="Number of threads for treewalk (default: cpu core count x 2)")
    parser.add_argument("-A", "--autotag", action="store_true",
                        help="Get bots to auto-tag files/dirs based on patterns in config")
    parser.add_argument("-S", "--sizeondisk", action="store_true",
                        help="Store size on disk (disk usage size) using block count x blocksize instead of file size")
    parser.add_argument("-B", "--blocksize", type=int, metavar='BLOCKSIZE', default=512,
                        help="Blocksize (in bytes) used for --sizeondisk (default: 512)")
    parser.add_argument("-O", "--optimizeindex", action="store_true",
                        help="Optimize index at end of crawl (reduce size)")
    # index lifecycle modes
    parser.add_argument("-r", "--reindex", action="store_true",
                        help="Reindex directory (non-recursive), data is added to existing index")
    parser.add_argument("-R", "--reindexrecurs", action="store_true",
                        help="Reindex directory and all subdirs (recursive), data is added to existing index")
    parser.add_argument("-F", "--forcedropexisting", action="store_true",
                        help="Silently drop an existing index (if present)")
    # post-crawl analysis modes
    parser.add_argument("-D", "--finddupes", action="store_true",
                        help="Find duplicate files in existing index and update their dupe_md5 field")
    parser.add_argument("-C", "--copytags", metavar='INDEX2',
                        help="Copy tags from index2 to index")
    parser.add_argument("-H", "--hotdirs", metavar='INDEX2',
                        help="Find hot dirs by calculating change percents from index2 (prev index) and update \
                        change_percent fields in index")
    # socket server / remote modes
    parser.add_argument("-l", "--listen", action="store_true",
                        help="Start tcp socket server and listen for remote commands")
    parser.add_argument("-L", "--listentwc", action="store_true",
                        help="Start tcp socket server and listen for messages from diskover treewalk client")
    parser.add_argument("--twcport", type=int, metavar='PORT',
                        help="Port number for tree walk client socket server (default: from config)")
    parser.add_argument("--dirsonly", action="store_true",
                        help="Don't include files in batch sent to bots, only send dirs, bots scan for files")
    parser.add_argument("--replacepath", nargs=2, metavar="PATH",
                        help="Replace path, example: --replacepath Z:\\ /mnt/share/")
    parser.add_argument("--crawlapi", action="store_true",
                        help="Use storage Restful API instead of scandir")
    parser.add_argument("--storagent", metavar='HOST', nargs='+',
                        help="Use diskover Storage Agent instead of scandir")
    parser.add_argument("--dircalcsonly", action="store_true",
                        help="Calculate sizes and item counts for each directory doc in existing index \
                        (done automatically after each crawl)")
    # misc / output options
    parser.add_argument("--gourcert", action="store_true",
                        help="Get realtime crawl data from ES for gource")
    parser.add_argument("--gourcemt", action="store_true",
                        help="Get file mtime data from ES for gource")
    parser.add_argument("-q", "--quiet", action="store_true",
                        help="Runs with no output")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Increase output verbosity")
    parser.add_argument("--debug", action="store_true",
                        help="Debug message output")
    parser.add_argument("--listplugins", action="store_true",
                        help="List plugins")
    parser.add_argument("-V", "--version", action="version",
                        version="diskover v%s" % version,
                        help="Prints version and exits")
    args = parser.parse_args()
    # es requires lowercase index names
    if args.index:
        args.index = args.index.lower()
    return args
def log_setup(cliargs):
    """Configure logging for diskover and return the diskover logger.

    Colorizes level names with ANSI escapes, keeps noisy third-party
    loggers (elasticsearch, urllib3, requests) at WARNING by default,
    and honors the verbose/debug/quiet cli flags.
    """
    diskover_logger = logging.getLogger('diskover')
    diskover_logger.setLevel(logging.INFO)
    es_logger = logging.getLogger('elasticsearch')
    urllib3_logger = logging.getLogger('urllib3')
    requests_logger = logging.getLogger('requests')
    # third-party loggers are noisy at INFO; default them to WARNING
    for extlogger in (es_logger, urllib3_logger, requests_logger):
        extlogger.setLevel(logging.WARNING)
    # wrap level names in ANSI colors for console readability
    colors = {logging.INFO: "1;32", logging.WARNING: "1;31",
              logging.ERROR: "1;41", logging.DEBUG: "1;33"}
    for level, color in colors.items():
        logging.addLevelName(
            level, "\033[%sm%s\033[1;0m" % (color, logging.getLevelName(level)))
    logging.basicConfig(
        format='%(asctime)s [%(levelname)s][%(name)s] %(message)s',
        level=logging.INFO)
    all_loggers = (diskover_logger, es_logger, urllib3_logger, requests_logger)
    if cliargs['verbose']:
        for lgr in all_loggers:
            lgr.setLevel(logging.INFO)
    if cliargs['debug']:
        for lgr in all_loggers:
            lgr.setLevel(logging.DEBUG)
    if cliargs['quiet']:
        for lgr in all_loggers:
            lgr.disabled = True
    return diskover_logger
def progress_bar(event):
    """Return a progressbar suited to *event*.

    'Checking'/'Calculating' events have no known total, so they get an
    animated spinner with a live queue counter; everything else gets a
    percentage bar with timer and ETA.
    """
    if event in ('Checking', 'Calculating'):
        widgets = [progressbar.AnimatedMarker(), ' ', event + ' (Queue: ',
                   progressbar.Counter(), ') ', progressbar.Timer()]
        return progressbar.ProgressBar(widgets=widgets, max_value=progressbar.UnknownLength)
    widgets = [event + ' ', progressbar.Bar(), progressbar.Percentage(),
               ' (', progressbar.Timer(), ', ', progressbar.ETA(), ')']
    return progressbar.ProgressBar(widgets=widgets, max_value=100)
def adaptive_batch(q, cliargs, batchsize):
    """Auto-adjust the batch size sent to rq.

    Shrinks the batch by ab_step (never below ab_start) when the queue
    is empty, grows it by ab_step (never above ab_max) when jobs are
    waiting.  The new size is stored in cliargs['batchsize'] and
    returned.  Could be made better :)
    """
    if len(q) == 0:
        # queue drained: bots are keeping up, send smaller batches
        shrunk = batchsize - ab_step
        if shrunk >= ab_start:
            batchsize = shrunk
    else:
        # jobs waiting: send bigger batches, capped at ab_max
        grown = batchsize + ab_step
        if grown <= ab_max:
            batchsize = grown
    cliargs['batchsize'] = batchsize
    return batchsize
def calc_dir_sizes(cliargs, logger, path=None):
    """Enqueue directory-size calculation jobs for the rq worker bots.

    Scrolls all 'directory' docs in the ES index (optionally restricted to
    *path*, down to cliargs['maxdcdepth']) and sends them to the q_calc
    queue in batches. Blocks until the worker bots finish. Uses module
    globals: q, q_crawl, q_calc, es, config, ab_start.
    Exits the process on Ctrl-c.
    """
    from diskover_bot_module import calc_dir_size
    jobcount = 0
    # max depth to calc dir sizes
    maxdepth = cliargs['maxdcdepth']
    index = cliargs['index']
    try:
        # wait for worker bots to be idle and all queues are empty
        logger.info('Waiting for diskover worker bots to be done with any jobs in rq...')
        while worker_bots_busy([q, q_crawl, q_calc]):
            time.sleep(1)
        if cliargs['adaptivebatch']:
            batchsize = ab_start
        else:
            batchsize = cliargs['batchsize']
        if cliargs['verbose'] or cliargs['debug']:
            logger.info('Batch size: %s' % batchsize)
        # use generator and yield docs while scrolling index in es
        logger.info('Getting diskover bots to calculate directory sizes (maxdepth %s)...' % maxdepth)
        if not cliargs['quiet'] and not cliargs['debug'] and not cliargs['verbose']:
            bar = progress_bar('Calculating')
            bar.start()
        else:
            bar = None
        data = _index_get_docs_data(index, cliargs, logger, path=path, maxdepth=maxdepth)
        # refresh index
        es.indices.refresh(index)
        starttime = time.time()
        # search es and start scroll
        res = es.search(index=index, doc_type='directory', scroll='1m',
                        size=config['es_scrollsize'], body=data, request_timeout=config['es_timeout'])
        dirlist = []
        dircount = 0
        while res['hits']['hits'] and len(res['hits']['hits']) > 0:
            for hit in res['hits']['hits']:
                fullpath = os.path.join(hit['_source']['path_parent'], hit['_source']['filename'])
                # convert es time to unix time format
                mtime = time.mktime(datetime.strptime(hit['_source']['last_modified'],
                                                      '%Y-%m-%dT%H:%M:%S').timetuple())
                atime = time.mktime(datetime.strptime(hit['_source']['last_access'],
                                                      '%Y-%m-%dT%H:%M:%S').timetuple())
                ctime = time.mktime(datetime.strptime(hit['_source']['last_change'],
                                                      '%Y-%m-%dT%H:%M:%S').timetuple())
                dirlist.append((hit['_id'], fullpath, mtime, atime, ctime))
                dircount += 1
                dirlist_len = len(dirlist)
                if dirlist_len >= batchsize:
                    q_calc.enqueue(calc_dir_size, args=(dirlist, cliargs,), result_ttl=config['redis_ttl'])
                    jobcount += 1
                    if cliargs['debug'] or cliargs['verbose']:
                        logger.info("enqueued batchsize: %s (batchsize: %s)" % (dirlist_len, batchsize))
                    del dirlist[:]
                    if cliargs['adaptivebatch']:
                        # NOTE(review): adapts against q_crawl even though jobs
                        # go to q_calc — looks like a copy from treewalk; confirm
                        batchsize = adaptive_batch(q_crawl, cliargs, batchsize)
                        if cliargs['debug'] or cliargs['verbose']:
                            logger.info("batchsize set to: %s" % batchsize)
            # update progress bar
            if bar:
                try:
                    bar.update(len(q_calc))
                except (ZeroDivisionError, ValueError):
                    bar.update(0)
            # use es scroll api
            res = es.scroll(scroll_id=res['_scroll_id'], scroll='1m',
                            request_timeout=config['es_timeout'])
        # enqueue dir calc job for any remaining in dirlist
        if len(dirlist) > 0:
            q_calc.enqueue(calc_dir_size, args=(dirlist, cliargs,), result_ttl=config['redis_ttl'])
            jobcount += 1
        logger.info('Found %s directory docs' % str(dircount))
        # set up progress bar with time remaining
        if bar:
            bar.finish()
            bar_max_val = len(q_calc)
            bar = progressbar.ProgressBar(max_value=bar_max_val)
            bar.start()
        # update progress bar until all worker bots are idle and q_calc queue is empty
        while worker_bots_busy([q_calc]):
            if bar:
                q_len = len(q_calc)
                try:
                    bar.update(bar_max_val - q_len)
                except (ZeroDivisionError, ValueError):
                    bar.update(0)
            time.sleep(1)
        if bar:
            bar.finish()
        elapsed = get_time(time.time() - starttime)
        logger.info('Finished calculating %s directory sizes in %s' % (dircount, elapsed))
    except KeyboardInterrupt:
        print("Ctrl-c keyboard interrupt, shutting down...")
        sys.exit(0)
def scandirwalk_worker(threadn, cliargs, logger):
    """Tree-walk worker thread: list one directory per loop iteration.

    Pulls paths from the module-global q_paths queue, lists each directory
    via one of three backends (crawl api, storage agent, or local scandir),
    filters excluded dirs, and pushes (path, dirs, nondirs) tuples onto
    q_paths_results. q_paths_in_progress tracks in-flight paths so
    scandirwalk() can detect completion. Runs forever as a daemon thread.
    OS/IO and Unicode errors are logged and the path is skipped; any other
    exception is re-raised.
    """
    dirs = []
    nondirs = []
    # check if we are using storage agent and make connection
    if cliargs['storagent']:
        stor_agent = True
        hostlist = cliargs['storagent']
        stor_agent_conn = diskover_agent.AgentConnection(hosts=hostlist)
        stor_agent_conn.connect()
        if cliargs['debug'] or cliargs['verbose']:
            logger.info("[thread-%s] Connected to Storage Agent host: %s" % (threadn, stor_agent_conn.conn_host()))
    else:
        stor_agent = False
    while True:
        path = q_paths.get()
        try:
            # mark path as in-flight before doing any work on it
            q_paths_in_progress.put(path)
            if cliargs['debug'] or cliargs['verbose']:
                logger.info("[thread-%s] scandirwalk_worker: %s" % (threadn, path))
            if cliargs['crawlapi']:
                root, api_dirs, api_nondirs = api_listdir(path, api_ses)
                path = root
                for d in api_dirs:
                    if not dir_excluded(d[0], config, cliargs):
                        dirs.append(d)
                if not cliargs['dirsonly']:
                    for f in api_nondirs:
                        nondirs.append(f)
                del api_dirs[:]
                del api_nondirs[:]
            elif stor_agent:
                # grab dir list from storage agent server
                dir_list = stor_agent_conn.listdir(path)
                logger.debug("[thread-%s] scandirwalk_worker: Storage Agent host response time: %s" % (threadn, stor_agent_conn.response_time()))
                path, dirs_noexcl, nondirs = dir_list
                for d in dirs_noexcl:
                    if not dir_excluded(d, config, cliargs):
                        dirs.append(d)
            else:
                item_count = 0
                for entry in scandir(path):
                    if entry.is_dir(follow_symlinks=False) and not dir_excluded(entry.path, config, cliargs):
                        dirs.append(entry.name)
                    elif entry.is_file(follow_symlinks=False) and not cliargs['dirsonly']:
                        nondirs.append(entry.name)
                    # NOTE(review): once item_count hits 100000 it stops
                    # incrementing, so this message repeats for every further
                    # entry in the directory — confirm that is intended
                    if item_count == 100000:
                        if cliargs['debug'] or cliargs['verbose']:
                            logger.info("[thread-%s] scandirwalk_worker: processing directory with many files: %s" % (threadn, path))
                    else:
                        item_count += 1
            # copy the lists; the originals are cleared in finally below
            q_paths_results.put((path, dirs[:], nondirs[:]))
        except (OSError, IOError) as e:
            logger.warning("[thread-%s] OS/IO Exception caused by: %s" % (threadn, e))
            pass
        except UnicodeDecodeError as e:
            logger.warning("[thread-%s] Unicode Decode Exception caused by: %s (path: %s)" % (threadn, e, path))
            pass
        except Exception as e:
            logger.error("[thread-%s] Exception caused by: %s" % (threadn, e))
            raise
        finally:
            # remove one in-flight marker and reset scratch lists for next path
            q_paths_in_progress.get()
            del dirs[:]
            del nondirs[:]
            q_paths.task_done()
def scandirwalk(path, cliargs, logger):
    """Generator yielding (root, dirs, nondirs) for the tree rooted at path.

    Seeds the module-global q_paths queue with *path*; scandirwalk_worker
    threads list directories and push results onto q_paths_results, which
    this generator consumes. After yielding a result, each subdirectory is
    queued for further walking. Terminates when the results queue, the
    pending-paths queue and the in-progress queue are all empty.
    """
    q_paths.put(path)
    while True:
        entry = q_paths_results.get()
        root, dirs, nondirs = entry
        if cliargs['debug'] or cliargs['verbose']:
            if cliargs['crawlapi']:
                logger.info("apiwalk: %s (dircount: %s, filecount: %s)" % (root[0], str(len(dirs)), str(len(nondirs))))
            else:
                logger.info("scandirwalk: %s (dircount: %s, filecount: %s)" % (root, str(len(dirs)), str(len(nondirs))))
        # yield before recursion
        yield root, dirs, nondirs
        # recurse into subdirectories
        if cliargs['crawlapi']:
            for d in dirs:
                # api dir entries are tuples; d[0] is the path
                q_paths.put(d[0])
        else:
            for name in dirs:
                new_path = os.path.join(root, name)
                q_paths.put(new_path)
        q_paths_results.task_done()
        # grace period: give workers a moment to post results before the
        # final all-empty check below decides the walk is finished
        # NOTE(review): qsize() checks are inherently racy across threads;
        # presumably the 0.5s sleep is the mitigation — confirm
        if q_paths_results.qsize() == 0 and q_paths.qsize() == 0:
            time.sleep(.5)
        if q_paths_results.qsize() == 0 and q_paths.qsize() == 0 and q_paths_in_progress.qsize() == 0:
            break
def treewalk(top, num_sep, level, batchsize, cliargs, logger, reindex_dict):
    """This is the tree walk function.
    It walks the tree and adds tuple of directory and it's items
    to redis queue for rq worker bots to scrape meta and upload
    to ES index after batch size (dir count) has been reached.

    Args:
        top: root path to start walking from.
        num_sep: path-separator count of the root (baseline depth).
        level: max depth below the root to descend (from cliargs['maxdepth']).
        batchsize: initial number of dirs per rq job.
        cliargs: parsed cli arguments dict.
        logger: logger instance.
        reindex_dict: existing docs to reuse when reindexing.

    Uses module globals: q_crawl, config, progressbar. Blocks until all
    enqueued crawl jobs are done.
    """
    from diskover_bot_module import scrape_tree_meta
    batch = []
    dircount = 0
    totaldirs = 0
    totalfiles = 0
    starttime = time.time()
    # set up threads for tree walk
    for i in range(cliargs['walkthreads']):
        t = Thread(target=scandirwalk_worker, args=(i, cliargs, logger,))
        t.daemon = True
        t.start()
    # set up progress bar
    if not cliargs['quiet'] and not cliargs['debug'] and not cliargs['verbose']:
        widgets = [progressbar.AnimatedMarker(), ' Crawling (Queue: ', progressbar.Counter(),
                   progressbar.FormatLabel(''), ') ', progressbar.Timer()]
        bar = progressbar.ProgressBar(widgets=widgets, max_value=progressbar.UnknownLength)
        bar.start()
    else:
        bar = None
    bartimestamp = time.time()
    for root, dirs, files in scandirwalk(top, cliargs, logger):
        dircount += 1
        totaldirs += 1
        files_len = len(files)
        dirs_len = len(dirs)
        # check for empty dirs
        if not cliargs['indexemptydirs'] and not cliargs['dirsonly']:
            if dirs_len == 0 and files_len == 0:
                if cliargs['debug'] or cliargs['verbose']:
                    logger.info("skipping empty dir: %s" % root)
                continue
        totalfiles += files_len
        # replace path if cliarg
        if cliargs['replacepath']:
            root = replace_path(root)
        if cliargs['dirsonly']:
            batch.append((root, dirs))
        else:
            batch.append((root, dirs, files))
        batch_len = len(batch)
        # flush the batch when it is full, or (adaptive mode) when the
        # accumulated file count exceeds the configured ceiling
        if batch_len >= batchsize or (cliargs['adaptivebatch'] and totalfiles >= config['adaptivebatch_maxfiles']):
            q_crawl.enqueue(scrape_tree_meta, args=(batch, cliargs, reindex_dict,),
                            result_ttl=config['redis_ttl'])
            if cliargs['debug'] or cliargs['verbose']:
                logger.info("enqueued batchsize: %s (batchsize: %s)" % (batch_len, batchsize))
            del batch[:]
            totalfiles = 0
            if cliargs['adaptivebatch']:
                batchsize = adaptive_batch(q_crawl, cliargs, batchsize)
                if cliargs['debug'] or cliargs['verbose']:
                    logger.info("batchsize set to: %s" % batchsize)
        # check if at maxdepth level and delete dirs/files lists to not
        # descend further down the tree
        if cliargs['maxdepth']:
            num_sep_this = root.count(os.path.sep)
            if num_sep + level <= num_sep_this:
                # clearing dirs in place stops scandirwalk from queuing them
                del dirs[:]
                del files[:]
        # update progress bar
        if bar:
            try:
                # refresh the dirs/sec label at most every 2 seconds
                if time.time() - bartimestamp >= 2:
                    elapsed = round(time.time() - bartimestamp, 3)
                    dirspersec = round(dircount / elapsed, 3)
                    widgets[4] = progressbar.FormatLabel(', ' + str(dirspersec) + ' dirs/sec) ')
                    bartimestamp = time.time()
                    dircount = 0
                bar.update(len(q_crawl))
            except (ZeroDivisionError, ValueError):
                bar.update(0)
    # add any remaining in batch to queue
    if len(batch) > 0:
        q_crawl.enqueue(scrape_tree_meta, args=(batch, cliargs, reindex_dict,), result_ttl=config['redis_ttl'])
    # set up progress bar with time remaining
    if bar:
        bar.finish()
        bar_max_val = len(q_crawl)
        bar = progressbar.ProgressBar(max_value=bar_max_val)
        bar.start()
    # update progress bar until bots are idle and queue is empty
    while worker_bots_busy([q_crawl]):
        if bar:
            q_len = len(q_crawl)
            try:
                bar.update(bar_max_val - q_len)
            except (ZeroDivisionError, ValueError):
                bar.update(0)
        time.sleep(1)
    if bar:
        bar.finish()
    elapsed = time.time() - starttime
    dirspersec = round(totaldirs / elapsed, 3)
    elapsed = get_time(elapsed)
    logger.info("Finished crawling in %s, dirs walked %s (%s dirs/sec)" %
                (elapsed, totaldirs, dirspersec))
def crawl_tree(path, cliargs, logger, reindex_dict):
    """This is the crawl tree function.
    It sets up the directory tree walking.

    Waits for worker bots, picks the batch size (fixed or adaptive), then
    either hands off to the tree-walk socket server (--listentwc) or runs
    treewalk() directly. Returns the crawl start time (epoch seconds).
    Exits the process on Ctrl-c.
    """
    try:
        wait_for_worker_bots(logger)
        logger.info('Enqueueing crawl to diskover worker bots for %s...', path)
        if cliargs['autotag']:
            logger.info("Worker bots set to auto-tag (-A)")
        if cliargs['sizeondisk']:
            logger.info("Storing on disk size instead of file size using a blocksize of %s (-S)" % cliargs['blocksize'])
        if cliargs['adaptivebatch']:
            batchsize = ab_start
            cliargs['batchsize'] = batchsize
            logger.info("Sending adaptive batches to worker bots (-a)")
            if cliargs['verbose'] or cliargs['debug']:
                logger.info('Batch size: %s' % batchsize)
        else:
            batchsize = cliargs['batchsize']
            if cliargs['verbose'] or cliargs['debug']:
                logger.info('Batch size: %s' % batchsize)
            logger.info("Sending batches of %s to worker bots", batchsize)
            if batchsize < 50:
                logger.warning("Using a small batch size can decrease performance")
        # set maxdepth level to 1 if reindex
        if cliargs['reindex']:
            level = 1
            cliargs['maxdepth'] = 1
        else:
            level = cliargs['maxdepth']
        # set current depth
        num_sep = path.count(os.path.sep)
        # check for listentwc socket cli flag to start socket server
        if cliargs['listentwc']:
            from diskover_socket_server import start_socket_server_twc
            # NOTE(review): passes the module-global rootdir_path rather than
            # the path parameter — equivalent today since the only caller
            # passes rootdir_path, but confirm before adding other callers
            starttime = start_socket_server_twc(rootdir_path, num_sep, level, batchsize, cliargs, logger, reindex_dict)
            return starttime
        starttime = time.time()
        logger.info("Starting crawl using %s treewalk threads (maxdepth %s)" % (cliargs['walkthreads'], cliargs['maxdepth']))
        # start tree walking
        treewalk(path, num_sep, level, batchsize, cliargs, logger, reindex_dict)
        return starttime
    except KeyboardInterrupt:
        print("Ctrl-c keyboard interrupt, shutting down...")
        sys.exit(0)
def hotdirs():
    """This is the calculate hot dirs function.

    Enqueues batches of directory docs from the current index to the rq
    worker bots, which calculate change percent against the index given by
    --hotdirs. Blocks until the bots finish. Uses module globals: logger,
    cliargs, config, q, ab_start.
    """
    # Note: the docstring previously sat *after* this import, making it a
    # plain string statement instead of the function docstring.
    from diskover_bot_module import calc_hot_dirs
    logger.info('Getting diskover bots to calculate change percent '
                'for directories from %s to %s',
                cliargs['hotdirs'], cliargs['index'])
    # look in index for all directory docs and add to queue
    dirlist = index_get_docs(cliargs, logger, doctype='directory', hotdirs=True, index=cliargs['index'])
    dirbatch = []
    if cliargs['adaptivebatch']:
        batchsize = ab_start
    else:
        batchsize = cliargs['batchsize']
    if cliargs['verbose'] or cliargs['debug']:
        logger.info('Batch size: %s' % batchsize)
    for d in dirlist:
        dirbatch.append(d)
        if len(dirbatch) >= batchsize:
            q.enqueue(calc_hot_dirs, args=(dirbatch, cliargs,), result_ttl=config['redis_ttl'])
            del dirbatch[:]
            if cliargs['adaptivebatch']:
                batchsize = adaptive_batch(q, cliargs, batchsize)
    # add any remaining in batch to queue
    # (guarded: previously an empty batch was enqueued as a no-op job,
    # unlike treewalk/calc_dir_sizes which skip empty batches)
    if len(dirbatch) > 0:
        q.enqueue(calc_hot_dirs, args=(dirbatch, cliargs,), result_ttl=config['redis_ttl'])
    if not cliargs['quiet'] and not cliargs['debug'] and not cliargs['verbose']:
        bar = progress_bar('Checking')
        bar.start()
    else:
        bar = None
    # update progress bar until all bots are idle and q queue is empty
    while worker_bots_busy([q]):
        if bar:
            try:
                bar.update(len(q))
            except (ZeroDivisionError, ValueError):
                bar.update(0)
        time.sleep(1)
    if bar:
        bar.finish()
def worker_bots_busy(queues):
    """This is the worker bots busy function.
    It returns True when bots are busy and queues have jobs,
    else returns False when bots are all idle and queues are empty.

    Args:
        queues: list of rq Queue objects to check.

    Returns:
        bool: True if any worker is busy or any queue has queued/started
        jobs; False when everything is idle and empty.
    """
    # any bot actively working on a job?
    workers = SimpleWorker.all(connection=redis_conn)
    for worker in workers:
        # NOTE(review): reads rq's private _state attribute — confirm
        # worker.get_state() isn't available in the pinned rq version
        if worker._state == "busy":
            return True
    # any queued or started (in-flight) jobs left on the given queues?
    for qname in queues:
        if len(qname) > 0:
            return True
        r = StartedJobRegistry(queue=qname)
        if len(r.get_job_ids()) > 0:
            return True
    return False
def wait_for_worker_bots(logger):
    """Block until at least one diskover RQ worker bot is registered.

    Polls the Redis connection every 2 seconds and logs while waiting,
    then logs how many bots were found.
    """
    while True:
        bots = SimpleWorker.all(connection=redis_conn)
        if len(bots) > 0:
            break
        logger.info('Waiting for diskover worker bots to start...')
        time.sleep(2)
    logger.info('Found %s diskover RQ worker bots', len(bots))
def tune_es_for_crawl(defaults=False):
    """This is the tune es for crawl function.
    It optimizes ES for crawling based on config settings and after crawl is over
    sets back to defaults.

    Args:
        defaults: False (default) applies crawl-tuned settings before the
            crawl; True restores default settings afterwards and force
            merges (and optionally optimizes) the index.
    Uses module globals: config, es, cliargs, logger, exceptions.
    """
    if config['disable_replicas'] == 'true':
        replicas = 0
    else:
        replicas = config['index_replicas']
    default_settings = {
        "index": {
            "refresh_interval": "1s",
            "number_of_replicas": config['index_replicas'],
            "translog.flush_threshold_size": "512mb"
        }
    }
    tuned_settings = {
        "index": {
            "refresh_interval": config['index_refresh'],
            "number_of_replicas": replicas,
            "translog.flush_threshold_size": config['index_translog_size']
        }
    }
    if not defaults:
        logger.info("Tuning ES index settings for crawl")
        es.indices.put_settings(index=cliargs['index'], body=tuned_settings,
                                request_timeout=config['es_timeout'])
    else:
        logger.info("Setting ES index settings back to defaults")
        es.indices.put_settings(index=cliargs['index'], body=default_settings,
                                request_timeout=config['es_timeout'])
        # set logging level for es to ERROR to not output any warnings about timeouts for index optimizing
        logging.getLogger('elasticsearch').setLevel(logging.ERROR)
        logger.info("Force merging ES index...")
        es.indices.forcemerge(index=cliargs['index'], request_timeout=config['es_timeout'])
        # check if we should optimize index
        if cliargs['optimizeindex']:
            logger.info('Optimizing ES index... this could take a while... (-O)')
            try:
                # merge down to a single segment for read performance
                es.indices.forcemerge(index=cliargs['index'], max_num_segments=1, request_timeout=config['es_timeout'])
            except exceptions.ConnectionTimeout:
                logger.info("Optimizing timed out, will finish in background")
                pass
def post_crawl_tasks():
    """This is the post crawl tasks function.
    It runs at the end of the crawl and does post tasks.

    Records crawl-stat timings in ES, runs directory size calculation,
    waits for the bots to drain when reindexing, and restores default ES
    index settings. Uses module globals: es, cliargs, rootdir_path,
    starttime, logger, q, q_crawl, q_calc.
    """
    # add elapsed time crawl stat to es
    add_crawl_stats(es, cliargs['index'], rootdir_path, (time.time() - starttime), "finished_crawl")
    # calculate directory sizes and items
    # (restrict to the reindexed path only when reindexing)
    if cliargs['reindex'] or cliargs['reindexrecurs']:
        calc_path = rootdir_path
    else:
        calc_path = None
    calc_dir_sizes(cliargs, logger, path=calc_path)
    # add elapsed time crawl stat to es
    add_crawl_stats(es, cliargs['index'], rootdir_path, (time.time() - starttime), "finished_dircalc")
    if cliargs['reindex'] or cliargs['reindexrecurs']:
        # wait for worker bots to be idle and all queues are empty
        logger.info('Waiting for diskover worker bots to be done with any jobs in rq...')
        while worker_bots_busy([q, q_crawl, q_calc]):
            time.sleep(1)
    # set Elasticsearch index settings back to default
    tune_es_for_crawl(defaults=True)
def pre_crawl_tasks():
    """Run setup tasks before the crawl starts.

    Creates the ES index, records a 'running' crawl stat, applies
    crawl-tuned ES settings, and (for fresh crawls) stores disk space info.
    Uses module globals: es, cliargs, rootdir_path, logger, api_ses.
    """
    # create Elasticsearch index
    index_create(cliargs['index'])
    # add crawl stat to index
    add_crawl_stats(es, cliargs['index'], rootdir_path, 0, "running")
    # optimize Elasticsearch index settings for crawling
    tune_es_for_crawl()
    # add disk space info to es index
    if not cliargs['reindex'] and not cliargs['reindexrecurs']:
        if cliargs['crawlapi']:
            from diskover_crawlapi import api_add_diskspace
            api_add_diskspace(es, cliargs['index'], rootdir_path, api_ses, logger)
        else:
            add_diskspace(cliargs['index'], logger, rootdir_path)
# ---- module-level setup: config, connections, and queues ----
# load config file into config dictionary
config, configfile = load_config()
# set adaptive batch sizes from config
ab_start = config['adaptivebatch_startsize']
ab_max = config['adaptivebatch_maxsize']
ab_step = config['adaptivebatch_stepsize']
# load any available plugins
plugins = load_plugins()
import diskover_connections
# create Elasticsearch connection
diskover_connections.connect_to_elasticsearch()
from diskover_connections import es_conn as es
from diskover_connections import exceptions
# create Redis connection
diskover_connections.connect_to_redis()
from diskover_connections import redis_conn
# Redis queue names
listen = [config['redis_queue'], config['redis_queue_crawl'], config['redis_queue_calcdir']]
# set up Redis rq queues: default, crawl, and dir-size calc
q = Queue(listen[0], connection=redis_conn, default_timeout=config['redis_rq_timeout'])
q_crawl = Queue(listen[1], connection=redis_conn, default_timeout=config['redis_rq_timeout'])
q_calc = Queue(listen[2], connection=redis_conn, default_timeout=config['redis_rq_timeout'])
# in-process queues for the treewalk threads (pending paths, listing
# results, and in-flight paths used to detect walk completion)
q_paths = PyQueue()
q_paths_results = PyQueue()
q_paths_in_progress = PyQueue()
lock = Lock()
if __name__ == "__main__":
    # ---- dispatcher entry point: parse args, handle one-shot modes,
    # ---- validate rootdir, then run the crawl ----
    # parse cli arguments into cliargs dictionary
    cliargs = vars(parse_cli_args(config['index']))
    # set up logging
    logger = log_setup(cliargs)
    if not cliargs['quiet'] and not cliargs['gourcert'] and not cliargs['gourcemt']:
        # print random banner
        print_banner(version)
        logger.info("Using config file: %s" % configfile)
    # list plugins
    if cliargs['listplugins']:
        print("diskover plugins:")
        list_plugins()
        sys.exit(0)
    # run just dir calcs if cli arg
    if cliargs['dircalcsonly']:
        calc_dir_sizes(cliargs, logger)
        sys.exit(0)
    try:
        # check index name: must look like diskover-<string>
        if cliargs['index'] == "diskover" or \
                cliargs['index'].split('-')[0] != "diskover":
            print('Please name your index: diskover-<string>')
            sys.exit(1)
    except IndexError:
        print('Please name your index: diskover-<string>')
        sys.exit(1)
    # check for listen socket cli flag to start socket server
    if cliargs['listen']:
        from diskover_socket_server import start_socket_server
        start_socket_server(cliargs, logger)
        sys.exit(0)
    # check for gource cli flags
    if cliargs['gourcert'] or cliargs['gourcemt']:
        try:
            from diskover_gource import gource
            gource(es, cliargs)
        except KeyboardInterrupt:
            print('\nCtrl-c keyboard interrupt received, exiting')
        sys.exit(0)
    # tag duplicate files if cli argument
    if cliargs['finddupes']:
        from diskover_dupes import dupes_finder
        wait_for_worker_bots(logger)
        # Set up worker threads for duplicate file checker queue
        dupes_finder(es, q, cliargs, logger)
        logger.info('DONE checking for dupes! Sayonara!')
        sys.exit(0)
    # copy tags from index2 to index if cli argument
    if cliargs['copytags']:
        from diskover_bot_module import tag_copier
        wait_for_worker_bots(logger)
        logger.info('Copying tags from %s to %s', cliargs['copytags'], cliargs['index'])
        # look in index2 for all directory docs with tags and add to queue
        dirlist = index_get_docs(cliargs, logger, doctype='directory', copytags=True, index=cliargs['copytags'])
        for path in dirlist:
            q.enqueue(tag_copier, args=(path, cliargs,), result_ttl=config['redis_ttl'])
        # look in index2 for all file docs with tags and add to queue
        filelist = index_get_docs(cliargs, logger, doctype='file', copytags=True, index=cliargs['copytags'])
        for path in filelist:
            q.enqueue(tag_copier, args=(path, cliargs,), result_ttl=config['redis_ttl'])
        if len(dirlist) == 0 and len(filelist) == 0:
            logger.info('No tags to copy')
        else:
            logger.info('Worker bots copying tags in background')
        logger.info('Dispatcher is DONE! Sayonara!')
        sys.exit(0)
    # Calculate directory change percent from index2 to index if cli argument
    if cliargs['hotdirs']:
        wait_for_worker_bots(logger)
        hotdirs()
        logger.info('DONE finding hotdirs! Sayonara!')
        sys.exit(0)
    # print plugins
    plugins_list = ""
    for i in get_plugins_info():
        plugins_list = plugins_list + i["name"] + " "
    if plugins:
        logger.info("Plugins loaded: %s", plugins_list)
    # check if rootdir exists (per crawl backend: api, storage agent, local)
    if cliargs['crawlapi']:
        if cliargs['rootdir'] == '.' or cliargs['rootdir'] == "":
            logger.error("Rootdir path missing, use -d /rootdir, exiting")
            sys.exit(1)
        from diskover_crawlapi import api_connection, api_stat, api_listdir
        logger.info('Connecting to file system storage api at %s... (--crawlapi)' % config['api_url'])
        api_ses = api_connection()
        logger.info('Connected to storage api')
        # check using storage api
        try:
            api_stat(cliargs['rootdir'], api_ses)
        except ValueError as e:
            logger.error("Rootdir path not found or not a directory, exiting (%s)" % e)
            sys.exit(1)
    elif cliargs['storagent']:
        try:
            import diskover_agent
        except ImportError:
            logger.error("Missing diskover_agent.py module, exiting")
            sys.exit(1)
    else:
        # warn if not running as root (linux) or Administrator (windows)
        try:
            is_admin = os.geteuid() == 0
            user = "root"
        except AttributeError:  # windows
            import ctypes
            is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
            user = "Administrator"
        if not is_admin:
            logger.warning('Not running as %s, permissions might block crawling some files' % user)
        if not os.path.exists(cliargs['rootdir']) or not \
                os.path.isdir(cliargs['rootdir']):
            logger.error("Rootdir path not found or not a directory, exiting")
            sys.exit(1)
    logger.debug('Excluded dirs: %s', config['excluded_dirs'])
    # set rootdir_path to absolute path
    rootdir_path = os.path.abspath(cliargs['rootdir'])
    # remove any trailing slash unless root /
    if rootdir_path != '/':
        rootdir_path = rootdir_path.rstrip(os.path.sep)
    # check exclude
    if dir_excluded(rootdir_path, config, cliargs):
        logger.info("Directory in exclude list, exiting")
        sys.exit(0)
    cliargs['rootdir'] = rootdir_path
    # convert to unicode if python2
    if not IS_PY3:
        rootdir_path = unicode(rootdir_path)
    # warn if indexing 0 Byte empty files
    if cliargs['minsize'] == 0:
        logger.warning('You are indexing 0 Byte empty files (-s 0)')
    # check if we are reindexing and remove existing docs in Elasticsearch
    # before crawling and reindexing
    reindex_dict = {'file': [], 'directory': []}
    if cliargs['reindex']:
        reindex_dict = index_delete_path(rootdir_path, cliargs, logger, reindex_dict)
    elif cliargs['reindexrecurs']:
        reindex_dict = index_delete_path(rootdir_path, cliargs, logger, reindex_dict, recursive=True)
    pre_crawl_tasks()
    # start crawling
    starttime = crawl_tree(rootdir_path, cliargs, logger, reindex_dict)
    post_crawl_tasks()
    logger.info('All DONE! Sayonara!')
|
zip.py | import os
import shutil
import threading
import zipfile
from datetime import datetime
def zip_dir(src_path, dest_path):
    """Archive src_path as a .zip at dest_path on a background thread.

    make_archive appends '.zip' itself, so a trailing '.zip' on dest_path
    is stripped first. The thread is fire-and-forget (not joined).
    """
    base = dest_path[:-4] if dest_path.endswith('.zip') else dest_path
    worker = threading.Thread(target=shutil.make_archive,
                              args=(base, 'zip', src_path))
    worker.start()  # zip the dir without blocking the caller
def unzip_dir(src_path, dest_path):
    """Extract the zip archive at src_path into dest_path.

    After extraction, every extracted directory and file is chmod'd to
    0o777 so any user can modify the results.
    """
    shutil.unpack_archive(src_path, dest_path, 'zip')
    for root, subdirs, filenames in os.walk(dest_path):
        # open up permissions on dirs first, then files, under each root
        for name in subdirs + filenames:
            os.chmod(os.path.join(root, name), 0o777)
def list_files(zip_path):
    """Return a list of metadata dicts for entries in the zip at zip_path.

    Entries whose name starts with '.' (hidden) are skipped. Each dict has
    'filename', 'size' (human-readable via sizeof_fmt) and 'modified'
    (stringified datetime from the archive's date_time tuple).
    """
    entries = []
    for info in zipfile.ZipFile(zip_path).infolist():
        if info.filename.startswith('.'):
            continue  # skip hidden entries
        entries.append({
            'filename': info.filename,
            'size': sizeof_fmt(info.file_size),
            'modified': str(datetime(*info.date_time)),
        })
    return entries
def sizeof_fmt(num, suffix='B'):
    """Format a byte count as a short human-readable string.

    Divides by 1024 until the value fits under 1024, printing a
    right-aligned whole number with the matching unit prefix
    (e.g. "  1 KB"). Values beyond zettabytes fall through to 'Yi'.
    """
    units = (' ', ' K', ' M', ' G', ' T', ' P', ' E', ' Z')
    for unit in units:
        if abs(num) < 1024.0:
            return "%3.0f%s%s" % (num, unit, suffix)
        num /= 1024.0
    return "%.1f%s%s" % (num, 'Yi', suffix)
base_repository_test.py | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the base repository classes."""
from builtins import range
import datetime
import threading
import unittest
from googleapiclient import discovery
from googleapiclient import http
import unittest.mock as mock
import google.auth
from google.oauth2 import credentials
from google.oauth2 import service_account
from tests import unittest_utils
from google.cloud import forseti as forseti_security
from google.cloud.forseti.common.gcp_api import _base_repository as base
from google.cloud.forseti.common.gcp_api import _supported_apis
class BaseRepositoryTest(unittest_utils.ForsetiTestCase):
"""Test the Base Repository methods."""
def get_test_credential(self):
access_token = 'foo'
client_id = 'some_client_id'
client_secret = 'cOuDdkfjxxnv+'
refresh_token = '1/0/a.df219fjls0'
token_uri = 'https://example.com/oauth2/token'
token_expiry = datetime.datetime.utcnow()
user_agent = ''
creds = credentials.Credentials(
access_token, refresh_token=refresh_token,
token_uri=token_uri, client_id=client_id,
client_secret=client_secret, scopes=['foo'])
creds.expiry = token_expiry
return creds
@mock.patch('google.auth.crypt.rsa.RSASigner.from_string',
return_value=object())
def get_test_service_account(self, mock_signer):
keyfile_dict = {
'type': 'service_account',
'client_email': 'test@service.account',
'private_key': '12345',
'private_key_id': '12345',
'client_id': '123',
'token_uri': 'https://example.com/oauth2/token'}
creds = (
service_account.Credentials.from_service_account_info(
keyfile_dict))
return creds
def test_build_http(self):
"""Verify set user agent sets the user agent correctly."""
http_mock = http.HttpMock()
h = base.http_helpers.build_http(http=http_mock)
_ = h.request('http://test.foo', 'GET')
self.assertTrue(
forseti_security.__package_name__ in
h.headers.get('user-agent'))
def test_build_http_multiple(self):
"""Verify set user agent sets the user agent only once."""
http_mock = http.HttpMock()
h = base.http_helpers.build_http(http=http_mock)
for _ in range(5):
h = base.http_helpers.build_http(http=h)
_ = h.request('http://test.foo', 'GET')
user_agent = h.headers.get('user-agent')
forseti_agent_count = user_agent.count(
forseti_security.__package_name__)
self.assertEqual(1, forseti_agent_count)
def test_set_scopes(self):
creds = self.get_test_service_account()
self.assertTrue(creds.requires_scopes)
scoped_creds = base.with_scopes_if_required(
creds, list(base.CLOUD_SCOPES))
self.assertFalse(scoped_creds.requires_scopes)
@mock.patch.object(discovery, 'build', autospec=True)
def test_forseti_supported_api_is_ok(
self,
mock_discovery_build):
"""Test that Forseti-supported API in BaseClient.__init__() works.
Args:
mock_discovery_build (Mock): Mock object.
Setup:
* Pick one of the supported APIs.
* Instantiate the Base Client with just the API name.
Expect:
* The resulting API client service has the same API name and
version as the supported API.
"""
api_name = list(_supported_apis.SUPPORTED_APIS.keys())[0]
supported_api = _supported_apis.SUPPORTED_APIS[api_name]
mock_credentials = mock.MagicMock()
repo_client = base.BaseRepositoryClient(
api_name, credentials=mock_credentials)
self.assertEqual((api_name, [supported_api['default_version']]),
(repo_client.name, repo_client.versions))
@mock.patch.object(discovery, 'build', autospec=True)
@mock.patch.object(base, 'LOGGER', autospec=True)
def test_forseti_unsupported_valid_version_is_ok(
self,
mock_logger,
mock_discovery_build):
"""Test that Forseti-supported API with unsupported valid version is ok.
Args:
mock_logger (Mock): Mock objects.
mock_discovery_build (Mock): Mock object.
Setup:
* Pick one of the supported APIs.
* Pick a valid version (not officially supported by Forseti).
* Instantiate the Base Client with the API name and version.
Expect:
* Unsupported version will call LOGGER.warning().
"""
api_name = 'cloudresourcemanager'
self.assertTrue(api_name in _supported_apis.SUPPORTED_APIS)
provided_version = 'v1beta1'
self.assertFalse(
provided_version in
_supported_apis.SUPPORTED_APIS[api_name]['supported_versions'])
mock_credentials = mock.MagicMock()
repo_client = base.BaseRepositoryClient(
api_name, credentials=mock_credentials, versions=[provided_version])
self.assertEqual((api_name, [provided_version]),
(repo_client.name, repo_client.versions))
mock_logger.warning.assert_called_with(
mock.ANY, api_name, provided_version)
@mock.patch.object(discovery, 'build', autospec=True)
@mock.patch.object(base, 'LOGGER', autospec=True)
def test_forseti_unsupported_api_is_ok(
self,
mock_logger,
mock_discovery_build):
"""Test that unsupported API is ok.
Args:
mock_logger (Mock): Mock objects.
mock_discovery_build (Mock): Mock object.
Setup:
* Pick a non-supported API.
* Pick a valid version (not officially supported by Forseti).
* Instantiate the Base Client with the API name and version.
Expect:
* Unsupported API will call LOGGER.warning().
"""
api_name = 'zoo'
self.assertFalse(api_name in _supported_apis.SUPPORTED_APIS)
provided_versions = ['v1', 'v2']
mock_credentials = mock.MagicMock()
repo_client = base.BaseRepositoryClient(
api_name, credentials=mock_credentials, versions=provided_versions)
expected_repr = 'API: name=zoo, versions=[\'v1\', \'v2\']'
self.assertEqual(expected_repr, '%s' % repo_client)
mock_logger.warning.assert_called_with(
mock.ANY, api_name)
@mock.patch.object(discovery, 'build', autospec=True)
def test_init_repository_no_supported_version(self, mock_discovery_build):
"""Verify that _init_repository will pick a version if none provided."""
class ZooRepository(base.GCPRepository):
def __init__(self, **kwargs):
super(ZooRepository, self).__init__(component='a', **kwargs)
# Return a different mock object each time build is called.
mock_discovery_build.side_effect = [mock.Mock(), mock.Mock()]
mock_credentials = mock.MagicMock()
repo_client = base.BaseRepositoryClient(
'zoo', credentials=mock_credentials, versions=['v2', 'v1'])
repo = repo_client._init_repository(ZooRepository)
self.assertEqual(repo_client.gcp_services['v1'], repo.gcp_service)
self.assertNotEqual(repo_client.gcp_services['v2'], repo.gcp_service)
def test_multiple_threads_unique_http_objects(self):
"""Validate that each thread gets its unique http object.
At the core of this requirement is the fact that httplib2.Http is not
thread-safe. Therefore, it is the responsibility of the repo to maintain
a separate http object even if multiplethreads share it.
"""
def get_http(repo, result, i):
result[i] = repo.http
gcp_service_mock = mock.Mock()
credentials_mock = mock.Mock(spec=credentials.Credentials)
repo = base.GCPRepository(
gcp_service=gcp_service_mock,
credentials=credentials_mock,
component='fake_component',
use_cached_http=True)
http_objects = [None] * 2
t1 = threading.Thread(target=get_http, args=(repo, http_objects, 0))
t2 = threading.Thread(target=get_http, args=(repo, http_objects, 1))
t1.start()
t2.start()
t1.join()
t2.join()
self.assertNotEqual(http_objects[0], http_objects[1])
@mock.patch('google.auth.crypt.rsa.RSASigner.from_string',
            return_value=object())
def test_no_cached_http_gets_different_http_objects(self, signer_factory):
    """Validate that each unique credential gets a unique http object.

    At the core of this requirement is the fact that some API's require
    distinctly scoped credentials, whereas the authenticated http object
    is cached for all clients in the same thread.
    """

    def make_repo(index):
        # Each repository gets its own credential and caching disabled.
        return base.GCPRepository(
            gcp_service=mock.Mock(),
            credentials=self.get_test_credential(),
            component='fake_component{}'.format(index),
            use_cached_http=False)

    http_objects = [make_repo(i).http for i in range(2)]
    self.assertNotEqual(http_objects[0], http_objects[1])
@mock.patch('google.auth.crypt.rsa.RSASigner.from_string',
            return_value=object())
def test_use_cached_http_gets_same_http_objects(self, signer_factory):
    """Different clients with the same credential get the same http object.

    This verifies that a new http object is not created when two
    repository clients use the same credentials object.
    """
    shared_credentials = self.get_test_credential()

    def make_repo(index):
        # Both repositories share one credential and enable http caching.
        return base.GCPRepository(
            gcp_service=mock.Mock(),
            credentials=shared_credentials,
            component='fake_component{}'.format(index),
            use_cached_http=True)

    http_objects = [make_repo(i).http for i in range(2)]
    self.assertEqual(http_objects[0], http_objects[1])
if __name__ == '__main__':
    # Allow running this test module directly (python <module>.py).
    unittest.main()
|
common.py | """Test the helper method for writing tests."""
from __future__ import annotations
import asyncio
import collections
from collections import OrderedDict
from contextlib import contextmanager
from datetime import timedelta
import functools as ft
from io import StringIO
import json
import logging
import os
import pathlib
import threading
import time
from time import monotonic
import types
from typing import Any, Awaitable, Collection
from unittest.mock import AsyncMock, Mock, patch
from aiohttp.test_utils import unused_port as get_test_instance_port # noqa: F401
from homeassistant import auth, config_entries, core as ha, loader
from homeassistant.auth import (
auth_store,
models as auth_models,
permissions as auth_permissions,
providers as auth_providers,
)
from homeassistant.auth.permissions import system_policies
from homeassistant.components import recorder
from homeassistant.components.device_automation import ( # noqa: F401
_async_get_device_automation_capabilities as async_get_device_automation_capabilities,
_async_get_device_automations as async_get_device_automations,
)
from homeassistant.components.mqtt.models import Message
from homeassistant.config import async_process_component_config
from homeassistant.const import (
DEVICE_DEFAULT_NAME,
EVENT_HOMEASSISTANT_CLOSE,
EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import BLOCK_LOG_TIMEOUT, State
from homeassistant.helpers import (
area_registry,
device_registry,
entity,
entity_platform,
entity_registry,
intent,
restore_state,
storage,
)
from homeassistant.helpers.json import JSONEncoder
from homeassistant.setup import async_setup_component, setup_component
from homeassistant.util.async_ import run_callback_threadsafe
import homeassistant.util.dt as date_util
from homeassistant.util.unit_system import METRIC_SYSTEM
import homeassistant.util.uuid as uuid_util
import homeassistant.util.yaml.loader as yaml_loader
_LOGGER = logging.getLogger(__name__)
INSTANCES = []
CLIENT_ID = "https://example.com/app"
CLIENT_REDIRECT_URI = "https://example.com/app/callback"
def threadsafe_callback_factory(func):
    """Create threadsafe functions out of callbacks.

    Callback needs to have `hass` as first argument.
    """

    @ft.wraps(func)
    def threadsafe(*args, **kwargs):
        """Call func threadsafe."""
        hass = args[0]
        # Schedule the callback on hass's event loop and block for its result.
        future = run_callback_threadsafe(
            hass.loop, ft.partial(func, *args, **kwargs)
        )
        return future.result()

    return threadsafe
def threadsafe_coroutine_factory(func):
    """Create threadsafe functions out of coroutine.

    Callback needs to have `hass` as first argument.
    """

    @ft.wraps(func)
    def threadsafe(*args, **kwargs):
        """Call func threadsafe."""
        hass = args[0]
        # Submit the coroutine to hass's loop and block for its result.
        future = asyncio.run_coroutine_threadsafe(func(*args, **kwargs), hass.loop)
        return future.result()

    return threadsafe
def get_test_config_dir(*add_path):
    """Return a path to a test config dir."""
    base_dir = os.path.dirname(__file__)
    return os.path.join(base_dir, "testing_config", *add_path)
def get_test_home_assistant():
    """Return a Home Assistant object pointing at test config directory.

    Creates a fresh event loop, builds the test hass on it, then runs the
    loop in a dedicated background thread so synchronous test code can
    drive hass via start()/stop().
    """
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    hass = loop.run_until_complete(async_test_home_assistant(loop))

    # Signals that the background loop thread has fully exited run_forever().
    loop_stop_event = threading.Event()

    def run_loop():
        """Run event loop."""
        # pylint: disable=protected-access
        # Make the loop believe it lives on this thread so threadsafe
        # scheduling checks pass.
        loop._thread_ident = threading.get_ident()
        loop.run_forever()
        loop_stop_event.set()

    orig_stop = hass.stop
    # hass._stopped.set() is what ends the core's run; redirect it to stop
    # the loop directly instead.
    hass._stopped = Mock(set=loop.stop)

    def start_hass(*mocks):
        """Start hass."""
        # Block the calling (test) thread until async_start completes
        # on the loop thread.
        asyncio.run_coroutine_threadsafe(hass.async_start(), loop).result()

    def stop_hass():
        """Stop hass."""
        orig_stop()
        # Wait for run_loop() to return before closing the loop.
        loop_stop_event.wait()
        loop.close()

    hass.start = start_hass
    hass.stop = stop_hass

    # daemon=False: the test process waits for an orderly loop shutdown.
    threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()

    return hass
# pylint: disable=protected-access
async def async_test_home_assistant(loop, load_registries=True):
    """Return a Home Assistant object pointing at test config dir.

    The returned instance has its job/task scheduling patched so that Mock
    callables resolve immediately, uses an in-test config directory, and is
    tracked in INSTANCES until EVENT_HOMEASSISTANT_CLOSE fires.
    """
    hass = ha.HomeAssistant()
    store = auth_store.AuthStore(hass)
    hass.auth = auth.AuthManager(hass, store, {}, {})
    ensure_auth_manager_loaded(hass.auth)
    INSTANCES.append(hass)

    orig_async_add_job = hass.async_add_job
    orig_async_add_executor_job = hass.async_add_executor_job
    orig_async_create_task = hass.async_create_task

    def async_add_job(target, *args):
        """Add job."""
        # Unwrap functools.partial layers to inspect the real callable.
        check_target = target
        while isinstance(check_target, ft.partial):
            check_target = check_target.func

        # Plain Mocks are called synchronously and wrapped in a resolved
        # future; AsyncMocks go through the real scheduler.
        if isinstance(check_target, Mock) and not isinstance(target, AsyncMock):
            fut = asyncio.Future()
            fut.set_result(target(*args))
            return fut

        return orig_async_add_job(target, *args)

    def async_add_executor_job(target, *args):
        """Add executor job."""
        check_target = target
        while isinstance(check_target, ft.partial):
            check_target = check_target.func

        # Mocks never hit the executor; they run inline.
        if isinstance(check_target, Mock):
            fut = asyncio.Future()
            fut.set_result(target(*args))
            return fut

        return orig_async_add_executor_job(target, *args)

    def async_create_task(coroutine):
        """Create task."""
        # A bare Mock "coroutine" becomes an already-finished no-op task.
        if isinstance(coroutine, Mock) and not isinstance(coroutine, AsyncMock):
            fut = asyncio.Future()
            fut.set_result(None)
            return fut

        return orig_async_create_task(coroutine)

    async def async_wait_for_task_count(self, max_remaining_tasks: int = 0) -> None:
        """Block until at most max_remaining_tasks remain.

        Based on HomeAssistant.async_block_till_done
        """
        # To flush out any call_soon_threadsafe
        await asyncio.sleep(0)
        start_time: float | None = None

        while len(self._pending_tasks) > max_remaining_tasks:
            pending: Collection[Awaitable[Any]] = [
                task for task in self._pending_tasks if not task.done()
            ]
            self._pending_tasks.clear()
            if len(pending) > max_remaining_tasks:
                remaining_pending = await self._await_count_and_log_pending(
                    pending, max_remaining_tasks=max_remaining_tasks
                )
                self._pending_tasks.extend(remaining_pending)

                if start_time is None:
                    # Avoid calling monotonic() until we know
                    # we may need to start logging blocked tasks.
                    start_time = 0
                elif start_time == 0:
                    # If we have waited twice then we set the start
                    # time
                    start_time = monotonic()
                elif monotonic() - start_time > BLOCK_LOG_TIMEOUT:
                    # We have waited at least three loops and new tasks
                    # continue to block. At this point we start
                    # logging all waiting tasks.
                    for task in pending:
                        _LOGGER.debug("Waiting for task: %s", task)
            else:
                self._pending_tasks.extend(pending)
                await asyncio.sleep(0)

    async def _await_count_and_log_pending(
        self, pending: Collection[Awaitable[Any]], max_remaining_tasks: int = 0
    ) -> Collection[Awaitable[Any]]:
        """Block at most max_remaining_tasks remain and log tasks that take a long time.

        Based on HomeAssistant._await_and_log_pending
        """
        wait_time = 0

        # With a task budget, return as soon as any task finishes;
        # otherwise wait for all of them.
        return_when = asyncio.ALL_COMPLETED
        if max_remaining_tasks:
            return_when = asyncio.FIRST_COMPLETED

        while len(pending) > max_remaining_tasks:
            _, pending = await asyncio.wait(
                pending, timeout=BLOCK_LOG_TIMEOUT, return_when=return_when
            )
            if not pending or max_remaining_tasks:
                return pending
            wait_time += BLOCK_LOG_TIMEOUT
            for task in pending:
                _LOGGER.debug("Waited %s seconds for task: %s", wait_time, task)

        return []

    hass.async_add_job = async_add_job
    hass.async_add_executor_job = async_add_executor_job
    hass.async_create_task = async_create_task
    hass.async_wait_for_task_count = types.MethodType(async_wait_for_task_count, hass)
    hass._await_count_and_log_pending = types.MethodType(
        _await_count_and_log_pending, hass
    )

    hass.data[loader.DATA_CUSTOM_COMPONENTS] = {}
    hass.config.location_name = "test home"
    hass.config.config_dir = get_test_config_dir()
    hass.config.latitude = 32.87336
    hass.config.longitude = -117.22743
    hass.config.elevation = 0
    hass.config.time_zone = date_util.get_time_zone("US/Pacific")
    hass.config.units = METRIC_SYSTEM
    hass.config.media_dirs = {"local": get_test_config_dir("media")}
    hass.config.skip_pip = True

    hass.config_entries = config_entries.ConfigEntries(hass, {})
    hass.config_entries._entries = {}
    # Prevent the store from registering a real stop listener during tests.
    hass.config_entries._store._async_ensure_stop_listener = lambda: None

    # Load the registries
    if load_registries:
        await asyncio.gather(
            device_registry.async_load(hass),
            entity_registry.async_load(hass),
            area_registry.async_load(hass),
        )
        await hass.async_block_till_done()

    hass.state = ha.CoreState.running

    # Mock async_start
    orig_start = hass.async_start

    async def mock_async_start():
        """Start the mocking."""
        # We only mock time during tests and we want to track tasks
        with patch("homeassistant.core._async_create_timer"), patch.object(
            hass, "async_stop_track_tasks"
        ):
            await orig_start()

    hass.async_start = mock_async_start

    @ha.callback
    def clear_instance(event):
        """Clear global instance."""
        INSTANCES.remove(hass)

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)

    return hass
def async_mock_service(hass, domain, service, schema=None):
    """Set up a fake service & return a calls log list to this service."""
    recorded_calls = []

    @ha.callback
    def _log_call(call):  # pylint: disable=unnecessary-lambda
        """Record the service call."""
        recorded_calls.append(call)

    hass.services.async_register(domain, service, _log_call, schema=schema)
    return recorded_calls
mock_service = threadsafe_callback_factory(async_mock_service)
@ha.callback
def async_mock_intent(hass, intent_typ):
    """Set up a fake intent handler."""
    handled_intents = []

    class MockIntentHandler(intent.IntentHandler):
        """Intent handler that records every intent it receives."""

        intent_type = intent_typ

        async def async_handle(self, intent_obj):
            """Handle the intent."""
            handled_intents.append(intent_obj)
            return intent_obj.create_response()

    intent.async_register(hass, MockIntentHandler())
    return handled_intents
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
    """Fire the MQTT message."""
    # The MQTT wire format is bytes; encode str payloads transparently.
    encoded = payload.encode("utf-8") if isinstance(payload, str) else payload
    hass.data["mqtt"]._mqtt_handle_message(Message(topic, encoded, qos, retain))
fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(hass, datetime_, fire_all=False):
    """Fire a time changes event.

    Besides firing EVENT_TIME_CHANGED, manually runs any loop timer whose
    deadline falls within the mocked jump to ``datetime_`` (or all timers
    when ``fire_all``), so time-based listeners trigger without sleeping.
    """
    hass.bus.async_fire(EVENT_TIME_CHANGED, {"now": date_util.as_utc(datetime_)})

    # Walk a snapshot of the loop's scheduled timer handles.
    for task in list(hass.loop._scheduled):
        if not isinstance(task, asyncio.TimerHandle):
            continue
        if task.cancelled():
            continue

        # How far into the future (wall clock) we are pretending to jump.
        mock_seconds_into_future = datetime_.timestamp() - time.time()
        # How far away this timer's deadline actually is on the loop clock.
        future_seconds = task.when() - hass.loop.time()

        if fire_all or mock_seconds_into_future >= future_seconds:
            # Run the timer callback with "now" patched to the mocked time,
            # then cancel it so the loop does not fire it a second time.
            with patch(
                "homeassistant.helpers.event.time_tracker_utcnow",
                return_value=date_util.as_utc(datetime_),
            ):
                task._run()
            task.cancel()
fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
def load_fixture(filename):
    """Return the contents of a file from the tests ``fixtures`` directory."""
    fixture_path = os.path.join(os.path.dirname(__file__), "fixtures", filename)
    with open(fixture_path, encoding="utf-8") as fixture_file:
        return fixture_file.read()
def mock_state_change_event(hass, new_state, old_state=None):
    """Fire a mocked state-changed event on the bus."""
    payload = {"entity_id": new_state.entity_id, "new_state": new_state}
    if old_state:
        payload["old_state"] = old_state
    hass.bus.fire(EVENT_STATE_CHANGED, payload, context=new_state.context)
@ha.callback
def mock_component(hass, component):
    """Mock a component is setup.

    Raises AssertionError if the component is already in
    hass.config.components.
    """
    if component in hass.config.components:
        # Bug fix: the original built an AssertionError here but never
        # raised it, so double-mocking a component went unnoticed.
        raise AssertionError(f"Integration {component} is already setup")

    hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
    """Mock the Entity Registry."""
    registry = entity_registry.EntityRegistry(hass)
    registry.entities = mock_entries if mock_entries else OrderedDict()
    registry._rebuild_index()

    hass.data[entity_registry.DATA_REGISTRY] = registry
    return registry
def mock_area_registry(hass, mock_entries=None):
    """Mock the Area Registry."""
    registry = area_registry.AreaRegistry(hass)
    registry.areas = mock_entries if mock_entries else OrderedDict()

    hass.data[area_registry.DATA_REGISTRY] = registry
    return registry
def mock_device_registry(hass, mock_entries=None, mock_deleted_entries=None):
    """Mock the Device Registry."""
    registry = device_registry.DeviceRegistry(hass)
    registry.devices = mock_entries if mock_entries else OrderedDict()
    registry.deleted_devices = (
        mock_deleted_entries if mock_deleted_entries else OrderedDict()
    )
    registry._rebuild_index()

    hass.data[device_registry.DATA_REGISTRY] = registry
    return registry
class MockGroup(auth_models.Group):
    """Mock a group in Home Assistant."""

    def __init__(self, id=None, name="Mock Group", policy=system_policies.ADMIN_POLICY):
        """Mock a group."""
        group_kwargs = {"name": name, "policy": policy}
        # Only forward an id when one was given so the model can generate one.
        if id is not None:
            group_kwargs["id"] = id
        super().__init__(**group_kwargs)

    def add_to_hass(self, hass):
        """Test helper to add entry to hass."""
        return self.add_to_auth_manager(hass.auth)

    def add_to_auth_manager(self, auth_mgr):
        """Test helper to add entry to hass."""
        ensure_auth_manager_loaded(auth_mgr)
        auth_mgr._store._groups[self.id] = self
        return self
class MockUser(auth_models.User):
    """Mock a user in Home Assistant."""

    def __init__(
        self,
        id=None,
        is_owner=False,
        is_active=True,
        name="Mock User",
        system_generated=False,
        groups=None,
    ):
        """Initialize mock user."""
        user_kwargs = {
            "is_owner": is_owner,
            "is_active": is_active,
            "name": name,
            "system_generated": system_generated,
            "groups": groups or [],
            "perm_lookup": None,
        }
        # Only forward an id when one was given so the model can generate one.
        if id is not None:
            user_kwargs["id"] = id
        super().__init__(**user_kwargs)

    def add_to_hass(self, hass):
        """Test helper to add entry to hass."""
        return self.add_to_auth_manager(hass.auth)

    def add_to_auth_manager(self, auth_mgr):
        """Test helper to add entry to hass."""
        ensure_auth_manager_loaded(auth_mgr)
        auth_mgr._store._users[self.id] = self
        return self

    def mock_policy(self, policy):
        """Mock a policy for a user."""
        self._permissions = auth_permissions.PolicyPermissions(policy, self.perm_lookup)
async def register_auth_provider(hass, config):
    """Register an auth provider built from *config* and return it."""
    provider = await auth_providers.auth_provider_from_config(
        hass, hass.auth._store, config
    )
    assert provider is not None, "Invalid config specified"

    provider_registry = hass.auth._providers
    provider_key = (provider.type, provider.id)
    if provider_key in provider_registry:
        raise ValueError("Provider already registered")

    provider_registry[provider_key] = provider
    return provider
@ha.callback
def ensure_auth_manager_loaded(auth_mgr):
    """Ensure an auth manager is considered loaded."""
    # An unloaded store has _users is None; seed it with defaults.
    if auth_mgr._store._users is None:
        auth_mgr._store._set_defaults()
class MockModule:
    """Representation of a fake module.

    Mimics the attribute surface of a real integration module so the
    loader accepts it. Optional attributes are set only when provided:
    loader code distinguishes "attribute absent" from "attribute is None"
    via hasattr checks, so unset options must stay absent.
    """

    # pylint: disable=invalid-name
    def __init__(
        self,
        domain=None,
        dependencies=None,
        setup=None,
        requirements=None,
        config_schema=None,
        platform_schema=None,
        platform_schema_base=None,
        async_setup=None,
        async_setup_entry=None,
        async_unload_entry=None,
        async_migrate_entry=None,
        async_remove_entry=None,
        partial_manifest=None,
    ):
        """Initialize the mock module."""
        # Mirror the dunder attributes a real imported module would carry.
        self.__name__ = f"homeassistant.components.{domain}"
        self.__file__ = f"homeassistant/components/{domain}"
        self.DOMAIN = domain
        self.DEPENDENCIES = dependencies or []
        self.REQUIREMENTS = requirements or []
        # Overlay to be used when generating manifest from this module
        self._partial_manifest = partial_manifest

        if config_schema is not None:
            self.CONFIG_SCHEMA = config_schema

        if platform_schema is not None:
            self.PLATFORM_SCHEMA = platform_schema

        if platform_schema_base is not None:
            self.PLATFORM_SCHEMA_BASE = platform_schema_base

        if setup:
            # We run this in executor, wrap it in function
            self.setup = lambda *args: setup(*args)

        if async_setup is not None:
            self.async_setup = async_setup

        # With neither setup flavor supplied, default to a succeeding
        # async_setup so the component always sets up.
        if setup is None and async_setup is None:
            self.async_setup = AsyncMock(return_value=True)

        if async_setup_entry is not None:
            self.async_setup_entry = async_setup_entry

        if async_unload_entry is not None:
            self.async_unload_entry = async_unload_entry

        if async_migrate_entry is not None:
            self.async_migrate_entry = async_migrate_entry

        if async_remove_entry is not None:
            self.async_remove_entry = async_remove_entry

    def mock_manifest(self):
        """Generate a mock manifest to represent this module."""
        return {
            **loader.manifest_from_legacy_module(self.DOMAIN, self),
            **(self._partial_manifest or {}),
        }
class MockPlatform:
    """Provide a fake platform.

    Optional attributes are only set when supplied, matching the hasattr
    protocol the entity platform setup code uses.
    """

    __name__ = "homeassistant.components.light.bla"
    __file__ = "homeassistant/components/blah/light"

    # pylint: disable=invalid-name
    def __init__(
        self,
        setup_platform=None,
        dependencies=None,
        platform_schema=None,
        async_setup_platform=None,
        async_setup_entry=None,
        scan_interval=None,
    ):
        """Initialize the platform."""
        self.DEPENDENCIES = dependencies or []

        if platform_schema is not None:
            self.PLATFORM_SCHEMA = platform_schema

        if scan_interval is not None:
            self.SCAN_INTERVAL = scan_interval

        if setup_platform is not None:
            # We run this in executor, wrap it in function
            self.setup_platform = lambda *args: setup_platform(*args)

        if async_setup_platform is not None:
            self.async_setup_platform = async_setup_platform

        if async_setup_entry is not None:
            self.async_setup_entry = async_setup_entry

        # With neither setup flavor supplied, default to a no-op async
        # setup so the platform always loads.
        if setup_platform is None and async_setup_platform is None:
            self.async_setup_platform = AsyncMock(return_value=None)
class MockEntityPlatform(entity_platform.EntityPlatform):
    """Mock class with some mock defaults."""

    def __init__(
        self,
        hass,
        logger=None,
        domain="test_domain",
        platform_name="test_platform",
        platform=None,
        scan_interval=timedelta(seconds=15),
        entity_namespace=None,
    ):
        """Initialize a mock entity platform.

        All arguments except ``hass`` default to test-friendly values;
        only override what a test cares about.
        """
        if logger is None:
            logger = logging.getLogger("homeassistant.helpers.entity_platform")

        # Otherwise the constructor will blow up.
        # (PARALLEL_UPDATES must be an int; an auto-created Mock attribute
        # would fail the parent's arithmetic.)
        if isinstance(platform, Mock) and isinstance(platform.PARALLEL_UPDATES, Mock):
            platform.PARALLEL_UPDATES = 0

        super().__init__(
            hass=hass,
            logger=logger,
            domain=domain,
            platform_name=platform_name,
            platform=platform,
            scan_interval=scan_interval,
            entity_namespace=entity_namespace,
        )
class MockToggleEntity(entity.ToggleEntity):
    """Provide a mock toggle device that records every access in ``calls``."""

    def __init__(self, name, state, unique_id=None):
        """Initialize the mock entity."""
        self._name = name or DEVICE_DEFAULT_NAME
        self._state = state
        self.calls = []

    @property
    def name(self):
        """Return the name of the entity if any."""
        self.calls.append(("name", {}))
        return self._name

    @property
    def state(self):
        """Return the state of the entity if any."""
        self.calls.append(("state", {}))
        return self._state

    @property
    def is_on(self):
        """Return true if entity is on."""
        self.calls.append(("is_on", {}))
        return self._state == STATE_ON

    def turn_on(self, **kwargs):
        """Turn the entity on."""
        self.calls.append(("turn_on", kwargs))
        self._state = STATE_ON

    def turn_off(self, **kwargs):
        """Turn the entity off."""
        self.calls.append(("turn_off", kwargs))
        self._state = STATE_OFF

    def last_call(self, method=None):
        """Return the last recorded call, optionally filtered by method name."""
        if not self.calls:
            return None
        if method is None:
            return self.calls[-1]
        for logged_call in reversed(self.calls):
            if logged_call[0] == method:
                return logged_call
        return None
class MockConfigEntry(config_entries.ConfigEntry):
    """Helper for creating config entries that adds some defaults."""

    def __init__(
        self,
        *,
        domain="test",
        data=None,
        version=1,
        entry_id=None,
        source=config_entries.SOURCE_USER,
        title="Mock Title",
        state=None,
        options=None,
        system_options=None,
        connection_class=config_entries.CONN_CLASS_UNKNOWN,
        unique_id=None,
        disabled_by=None,
        reason=None,
    ):
        """Initialize a mock config entry.

        All parameters are keyword-only with test-friendly defaults.
        """
        # Bug fix: ``options``/``system_options`` previously defaulted to
        # mutable ``{}`` literals, so every entry created without them
        # shared (and could cross-mutate) the same dicts. Use None
        # sentinels and build a fresh dict per instance instead.
        kwargs = {
            "entry_id": entry_id or uuid_util.random_uuid_hex(),
            "domain": domain,
            "data": data or {},
            "system_options": {} if system_options is None else system_options,
            "options": {} if options is None else options,
            "version": version,
            "title": title,
            "connection_class": connection_class,
            "unique_id": unique_id,
            "disabled_by": disabled_by,
        }

        # Only forward these when provided so the parent applies its defaults.
        if source is not None:
            kwargs["source"] = source
        if state is not None:
            kwargs["state"] = state
        super().__init__(**kwargs)

        # Reason is not a constructor argument of the parent class.
        if reason is not None:
            self.reason = reason

    def add_to_hass(self, hass):
        """Test helper to add entry to hass."""
        hass.config_entries._entries[self.entry_id] = self

    def add_to_manager(self, manager):
        """Test helper to add entry to entry manager."""
        manager._entries[self.entry_id] = self
def patch_yaml_files(files_dict, endswith=True):
    """Patch load_yaml with a dictionary of yaml files.

    files_dict maps file names (or suffixes, when ``endswith``) to the
    YAML text that should be returned for them.
    """
    # Match using endswith, start search with longest string.
    # Bug fix: the original sorted ascending by length, so the SHORTEST
    # suffix was tried first and could shadow a longer, more specific one
    # (e.g. "a.yaml" matching before "dir/a.yaml"). Sort descending.
    matchlist = (
        sorted(files_dict.keys(), key=len, reverse=True) if endswith else []
    )

    def mock_open_f(fname, **_):
        """Mock open() in the yaml module, used by load_yaml."""
        # Return the mocked file on full match
        if isinstance(fname, pathlib.Path):
            fname = str(fname)

        if fname in files_dict:
            _LOGGER.debug("patch_yaml_files match %s", fname)
            res = StringIO(files_dict[fname])
            setattr(res, "name", fname)
            return res

        # Match using endswith
        for ends in matchlist:
            if fname.endswith(ends):
                _LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
                res = StringIO(files_dict[ends])
                setattr(res, "name", fname)
                return res

        # Fallback for hass.components (i.e. services.yaml)
        if "homeassistant/components" in fname:
            _LOGGER.debug("patch_yaml_files using real file: %s", fname)
            return open(fname, encoding="utf-8")

        # Not found
        raise FileNotFoundError(f"File not found: {fname}")

    return patch.object(yaml_loader, "open", mock_open_f, create=True)
def mock_coro(return_value=None, exception=None):
    """Return an awaitable that resolves to *return_value* or raises *exception*."""
    future = asyncio.Future()
    if exception is None:
        future.set_result(return_value)
    else:
        future.set_exception(exception)
    return future
@contextmanager
def assert_setup_component(count, domain=None):
    """Collect valid configuration from setup_component.

    - count: The amount of valid platforms that should be setup
    - domain: The domain to count is optional. It can be automatically
      determined most of the time

    Use as a context manager around setup.setup_component
        with assert_setup_component(0) as result_config:
            setup_component(hass, domain, start_config)
            # using result_config is optional
    """
    # Captured {domain: validated_config_or_None}, filled by mock_psc below.
    config = {}

    async def mock_psc(hass, config_input, integration):
        """Mock the prepare_setup_component to capture config."""
        domain_input = integration.domain
        res = await async_process_component_config(hass, config_input, integration)
        # res is None when validation failed entirely.
        config[domain_input] = None if res is None else res.get(domain_input)
        _LOGGER.debug(
            "Configuration for %s, Validated: %s, Original %s",
            domain_input,
            config[domain_input],
            config_input.get(domain_input),
        )
        return res

    assert isinstance(config, dict)
    with patch("homeassistant.config.async_process_component_config", mock_psc):
        yield config

    # After the block: infer the domain if not given, then assert the
    # number of validated platform configs matches expectation.
    if domain is None:
        assert len(config) == 1, "assert_setup_component requires DOMAIN: {}".format(
            list(config.keys())
        )
        domain = list(config.keys())[0]

    res = config.get(domain)
    res_len = 0 if res is None else len(res)
    assert (
        res_len == count
    ), f"setup_component failed, expected {count} got {res_len}: {res}"
def init_recorder_component(hass, add_config=None):
    """Initialize the recorder."""
    config = {} if not add_config else dict(add_config)
    config[recorder.CONF_DB_URL] = "sqlite://"  # In memory DB

    # Skip real schema migration during tests.
    with patch("homeassistant.components.recorder.migration.migrate_schema"):
        assert setup_component(hass, recorder.DOMAIN, {recorder.DOMAIN: config})
        assert recorder.DOMAIN in hass.config.components
    _LOGGER.info("In-memory recorder successfully started")
async def async_init_recorder_component(hass, add_config=None):
    """Initialize the recorder asynchronously."""
    config = {} if not add_config else dict(add_config)
    config[recorder.CONF_DB_URL] = "sqlite://"

    # Skip real schema migration during tests.
    with patch("homeassistant.components.recorder.migration.migrate_schema"):
        assert await async_setup_component(
            hass, recorder.DOMAIN, {recorder.DOMAIN: config}
        )
        assert recorder.DOMAIN in hass.config.components
    _LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
    """Mock the DATA_RESTORE_CACHE."""
    key = restore_state.DATA_RESTORE_STATE_TASK
    data = restore_state.RestoreStateData(hass)
    now = date_util.utcnow()

    stored_states = {}
    for state in states:
        # Round-trip attributes through JSON so they behave like a real
        # restore from disk.
        state_dict = state.as_dict()
        state_dict["attributes"] = json.loads(
            json.dumps(state_dict["attributes"], cls=JSONEncoder)
        )
        stored_states[state.entity_id] = restore_state.StoredState(
            State.from_dict(state_dict), now
        )

    data.last_states = stored_states
    _LOGGER.debug("Restore cache: %s", data.last_states)
    assert len(data.last_states) == len(states), f"Duplicate entity_id? {states}"

    hass.data[key] = data
class MockEntity(entity.Entity):
    """Mock Entity class.

    Each entity property is answered from the ``values`` mapping passed to
    the constructor; anything not supplied falls back to the real Entity
    base-class implementation via _handle().
    """

    def __init__(self, **values):
        """Initialize an entity."""
        self._values = values

        if "entity_id" in values:
            self.entity_id = values["entity_id"]

    @property
    def name(self):
        """Return the name of the entity."""
        return self._handle("name")

    @property
    def should_poll(self):
        """Return the ste of the polling."""
        return self._handle("should_poll")

    @property
    def unique_id(self):
        """Return the unique ID of the entity."""
        return self._handle("unique_id")

    @property
    def state(self):
        """Return the state of the entity."""
        return self._handle("state")

    @property
    def available(self):
        """Return True if entity is available."""
        return self._handle("available")

    @property
    def device_info(self):
        """Info how it links to a device."""
        return self._handle("device_info")

    @property
    def device_class(self):
        """Info how device should be classified."""
        return self._handle("device_class")

    @property
    def unit_of_measurement(self):
        """Info on the units the entity state is in."""
        return self._handle("unit_of_measurement")

    @property
    def capability_attributes(self):
        """Info about capabilities."""
        return self._handle("capability_attributes")

    @property
    def supported_features(self):
        """Info about supported features."""
        return self._handle("supported_features")

    @property
    def entity_registry_enabled_default(self):
        """Return if the entity should be enabled when first added to the entity registry."""
        return self._handle("entity_registry_enabled_default")

    def _handle(self, attr):
        """Return attribute value from the supplied values, else the base class."""
        if attr in self._values:
            return self._values[attr]
        # Fall back to the default implementation on entity.Entity.
        return getattr(super(), attr)
@contextmanager
def mock_storage(data=None):
    """Mock storage.

    Data is a dict {'key': {'version': version, 'data': data}}

    Written data will be converted to JSON to ensure JSON parsing works.
    """
    if data is None:
        data = {}

    # Keep a reference to the unpatched load so mocked loads can still
    # run the real migration path.
    orig_load = storage.Store._async_load

    async def mock_async_load(store):
        """Mock version of load."""
        if store._data is None:
            # No data to load
            if store.key not in data:
                return None

            mock_data = data.get(store.key)

            if "data" not in mock_data or "version" not in mock_data:
                _LOGGER.error('Mock data needs "version" and "data"')
                raise ValueError('Mock data needs "version" and "data"')

            store._data = mock_data

        # Route through original load so that we trigger migration
        loaded = await orig_load(store)
        _LOGGER.info("Loading data for %s: %s", store.key, loaded)
        return loaded

    def mock_write_data(store, path, data_to_write):
        """Mock version of write data."""
        _LOGGER.info("Writing data to %s: %s", store.key, data_to_write)
        # To ensure that the data can be serialized
        # (round-trip through JSON with the store's encoder).
        data[store.key] = json.loads(json.dumps(data_to_write, cls=store._encoder))

    async def mock_remove(store):
        """Remove data."""
        data.pop(store.key, None)

    # Patch load/write/remove for the duration of the context; callers
    # receive the backing dict so they can seed and inspect it.
    with patch(
        "homeassistant.helpers.storage.Store._async_load",
        side_effect=mock_async_load,
        autospec=True,
    ), patch(
        "homeassistant.helpers.storage.Store._write_data",
        side_effect=mock_write_data,
        autospec=True,
    ), patch(
        "homeassistant.helpers.storage.Store.async_remove",
        side_effect=mock_remove,
        autospec=True,
    ):
        yield data
async def flush_store(store):
    """Make sure all delayed writes of a store are written."""
    if store._data is not None:
        # Cancel the pending listeners, then force the write immediately.
        store._async_cleanup_final_write_listener()
        store._async_cleanup_delay_listener()
        await store._async_handle_write_data()
async def get_system_health_info(hass, domain):
    """Get system health info."""
    registration = hass.data["system_health"][domain]
    return await registration.info_callback(hass)
def mock_integration(hass, module, built_in=True):
    """Mock an integration."""
    if built_in:
        pkg_path = f"{loader.PACKAGE_BUILTIN}.{module.DOMAIN}"
    else:
        pkg_path = f"{loader.PACKAGE_CUSTOM_COMPONENTS}.{module.DOMAIN}"
    integration = loader.Integration(hass, pkg_path, None, module.mock_manifest())

    def mock_import_platform(platform_name):
        # Any platform import attempt fails loudly instead of importing code.
        raise ImportError(
            f"Mocked unable to import platform '{platform_name}'",
            name=f"{integration.pkg_path}.{platform_name}",
        )

    integration._import_platform = mock_import_platform

    _LOGGER.info("Adding mock integration: %s", module.DOMAIN)
    hass.data.setdefault(loader.DATA_INTEGRATIONS, {})[module.DOMAIN] = integration
    hass.data.setdefault(loader.DATA_COMPONENTS, {})[module.DOMAIN] = module

    return integration
def mock_entity_platform(hass, platform_path, module):
    """Mock a entity platform.

    platform_path is in form light.hue. Will create platform
    hue.light.
    """
    domain, platform_name = platform_path.split(".")
    # Entity platforms are registered under the inverted path.
    mock_platform(hass, f"{platform_name}.{domain}", module)
def mock_platform(hass, platform_path, module=None):
    """Mock a platform.

    platform_path is in form hue.config_flow.
    """
    domain, _platform_name = platform_path.split(".")
    integration_cache = hass.data.setdefault(loader.DATA_INTEGRATIONS, {})
    module_cache = hass.data.setdefault(loader.DATA_COMPONENTS, {})

    # Make sure the owning integration exists before attaching the platform.
    if domain not in integration_cache:
        mock_integration(hass, MockModule(domain))

    _LOGGER.info("Adding mock integration platform: %s", platform_path)
    module_cache[platform_path] = module or Mock()
def async_capture_events(hass, event_name):
    """Create a helper that captures events."""
    captured = []

    @ha.callback
    def capture_events(event):
        captured.append(event)

    hass.bus.async_listen(event_name, capture_events)
    return captured
@ha.callback
def async_mock_signal(hass, signal):
    """Catch all dispatches to a signal."""
    dispatched = []

    @ha.callback
    def mock_signal_handler(*args):
        """Record the dispatched arguments."""
        dispatched.append(args)

    hass.helpers.dispatcher.async_dispatcher_connect(signal, mock_signal_handler)
    return dispatched
class hashdict(dict):
    """
    hashable dict implementation, suitable for use as a key into other dicts.

    >>> h1 = hashdict({"apples": 1, "bananas":2})
    >>> h2 = hashdict({"bananas": 3, "mangoes": 5})
    >>> h1+h2
    hashdict(apples=1, bananas=3, mangoes=5)
    >>> d1 = {}
    >>> d1[h1] = "salad"
    >>> d1[h1]
    'salad'
    >>> d1[h2]
    Traceback (most recent call last):
    ...
    KeyError: hashdict(bananas=3, mangoes=5)

    based on answers from
    http://stackoverflow.com/questions/1151658/python-hashable-dicts
    """

    def __key(self):
        # Canonical, order-independent identity: sorted (key, value) pairs.
        return tuple(sorted(self.items()))

    def __repr__(self):  # noqa: D105 no docstring
        # Bug fix: wrap the body in "<classname>(...)" so repr matches the
        # doctests above; the original returned only the "k=v, ..." body.
        body = ", ".join(f"{i[0]!s}={i[1]!r}" for i in self.__key())
        return f"{self.__class__.__name__}({body})"

    def __hash__(self):  # noqa: D105 no docstring
        return hash(self.__key())

    def __setitem__(self, key, value):  # noqa: D105 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")

    def __delitem__(self, key):  # noqa: D105 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")

    def clear(self):  # noqa: D102 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")

    def pop(self, *args, **kwargs):  # noqa: D102 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")

    def popitem(self, *args, **kwargs):  # noqa: D102 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")

    def setdefault(self, *args, **kwargs):  # noqa: D102 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")

    def update(self, *args, **kwargs):  # noqa: D102 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")

    # update is not ok because it mutates the object
    # __add__ is ok because it creates a new object
    # while the new object is under construction, it's ok to mutate it
    def __add__(self, right):  # noqa: D105 no docstring
        result = hashdict(self)
        dict.update(result, right)
        return result
def assert_lists_same(a, b):
    """Assert that two lists of dicts contain the same items, ignoring order."""
    counted_a = collections.Counter(hashdict(item) for item in a)
    counted_b = collections.Counter(hashdict(item) for item in b)
    assert counted_a == counted_b
|
server.py | import threading
from flask import Flask
from flask_cors import CORS
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from werkzeug.middleware.http_proxy import ProxyMiddleware
from werkzeug.serving import run_simple
import logging
from utils.logger import logger
from data.config import Constants
from start_modules import start_all
from others.file_static import app as app_file
from apis.api_main import app as app_api
# Only show error-level messages from werkzeug (suppress per-request access logs).
logger_werkzeug = logging.getLogger('werkzeug')
logger_werkzeug.setLevel(logging.ERROR)
# logger_werkzeug.setLevel(logging.DEBUG)

host, port = Constants.RUN_LISTENING, Constants.RUN_PORT
# The "shadow" server (static files + API dispatcher) runs one port above the
# public one when the frontend proxy is enabled.
shadow_port = port + 1

# Middleware: mount the API app under Constants.API_PATH; the static-file app
# handles everything else.
dm = DispatcherMiddleware(app_file, {Constants.API_PATH: app_api})

if Constants.RUN_FRONTEND_PROXY:
    # Development mode: a proxy forwards API calls and React dev-server assets.
    # NOTE(review): '/api/v1/' targets hard-coded port 8081 while the shadow
    # server listens on shadow_port (port + 1) -- confirm these are meant to agree.
    app_shadow = Flask(__name__ + "_shadow")
    app_proxy = ProxyMiddleware(app_shadow, {
        '/api/v1/': {
            "target": f"http://localhost:8081/api/v1/"
        },
        '/react/': {
            'target': "http://localhost:3000/"
        },
        '/static/': {
            'target': "http://localhost:3000/static/"
        }
    })
else:
    # No proxy in production mode; only the dispatcher is served.
    app_proxy = None
if __name__ == '__main__':
    logger.info('starting modules:')
    start_all()
    ssl_context = Constants.SSL_CONTEXT
    if Constants.RUN_FRONTEND_PROXY:
        # Proxy mode: two werkzeug servers in daemon threads -- the proxy on
        # the public port (with SSL) and the real dispatcher on the shadow port.
        # NOTE(review): werkzeug's reloader is designed for the main thread;
        # confirm RUN_USE_RELOAD works when run_simple runs inside a Thread.
        logger.info(f'server started at {host}:{port} (shadow at {host}:{shadow_port}) ssl={ssl_context}')
        run_kwargs = {
            'use_reloader': Constants.RUN_USE_RELOAD,
            # 'ssl_context': 'adhoc'
            'ssl_context': ssl_context
        }
        t1 = threading.Thread(target=run_simple, args=(host, shadow_port, dm), kwargs={
            'use_reloader': Constants.RUN_USE_RELOAD
        }, daemon=True)
        t2 = threading.Thread(target=run_simple, args=(host, port, app_proxy), kwargs=run_kwargs, daemon=True)
        for t in [t1, t2]:
            t.start()
        # join() keeps the main thread alive; daemon threads would otherwise
        # be killed when the main thread exits.
        for t in [t1, t2]:
            t.join()
    else:
        # Plain mode: serve the dispatcher directly on the public port.
        logger.info(f'server started at {host}:{port} ssl={ssl_context}')
        run_simple(host, port, dm, use_reloader=Constants.RUN_USE_RELOAD, ssl_context=ssl_context)
|
sql_isolation_testcase.py | """
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pygresql.pg
import os
try:
import subprocess32 as subprocess
except:
import subprocess
import re
import multiprocessing
import tempfile
import time
import sys
import socket
from optparse import OptionParser
import traceback
def is_digit(n):
    """Return True when *n* parses as an integer (including negatives, e.g. "-1")."""
    try:
        int(n)
    except ValueError:
        return False
    return True
def load_helper_file(helper_file):
    """Return the full contents of *helper_file* with surrounding whitespace stripped."""
    with open(helper_file) as f:
        return f.read().strip()
def parse_include_statement(sql):
    """Parse an "include: <path>;" directive and return the file path.

    Raises SyntaxError when the directive is not terminated by a semicolon.
    """
    include_statement, command = sql.split(None, 1)
    stripped_command = command.strip()
    if stripped_command.endswith(";"):
        # BUG FIX: the previous .replace(";", "") removed EVERY semicolon,
        # mangling any path that legitimately contains one. Strip only the
        # trailing terminator(s).
        return stripped_command.rstrip(";")
    else:
        raise SyntaxError("expected 'include: %s' to end with a semicolon." % stripped_command)
class SQLIsolationExecutor(object):
    """Drive an isolation test: parse per-session commands from a spec file and
    run each numbered session in its own multiprocessing.Process, so that
    interleavings and blocking behavior between transactions can be tested.

    NOTE: this file uses Python 2 syntax (print >>file) and pygresql.
    """

    def __init__(self, dbname=''):
        # Maps (session_name, connection_mode) -> SQLConnection.
        self.processes = {}
        # The re.S flag makes the "." in the regex match newlines.
        # When matched against a command in process_command(), all
        # lines in the command are matched and sent as SQL query.
        self.command_pattern = re.compile(r"^(-?\d+|[*])([&\\<\\>USIMq]*?)\:(.*)", re.S)
        if dbname:
            self.dbname = dbname
        else:
            # Fall back to the standard libpq environment variable.
            self.dbname = os.environ.get('PGDATABASE')

    class SQLConnection(object):
        """Parent-process handle for one test session.

        Owns the child process that holds the actual database connection and
        talks to it over a multiprocessing Pipe.
        """

        def __init__(self, out_file, name, mode, dbname):
            self.name = name
            self.mode = mode
            self.out_file = out_file
            self.dbname = dbname
            # Duplex pipe: parent sends (command, wait) tuples, child sends
            # back formatted result strings (or None on failure).
            parent_conn, child_conn = multiprocessing.Pipe(True)
            self.p = multiprocessing.Process(target=self.session_process, args=(child_conn,))
            self.pipe = parent_conn
            # True while a forked (background) command is outstanding.
            self.has_open = False
            self.p.start()

            # Close "our" copy of the child's handle, so that if the child dies,
            # recv() on the pipe will fail.
            child_conn.close();

            self.out_file = out_file

        def session_process(self, pipe):
            # Child-process entry point: create the session and run its loop.
            sp = SQLIsolationExecutor.SQLSessionProcess(self.name,
                self.mode, pipe, self.dbname)
            sp.do()

        def query(self, command):
            """Send one SQL command to the session and print its result."""
            print >>self.out_file
            self.out_file.flush()
            if len(command.strip()) == 0:
                return
            if self.has_open:
                raise Exception("Cannot query command while waiting for results")
            self.pipe.send((command, False))
            r = self.pipe.recv()
            if r is None:
                raise Exception("Execution failed")
            print >>self.out_file, r.rstrip()

        def fork(self, command, blocking):
            """Run a command in the background ("&" / ">" flags).

            With blocking=True, verify after a short delay that the command
            has NOT produced output yet, i.e. that it really blocks.
            """
            print >>self.out_file, " <waiting ...>"
            self.pipe.send((command, True))

            if blocking:
                time.sleep(0.5)
                if self.pipe.poll(0):
                    p = self.pipe.recv()
                    raise Exception("Forked command is not blocking; got output: %s" % p.strip())
            self.has_open = True

        def join(self):
            """Wait for a previously forked command ("<" flag) and print its output."""
            r = None
            print >>self.out_file, " <... completed>"
            if self.has_open:
                r = self.pipe.recv()
            if r is None:
                raise Exception("Execution failed")
            print >>self.out_file, r.rstrip()
            self.has_open = False

        def stop(self):
            """Graceful shutdown: an empty command makes the child loop exit."""
            self.pipe.send(("", False))
            self.p.join()
            if self.has_open:
                raise Exception("Should not finish test case while waiting for results")

        def quit(self):
            """Explicit session quit ("q" flag): print a marker, then stop."""
            print >>self.out_file, "... <quitting>"
            self.stop()

        def terminate(self):
            # Hard kill; used when the test run aborts with an error.
            self.pipe.close()
            self.p.terminate()

    class SQLSessionProcess(object):
        """Child-process side of a session: holds the pygresql connection and
        executes commands received over the pipe until an empty command arrives."""

        def __init__(self, name, mode, pipe, dbname):
            """
            Constructor
            """
            self.name = name
            self.mode = mode
            self.pipe = pipe
            self.dbname = dbname
            if self.mode == "utility":
                # Utility mode: connect directly to the primary segment whose
                # content id equals `name`.
                (hostname, port) = self.get_hostname_port(name, 'p')
                self.con = self.connectdb(given_dbname=self.dbname,
                                          given_host=hostname,
                                          given_port=port,
                                          given_opt="-c gp_session_role=utility")
            elif self.mode == "standby":
                # Connect to standby even when it's role is recorded
                # as mirror. This is useful for scenarios where a
                # test needs to promote a standby without using
                # gpactivatestandby.
                (hostname, port) = self.get_hostname_port(name, 'm')
                self.con = self.connectdb(given_dbname=self.dbname,
                                          given_host=hostname,
                                          given_port=port)
            elif self.mode == "mirror":
                # Connect to mirror even when it's role is recorded
                # as mirror. This is useful for scenarios where a
                # primary is marked down but could actually accept
                # connection. This implies utility connection.
                (hostname, port) = self.get_hostname_port(name, 'm')
                self.con = self.connectdb(given_dbname=self.dbname,
                                          given_host=hostname,
                                          given_port=port,
                                          given_opt="-c gp_session_role=utility")
            else:
                # Normal session: connect through the master.
                self.con = self.connectdb(self.dbname)

        def connectdb(self, given_dbname, given_host = None, given_port = None, given_opt = None):
            """Connect with retry: up to 1000 attempts (0.1s apart) while the
            server reports it is starting up or in recovery."""
            con = None
            retry = 1000
            while retry:
                try:
                    if (given_port is None):
                        con = pygresql.pg.connect(host= given_host,
                                                  opt= given_opt,
                                                  dbname= given_dbname)
                    else:
                        con = pygresql.pg.connect(host= given_host,
                                                  port= given_port,
                                                  opt= given_opt,
                                                  dbname= given_dbname)
                    break
                except Exception as e:
                    if (("the database system is starting up" in str(e) or
                         "the database system is in recovery mode" in str(e)) and
                        retry > 1):
                        retry -= 1
                        time.sleep(0.1)
                    else:
                        raise
            return con

        def get_hostname_port(self, contentid, role):
            """
            Gets the port number/hostname combination of the
            contentid and role
            """
            query = ("SELECT hostname, port FROM gp_segment_configuration WHERE"
                     " content = %s AND role = '%s'") % (contentid, role)
            con = self.connectdb(self.dbname, given_opt="-c gp_session_role=utility")
            r = con.query(query).getresult()
            con.close()
            if len(r) == 0:
                raise Exception("Invalid content %s" % contentid)
            # Local connections use host=None so pygresql picks the socket.
            if r[0][0] == socket.gethostname():
                return (None, int(r[0][1]))
            return (r[0][0], int(r[0][1]))

        # Print out a pygresql result set (a Query object, after the query
        # has been executed), in a format that imitates the default
        # formatting of psql. This isn't a perfect imitation: we left-justify
        # all the fields and headers, whereas psql centers the header, and
        # right-justifies numeric fields. But this is close enough, to make
        # gpdiff.pl recognize the result sets as such. (We used to just call
        # str(r), and let PyGreSQL do the formatting. But even though
        # PyGreSQL's default formatting is close to psql's, it's not close
        # enough.)
        def printout_result(self, r):
            widths = []
            # Figure out the widths of each column.
            fields = r.listfields()
            for f in fields:
                widths.append(len(str(f)))
            rset = r.getresult()
            for row in rset:
                colno = 0
                for col in row:
                    if col is None:
                        col = ""
                    widths[colno] = max(widths[colno], len(str(col)))
                    colno = colno + 1

            # Start printing. Header first.
            result = ""
            colno = 0
            for f in fields:
                if colno > 0:
                    result += "|"
                result += " " + f.ljust(widths[colno]) + " "
                colno = colno + 1
            result += "\n"

            # Then the bar ("----+----")
            colno = 0
            for f in fields:
                if colno > 0:
                    result += "+"
                result += "".ljust(widths[colno] + 2, "-")
                colno = colno + 1
            result += "\n"

            # Then the result set itself
            for row in rset:
                colno = 0
                for col in row:
                    if colno > 0:
                        result += "|"
                    if col is None:
                        col = ""
                    result += " " + str(col).ljust(widths[colno]) + " "
                    colno = colno + 1
                result += "\n"

            # Finally, the row count
            if len(rset) == 1:
                result += "(1 row)\n"
            else:
                result += "(" + str(len(rset)) +" rows)\n"
            return result

        def execute_command(self, command):
            """
            Executes a given command
            """
            try:
                r = self.con.query(command)
                if r and type(r) == str:
                    # Commands like INSERT return a string status; echo the
                    # command verb (e.g. "INSERT 1") like psql does.
                    echo_content = command[:-1].partition(" ")[0].upper()
                    return "%s %s" % (echo_content, r)
                elif r:
                    return self.printout_result(r)
                else:
                    # Commands with no result (BEGIN, COMMIT, ...): echo the verb.
                    echo_content = command[:-1].partition(" ")[0].upper()
                    return echo_content
            except Exception as e:
                # Errors are reported as text, not raised, so the test output
                # can record them and the session can continue.
                return str(e)

        def do(self):
            """
            Process loop.
            Ends when the command None is received
            """
            (c, wait) = self.pipe.recv()
            while c:
                if wait:
                    # Small delay so the parent's "is it blocking?" poll in
                    # fork() observes genuinely blocked commands.
                    time.sleep(0.1)
                r = self.execute_command(c)
                self.pipe.send(r)
                r = None

                (c, wait) = self.pipe.recv()

    def get_process(self, out_file, name, mode="", dbname=""):
        """
        Gets or creates the process by the given name
        """
        if len(name) > 0 and not is_digit(name):
            raise Exception("Name should be a number")
        if len(name) > 0 and mode != "utility" and int(name) >= 1024:
            raise Exception("Session name should be smaller than 1024 unless it is utility mode number")

        if not (name, mode) in self.processes:
            if not dbname:
                dbname = self.dbname
            self.processes[(name, mode)] = SQLIsolationExecutor.SQLConnection(out_file, name, mode, dbname)
        return self.processes[(name, mode)]

    def quit_process(self, out_file, name, mode="", dbname=""):
        """
        Quits a process with the given name
        """
        if len(name) > 0 and not is_digit(name):
            raise Exception("Name should be a number")
        if len(name) > 0 and mode != "utility" and int(name) >= 1024:
            raise Exception("Session name should be smaller than 1024 unless it is utility mode number")

        if not (name, mode) in self.processes:
            raise Exception("Sessions not started cannot be quit")

        self.processes[(name, mode)].quit()
        del self.processes[(name, mode)]

    def get_all_primary_contentids(self, dbname):
        """
        Retrieves all primary content IDs (including the master). Intended for
        use by *U queries.
        """
        if not dbname:
            dbname = self.dbname

        con = pygresql.pg.connect(dbname=dbname)
        result = con.query("SELECT content FROM gp_segment_configuration WHERE role = 'p'").getresult()
        if len(result) == 0:
            raise Exception("Invalid gp_segment_configuration contents")
        return [int(content[0]) for content in result]

    def process_command(self, command, output_file):
        """
        Processes the given command.
        The command at this point still includes the isolation behavior
        flags, e.g. which session to use.
        """
        process_name = ""
        sql = command
        flag = ""
        con_mode = ""
        dbname = ""
        m = self.command_pattern.match(command)
        if m:
            process_name = m.groups()[0]
            flag = m.groups()[1]
            if flag and flag[0] == "U":
                con_mode = "utility"
            elif flag and flag[0] == "S":
                # Strip the leading "S" so the remaining flag ("<", "q", ...)
                # is dispatched below; con_mode carries the standby intent.
                if len(flag) > 1:
                    flag = flag[1:]
                con_mode = "standby"
            elif flag and flag[0] == "M":
                con_mode = "mirror"
            sql = m.groups()[2]
            sql = sql.lstrip()
            # If db_name is specifed, it should be of the following syntax:
            # 1:@db_name <db_name>: <sql>
            if sql.startswith('@db_name'):
                sql_parts = sql.split(':', 2)
                # NOTE(review): split(':', 2) can yield up to 3 parts; SQL
                # containing a ':' is rejected here -- confirm intentional.
                if not len(sql_parts) == 2:
                    raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
                if not sql_parts[0].startswith('@db_name'):
                    raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
                if not len(sql_parts[0].split()) == 2:
                    raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
                dbname = sql_parts[0].split()[1].strip()
                if not dbname:
                    raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
                sql = sql_parts[1]
        if not flag:
            if sql.startswith('!'):
                sql = sql[1:]

                # Check for execution mode. E.g.
                #     !\retcode path/to/executable --option1 --option2 ...
                #
                # At the moment, we only recognize the \retcode mode, which
                # ignores all program output in the diff (it's still printed)
                # and adds the return code.
                mode = None
                if sql.startswith('\\'):
                    mode, sql = sql.split(None, 1)
                    if mode != '\\retcode':
                        raise Exception('Invalid execution mode: {}'.format(mode))

                cmd_output = subprocess.Popen(sql.strip(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
                stdout, _ = cmd_output.communicate()
                print >> output_file
                if mode == '\\retcode':
                    print >> output_file, '-- start_ignore'
                print >> output_file, stdout
                if mode == '\\retcode':
                    print >> output_file, '-- end_ignore'
                    print >> output_file, '(exited with code {})'.format(cmd_output.returncode)
            elif sql.startswith('include:'):
                # Inline the contents of another SQL file into this session.
                helper_file = parse_include_statement(sql)
                self.get_process(
                    output_file,
                    process_name,
                    dbname=dbname
                ).query(
                    load_helper_file(helper_file)
                )
            else:
                self.get_process(output_file, process_name, con_mode, dbname=dbname).query(sql.strip())
        elif flag == "&":
            # Background command expected to block.
            self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), True)
        elif flag == ">":
            # Background command, no blocking expectation.
            self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), False)
        elif flag == "<":
            if len(sql) > 0:
                raise Exception("No query should be given on join")
            self.get_process(output_file, process_name, con_mode, dbname=dbname).join()
        elif flag == "q":
            if len(sql) > 0:
                raise Exception("No query should be given on quit")
            self.quit_process(output_file, process_name, con_mode, dbname=dbname)
        elif flag == "U":
            # Utility-mode query; '*' fans out to the master and all primaries.
            if process_name == '*':
                process_names = [str(content) for content in self.get_all_primary_contentids(dbname)]
            else:
                process_names = [process_name]
            for name in process_names:
                self.get_process(output_file, name, con_mode, dbname=dbname).query(sql.strip())
        elif flag == "U&":
            self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), True)
        elif flag == "U<":
            if len(sql) > 0:
                raise Exception("No query should be given on join")
            self.get_process(output_file, process_name, con_mode, dbname=dbname).join()
        elif flag == "Uq":
            if len(sql) > 0:
                raise Exception("No query should be given on quit")
            self.quit_process(output_file, process_name, con_mode, dbname=dbname)
        elif flag == "S":
            self.get_process(output_file, process_name, con_mode, dbname=dbname).query(sql.strip())
        elif flag == "M":
            self.get_process(output_file, process_name, con_mode, dbname=dbname).query(sql.strip())
        else:
            raise Exception("Invalid isolation flag")

    def process_isolation_file(self, sql_file, output_file):
        """
        Processes the given sql file and writes the output
        to output file
        """
        try:
            command = ""
            for line in sql_file:
                #tinctest.logger.info("re.match: %s" %re.match(r"^\d+[q\\<]:$", line))
                print >>output_file, line.strip(),
                if line[0] == "!":
                    command_part = line # shell commands can use -- for multichar options like --include
                elif re.match(r";.*--", line) or re.match(r"^--", line):
                    command_part = line.partition("--")[0] # remove comment from line
                else:
                    command_part = line
                if command_part == "" or command_part == "\n":
                    print >>output_file
                elif command_part.endswith(";\n") or re.match(r"^\d+[q\\<]:$", line) or re.match(r"^-?\d+[SU][q\\<]:$", line):
                    # A complete command: either ends in ";" or is a bare
                    # join/quit flag line like "2<:" or "1q:".
                    command += command_part
                    try:
                        self.process_command(command, output_file)
                    except Exception as e:
                        # Report failures into the output file and keep going;
                        # the diff against the expected file catches them.
                        print >>output_file, "FAILED: ", e
                    command = ""
                else:
                    # Multi-line command still being accumulated.
                    command += command_part

            for process in self.processes.values():
                process.stop()
        except:
            for process in self.processes.values():
                process.terminate()
            raise
        finally:
            for process in self.processes.values():
                process.terminate()
class SQLIsolationTestCase:
    """
    The isolation test case allows a fine grained control of interleaved
    executing transactions. This is mainly used to test isolation behavior.

    [<#>[flag]:] <sql> | ! <shell scripts or command>
    #: either an integer indicating a unique session, or a content-id if
       followed by U (for utility-mode connections). In 'U' mode, the
       content-id can alternatively be an asterisk '*' to perform a
       utility-mode query on the master and all primaries.
    flag:
        &: expect blocking behavior
        >: running in background without blocking
        <: join an existing session
        q: quit the given session

        U: connect in utility mode to primary contentid from gp_segment_configuration
        U&: expect blocking behavior in utility mode (does not currently support an asterisk target)
        U<: join an existing utility mode session (does not currently support an asterisk target)
        I: include a file of sql statements (useful for loading reusable functions)

    An example is:

    Execute BEGIN in transaction 1
    Execute BEGIN in transaction 2
    Execute INSERT in transaction 2
    Execute SELECT in transaction 1
    Execute COMMIT in transaction 2
    Execute SELECT in transaction 1

    The isolation tests are specified identical to sql-scripts in normal
    SQLTestCases. However, it is possible to prefix a SQL line with
    a transaction identifier followed by a colon (":").
    The above example would be defined by
    1: BEGIN;
    2: BEGIN;
    2: INSERT INTO a VALUES (1);
    1: SELECT * FROM a;
    2: COMMIT;
    1: SELECT * FROM a;

    Blocking behavior can be tested by forking and joining.
    1: BEGIN;
    2: BEGIN;
    1: DELETE FROM foo WHERE a = 4;
    2&: DELETE FROM foo WHERE a = 4;
    1: COMMIT;
    2<:
    2: COMMIT;

    2& forks the command. It is executed in the background. If the
    command is NOT blocking at this point, it is considered an error.
    2< joins the background command and outputs the result of the
    command execution.

    Session ids should be smaller than 1024.

    2U: Executes a utility command connected to port 40000.

    One difference to SQLTestCase is the output of INSERT.
    SQLTestCase would output "INSERT 0 1" if one tuple is inserted.
    SQLIsolationTestCase would output "INSERT 1". As the
    SQLIsolationTestCase needs to have a more fine-grained control
    over the execution order than possible with PSQL, it uses
    the pygresql python library instead.

    Connecting to a specific database:
    1. If you specify a db_name metadata in the sql file, connect to that database in all open sessions.
    2. If you want a specific session to be connected to a specific database , specify the sql as follows:
    1:@db_name testdb: <sql>
    2:@db_name test2db: <sql>
    1: <sql>
    2: <sql>
    etc

    Here session 1 will be connected to testdb and session 2 will be connected to test2db. You can specify @db_name only at the beginning of the session. For eg:, following would error out:
    1:@db_name testdb: <sql>
    2:@db_name test2db: <sql>
    1: @db_name testdb: <sql>
    2: <sql>
    etc

    Quitting sessions:
    By default, all opened sessions will be stopped only at the end of the sql file execution. If you want to explicitly quit a session
    in the middle of the test execution, you can specify a flag 'q' with the session identifier. For eg:
    1:@db_name testdb: <sql>
    2:@db_name test2db: <sql>
    1: <sql>
    2: <sql>
    1q:
    2: <sql>
    3: <sql>
    2q:
    3: <sql>
    2: @db_name test: <sql>

    1q: ---> Will quit the session established with testdb.
    2q: ---> Will quit the session established with test2db.

    The subsequent 2: @db_name test: <sql> will open a new session with the database test and execute the sql against that session.

    Catalog Modification:

    Some tests are easier to write if it's possible to modify a system
    catalog across the *entire* cluster. To perform a utility-mode query on
    all segments and the master, you can use *U commands:

    *U: SET allow_system_table_mods = true;
    *U: UPDATE pg_catalog.<table> SET <column> = <value> WHERE <cond>;

    Since the number of query results returned by a *U command depends on
    the developer's cluster configuration, it can be useful to wrap them in
    a start_/end_ignore block. (Unfortunately, this also hides legitimate
    failures; a better long-term solution is needed.)

    Block/join flags are not currently supported with *U.

    Including files:

    -- example contents for file.sql: create function some_test_function() returning void ...
    include: path/to/some/file.sql;
    select some_helper_function();
    """

    def run_sql_file(self, sql_file, out_file = None, out_dir = None, optimizer = None):
        """
        Given a sql file and an ans file, this adds the specified gucs (self.gucs) to the sql file , runs the sql
        against the test case database (self.db_name) and verifies the output with the ans file.
        If an 'init_file' exists in the same location as the sql_file, this will be used
        while doing gpdiff.
        """
        # Add gucs to the test sql and form the actual sql file to be run
        if not out_dir:
            out_dir = self.get_out_dir()

        if not os.path.exists(out_dir):
            # NOTE(review): TINCSystem comes from the TINC test framework and
            # is not imported in this module -- provided by the subclass's
            # environment.
            TINCSystem.make_dirs(out_dir, ignore_exists_error = True)

        if optimizer is None:
            gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file))
        else:
            # sql file will be <basename>_opt.sql or <basename>_planner.sql based on optimizer
            gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file).replace('.sql', '_%s.sql' %self._optimizer_suffix(optimizer)))
        self._add_gucs_to_sql_file(sql_file, gucs_sql_file, optimizer)
        self.test_artifacts.append(gucs_sql_file)

        if not out_file:
            if optimizer is None:
                out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '.out'))
            else:
                # out file will be *_opt.out or *_planner.out based on optimizer
                out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '_%s.out' %self._optimizer_suffix(optimizer)))

        self.test_artifacts.append(out_file)

        executor = SQLIsolationExecutor(dbname=self.db_name)
        with open(out_file, "w") as f:
            executor.process_isolation_file(open(sql_file), f)
            f.flush()

        # Strip a trailing ".t" suffix from the reported output path, if any.
        if out_file[-2:] == '.t':
            out_file = out_file[:-2]

        return out_file
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("--dbname", dest="dbname",
help="connect to database DBNAME", metavar="DBNAME")
(options, args) = parser.parse_args()
executor = SQLIsolationExecutor(dbname=options.dbname)
executor.process_isolation_file(sys.stdin, sys.stdout)
|
solaris_exporter.py | #!/usr/bin/python
"""
sparc-exporter
version v2021Jan05
2020 Jan 31. Initial
2020 Feb 04. Added UpTime in UpTimeCollector.
2020 Feb 09. Added DiskErrorCollector, ZpoolCollector, FmadmCollector, SVCSCollector, FCinfoCollector
2020 Dec 17. Added PrtdiagCollector, MetaStatCollector, MetaDBCollector
2021 Jan 05. Added TextFileCollector, SVCSCollector now enabled for all zones (Thanks to Marcel Peter)
2021 Mar 01. Fixed psutil version to 5.7.0 (something changed in the newer versions; no time to look into it yet)
Written by Alexander Golikov for collecting SPARC Solaris metrics for Prometheus.
Tested on Solaris 11.3.25, 11.4.4, 10u11(limited) SPARC.
May be it also will work on x86 platform, but this is not tested.
This exporter provides info about:
- Solaris Zones CPU Usage with processor sets info (PerZoneCpuCollector);
- Solaris Zones Virtual Memory (SWAP) Resource Capping (PerZoneCapsCollector);
- Common CPU stats (CpuTimeCollector);
- Avg Load (CpuLoadCollector);
- Disk IO (DiskIOCollector);
- Disk Errors (DiskErrorCollector);
- Disk Space (DiskSpaceCollector, requires 'file_dac_search' priv for solaris zones)
- Memory Usage, swap-in, swap-out (MemCollector);
- Network Interfaces (NetworkCollector);
- Node time, uptime (CurTimeCollector, UpTimeCollector);
- FC links Multipath (FCinfoCollector, /usr/sbin/mpathadm list lu)
- System Services health via 'svcs -x' command (SVCSCollector);
- Whole system health via 'fmadm faulty' (FmadmCollector), requires pfexec of '/usr/sbin/fmadm'.
- Zpool devices health via 'zpool status' command (ZpoolCollector)
- Solaris Volume Manager disk status (MetaStatCollector, MetaDBCollector).
- Get info from text files *.prom in folder provided by text_file_path var (TextFileCollector).
Installation. To use this exporter you need python2.7 and its modules prometheus_client, psutil.
Solaris 10u11:
# Setup proxy vars to have access to internet
export http_proxy=http://proxy.example.com:3128
export https_proxy=https://proxy.example.com:3128
# Install pkgutil
pkgadd -d http://get.opencsw.org/now
# Update repo list and install 'py_pip', 'python27', 'python27_dev', 'gcc5core'
/opt/csw/bin/pkgutil -U
/opt/csw/bin/pkgutil -y -i py_pip
/usr/sbin/pkgchk -L CSWpy-pip # list installed files if you need
/opt/csw/bin/pkgutil -y -i python27
/opt/csw/bin/pkgutil -y -i python27_dev
/opt/csw/bin/pkgutil -y -i gcc5core
# Install the Python 2.7 module prometheus_client; it installs easily.
/opt/csw/bin/pip2.7 install prometheus_client
# Install the Python 2.7 module psutil; it has to compile some libs, but we preinstalled everything needed
ln -s /opt/csw/bin/gcc-5.5 /opt/csw/bin/gcc-5.2
/opt/csw/bin/pip2.7 install psutil==5.7.0
# Run exporter, check http://ip:9100
/opt/csw/bin/python2.7 solaris_exporter.py
Solaris 11.4:
# Setup proxy vars to have access to internet
export https_proxy=https://proxy.example.com:3128
# Install the Python 2.7 module prometheus_client; it installs easily.
pip install prometheus_client
# Install the Python 2.7 module psutil; it has to compile some libs
# Also you could get psutil via 'pkg install library/python/psutil-27',
# but it returns wrong Network statistics, tested from Solaris 11.4.4 repo.
pkg install pkg:/developer/gcc/gcc-c-5
ln -s /usr/bin/gcc /usr/bin/cc
export CFLAGS=-m32
pip install psutil==5.7.0
# Run exporter, check http://ip:9100
python2.7 solaris_exporter.py
"""
import time
import re
import subprocess
import threading
import socket
import psutil
from psutil import _psutil_sunos as cext
import os
from prometheus_client.core import REGISTRY, Counter, Gauge, GaugeMetricFamily, CounterMetricFamily, UntypedMetricFamily
from prometheus_client.parser import text_string_to_metric_families
from prometheus_client import start_http_server
from glob import glob
# TCP port the embedded Prometheus HTTP server listens on.
exporter_port = 9100
# Directory scanned by TextFileCollector for *.prom metric files.
text_file_path = '/opt/solaris_exporter/'
# How often (seconds) cached lookup dictionaries (e.g. disk name mappings) are rebuilt.
dictionaries_refresh_interval_sec = 600

# kstat disk counter name -> human-readable help text for exported metrics.
disk_operations_dictionary = {
    'reads': 'number of read operations',
    'writes': 'number of write operations',
    'nread': 'number of bytes read',
    'nwritten': 'number of bytes written',
    'wlentime': 'cumulative wait length time product',
    'rlentime': 'cumulative run length time product',
    'rtime': 'cumulative run service time',
    'wtime': 'cumulative wait pre-service time',
    'rcnt': 'count of elements in run state',
    'wcnt': 'count of elements in wait state',
    'crtime': 'creation time in seconds with nano',
    'snaptime': 'time of last data snapshot in seconds with nano',
    'rlastupdate': 'last time run queue changed in seconds with nano',
    'wlastupdate': 'last time wait queue changed in seconds with nano',
}

# Per-zone kstat counter name -> help text for the zone CPU collector.
per_zone_cpu_counters_dictionary = {
    'cpu_nsec_kernel': 'per CPU microstate counter kernel time for zone, seconds',
    'cpu_nsec_user': 'per CPU microstate counter user time for zone, seconds',
    'readch': 'bytes read for zone',
    'writech': 'bytes wrote for zone',
    'sysread': 'read count for zone',
    'syswrite': 'write count for zone',
    'syscall': 'system calls for zone',
    'sysexec': 'execs for zone',
    'sysfork': 'forks for zone',
    'sysspawn': 'spawns for zone',
}
def run_shell_command(commandline, timeout):
    """
    Run OS command with timeout and status return. Also works in Python 2.7.

    Returns a tuple (output, task_return_code, task_timeouted):
      - output: captured stdout (empty string on launch failure)
      - task_return_code: process exit code; 100 when timed out, 101 when
        the executable could not be launched (OSError)
      - task_timeouted: True when the command was killed by the watchdog

    Example:
        output, task_return_code, task_timeouted = run_shell_command('shell command text', timeout)
    """
    output = ""
    task_timeouted = False
    task_return_code = 100
    FNULL = open(os.devnull, 'w')
    try:
        task = subprocess.Popen(commandline.split(), shell=False, stdout=subprocess.PIPE, stderr=FNULL)
    except OSError:
        # BUG FIX: previously the devnull handle leaked on this early return.
        FNULL.close()
        task_return_code = 101
        return "", task_return_code, task_timeouted
    task_stop_time = time.time() + timeout

    def killer_for_task(task, task_stop_time):
        # Watchdog: poll until the task exits or the deadline passes,
        # then kill it and close its stdout to unblock communicate().
        while task.poll() is None and time.time() < task_stop_time:
            time.sleep(0.1)
        if time.time() > task_stop_time:
            try:
                task.kill()
                try:
                    task.stdout.close()
                except (ValueError, IOError) as e:
                    pass
            except OSError:
                pass

    killer_job = threading.Thread(target=killer_for_task, args=(task, task_stop_time))
    killer_job.start()
    # Wait for subprocess complete. Timeout is controlled by thread killer_job
    try:
        output = task.communicate()[0]
    except ValueError:
        # stdout was closed underneath us by the watchdog.
        pass
    killer_job.join()
    FNULL.close()
    if time.time() >= task_stop_time:
        task_timeouted = True
    else:
        task_return_code = task.returncode
    try:
        task.stdout.close()
    except ValueError:
        pass
    return output, task_return_code, task_timeouted
def get_disk_dictionary():
    """
    Build a mapping of disk names from two iostat listings.

    Returns a dict in the format:
        {kernel_disk_name: [admin_disk_name, disk_description]}
    emulating these commands:
        # /usr/bin/iostat -E | grep Soft | awk '{ print $1}' > /tmp/a;
        # /usr/bin/iostat -En | grep Soft|awk '{ print $1 }' > /tmp/b; paste /tmp/a /tmp/b
        # /usr/bin/rm /tmp/a /tmp/b
    """
    disks = {}
    out_e, rc_e, timeouted_e = run_shell_command('/usr/bin/iostat -E', 4)
    out_en, rc_en, timeouted_en = run_shell_command('/usr/bin/iostat -En', 4)
    if timeouted_e is False and rc_e == 0 and timeouted_en is False and rc_en == 0:
        # 'iostat -E' lists kernel device names; each disk stanza starts
        # with a line containing "Soft".
        kernel_names = [line.split()[0] for line in out_e.splitlines() if "Soft" in line]
        admin_names = []
        pair_index = 0
        for line in out_en.splitlines():
            if "Soft" in line:
                # 'iostat -En' lists administrative names in the same order.
                admin_names.append(line.split()[0])
            elif "Vendor" in line:
                # Collapse the Vendor/Product line into "Vendor Product".
                description = re.sub(r'Vendor: (.*[^ ]) *Product: (.*[^ ]) *(Revision|Size).*', r'\1 \2',
                                     line)
                description = re.sub(r' +', ' ', description)  # replace double spaces by one space
                disks.update({kernel_names[pair_index]: [admin_names[pair_index], description]})
                pair_index += 1
    return (disks)
class NetworkCollector(object):
    """
    Network Interfaces stats (Prometheus custom collector).
    """
    # timeout how match seconds is allowed to collect data
    max_time_to_run = 4
    NetworkCollector_Timeouts = Counter('solaris_exporter_network_usage_timeouts',
                                        'Number of times when collector ran' +
                                        ' more than ' + str(max_time_to_run) + ' seconds')
    NetworkCollector_Errors = Counter('solaris_exporter_network_usage_errors', 'Number of times when collector ran' +
                                      ' with errors')
    network_collector_run_time = Gauge('solaris_exporter_network_usage_processing', 'Time spent processing request')

    def collect_unused(self):
        """Legacy kstat-based collector, kept for reference (not registered)."""
        with self.network_collector_run_time.time():
            output, task_return_code, task_timeouted = run_shell_command('kstat -p -c net :::*bytes64',
                                                                         self.max_time_to_run)
            if task_return_code == 0 and task_timeouted is False:
                lines = output.splitlines()
                network_usage = CounterMetricFamily("solaris_exporter_network_usage", 'kstat counters',
                                                    labels=['driver', 'name', 'statistic', 'host'])
                for line in lines:
                    kstatkeyvalue = line.split("\t")
                    # Sanitize separators in the kstat key before splitting.
                    kstatkeyvalue[0] = re.sub('[ ,!=]', '_', kstatkeyvalue[0]).replace(",", ".")
                    kstatkey = kstatkeyvalue[0].split(":")
                    driver = kstatkey[0]
                    # instance = kstatkey[1]
                    name = kstatkey[2]
                    statistic = kstatkey[3].replace('obytes', 'output-bytes').replace('rbytes', 'input-bytes').replace(
                        'odropbytes', 'output-dropped-bytes').replace('idropbytes', 'input-dropped-bytes')
                    value = kstatkeyvalue[1]
                    if value == "" or name == "class":
                        continue
                    network_usage.add_metric([driver, name, statistic, host_name], value)
                # BUG FIX: the yield used to sit after the error branch at
                # function level, so a failed/timed-out kstat run raised
                # NameError on the unbound 'network_usage'. Yield only on success.
                yield network_usage
            else:
                self.NetworkCollector_Errors.inc()
                if task_timeouted:
                    self.NetworkCollector_Timeouts.inc()

    def collect(self):
        """Yield per-NIC counters gathered via psutil."""
        with self.network_collector_run_time.time():
            try:
                net_stats = psutil.net_io_counters(pernic=True)
            except RuntimeError:
                self.NetworkCollector_Errors.inc()
            else:
                network_usage = CounterMetricFamily("solaris_exporter_network_usage", 'kstat counters',
                                                    labels=['NIC', 'statistic', 'host'])
                for NIC in net_stats:
                    network_usage.add_metric([NIC, 'bytes_sent', host_name], net_stats[NIC].bytes_sent)
                    network_usage.add_metric([NIC, 'bytes_recv', host_name], net_stats[NIC].bytes_recv)
                    network_usage.add_metric([NIC, 'errin', host_name], net_stats[NIC].errin)
                    network_usage.add_metric([NIC, 'errout', host_name], net_stats[NIC].errout)
                    network_usage.add_metric([NIC, 'dropin', host_name], net_stats[NIC].dropin)
                    network_usage.add_metric([NIC, 'dropout', host_name], net_stats[NIC].dropout)
                yield network_usage
class DiskIOCollector(object):
    """
    Disk IO Stats

    Parses 'kstat -p -c disk' output into one CounterMetricFamily with a
    sample per (driver, disk, statistic).
    """
    # timeout: how many seconds the collector is allowed to run
    max_time_to_run = 4
    disk_io_collector_timeouts = Counter('solaris_exporter_diskio_usage_timeouts',
                                         'Number of times when collector ran' +
                                         ' more than ' + str(max_time_to_run) + ' seconds')
    disk_io_collector_errors = Counter('solaris_exporter_diskio_usage_errors', 'Number of times when collector ran' +
                                       ' with errors')
    disk_io_collector_run_time = Gauge('solaris_exporter_diskio_usage_processing', 'Time spent processing request')

    def collect(self):
        with self.disk_io_collector_run_time.time():
            output, task_return_code, task_timeouted = run_shell_command('kstat -p -c disk', self.max_time_to_run)
            # family is created unconditionally so the final yield is safe on the error path
            disk_io_usage = CounterMetricFamily("solaris_exporter_diskio_usage", 'kstat counters',
                                                labels=['driver', 'name', 'statistic', 'stat_desc',
                                                        'admin_name', 'admin_desc', 'host'])
            if task_return_code == 0 and task_timeouted is False:
                lines = output.splitlines()
                for line in lines:
                    # kstat -p prints 'module:instance:name:statistic<TAB>value'
                    kstatkeyvalue = line.split("\t")
                    # NOTE(review): re.sub already turned commas into '_', so the
                    # following .replace(",", ".") appears to be a no-op — confirm
                    kstatkeyvalue[0] = re.sub('[ ,!=]', '_', kstatkeyvalue[0]).replace(",", ".")
                    kstatkey = kstatkeyvalue[0].split(":")
                    driver = kstatkey[0]
                    # instance = kstatkey[1]
                    name = kstatkey[2]
                    statistic = kstatkey[3]
                    value = kstatkeyvalue[1]
                    # skip useless values
                    if value == "" or value == "disk":
                        continue
                    # skip useless statistic (timestamps and queue-state snapshots)
                    if statistic in ['wlastupdate', 'rlastupdate', 'rcnt', 'wcnt', 'crtime', 'snaptime']:
                        continue
                    # resolve admin_name and admin_desc via dictionary
                    try:
                        admin_name = disk_dictionary[name][0]
                        admin_desc = disk_dictionary[name][1]
                    except KeyError:
                        admin_name = "unknown"
                        admin_desc = "unknown"
                    # resolve stat_desc via dictionary
                    try:
                        stat_desc = disk_operations_dictionary[statistic]
                    except KeyError:
                        stat_desc = "unknown"
                    disk_io_usage.add_metric([driver, name, statistic, stat_desc, admin_name, admin_desc,
                                              host_name], float(value))
            else:
                self.disk_io_collector_errors.inc()
                if task_timeouted:
                    self.disk_io_collector_timeouts.inc()
            yield disk_io_usage
class DiskErrorCollector(object):
    """Exports per-device error counters parsed from 'kstat -p -c device_error'."""
    # timeout: how many seconds the collector is allowed to run
    max_time_to_run = 4
    disk_er_collector_timeouts = Counter('solaris_exporter_disk_error_collector_timeouts',
                                         'Number of times when collector ran' +
                                         ' more than ' + str(max_time_to_run) + ' seconds')
    disk_er_collector_errors = Counter('solaris_exporter_disk_error_collector_errors', 'Number of times when collector ran' +
                                       ' with errors')
    disk_er_collector_run_time = Gauge('solaris_exporter_disk_errors_collector_processing', 'Time spent processing request')

    def collect(self):
        with self.disk_er_collector_run_time.time():
            output, exit_code, timed_out = run_shell_command('kstat -p -c device_error :::/.*Errors/', self.max_time_to_run)
            disk_errors = CounterMetricFamily("solaris_exporter_disk_errors", 'kstat counters',
                                              labels=['driver', 'name', 'statistic',
                                                      'admin_name', 'admin_desc', 'host'])
            if exit_code == 0 and timed_out is False:
                # each line looks like: sderr:58:sd58,err:Transport Errors<TAB>0
                for raw_line in output.splitlines():
                    key_and_value = raw_line.split("\t")
                    sanitized = re.sub('[ ,!=]', '_', key_and_value[0]).replace(",", ".")
                    key_fields = sanitized.split(":")
                    module = key_fields[0].replace('err', '')
                    # key_fields[1] is the kstat instance number (unused)
                    name = key_fields[2].replace('_err', '')
                    statistic = key_fields[3]
                    value = key_and_value[1]
                    # map the kernel disk name onto its administrative name/description
                    admin_name, admin_desc = disk_dictionary.get(name, ["unknown", "unknown"])
                    disk_errors.add_metric([module, name, statistic, admin_name, admin_desc,
                                            host_name], float(value))
            else:
                self.disk_er_collector_errors.inc()
                if timed_out:
                    self.disk_er_collector_timeouts.inc()
            yield disk_errors
class CpuLoadCollector(object):
    """
    CPU load average 1, 5, 15 min, cpu count
    """
    cpu_load_collector_run_time = Gauge('solaris_exporter_cpu_load_processing', 'Time spent processing request')

    def collect(self):
        """Yield a gauge family with 1/5/15-minute load averages and the vCPU count."""
        with self.cpu_load_collector_run_time.time():
            worker_stat_cpu_load = GaugeMetricFamily('solaris_exporter_cpu_load',
                                                     'python psutil counters, system load avg.',
                                                     labels=['host', 'statistic'])
            cpuinfo = os.getloadavg()
            worker_stat_cpu_load.add_metric([host_name, 'load1m'], cpuinfo[0])
            # fixed: label value used to be 'load5m ' with a stray trailing space,
            # inconsistent with the load1m/load15m labels
            worker_stat_cpu_load.add_metric([host_name, 'load5m'], cpuinfo[1])
            worker_stat_cpu_load.add_metric([host_name, 'load15m'], cpuinfo[2])
            # vCPU count = number of per-CPU entries psutil reports
            cpuinfo = len(psutil.cpu_percent(interval=None, percpu=True))
            worker_stat_cpu_load.add_metric([host_name, 'vcpu'], cpuinfo)
            yield worker_stat_cpu_load
class CpuTimeCollector(object):
    """
    CPU time may be translated in percent later
    """
    cpu_time_collector_run_time = Gauge('solaris_exporter_cpu_time_processing', 'Time spent processing request')

    def collect(self):
        """Yield cumulative CPU times (user/system/idle/iowait) as counters."""
        with self.cpu_time_collector_run_time.time():
            worker_stat_cpu_time = CounterMetricFamily('solaris_exporter_cpu_time',
                                                       'python psutil counters, CPU usage time.',
                                                       labels=['host', 'statistic'])
            cpuinfo = psutil.cpu_times(percpu=False)
            worker_stat_cpu_time.add_metric([host_name, 'user'], cpuinfo.user)
            worker_stat_cpu_time.add_metric([host_name, 'system'], cpuinfo.system)
            worker_stat_cpu_time.add_metric([host_name, 'idle'], cpuinfo.idle)
            # fixed: statistic label was misspelled 'oiwait'; queries matching
            # 'iowait' silently returned no data
            worker_stat_cpu_time.add_metric([host_name, 'iowait'], cpuinfo.iowait)
            yield worker_stat_cpu_time
class MemCollector(object):
    """Exports RAM and swap usage gauges taken from psutil."""
    mem_collector_run_time = Gauge('solaris_exporter_MemCollector_processing', 'Time spent processing request')

    def collect(self):
        with self.mem_collector_run_time.time():
            memory_family = GaugeMetricFamily('solaris_exporter_memory_usage_bytes',
                                              'python psutil counters, Memory usage in bytes.',
                                              labels=['host', 'type', 'counter'])
            ram = psutil.virtual_memory()
            swap = psutil.swap_memory()
            # (type, counter, value) triples in the exact order they are exported
            samples = (
                ('virtual', 'used', ram.used),
                ('virtual', 'available', ram.available),
                ('virtual', 'total', ram.total),
                ('virtual', 'free', ram.free),
                ('swap', 'total', swap.total),
                ('swap', 'used', swap.used),
                ('swap', 'free', swap.free),
                ('swap', 'sin', swap.sin),
                ('swap', 'sout', swap.sout),
            )
            for mem_type, counter_name, amount in samples:
                memory_family.add_metric([host_name, mem_type, counter_name], amount)
            yield memory_family
# this code is rewritten psutil.disk_partitions() due to bug with nfs mounted in local zones
# https://github.com/giampaolo/psutil/issues/1674
# later it was simplified as using cext.disk_partitions() in my code.
# from psutil import _psposix
# from psutil import _psutil_sunos as cext
# from psutil import _common
# disk_usage = _psposix.disk_usage
#
# def my_disk_partitions(all=False):
# """Return system disk partitions.
#     This function is a rewritten psutil.disk_partitions() due to its bug
#     with NFS folders mounted into Solaris local zones. Now we have a try-except OSError 'Not owner'
#     around disk_usage()
# """
# retlist = []
# partitions = cext.disk_partitions()
# for partition in partitions:
# device, mountpoint, fstype, opts = partition
# if device == 'none':
# device = ''
# if not all:
# try:
# if not disk_usage(mountpoint).total:
# continue
# except OSError:
# continue
#
# ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
# retlist.append(ntuple)
# return retlist
class DiskSpaceCollector(object):
    """
    Disk space stats
    Note that UFS inode info is NOT collected.
    """
    disk_space_collector_run_time = Gauge('solaris_exporter_diskspace_worker', 'Time spent processing request')

    def collect(self):
        with self.disk_space_collector_run_time.time():
            worker_stat_space = GaugeMetricFamily('solaris_exporter_diskspace_usage_bytes',
                                                  'python psutil counters, diskspace usage in bytes.',
                                                  labels=['host', 'statistic', 'mountpoint', 'device', 'fstype', ])
            # disk_partitions = my_disk_partitions(all=False) # rewritten due to bug: https://github.com/giampaolo/psutil/issues/1674
            # cext is psutil's C extension; calling it directly bypasses the buggy wrapper
            disk_partitions = cext.disk_partitions()
            for partition in disk_partitions:
                device, mountpoint, fstype, opts = partition
                # only local zfs/ufs filesystems are interesting
                if fstype not in ['zfs', 'ufs']:
                    continue
                if '/VARSHARE' in device:
                    continue
                try:
                    spaceinfo = psutil.disk_usage(mountpoint)
                except OSError:
                    # e.g. 'Not owner' on NFS mounted into a local zone — skip it
                    continue
                worker_stat_space.add_metric([host_name, 'used', mountpoint, device, fstype], spaceinfo.used)
                worker_stat_space.add_metric([host_name, 'total', mountpoint, device, fstype], spaceinfo.total)
                worker_stat_space.add_metric([host_name, 'free', mountpoint, device, fstype], spaceinfo.free)
                worker_stat_space.add_metric([host_name, 'percent', mountpoint, device, fstype], spaceinfo.percent)
            yield worker_stat_space
class CurTimeCollector(object):
    """Exports current wall-clock time, for dirty comparison with Prometheus server time."""
    def collect(self):
        # one unlabeled sample carrying time.time()
        now_family = CounterMetricFamily('solaris_exporter_current_time_seconds', 'Current time of system', labels=[])
        now_family.add_metric([], time.time())
        yield now_family
class UpTimeCollector(object):
    """Exports seconds since boot, for reboot alarming."""
    def collect(self):
        seconds_up = time.time() - psutil.boot_time()
        uptime_family = CounterMetricFamily('solaris_exporter_uptime_seconds', 'uptime of system', labels=[])
        uptime_family.add_metric([], seconds_up)
        yield uptime_family
def get_pset_dictionary():
    """
    Returns pset dictionary: {'pset_num': 'cpu_count_in_pset'}, example: {'0': '144'}
    """
    pset_cpus = {}
    # kstat prints one 'unix:<pset>:pset:ncpus<TAB><count>' line per processor set
    output, return_code, timeouted = run_shell_command("kstat -p -c misc unix::pset:ncpus", 5)
    for stat_line in output.splitlines():
        key_and_value = stat_line.split("\t")
        normalized_key = re.sub('[ ,!=]', '_', key_and_value[0]).replace(",", ".")
        key_fields = normalized_key.split(":")
        # key_fields layout: ['unix', pset_number, 'pset', 'ncpus']
        pset_cpus[key_fields[1]] = float(key_and_value[1])
    return pset_cpus
class PerZoneCpuCollector(object):
    """
    Solaris Zones CPU Usage with processor sets info and zone activity stats
    """
    # timeout: how many seconds the collector is allowed to run
    max_time_to_run = 25
    per_zone_cpu_collector_timeouts = Counter('solaris_exporter_per_zone_cpu_timeouts',
                                              'Number of times when collector ran' +
                                              ' more than ' + str(max_time_to_run) + ' seconds')
    per_zone_cpu_collector_errors = Counter('solaris_exporter_per_zone_cpu_errors',
                                            'Number of times when collector ran with errors')
    per_zone_cpu_collector_run_time = Gauge('solaris_exporter_per_zone_cpu_processing', 'Time spent processing request')

    def collect(self):
        with self.per_zone_cpu_collector_run_time.time():
            per_zone_usage = CounterMetricFamily("solaris_exporter_per_zone_usage_total", 'kstat counters',
                                                 labels=['zone', 'statistic', 'stat_desc', 'pset', 'host'])
            per_zone_usage_dict = {}  # will be nested dict: {zone_sys_number: {statistic: accumulated value}}
            zonename_dict = {}  # zone_sys_number -> human-readable zone name
            zone_pset_dict = {}  # zone_sys_number -> processor-set number
            # build a kstat filter matching every counter in per_zone_cpu_counters_dictionary
            # plus the 'zonename' pseudo-statistic
            query = ''
            for counter in per_zone_cpu_counters_dictionary:
                query += "|^"+counter+"$"
            query = "-c zones cpu::/^sys_zone_*/:/(" + query[1:] + "|^zonename$)/"
            # print('kstat -p '+query)
            output, task_return_code, task_timeouted = run_shell_command('kstat -p '+query, self.max_time_to_run)
            if task_return_code == 0 and task_timeouted is False:
                lines = output.splitlines()
                for line in lines:
                    kstatkeyvalue = line.split("\t")
                    kstatkeyvalue[0] = re.sub('[ ,!=]', '_', kstatkeyvalue[0]).replace(",", ".")
                    kstatkey = kstatkeyvalue[0].split(":")
                    # kstatkey[0] # always set to 'cpu'
                    # kstatkey[1] # zone_sys_number or cpu_number
                    # kstatkey[2] # 'sys_zone_21' or 'sys_zone_accum' or 'sys_zone_pset_0_accum'
                    if kstatkey[2].startswith('sys_zone_pset_'):
                        # remember which processor set this zone belongs to
                        zone_pset_number = re.sub(r'sys_zone_pset_([0-9]+)_accum', r'\1', kstatkey[2])
                        zone_sys_number = kstatkey[1]
                        zone_pset_dict[zone_sys_number] = zone_pset_number
                        continue
                    elif kstatkey[2] == 'sys_zone_accum':
                        # system-wide accumulated stats are skipped
                        continue
                    zone_sys_number = re.sub(r'^sys_zone_([0-9]+)$', r'\1', kstatkey[2])
                    statistic = kstatkey[3]
                    value = kstatkeyvalue[1]
                    if statistic == 'zonename':
                        zonename_dict[zone_sys_number] = value
                        continue
                    # create new nested dictionary for zone_sys_name, or preserve it if it exists
                    per_zone_usage_dict[zone_sys_number] = per_zone_usage_dict.get(zone_sys_number, {})
                    # add value to nested dictionary of statistic for zone_sys_name
                    # (per-cpu kstat instances are summed per zone)
                    per_zone_usage_dict[zone_sys_number][statistic] = \
                        per_zone_usage_dict[zone_sys_number].get(statistic, 0.0) + float(value)
                # evacuate stored in dictionaries info into metrics
                for zone_sys_number in per_zone_usage_dict.keys():
                    for statistic in per_zone_usage_dict[zone_sys_number].keys():
                        local_zone_name = zonename_dict.get(zone_sys_number, 'sys_zone_' + zone_sys_number)
                        pset_number = zone_pset_dict.get(zone_sys_number, 'unknown')
                        stat_desc = per_zone_cpu_counters_dictionary.get(statistic, 'unknown')
                        value = per_zone_usage_dict.get(zone_sys_number, {}).get(statistic, 0.0)
                        if statistic in ['cpu_nsec_kernel', 'cpu_nsec_user']:
                            # strip the 'cpu_nsec_' prefix and normalize nanoseconds to
                            # seconds per cpu in the pset
                            statistic = statistic[9:]
                            cpus_in_pset = pset_dictionary.get(pset_number, 0)
                            try:
                                value = value / cpus_in_pset / 1000000000  # translate nsec in sec
                            except ZeroDivisionError:
                                value = 0
                        per_zone_usage.add_metric([local_zone_name, statistic, stat_desc, pset_number, host_name], value)
                    # NOTE(review): cpus_in_pset is only assigned inside the cpu_nsec_* branch;
                    # assumes every zone reports at least one cpu_nsec_* statistic — confirm
                    per_zone_usage.add_metric([local_zone_name, 'cpus', 'cpu number in pset', pset_number, host_name], cpus_in_pset)
            else:
                self.per_zone_cpu_collector_errors.inc()
                if task_timeouted:
                    self.per_zone_cpu_collector_timeouts.inc()
            yield per_zone_usage
class PerZoneCapsCollector(object):
    """
    Solaris Zones Virtual Memory (SWAP) Resource Capping, current nprocs number in zones
    """
    # timeout: how many seconds the collector is allowed to run
    max_time_to_run = 25
    per_zone_caps_collector_timeouts = Counter('solaris_exporter_per_zone_caps_timeouts',
                                               'Number of times when collector ran' +
                                               ' more than ' + str(max_time_to_run) + ' seconds')
    per_zone_caps_collector_errors = Counter('solaris_exporter_per_zone_caps_errors',
                                             'Number of times when collector ran with errors')
    per_zone_caps_collector_run_time = Gauge('solaris_exporter_per_zone_caps_processing', 'Time spent processing request')

    def collect(self):
        with self.per_zone_caps_collector_run_time.time():
            per_zone_caps = GaugeMetricFamily("solaris_exporter_per_zone_caps_total", 'kstat counters about zone resources',
                                              labels=['zone', 'statistic', 'host'])
            per_zone_caps_dict = {}  # will be nested dict: {zone_sys_number: {statistic: value}}
            zonename_dict = {}  # zone_sys_number -> human-readable zone name
            # swapresv_zone_N supplies swap usage/limit; nprocs_zone_N supplies process counts
            query = "-c zone_caps caps::/^swapresv_zone_[0-9]+$/:/^(usage|value|zonename)$/ caps::/^nprocs_zone_[0-9]+$/:usage"
            # print('kstat -p '+query)
            output, task_return_code, task_timeouted = run_shell_command('kstat -p '+query, self.max_time_to_run)
            if task_return_code == 0 and task_timeouted is False:
                lines = output.splitlines()
                for line in lines:
                    kstatkeyvalue = line.split("\t")
                    kstatkeyvalue[0] = re.sub('[ ,!=]', '_', kstatkeyvalue[0]).replace(",", ".")
                    kstatkey = kstatkeyvalue[0].split(":")
                    # kstatkey[0] # always set to 'caps'
                    # kstatkey[1] # zone_sys_number
                    # kstatkey[2] # 'swapresv_zone_28' or 'nprocs_zone_18'
                    # kstatkey[3] # 'zonename', 'usage' or 'value' text
                    zone_sys_number = kstatkey[1]
                    value = kstatkeyvalue[1]
                    if kstatkey[2].startswith('nprocs_zone'):
                        statistic = 'nprocs_current'
                    else:
                        # map the raw kstat statistic name to the exported one
                        if kstatkey[3] == 'value':
                            statistic = 'swap_limit_bytes'
                        elif kstatkey[3] == 'usage':
                            statistic = 'swap_usage_bytes'
                    if kstatkey[3] == 'zonename':
                        zonename_dict[zone_sys_number] = value
                        continue
                    # create new nested dictionary for zone_sys_name, or preserve it if it exists
                    per_zone_caps_dict[zone_sys_number] = per_zone_caps_dict.get(zone_sys_number, {})
                    # add value to nested dictionary of statistic for zone_sys_name
                    per_zone_caps_dict[zone_sys_number][statistic] = float(value)
                # evacuate stored in dictionaries info into metrics
                for zone_sys_number in per_zone_caps_dict.keys():
                    for statistic in per_zone_caps_dict[zone_sys_number].keys():
                        local_zone_name = zonename_dict.get(zone_sys_number, 'sys_zone_' + zone_sys_number)
                        value = per_zone_caps_dict.get(zone_sys_number, {}).get(statistic, 0.0)
                        per_zone_caps.add_metric([local_zone_name, statistic, host_name], value)
            else:
                self.per_zone_caps_collector_errors.inc()
                if task_timeouted:
                    self.per_zone_caps_collector_timeouts.inc()
            yield per_zone_caps
class FCinfoCollector(object):
    """
    FC links Multipath
    """
    # timeout: how many seconds the collector is allowed to run
    max_time_to_run = 4
    fc_lun_collector_timeouts = Counter('solaris_exporter_fc_paths_timeouts',
                                        'timeouts')
    fc_lun_collector_errors = Counter('solaris_exporter_fc_paths_errors', 'Number of times when collector ran' +
                                      ' with errors')
    fc_lun_collector_run_time = Gauge('solaris_exporter_fc_paths_processing', 'Time spent processing request')

    def collect(self):
        with self.fc_lun_collector_run_time.time():
            output, task_return_code, task_timeouted = run_shell_command('/usr/sbin/mpathadm list lu',
                                                                         self.max_time_to_run)
            if task_return_code == 0 and task_timeouted is False:
                lines = output.splitlines()
                fc_lun = GaugeMetricFamily("solaris_exporter_fc_paths", '/usr/sbin/mpathadm list lu', labels=['device', 'stat', 'host'])
                fc_total_paths = {}
                fc_active_paths = {}
                # 'mpathadm list lu' prints per-LUN groups of lines:
                #   /dev/rdsk/<device>s2
                #       Total Path Count: N
                #       Operational Path Count: N
                for line in lines:
                    content = line.strip()
                    if '/dev/rdsk/' in content:
                        device = re.sub(r'/dev/rdsk/(.*)s2', r'\1', content)
                    elif 'Total Path Count' in content:
                        content = content.split(':')
                        fc_total_paths[device] = content[1]
                    elif 'Operational Path Count:' in content:
                        content = content.split(':')
                        fc_active_paths[device] = content[1]
                    else:
                        # any other line resets the device; NOTE(review): a count line
                        # directly after such a line would be filed under "unknown"
                        # and skipped below — confirm this never splits a LUN group
                        device="unknown"
                for device in fc_total_paths.keys():
                    if device == "unknown":
                        continue
                    fc_lun.add_metric([device, 'active', host_name], float(fc_active_paths.get(device, 0)))
                    fc_lun.add_metric([device, 'total', host_name], float(fc_total_paths.get(device, 0)))
                yield fc_lun
            else:
                self.fc_lun_collector_errors.inc()
                if task_timeouted:
                    self.fc_lun_collector_timeouts.inc()
class SVCSCollector(object):
    """
    'svcs -x' checker
    """
    # timeout: how many seconds the collector is allowed to run
    max_time_to_run = 4
    svcs_x_collector_timeouts = Counter('solaris_exporter_svcs_x_timeouts',
                                        'timeouts')
    svcs_x_collector_errors = Counter('solaris_exporter_svcs_x_errors', 'Number of times when collector ran' +
                                      ' with errors')
    svcs_x_collector_run_time = Gauge('solaris_exporter_svcs_x_processing', 'Time spent processing request')

    def collect(self):
        """Yield a gauge counting failed SMF services reported by 'svcs -x'."""
        with self.svcs_x_collector_run_time.time():
            output, task_return_code, task_timeouted = run_shell_command('/usr/bin/svcs -x',
                                                                         self.max_time_to_run)
            # Build the metric family before checking the command result: the final
            # 'yield svcs_x' also runs on the error path, where it previously raised
            # NameError because svcs_x was only assigned on success.
            svcs_x = GaugeMetricFamily("solaris_exporter_svcs_x_failed_services", 'failed services counter in svcs -x',
                                       labels=['host'])
            if task_return_code == 0 and task_timeouted is False:
                svcs_fail = 0
                # each faulty service is reported on a line starting with its FMRI 'svc:/...'
                for line in output.splitlines():
                    if line.strip().startswith('svc:'):
                        svcs_fail += 1
                svcs_x.add_metric([host_name], float(svcs_fail))
            else:
                self.svcs_x_collector_errors.inc()
                if task_timeouted:
                    self.svcs_x_collector_timeouts.inc()
            yield svcs_x
class FmadmCollector(object):
    """
    'fmadm faulty' checker
    """
    # timeout: how many seconds the collector is allowed to run
    max_time_to_run = 15
    fmadm_collector_timeouts = Counter('solaris_exporter_fmadm_timeouts',
                                       'timeouts')
    fmadm_collector_errors = Counter('solaris_exporter_fmadm_errors', 'Number of times when collector ran' +
                                     ' with errors')
    fmadm_collector_run_time = Gauge('solaris_exporter_fmadm_processing', 'Time spent processing request')

    def collect(self):
        with self.fmadm_collector_run_time.time():
            # pfexec grants the privileges fmadm needs when the exporter runs as non-root
            output, task_return_code, task_timeouted = run_shell_command('/usr/bin/pfexec /usr/sbin/fmadm faulty',
                                                                         self.max_time_to_run)
            if task_return_code == 0 and task_timeouted is False:
                lines = output.splitlines()
                fmadm = GaugeMetricFamily("solaris_exporter_fmadm_faults", 'faults in fmadm faulty',
                                          labels=['host'])
                faults = 0
                # each fault record in 'fmadm faulty' output starts with a 'TIME ...' header row
                for line in lines:
                    if line.strip().startswith('TIME'):
                        faults += 1
                fmadm.add_metric([host_name], float(faults))
                yield fmadm
            else:
                self.fmadm_collector_errors.inc()
                if task_timeouted:
                    self.fmadm_collector_timeouts.inc()
class ZpoolCollector(object):
    """Counts FAILED/DEGRADED occurrences in '/usr/sbin/zpool status' output."""
    # timeout: how many seconds the collector is allowed to run
    max_time_to_run = 4
    zpool_collector_timeouts = Counter('solaris_exporter_zpool_timeouts',
                                       'timeouts')
    zpool_collector_errors = Counter('solaris_exporter_zpool_errors', 'Number of times when collector ran with errors')
    zpool_collector_run_time = Gauge('solaris_exporter_zpool_processing', 'Time spent processing request')

    def collect(self):
        with self.zpool_collector_run_time.time():
            output, exit_code, timed_out = run_shell_command('/usr/sbin/zpool status',
                                                             self.max_time_to_run)
            if exit_code == 0 and timed_out is False:
                zpool = GaugeMetricFamily("solaris_exporter_zpool_faults", 'faults in zpool status',
                                          labels=['host'])
                bad_markers = ('FAILED', 'DEGRADED')
                fault_count = sum(
                    1 for raw in output.splitlines()
                    if any(marker in raw.strip() for marker in bad_markers)
                )
                zpool.add_metric([host_name], float(fault_count))
                yield zpool
            else:
                self.zpool_collector_errors.inc()
                if timed_out:
                    self.zpool_collector_timeouts.inc()
class MetaStatCollector(object):
    """Counts SVM metadevice problems ('Needs maintenance', 'Last erred',
    'Unavailable') reported by '/usr/sbin/metastat -a'."""
    # timeout: how many seconds the collector is allowed to run
    max_time_to_run = 5
    metastat_collector_timeouts = Counter('solaris_exporter_metastat_timeouts',
                                          'timeouts')
    metastat_collector_errors = Counter('solaris_exporter_metastat_errors', 'Number of times when collector ran with errors')
    metastat_collector_run_time = Gauge('solaris_exporter_metastat_processing', 'Time spent processing request')

    def collect(self):
        with self.metastat_collector_run_time.time():
            output, exit_code, timed_out = run_shell_command('/usr/sbin/metastat -a',
                                                             self.max_time_to_run)
            if exit_code == 0 and timed_out is False:
                problem_markers = ('Needs maintenance', 'Last erred', 'Unavailable')
                fault_count = sum(
                    1 for raw in output.splitlines()
                    if any(marker in raw.strip() for marker in problem_markers)
                )
                metastat = GaugeMetricFamily("solaris_exporter_metastat_faults", 'faults in metastat',
                                             labels=['host'])
                metastat.add_metric([host_name], float(fault_count))
                yield metastat
            else:
                self.metastat_collector_errors.inc()
                if timed_out:
                    self.metastat_collector_timeouts.inc()
class MetaDBCollector(object):
    """
    'metadb' checker
    """
    # timeout: how many seconds the collector is allowed to run
    max_time_to_run = 5
    metadb_collector_timeouts = Counter('solaris_exporter_metadb_timeouts',
                                        'timeouts')
    metadb_collector_errors = Counter('solaris_exporter_metadb_errors', 'Number of times when collector ran' +
                                      ' with errors')
    metadb_collector_run_time = Gauge('solaris_exporter_metadb_processing', 'Time spent processing request')

    def collect(self):
        with self.metadb_collector_run_time.time():
            output, task_return_code, task_timeouted = run_shell_command('/usr/sbin/metadb',
                                                                         self.max_time_to_run)
            if task_return_code == 0 and task_timeouted is False:
                lines = output.splitlines()
                metadb = GaugeMetricFamily("solaris_exporter_metadb_faults", 'faults in metadb',
                                           labels=['host'])
                faults = 0
                for line in lines:
                    line = line.strip()
                    # 'W'/'D'/'M' are presumably the metadb replica flag letters that mark
                    # faulty replicas. NOTE(review): this is a substring test over the whole
                    # line, so a capital W/D/M anywhere on a line (e.g. inside a device
                    # path) is also counted — confirm intended.
                    if any(s in line for s in ['W', 'D', 'M']):
                        faults += 1
                metadb.add_metric([host_name], float(faults))
                yield metadb
            else:
                self.metadb_collector_errors.inc()
                if task_timeouted:
                    self.metadb_collector_timeouts.inc()
class PrtdiagCollector(object):
    """
    'prtdiag' checker
    """
    # timeout: how many seconds prtdiag is allowed to run (it can be very slow)
    max_time_to_run = 50
    prtdiag_collector_timeouts = Counter('solaris_exporter_prtdiag_timeouts', 'timeouts')
    prtdiag_collector_run_time = Gauge('solaris_exporter_prtdiag_processing', 'Time spent processing request')
    its_time_to_run_now = 0
    # repeat prtdiag only after each 60 times, write result from cache instead (prtdiag is heavy)
    repeat_prtdiag_after_times = 60

    def collect(self):
        # last prtdiag result is cached in module-level globals between scrapes
        global prtdiag_return_code
        global prtdiag_timeouted
        if self.its_time_to_run_now == 0:
            with self.prtdiag_collector_run_time.time():
                prtdiag_output, prtdiag_return_code, prtdiag_timeouted = run_shell_command('/usr/sbin/prtdiag -v',
                                                                                           self.max_time_to_run)
            if prtdiag_timeouted is True:
                self.prtdiag_collector_timeouts.inc()
        # counter cycles 0..repeat_prtdiag_after_times-1; prtdiag only runs on 0
        self.its_time_to_run_now += 1
        self.its_time_to_run_now %= self.repeat_prtdiag_after_times
        if prtdiag_timeouted is False:
            prtdiag = GaugeMetricFamily("solaris_exporter_prtdiag_rc", 'prtdiag return code', labels=['host'])
            prtdiag.add_metric([host_name], float(prtdiag_return_code))
            yield prtdiag
class TextFileCollector(object):
    """
    Read Input from a textfile to include in output. Thanks to Marcel Peter
    """
    TextFileCollector_run_time = Gauge('solaris_exporter_textfile_processing', 'Time spent processing request')

    def collect(self):
        """Parse every '*.prom' file under text_file_path and yield its metric families."""
        with self.TextFileCollector_run_time.time():
            fpath = text_file_path
            fnames = glob(fpath + '*.prom')
            for file_name_r in fnames:
                # filename to open for read
                with open(file_name_r, 'r') as text_object:
                    output = text_object.read()
                    for family in text_string_to_metric_families(output):
                        yield family
                    # removed the trailing 'text_object.close' — it lacked call
                    # parentheses (a no-op attribute access) and the 'with' block
                    # already closes the file
# replace start_http_server() method to capture error messages in my_http_error_handler()
# remove this to revert to prometheus_client.start_http_server
from BaseHTTPServer import HTTPServer
from prometheus_client import MetricsHandler
from SocketServer import ThreadingMixIn
class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
    # Handle each scrape request in its own daemon thread so a slow client cannot
    # block the exporter; daemon threads die together with the main process.
    daemon_threads = True
def start_http_server(port, addr='', registry=REGISTRY):
    """Starts an HTTP server for prometheus metrics as a daemon thread"""
    def my_http_error_handler(request, client_address):
        # installed as HTTPServer.handle_error: logs a one-liner instead of the
        # default traceback when a client drops the connection mid-scrape
        print ('Request from ' + client_address[0] + ':' + str(client_address[1]) + ' dropped. Broken pipe.')
    CustomMetricsHandler = MetricsHandler.factory(registry)
    httpd = _ThreadingSimpleServer((addr, port), CustomMetricsHandler)
    # replace the default per-request error handler with the quiet one above
    httpd.handle_error = my_http_error_handler
    t = threading.Thread(target=httpd.serve_forever)
    t.daemon = True
    t.start()
# end of replace start_http_server()
if __name__ == '__main__':
    assert psutil.SUNOS, 'This program is for Solaris OS only. See installation doc in its header'
    host_name = socket.gethostname()
    # this will be refreshed once in dictionaries_refresh_interval_sec
    disk_dictionary = get_disk_dictionary()
    pset_dictionary = get_pset_dictionary()
    # cache globals used by PrtdiagCollector between its throttled runs
    prtdiag_return_code = 0
    prtdiag_timeouted = False
    # collectors enabled for all zones:
    collectors = [
        CurTimeCollector(),
        UpTimeCollector(),
        NetworkCollector(),
        DiskSpaceCollector(),
        SVCSCollector(),
        TextFileCollector(),
    ]
    # count non-global zones configured on this host ('zoneadm list -icp'
    # prints colon-separated records with the zone name in field 1)
    zones, rc, timeouted = run_shell_command('/usr/sbin/zoneadm list -icp', 3)
    nzones = 0
    if rc == 0 and not timeouted:
        zones = zones.splitlines()
        for line in zones:
            zone = line.split(':')
            # print(zone)
            zone = zone[1]
            if zone != "global":
                nzones += 1
    zonename, rc, timeouted = run_shell_command('/usr/bin/zonename', 3)
    zonename = zonename.strip()
    # hardware and kernel collectors only make sense in the global zone
    if zonename == "global":
        collectors.extend([
            CpuLoadCollector(),
            CpuTimeCollector(),
            MemCollector(),
            DiskIOCollector(),
            DiskErrorCollector(),
            ZpoolCollector(),
            FCinfoCollector(),
            FmadmCollector(),
            PrtdiagCollector(),
            MetaDBCollector(),
            MetaStatCollector(),
        ])
    # enable zone collectors only if global zones have localzones or we are running inside localzone
    if nzones > 0 or zonename != "global":
        collectors.extend([
            PerZoneCpuCollector(),
            PerZoneCapsCollector(),
        ])
    # start webserver and register selected collectors in prometheus.client library
    start_http_server(exporter_port)
    for c in collectors:
        REGISTRY.register(c)
    # main loop only refreshes the cached dictionaries; scraping happens in the
    # daemon HTTP server thread
    while True:
        try:
            time.sleep(dictionaries_refresh_interval_sec)
            # this will be refresh dicts once in dictionaries_refresh_interval_sec
            disk_dictionary = get_disk_dictionary()
            pset_dictionary = get_pset_dictionary()
        except KeyboardInterrupt:
            print("\nExit Requested\n")
            exit()
|
zsync_server.py | # -*- coding: utf-8 -*-
import zmq
import os
import time
from threading import Thread
from zhelpers import socket_set_hwm, zpipe
from multiprocessing import Queue
fileq = None  # multiprocessing.Queue of file names still to be served; created in __main__
mypath = 'sync_files'  # directory whose regular files are offered to clients
ip = 'localhost'  # NOTE(review): unused in this file — presumably client-side config; confirm
ports = [13330, 13331, 13332]  # one ROUTER socket / server thread is bound per port
CHUNK_SIZE = 250000  # NOTE(review): not referenced here — chunk sizes come from client requests
PIPELINE = 10  # flow-control window; each socket HWM is set to PIPELINE * 2
def send_one(socket):
if fileq.empty():
return False
try:
fname = fileq.get(True, 0.001)
except Queue.Empty, e:
return True
try:
msg = socket.recv_multipart()
except zmq.ZMQError as e:
if e.errno == zmq.ETERM:
return False # shutting down, quit
else:
raise
identity, command = msg
assert command == b"fetch"
socket.send_multipart([identity, fname])
print 'sending ' + fname
file = open(fname, 'r')
while True:
try:
msg = socket.recv_multipart()
except zmq.ZMQError as e:
if e.errno == zmq.ETERM:
return False # shutting down, quit
else:
raise
identity, command, offset_str, chunksz_str = msg
if command == b'close':
break
assert command == b"fetch"
offset = int(offset_str)
chunksz = int(chunksz_str)
# Read chunk of data from file
file.seek(offset, os.SEEK_SET)
data = file.read(chunksz)
# Send resulting chunk to client
socket.send_multipart([identity, offset_str, data])
file.close()
return True
def server_thread(ctx, port):
    # One ROUTER socket per thread; HWM is twice the pipeline window so the
    # socket can buffer a full credit window in each direction.
    router = ctx.socket(zmq.ROUTER)
    socket_set_hwm(router, PIPELINE * 2)
    tcp = "tcp://*:%d" % port
    router.bind(tcp)
    print 'binding %s \n' % tcp
    # serve files until send_one() reports the queue is empty or the zmq
    # context has been terminated
    while True:
        ret = send_one(router)
        if not ret:
            break
    return
if __name__ == '__main__':
    begint = time.time()
    # enumerate regular files in the sync directory
    onlyfiles = [os.path.join(mypath, f) for f in os.listdir(mypath) if os.path.isfile(os.path.join(mypath, f)) ]
    # queue sized to hold every file name; each server thread pops from it
    fileq = Queue(len(onlyfiles))
    for f in onlyfiles:
        fileq.put_nowait(f)
    ctx = zmq.Context()
    # one ROUTER server thread per configured port
    servers = [Thread(target=server_thread, args=(ctx, port,)) for port in ports]
    [server.start() for server in servers]
    [server.join() for server in servers]
    endt = time.time()
    print 'finish: %s' % (endt - begint)
worker.py | from contextlib import contextmanager
import atexit
import faulthandler
import hashlib
import inspect
import io
import json
import logging
import os
import redis
import sys
import threading
import time
import traceback
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
# Ray modules
from ray.autoscaler._private.constants import AUTOSCALER_EVENTS
from ray.autoscaler._private.util import DEBUG_AUTOSCALING_ERROR
import ray.cloudpickle as pickle
import ray._private.memory_monitor as memory_monitor
import ray.node
import ray.job_config
import ray._private.parameter
import ray.ray_constants as ray_constants
import ray.remote_function
import ray.serialization as serialization
import ray._private.gcs_utils as gcs_utils
import ray._private.services as services
from ray._private.runtime_env import working_dir as working_dir_pkg
import ray._private.import_thread as import_thread
from ray.util.tracing.tracing_helper import import_from_string
from ray.util.annotations import PublicAPI, DeveloperAPI, Deprecated
from ray.util.debug import log_once
import ray
import colorama
import setproctitle
import ray.state
from ray import (
ActorID,
JobID,
ObjectRef,
Language,
)
import ray._private.profiling as profiling
from ray.exceptions import (
RaySystemError,
RayError,
RayTaskError,
ObjectStoreFullError,
)
from ray._private.function_manager import FunctionActorManager
from ray._private.ray_logging import setup_logger
from ray._private.ray_logging import global_worker_stdstream_dispatcher
from ray._private.utils import check_oversized_function
from ray.util.inspect import is_cython
from ray.experimental.internal_kv import _internal_kv_get, \
_internal_kv_initialized
from ray._private.client_mode_hook import client_mode_hook
# Modes a worker process can run in. See Worker.set_mode for the semantics
# of SCRIPT_MODE, WORKER_MODE and LOCAL_MODE; the remaining modes tag
# special-purpose worker processes.
SCRIPT_MODE = 0
WORKER_MODE = 1
LOCAL_MODE = 2
SPILL_WORKER_MODE = 3
RESTORE_WORKER_MODE = 4
UTIL_WORKER_MODE = 5

# NOTE(review): not referenced in the visible portion of this file —
# presumably a key prefix for error entries stored elsewhere; confirm usage.
ERROR_KEY_PREFIX = b"Error:"

# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
# Visible for testing.
def _unhandled_error_handler(e: Exception):
    """Log an error that was never consumed by user code."""
    # f-string yields the exact same message text as the original
    # str.format call.
    logger.error(
        f"Unhandled error (suppress with RAY_IGNORE_UNHANDLED_ERRORS=1): {e}")
class Worker:
    """A class used to define the control flow of a worker process.

    Note:
        The methods in this class are considered unexposed to the user. The
        functions outside of this class are considered exposed.

    Attributes:
        node (ray.node.Node): The node this worker is attached to.
        mode: The mode of the worker. One of SCRIPT_MODE, LOCAL_MODE, and
            WORKER_MODE.
        cached_functions_to_run (List): A list of functions to run on all of
            the workers that should be exported as soon as connect is called.
    """

    def __init__(self):
        """Initialize a Worker object."""
        self.node = None
        self.mode = None
        self.cached_functions_to_run = []
        # Maps actor IDs to actor instances (populated elsewhere).
        self.actors = {}
        # When the worker is constructed. Record the original value of the
        # CUDA_VISIBLE_DEVICES environment variable.
        self.original_gpu_ids = ray._private.utils.get_cuda_visible_devices()
        self.memory_monitor = memory_monitor.MemoryMonitor()
        # A dictionary that maps from driver id to SerializationContext
        # TODO: clean up the SerializationContext once the job finished.
        self.serialization_context_map = {}
        self.function_actor_manager = FunctionActorManager(self)
        # This event is checked regularly by all of the threads so that they
        # know when to exit.
        self.threads_stopped = threading.Event()
        # Index of the current session. This number will
        # increment every time when `ray.shutdown` is called.
        self._session_index = 0
        # If this is set, the next .remote call should drop into the
        # debugger, at the specified breakpoint ID.
        self.debugger_breakpoint = b""
        # If this is set, ray.get calls invoked on the object ID returned
        # by the worker should drop into the debugger at the specified
        # breakpoint ID.
        self.debugger_get_breakpoint = b""
        # If True, make the debugger external to the node this worker is
        # running on.
        self.ray_debugger_external = False
        self._load_code_from_local = False
        # Used to toggle whether or not logs should be filtered to only those
        # produced in the same job.
        self.filter_logs_by_job = True

    @property
    def connected(self):
        """bool: True if Ray has been started and False otherwise."""
        return self.node is not None

    @property
    def node_ip_address(self):
        self.check_connected()
        return self.node.node_ip_address

    @property
    def load_code_from_local(self):
        self.check_connected()
        return self._load_code_from_local

    @property
    def current_job_id(self):
        # core_worker is attached after connect; fall back to a nil ID
        # before that point.
        if hasattr(self, "core_worker"):
            return self.core_worker.get_current_job_id()
        return JobID.nil()

    @property
    def actor_id(self):
        if hasattr(self, "core_worker"):
            return self.core_worker.get_actor_id()
        return ActorID.nil()

    @property
    def current_task_id(self):
        return self.core_worker.get_current_task_id()

    @property
    def current_node_id(self):
        return self.core_worker.get_current_node_id()

    @property
    def namespace(self):
        return self.core_worker.get_job_config().ray_namespace

    @property
    def placement_group_id(self):
        return self.core_worker.get_placement_group_id()

    @property
    def worker_id(self):
        return self.core_worker.get_worker_id().binary()

    @property
    def should_capture_child_tasks_in_placement_group(self):
        return self.core_worker.should_capture_child_tasks_in_placement_group()

    @property
    def current_session_and_job(self):
        """Get the current session index and job id as pair."""
        assert isinstance(self._session_index, int)
        assert isinstance(self.current_job_id, ray.JobID)
        return self._session_index, self.current_job_id

    @property
    def runtime_env(self):
        """Get the runtime env in json format"""
        return json.loads(
            self.core_worker.get_job_config().runtime_env.raw_json)

    def get_serialization_context(self, job_id=None):
        """Get the SerializationContext of the job that this worker is processing.

        Args:
            job_id: The ID of the job that indicates which job to get
                the serialization context for.

        Returns:
            The serialization context of the given job.
        """
        # This function needs to be protected by a lock, because it will be
        # called by`register_class_for_serialization`, as well as the import
        # thread, from different threads. Also, this function will recursively
        # call itself, so we use RLock here.
        # NOTE(review): self.lock is not assigned in the visible __init__ —
        # confirm it is created elsewhere (e.g. during connect) before this
        # method is first invoked.
        if job_id is None:
            job_id = self.current_job_id
        with self.lock:
            if job_id not in self.serialization_context_map:
                self.serialization_context_map[
                    job_id] = serialization.SerializationContext(self)
            return self.serialization_context_map[job_id]

    def check_connected(self):
        """Check if the worker is connected.

        Raises:
            Exception: An exception is raised if the worker is not connected.
        """
        if not self.connected:
            # Auto-connect via the Ray client unless explicitly disabled by
            # the RAY_ENABLE_AUTO_CONNECT environment variable.
            if os.environ.get("RAY_ENABLE_AUTO_CONNECT", "") != "0":
                ray.client().connect()
                return
            raise RaySystemError("Ray has not been started yet. You can "
                                 "start Ray with 'ray.init()'.")

    def set_mode(self, mode):
        """Set the mode of the worker.

        The mode SCRIPT_MODE should be used if this Worker is a driver that is
        being run as a Python script or interactively in a shell. It will print
        information about task failures.

        The mode WORKER_MODE should be used if this Worker is not a driver. It
        will not print information about tasks.

        The mode LOCAL_MODE should be used if this Worker is a driver and if
        you want to run the driver in a manner equivalent to serial Python for
        debugging purposes. It will not send remote function calls to the
        scheduler and will instead execute them in a blocking fashion.

        Args:
            mode: One of SCRIPT_MODE, WORKER_MODE, and LOCAL_MODE.
        """
        self.mode = mode

    def set_load_code_from_local(self, load_code_from_local):
        self._load_code_from_local = load_code_from_local

    def put_object(self, value, object_ref=None, owner_address=None):
        """Put value in the local object store with object reference `object_ref`.

        This assumes that the value for `object_ref` has not yet been placed in
        the local object store. If the plasma store is full, the worker will
        automatically retry up to DEFAULT_PUT_OBJECT_RETRIES times. Each
        retry will delay for an exponentially doubling amount of time,
        starting with DEFAULT_PUT_OBJECT_DELAY. After this, exception
        will be raised.

        Args:
            value: The value to put in the object store.
            object_ref (ObjectRef): The object ref of the value to be
                put. If None, one will be generated.
            owner_address: The serialized address of object's owner.

        Returns:
            ObjectRef: The object ref the object was put under.

        Raises:
            ray.exceptions.ObjectStoreFullError: This is raised if the attempt
                to store the object fails because the object store is full even
                after multiple retries.
        """
        # Make sure that the value is not an object ref.
        if isinstance(value, ObjectRef):
            raise TypeError(
                "Calling 'put' on an ray.ObjectRef is not allowed "
                "(similarly, returning an ray.ObjectRef from a remote "
                "function is not allowed). If you really want to "
                "do this, you can wrap the ray.ObjectRef in a list and "
                "call 'put' on it (or return it).")
        if self.mode == LOCAL_MODE:
            assert object_ref is None, ("Local Mode does not support "
                                        "inserting with an ObjectRef")
        serialized_value = self.get_serialization_context().serialize(value)
        # This *must* be the first place that we construct this python
        # ObjectRef because an entry with 0 local references is created when
        # the object is Put() in the core worker, expecting that this python
        # reference will be created. If another reference is created and
        # removed before this one, it will corrupt the state in the
        # reference counter.
        return ray.ObjectRef(
            self.core_worker.put_serialized_object(
                serialized_value,
                object_ref=object_ref,
                owner_address=owner_address))

    def raise_errors(self, data_metadata_pairs, object_refs):
        # Deserialize the error payloads and route each one through the
        # unhandled-error handler, unless globally suppressed via env var.
        out = self.deserialize_objects(data_metadata_pairs, object_refs)
        if "RAY_IGNORE_UNHANDLED_ERRORS" in os.environ:
            return
        for e in out:
            _unhandled_error_handler(e)

    def deserialize_objects(self, data_metadata_pairs, object_refs):
        # Function actor manager or the import thread may call pickle.loads
        # at the same time which can lead to failed imports
        # TODO: We may be better off locking on all imports or injecting a lock
        # into pickle.loads (https://github.com/ray-project/ray/issues/16304)
        with self.function_actor_manager.lock:
            context = self.get_serialization_context()
            return context.deserialize_objects(data_metadata_pairs,
                                               object_refs)

    def get_objects(self, object_refs, timeout=None):
        """Get the values in the object store associated with the IDs.

        Return the values from the local object store for object_refs. This
        will block until all the values for object_refs have been written to
        the local object store.

        Args:
            object_refs (List[object_ref.ObjectRef]): A list of the object refs
                whose values should be retrieved.
            timeout (float): timeout (float): The maximum amount of time in
                seconds to wait before returning.

        Returns:
            list: List of deserialized objects
            bytes: UUID of the debugger breakpoint we should drop
                into or b"" if there is no breakpoint.
        """
        # Make sure that the values are object refs.
        for object_ref in object_refs:
            if not isinstance(object_ref, ObjectRef):
                raise TypeError(
                    f"Attempting to call `get` on the value {object_ref}, "
                    "which is not an ray.ObjectRef.")
        # NOTE(review): a timeout of 0 is falsy and therefore falls through
        # to -1 (block indefinitely); confirm whether 0 should instead mean
        # "return immediately" (i.e. test `timeout is not None`).
        timeout_ms = int(timeout * 1000) if timeout else -1
        data_metadata_pairs = self.core_worker.get_objects(
            object_refs, self.current_task_id, timeout_ms)
        debugger_breakpoint = b""
        for (data, metadata) in data_metadata_pairs:
            if metadata:
                metadata_fields = metadata.split(b",")
                if len(metadata_fields) >= 2 and metadata_fields[1].startswith(
                        ray_constants.OBJECT_METADATA_DEBUG_PREFIX):
                    debugger_breakpoint = metadata_fields[1][len(
                        ray_constants.OBJECT_METADATA_DEBUG_PREFIX):]
        return self.deserialize_objects(data_metadata_pairs,
                                        object_refs), debugger_breakpoint

    def run_function_on_all_workers(self, function,
                                    run_on_other_drivers=False):
        """Run arbitrary code on all of the workers.

        This function will first be run on the driver, and then it will be
        exported to all of the workers to be run. It will also be run on any
        new workers that register later. If ray.init has not been called yet,
        then cache the function and export it later.

        Args:
            function (Callable): The function to run on all of the workers. It
                takes only one argument, a worker info dict. If it returns
                anything, its return values will not be used.
            run_on_other_drivers: The boolean that indicates whether we want to
                run this function on other drivers. One case is we may need to
                share objects across drivers.
        """
        # If ray.init has not been called yet, then cache the function and
        # export it when connect is called. Otherwise, run the function on all
        # workers.
        if self.mode is None:
            self.cached_functions_to_run.append(function)
        else:
            # Attempt to pickle the function before we need it. This could
            # fail, and it is more convenient if the failure happens before we
            # actually run the function locally.
            pickled_function = pickle.dumps(function)
            function_to_run_id = hashlib.shake_128(pickled_function).digest(
                ray_constants.ID_SIZE)
            key = b"FunctionsToRun:" + function_to_run_id
            # First run the function on the driver.
            # We always run the task locally.
            function({"worker": self})
            # Check if the function has already been put into redis.
            function_exported = self.redis_client.setnx(b"Lock:" + key, 1)
            if not function_exported:
                # In this case, the function has already been exported, so
                # we don't need to export it again.
                return
            check_oversized_function(pickled_function, function.__name__,
                                     "function", self)
            # Run the function on all workers.
            self.redis_client.hset(
                key,
                mapping={
                    "job_id": self.current_job_id.binary(),
                    "function_id": function_to_run_id,
                    "function": pickled_function,
                    "run_on_other_drivers": str(run_on_other_drivers),
                })
            self.redis_client.rpush("Exports", key)
            # TODO(rkn): If the worker fails after it calls setnx and before it
            # successfully completes the hset and rpush, then the program will
            # most likely hang. This could be fixed by making these three
            # operations into a transaction (or by implementing a custom
            # command that does all three things).

    def main_loop(self):
        """The main loop a worker runs to receive and execute tasks."""

        def sigterm_handler(signum, frame):
            # Graceful shutdown on SIGTERM before exiting with an error code.
            shutdown(True)
            sys.exit(1)

        ray._private.utils.set_sigterm_handler(sigterm_handler)
        self.core_worker.run_task_loop()
        sys.exit(0)

    def print_logs(self):
        """Prints log messages from workers on all nodes in the same job.
        """
        pubsub_client = self.redis_client.pubsub(
            ignore_subscribe_messages=True)
        pubsub_client.subscribe(gcs_utils.LOG_FILE_CHANNEL)
        localhost = services.get_node_ip_address()
        try:
            # Keep track of the number of consecutive log messages that have
            # been received with no break in between. If this number grows
            # continually, then the worker is probably not able to process the
            # log messages as rapidly as they are coming in.
            num_consecutive_messages_received = 0
            job_id_binary = ray._private.utils.binary_to_hex(
                self.current_job_id.binary())
            while True:
                # Exit if we received a signal that we should stop.
                if self.threads_stopped.is_set():
                    return
                msg = pubsub_client.get_message()
                if msg is None:
                    num_consecutive_messages_received = 0
                    self.threads_stopped.wait(timeout=0.01)
                    continue
                num_consecutive_messages_received += 1
                if (num_consecutive_messages_received % 100 == 0
                        and num_consecutive_messages_received > 0):
                    logger.warning(
                        "The driver may not be able to keep up with the "
                        "stdout/stderr of the workers. To avoid forwarding "
                        "logs to the driver, use "
                        "'ray.init(log_to_driver=False)'.")
                data = json.loads(ray._private.utils.decode(msg["data"]))
                # Don't show logs from other drivers.
                if (self.filter_logs_by_job and data["job"]
                        and job_id_binary != data["job"]):
                    continue
                data["localhost"] = localhost
                global_worker_stdstream_dispatcher.emit(data)
        except (OSError, redis.exceptions.ConnectionError) as e:
            logger.error(f"print_logs: {e}")
        finally:
            # Close the pubsub client to avoid leaking file descriptors.
            pubsub_client.close()
@PublicAPI
@client_mode_hook
def get_gpu_ids():
    """Get the IDs of the GPUs that are available to the worker.

    If the CUDA_VISIBLE_DEVICES environment variable was set when the worker
    started up, then the IDs returned by this method will be a subset of the
    IDs in CUDA_VISIBLE_DEVICES. If not, the IDs will fall in the range
    [0, NUM_GPUS - 1], where NUM_GPUS is the number of GPUs that the node has.

    Returns:
        A list of GPU IDs.
    """
    # Hoisted out of the loop below: the original re-ran `import re` on
    # every iteration, consulting the import machinery each time.
    import re

    worker = global_worker
    worker.check_connected()
    if worker.mode != WORKER_MODE:
        if log_once("worker_get_gpu_ids_empty_from_driver"):
            logger.warning(
                "`ray.get_gpu_ids()` will always return the empty list when "
                "called from the driver. This is because Ray does not manage "
                "GPU allocations to the driver process.")
    # TODO(ilr) Handle inserting resources in local mode
    all_resource_ids = global_worker.core_worker.resource_ids()
    assigned_ids = set()
    # Compile once; matches placement-group GPU resources without a bundle
    # index ("GPU_group_<id>").
    gpu_group_pattern = re.compile(r"^GPU_group_[0-9A-Za-z]+$")
    for resource, assignment in all_resource_ids.items():
        # Handle both normal and placement group GPU resources.
        # Note: We should only get the GPU ids from the placement
        # group resource that does not contain the bundle index!
        if resource == "GPU" or gpu_group_pattern.match(resource):
            for resource_id, _ in assignment:
                assigned_ids.add(resource_id)
    assigned_ids = list(assigned_ids)
    # If the user had already set CUDA_VISIBLE_DEVICES, then respect that (in
    # the sense that only GPU IDs that appear in CUDA_VISIBLE_DEVICES should be
    # returned).
    if global_worker.original_gpu_ids is not None:
        assigned_ids = [
            global_worker.original_gpu_ids[gpu_id] for gpu_id in assigned_ids
        ]
    # Give all GPUs in local_mode.
    if global_worker.mode == LOCAL_MODE:
        max_gpus = global_worker.node.get_resource_spec().num_gpus
        assigned_ids = global_worker.original_gpu_ids[:max_gpus]
    return assigned_ids
@Deprecated
def get_resource_ids():
    """Get the IDs of the resources that are available to the worker.

    Returns:
        A dictionary mapping the name of a resource to a list of pairs, where
        each pair consists of the ID of a resource and the fraction of that
        resource reserved for this worker.
    """
    global_worker.check_connected()
    # Local mode is explicitly unsupported here.
    if _mode() == LOCAL_MODE:
        raise RuntimeError(
            "ray.worker.get_resource_ids() currently does not work in "
            "local_mode.")
    return global_worker.core_worker.resource_ids()
@Deprecated
def get_dashboard_url():
    """Get the URL to access the Ray dashboard.

    Note that the URL does not specify which node the dashboard is on.

    Returns:
        The URL of the dashboard as a string.
    """
    global_worker.check_connected()
    return _global_node.webui_url
global_worker = Worker()
"""Worker: The global Worker object for this worker process.
We use a global Worker object to ensure that there is a single worker object
per worker process.
"""
_global_node = None
"""ray.node.Node: The global node object that is created by ray.init()."""
@PublicAPI
@client_mode_hook
def init(
        address: Optional[str] = None,
        *,
        num_cpus: Optional[int] = None,
        num_gpus: Optional[int] = None,
        resources: Optional[Dict[str, float]] = None,
        object_store_memory: Optional[int] = None,
        local_mode: bool = False,
        ignore_reinit_error: bool = False,
        include_dashboard: Optional[bool] = None,
        dashboard_host: str = ray_constants.DEFAULT_DASHBOARD_IP,
        dashboard_port: Optional[int] = None,
        job_config: "ray.job_config.JobConfig" = None,
        configure_logging: bool = True,
        logging_level: int = logging.INFO,
        logging_format: str = ray_constants.LOGGER_FORMAT,
        log_to_driver: bool = True,
        namespace: Optional[str] = None,
        runtime_env: Dict[str, Any] = None,
        # The following are unstable parameters and their use is discouraged.
        _enable_object_reconstruction: bool = False,
        _redis_max_memory: Optional[int] = None,
        _plasma_directory: Optional[str] = None,
        _node_ip_address: str = ray_constants.NODE_DEFAULT_IP,
        _driver_object_store_memory: Optional[int] = None,
        _memory: Optional[int] = None,
        _redis_password: str = ray_constants.REDIS_DEFAULT_PASSWORD,
        _temp_dir: Optional[str] = None,
        _lru_evict: bool = False,
        _metrics_export_port: Optional[int] = None,
        _system_config: Optional[Dict[str, str]] = None,
        _tracing_startup_hook: Optional[Callable] = None,
        **kwargs):
    """
    Connect to an existing Ray cluster or start one and connect to it.

    This method handles two cases; either a Ray cluster already exists and we
    just attach this driver to it or we start all of the processes associated
    with a Ray cluster and attach to the newly started cluster.

    To start Ray locally and all of the relevant processes, use this as
    follows:

    .. code-block:: python

        ray.init()

    To connect to an existing local cluster, use this as follows (substituting
    in the appropriate port if needed).

    .. code-block:: python

        ray.init(address="localhost:6379")

    To connect to an existing remote cluster, use this as follows (substituting
    in the appropriate address). Note the addition of "ray://" at the beginning
    of the address.

    .. code-block:: python

        ray.init(address="ray://123.45.67.89:10001")

    More details for starting and connecting to a remote cluster can be found
    here: https://docs.ray.io/en/master/cluster/ray-client.html

    You can also define an environment variable called `RAY_ADDRESS` in
    the same format as the `address` parameter to connect to an existing
    cluster with ray.init() or ray.init(address="auto").

    Args:
        address (str): The address of the Ray cluster to connect to. If
            this address is not provided, then this command will start Redis,
            a raylet, a plasma store, a plasma manager, and some workers.
            It will also kill these processes when Python exits. If the driver
            is running on a node in a Ray cluster, using `auto` as the value
            tells the driver to detect the cluster, removing the need to
            specify a specific node address. If the environment variable
            `RAY_ADDRESS` is defined and the address is None or "auto", Ray
            will set `address` to `RAY_ADDRESS`.
            Addresses can be prefixed with a "ray://" to connect to a remote
            cluster. For example, passing in the address
            "ray://123.45.67.89:50005" will connect to the cluster at the
            given address.
        num_cpus (int): Number of CPUs the user wishes to assign to each
            raylet. By default, this is set based on virtual cores.
        num_gpus (int): Number of GPUs the user wishes to assign to each
            raylet. By default, this is set based on detected GPUs.
        resources: A dictionary mapping the names of custom resources to the
            quantities for them available.
        object_store_memory: The amount of memory (in bytes) to start the
            object store with. By default, this is automatically set based on
            available system memory.
        local_mode (bool): If true, the code will be executed serially. This
            is useful for debugging.
        ignore_reinit_error: If true, Ray suppresses errors from calling
            ray.init() a second time. Ray won't be restarted.
        include_dashboard: Boolean flag indicating whether or not to start the
            Ray dashboard, which displays the status of the Ray
            cluster. If this argument is None, then the UI will be started if
            the relevant dependencies are present.
        dashboard_host: The host to bind the dashboard server to. Can either be
            localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
            By default, this is set to localhost to prevent access from
            external machines.
        dashboard_port(int, None): The port to bind the dashboard server to.
            Defaults to 8265 and Ray will automatically find a free port if
            8265 is not available.
        job_config (ray.job_config.JobConfig): The job configuration.
        configure_logging: True (default) if configuration of logging is
            allowed here. Otherwise, the user may want to configure it
            separately.
        logging_level: Logging level, defaults to logging.INFO. Ignored unless
            "configure_logging" is true.
        logging_format: Logging format, defaults to string containing a
            timestamp, filename, line number, and message. See the source file
            ray_constants.py for details. Ignored unless "configure_logging"
            is true.
        log_to_driver (bool): If true, the output from all of the worker
            processes on all nodes will be directed to the driver.
        namespace (str): Namespace to use
        runtime_env (dict): The runtime environment to use for this job (see
            :ref:`runtime-environments` for details). This API is in beta
            and may change before becoming stable.
        _enable_object_reconstruction (bool): If True, when an object stored in
            the distributed plasma store is lost due to node failure, Ray will
            attempt to reconstruct the object by re-executing the task that
            created the object. Arguments to the task will be recursively
            reconstructed. If False, then ray.ObjectLostError will be
            thrown.
        _redis_max_memory: Redis max memory.
        _plasma_directory: Override the plasma mmap file directory.
        _node_ip_address (str): The IP address of the node that we are on.
        _driver_object_store_memory (int): Deprecated.
        _memory: Amount of reservable memory resource to create.
        _redis_password (str): Prevents external clients without the password
            from connecting to Redis if provided.
        _temp_dir (str): If provided, specifies the root temporary
            directory for the Ray process. Defaults to an OS-specific
            conventional location, e.g., "/tmp/ray".
        _metrics_export_port(int): Port number Ray exposes system metrics
            through a Prometheus endpoint. It is currently under active
            development, and the API is subject to change.
        _system_config (dict): Configuration for overriding
            RayConfig defaults. For testing purposes ONLY.
        _tracing_startup_hook (str): If provided, turns on and sets up tracing
            for Ray. Must be the name of a function that takes no arguments and
            sets up a Tracer Provider, Remote Span Processors, and
            (optional) additional instruments. See more at
            docs.ray.io/tracing.html. It is currently under active development,
            and the API is subject to change.

    Returns:
        If the provided address includes a protocol, for example by prepending
        "ray://" to the address to get "ray://1.2.3.4:10001", then a
        ClientContext is returned with information such as settings, server
        versions for ray and python, and the dashboard_url. Otherwise,
        returns address information about the started processes.

    Raises:
        Exception: An exception is raised if an inappropriate combination of
            arguments is passed in.
    """
    # If available, use RAY_ADDRESS to override if the address was left
    # unspecified, or set to "auto" in the call to init
    address_env_var = os.environ.get(
        ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE)
    if address_env_var:
        if address is None or address == "auto":
            address = address_env_var
            logger.info(
                f"Using address {address_env_var} set in the environment "
                f"variable {ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE}")

    if address is not None and "://" in address:
        # Address specified a protocol, use ray client
        builder = ray.client(address)

        # Forward any keyword arguments that were changed from their default
        # values to the builder
        init_sig = inspect.signature(init)
        passed_kwargs = {}
        for argument_name, param_obj in init_sig.parameters.items():
            if argument_name in {"kwargs", "address"}:
                # kwargs and address are handled separately
                continue
            default_value = param_obj.default
            passed_value = locals()[argument_name]
            if passed_value != default_value:
                # passed value is different than default, pass to the client
                # builder
                passed_kwargs[argument_name] = passed_value
        passed_kwargs.update(kwargs)
        builder._init_args(**passed_kwargs)
        return builder.connect()

    if kwargs:
        # User passed in extra keyword arguments but isn't connecting through
        # ray client. Raise an error, since most likely a typo in keyword
        unknown = ", ".join(kwargs)
        raise RuntimeError(f"Unknown keyword argument(s): {unknown}")

    # Try to increase the file descriptor limit, which is too low by
    # default for Ray: https://github.com/ray-project/ray/issues/11239
    try:
        import resource
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        if soft < hard:
            # https://github.com/ray-project/ray/issues/12059
            soft = max(soft, min(hard, 65536))
            logger.debug("Automatically increasing RLIMIT_NOFILE to max "
                         "value of {}".format(hard))
            try:
                resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
            except ValueError:
                logger.debug("Failed to raise limit.")
        soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
        if soft < 4096:
            logger.warning(
                "File descriptor limit {} is too low for production "
                "servers and may result in connection errors. "
                "At least 8192 is recommended. --- "
                "Fix with 'ulimit -n 8192'".format(soft))
    except ImportError:
        logger.debug("Could not import resource module (on Windows)")
        pass

    if runtime_env:
        if job_config is None:
            job_config = ray.job_config.JobConfig()
        job_config.set_runtime_env(runtime_env)

    # Convert hostnames to numerical IP address.
    # NOTE(review): if a caller explicitly passes _node_ip_address=None,
    # node_ip_address/raylet_ip_address are never bound and the uses below
    # raise UnboundLocalError; the non-None default normally prevents this —
    # confirm intended behavior for an explicit None.
    if _node_ip_address is not None:
        node_ip_address = services.address_to_ip(_node_ip_address)
    raylet_ip_address = node_ip_address

    if address:
        redis_address, _, _ = services.validate_redis_address(address)
    else:
        redis_address = None

    if configure_logging:
        setup_logger(logging_level, logging_format)

    if redis_address is not None:
        logger.info(
            f"Connecting to existing Ray cluster at address: {redis_address}")

    if local_mode:
        driver_mode = LOCAL_MODE
    else:
        driver_mode = SCRIPT_MODE

    if global_worker.connected:
        if ignore_reinit_error:
            logger.info(
                "Calling ray.init() again after it has already been called.")
            return
        else:
            raise RuntimeError("Maybe you called ray.init twice by accident? "
                               "This error can be suppressed by passing in "
                               "'ignore_reinit_error=True' or by calling "
                               "'ray.shutdown()' prior to 'ray.init()'.")

    _system_config = _system_config or {}
    if not isinstance(_system_config, dict):
        raise TypeError("The _system_config must be a dict.")

    global _global_node
    if redis_address is None:
        # In this case, we need to start a new cluster.
        ray_params = ray._private.parameter.RayParams(
            redis_address=redis_address,
            node_ip_address=node_ip_address,
            raylet_ip_address=raylet_ip_address,
            object_ref_seed=None,
            driver_mode=driver_mode,
            redirect_worker_output=None,
            redirect_output=None,
            num_cpus=num_cpus,
            num_gpus=num_gpus,
            resources=resources,
            num_redis_shards=None,
            redis_max_clients=None,
            redis_password=_redis_password,
            plasma_directory=_plasma_directory,
            huge_pages=None,
            include_dashboard=include_dashboard,
            dashboard_host=dashboard_host,
            dashboard_port=dashboard_port,
            memory=_memory,
            object_store_memory=object_store_memory,
            redis_max_memory=_redis_max_memory,
            plasma_store_socket_name=None,
            temp_dir=_temp_dir,
            # We need to disable it if runtime env is not set.
            # Uploading happens after core worker is created. And we should
            # prevent default worker being created before uploading.
            # TODO (yic): Have a separate connection to gcs client when
            # removal redis is done. The uploading should happen before this
            # one.
            start_initial_python_workers_for_first_job=(
                job_config is None or job_config.runtime_env is None),
            _system_config=_system_config,
            lru_evict=_lru_evict,
            enable_object_reconstruction=_enable_object_reconstruction,
            metrics_export_port=_metrics_export_port,
            tracing_startup_hook=_tracing_startup_hook)
        # Start the Ray processes. We set shutdown_at_exit=False because we
        # shutdown the node in the ray.shutdown call that happens in the atexit
        # handler. We still spawn a reaper process in case the atexit handler
        # isn't called.
        _global_node = ray.node.Node(
            head=True,
            shutdown_at_exit=False,
            spawn_reaper=True,
            ray_params=ray_params)
    else:
        # In this case, we are connecting to an existing cluster.
        if num_cpus is not None or num_gpus is not None:
            raise ValueError(
                "When connecting to an existing cluster, num_cpus "
                "and num_gpus must not be provided.")
        if resources is not None:
            raise ValueError("When connecting to an existing cluster, "
                             "resources must not be provided.")
        if object_store_memory is not None:
            raise ValueError("When connecting to an existing cluster, "
                             "object_store_memory must not be provided.")
        if _system_config is not None and len(_system_config) != 0:
            raise ValueError("When connecting to an existing cluster, "
                             "_system_config must not be provided.")
        if _enable_object_reconstruction:
            raise ValueError(
                "When connecting to an existing cluster, "
                "_enable_object_reconstruction must not be provided.")
        # In this case, we only need to connect the node.
        ray_params = ray._private.parameter.RayParams(
            node_ip_address=node_ip_address,
            raylet_ip_address=raylet_ip_address,
            redis_address=redis_address,
            redis_password=_redis_password,
            object_ref_seed=None,
            temp_dir=_temp_dir,
            _system_config=_system_config,
            lru_evict=_lru_evict,
            enable_object_reconstruction=_enable_object_reconstruction,
            metrics_export_port=_metrics_export_port)
        _global_node = ray.node.Node(
            ray_params,
            head=False,
            shutdown_at_exit=False,
            spawn_reaper=False,
            connect_only=True)

    if driver_mode == SCRIPT_MODE and job_config:
        # Rewrite the URI. Note the package isn't uploaded to the URI until
        # later in the connect
        working_dir_pkg.rewrite_runtime_env_uris(job_config)

    connect(
        _global_node,
        mode=driver_mode,
        log_to_driver=log_to_driver,
        worker=global_worker,
        driver_object_store_memory=_driver_object_store_memory,
        job_id=None,
        namespace=namespace,
        job_config=job_config)
    if job_config and job_config.code_search_path:
        global_worker.set_load_code_from_local(True)
    else:
        # Because `ray.shutdown()` doesn't reset this flag, for multiple
        # sessions in one process, the 2nd `ray.init()` will reuse the
        # flag of last session. For example:
        #     ray.init(load_code_from_local=True)
        #     ray.shutdown()
        #     ray.init()
        #     # Here the flag `load_code_from_local` is still True if we
        #     # doesn't have this `else` branch.
        #     ray.shutdown()
        global_worker.set_load_code_from_local(False)

    for hook in _post_init_hooks:
        hook()

    node_id = global_worker.core_worker.get_current_node_id()
    return dict(_global_node.address_info, node_id=node_id.hex())
# Functions to run as callback after a successful ray init.
# Populated externally; each hook is invoked with no arguments at the end
# of init().
_post_init_hooks = []
@PublicAPI
@client_mode_hook
def shutdown(_exiting_interpreter: bool = False):
    """Disconnect the worker, and terminate processes started by ray.init().

    This will automatically run at the end when a Python process that uses Ray
    exits. It is ok to run this twice in a row. The primary use case for this
    function is to cleanup state between tests.

    Note that this will clear any remote function definitions, actor
    definitions, and existing actors, so if you wish to use any previously
    defined remote functions or actors after calling ray.shutdown(), then you
    need to redefine them. If they were defined in an imported module, then you
    will need to reload the module.

    Args:
        _exiting_interpreter (bool): True if this is called by the atexit hook
            and false otherwise. If we are exiting the interpreter, we will
            wait a little while to print any extra error messages.
    """
    if _exiting_interpreter and global_worker.mode == SCRIPT_MODE:
        # This is a duration to sleep before shutting down everything in order
        # to make sure that log messages finish printing.
        time.sleep(0.5)
    # Stop this worker's helper threads and detach it from the node first.
    disconnect(_exiting_interpreter)
    # We need to destruct the core worker here because after this function,
    # we will tear down any processes spawned by ray.init() and the background
    # IO thread in the core worker doesn't currently handle that gracefully.
    if hasattr(global_worker, "gcs_client"):
        del global_worker.gcs_client
    if hasattr(global_worker, "core_worker"):
        global_worker.core_worker.shutdown()
        del global_worker.core_worker
    # Disconnect global state from GCS.
    ray.state.state.disconnect()
    # Shut down the Ray processes.
    global _global_node
    if _global_node is not None:
        if _global_node.is_head():
            # Only the head node cleans up the external storage.
            _global_node.destroy_external_storage()
        _global_node.kill_all_processes(check_alive=False, allow_graceful=True)
        _global_node = None
    # TODO(rkn): Instead of manually resetting some of the worker fields, we
    # should simply set "global_worker" to equal "None" or something like that.
    global_worker.set_mode(None)
# Tear Ray down when the interpreter exits; the True argument marks this as
# the atexit invocation so shutdown() can sleep briefly to let logs flush.
atexit.register(shutdown, True)

# TODO(edoakes): this should only be set in the driver.
def sigterm_handler(signum, frame):
    """Exit with the signal number so the atexit-registered shutdown runs."""
    sys.exit(signum)

try:
    ray._private.utils.set_sigterm_handler(sigterm_handler)
except ValueError:
    # NOTE(review): the ValueError presumably comes from signal.signal being
    # called off the main thread (handlers can only be set there) — confirm.
    # Fixed: the two adjacent string literals previously lacked a separating
    # space, so the message rendered as "...mightnot be cleaned up...".
    logger.warning("Failed to set SIGTERM handler, processes might "
                   "not be cleaned up properly on exit.")
# Install a wrapper around the current excepthook so that an uncaught driver
# exception is also recorded in the GCS worker table.
normal_excepthook = sys.excepthook

def custom_excepthook(type, value, tb):
    """Record a driver's fatal exception in GCS, then delegate to the
    previously installed excepthook."""
    is_connected_driver = (global_worker.mode == SCRIPT_MODE
                           and hasattr(global_worker, "worker_id"))
    if is_connected_driver:
        formatted_tb = "".join(traceback.format_tb(tb))
        ray.state.state._check_connected()
        ray.state.state.add_worker(global_worker.worker_id, gcs_utils.DRIVER,
                                   {"exception": formatted_tb})
    # Fall through to the original hook so default printing still happens.
    normal_excepthook(type, value, tb)

sys.excepthook = custom_excepthook
def print_to_stdstream(data):
    """Route one batch of worker log lines to stdout or stderr.

    ``data["is_err"]`` selects the stream; formatting is delegated to
    print_worker_logs().
    """
    if data["is_err"]:
        target = sys.stderr
    else:
        target = sys.stdout
    print_worker_logs(data, target)
# Start time of this process, used for relative time logs.
t0 = time.time()
# Whether the one-time "Tip: use `ray status` ..." hint has already been
# emitted by filter_autoscaler_events().
autoscaler_log_fyi_printed = False
def filter_autoscaler_events(lines: List[str]) -> Iterator[str]:
    """Yield only autoscaler event text from raw monitor log lines.

    Event lines are recognized by the ":event_summary:" magic token; the text
    after the token is yielded. A one-time usage tip precedes the first event.
    Yields nothing at all when AUTOSCALER_EVENTS is disabled.
    """
    global autoscaler_log_fyi_printed
    if not AUTOSCALER_EVENTS:
        return
    # Skip everything that is not an event-summary line.
    for raw_line in lines:
        if ray_constants.LOG_PREFIX_EVENT_SUMMARY not in raw_line:
            continue
        if not autoscaler_log_fyi_printed:
            yield ("Tip: use `ray status` to view detailed "
                   "autoscaling status. To disable autoscaler event "
                   "messages, you can set AUTOSCALER_EVENTS=0.")
            autoscaler_log_fyi_printed = True
        # The event text immediately follows the ":event_summary:"
        # magic token.
        yield raw_line.split(ray_constants.LOG_PREFIX_EVENT_SUMMARY)[1]
def time_string() -> str:
    """Return the relative time since the start of this job.

    For example, 15m30s. Hour and minute components are omitted when zero;
    the (integer-truncated) seconds component is always present.
    """
    delta = time.time() - t0
    # divmod replaces the previous repeated-subtraction loops, which used a
    # strict ">" comparison and therefore misrendered exact boundaries
    # (e.g. 3600s -> "59m60s" and 60s -> "60s" instead of "1h0s" / "1m0s").
    hours, rem = divmod(int(delta), 3600)
    minutes, seconds = divmod(rem, 60)
    output = ""
    if hours:
        output += "{}h".format(hours)
    if minutes:
        output += "{}m".format(minutes)
    output += "{}s".format(seconds)
    return output
# Global toggle consulted by print_worker_logs(); when we enter a breakpoint,
# worker logs are automatically disabled via this so they don't interleave
# with the debugger session.
_worker_logs_enabled = True
def print_worker_logs(data: Dict[str, str], print_file: Any):
    """Pretty-print one batch of worker log lines to ``print_file``.

    Each line is prefixed with a dim, colored "(pid[, ip])" tag identifying
    the producing process; the ip part is shown only for non-local workers.
    Autoscaler batches are reduced to their event summaries and stamped with
    the relative job time. No-op while worker logs are disabled.
    """
    if not _worker_logs_enabled:
        return

    def message_prefix(entry: Dict[str, str]) -> str:
        """The "pid=" tag (optionally led by actor/task name), or "" for the
        autoscaler/raylet pseudo-pids."""
        if entry["pid"] in ["autoscaler", "raylet"]:
            return ""
        tag = "pid="
        if entry["actor_name"]:
            tag = entry["actor_name"] + " " + tag
        elif entry["task_name"]:
            tag = entry["task_name"] + " " + tag
        return tag

    def message_color(entry: Dict[str, str]) -> str:
        """Yellow for raylet, bright cyan for autoscaler, cyan otherwise."""
        if entry["pid"] == "raylet":
            return colorama.Fore.YELLOW
        if entry["pid"] == "autoscaler":
            return colorama.Style.BRIGHT + colorama.Fore.CYAN
        return colorama.Fore.CYAN

    if data["pid"] == "autoscaler":
        pid = "{} +{}".format(data["pid"], time_string())
        lines = filter_autoscaler_events(data["lines"])
    else:
        pid = data["pid"]
        lines = data["lines"]
    # Only annotate the source ip for lines coming from another node.
    if data["ip"] == data["localhost"]:
        ip_part = ""
    else:
        ip_part = ", ip={}".format(data["ip"])
    for line in lines:
        print(
            "{}{}({}{}{}){} {}".format(colorama.Style.DIM,
                                       message_color(data),
                                       message_prefix(data), pid, ip_part,
                                       colorama.Style.RESET_ALL, line),
            file=print_file)
def listen_error_messages_raylet(worker, threads_stopped):
    """Listen to error messages in the background on the driver.

    This runs in a separate thread on the driver, polls the Redis error
    pubsub channel, and logs (via ``logger.warning``) the error messages
    that belong to this driver's job or to no job in particular.

    Args:
        worker: The worker class that this thread belongs to.
        threads_stopped (threading.Event): A threading event used to signal to
            the thread that it should exit.
    """
    worker.error_message_pubsub_client = worker.redis_client.pubsub(
        ignore_subscribe_messages=True)
    # Exports that are published after the call to
    # error_message_pubsub_client.subscribe and before the call to
    # error_message_pubsub_client.listen will still be processed in the loop.
    # Really we should just subscribe to the errors for this specific job.
    # However, currently all errors seem to be published on the same channel.
    error_pubsub_channel = gcs_utils.RAY_ERROR_PUBSUB_PATTERN
    worker.error_message_pubsub_client.psubscribe(error_pubsub_channel)
    try:
        if _internal_kv_initialized():
            # Get any autoscaler errors that occurred before the call to
            # subscribe.
            error_message = _internal_kv_get(DEBUG_AUTOSCALING_ERROR)
            if error_message is not None:
                logger.warning(error_message.decode())
        while True:
            # Exit if we received a signal that we should stop.
            if threads_stopped.is_set():
                return
            msg = worker.error_message_pubsub_client.get_message()
            if msg is None:
                # Nothing published yet; the short wait doubles as a
                # responsive shutdown check.
                threads_stopped.wait(timeout=0.01)
                continue
            pubsub_msg = gcs_utils.PubSubMessage.FromString(msg["data"])
            error_data = gcs_utils.ErrorTableData.FromString(pubsub_msg.data)
            job_id = error_data.job_id
            # Only surface errors for this driver's job, or job-agnostic
            # errors published under the nil job id.
            if job_id not in [
                    worker.current_job_id.binary(),
                    JobID.nil().binary(),
            ]:
                continue
            error_message = error_data.error_message
            if (error_data.type == ray_constants.TASK_PUSH_ERROR):
                # TODO(ekl) remove task push errors entirely now that we have
                # the separate unhandled exception handler.
                pass
            else:
                logger.warning(error_message)
    except (OSError, redis.exceptions.ConnectionError) as e:
        logger.error(f"listen_error_messages_raylet: {e}")
    finally:
        # Close the pubsub client to avoid leaking file descriptors.
        worker.error_message_pubsub_client.close()
@PublicAPI
@client_mode_hook
def is_initialized() -> bool:
    """Check if ray.init has been called yet.

    Returns:
        True if ray.init has already been called and false otherwise.
    """
    worker = ray.worker.global_worker
    return worker.connected
def connect(node,
            mode=WORKER_MODE,
            log_to_driver=False,
            worker=global_worker,
            driver_object_store_memory=None,
            job_id=None,
            namespace=None,
            job_config=None,
            runtime_env_hash=0,
            worker_shim_pid=0,
            ray_debugger_external=False):
    """Connect this worker to the raylet, to Plasma, and to Redis.

    Args:
        node (ray.node.Node): The node to connect.
        mode: The mode of the worker. One of SCRIPT_MODE, WORKER_MODE, and
            LOCAL_MODE.
        log_to_driver (bool): If true, then output from all of the worker
            processes on all nodes will be directed to the driver.
        worker: The ray.Worker instance.
        driver_object_store_memory: Deprecated.
        job_id: The ID of job. If it's None, then we will generate one.
        namespace (str): If provided, validated and written into the job
            config as the Ray namespace for this job.
        job_config (ray.job_config.JobConfig): The job configuration.
        runtime_env_hash (int): The hash of the runtime env for this worker.
        worker_shim_pid (int): The PID of the process for setup worker
            runtime env.
        ray_debugger_external (bool): Stored on the worker and later passed
            to the Ray debugger as its ``debugger_external`` setting.
    """
    # Do some basic checking to make sure we didn't call ray.init twice.
    error_message = "Perhaps you called ray.init twice by accident?"
    assert not worker.connected, error_message
    assert worker.cached_functions_to_run is not None, error_message
    # Enable nice stack traces on SIGSEGV etc.
    try:
        if not faulthandler.is_enabled():
            faulthandler.enable(all_threads=False)
    except io.UnsupportedOperation:
        pass  # ignore
    # Create a Redis client to primary.
    # The Redis client can safely be shared between threads. However,
    # that is not true of Redis pubsub clients. See the documentation at
    # https://github.com/andymccurdy/redis-py#thread-safety.
    worker.redis_client = node.create_redis_client()
    ray.state.state._initialize_global_state(
        node.redis_address, redis_password=node.redis_password)
    # Initialize some fields.
    if mode in (WORKER_MODE, RESTORE_WORKER_MODE, SPILL_WORKER_MODE,
                UTIL_WORKER_MODE):
        # We should not specify the job_id if it's `WORKER_MODE`.
        assert job_id is None
        job_id = JobID.nil()
    else:
        # This is the code path of driver mode.
        if job_id is None:
            job_id = ray.state.next_job_id()
    if mode is not SCRIPT_MODE and mode is not LOCAL_MODE and setproctitle:
        # Give idle (non-driver) worker processes a recognizable title.
        process_name = ray_constants.WORKER_PROCESS_TYPE_IDLE_WORKER
        if mode is SPILL_WORKER_MODE:
            process_name = (
                ray_constants.WORKER_PROCESS_TYPE_SPILL_WORKER_IDLE)
        elif mode is RESTORE_WORKER_MODE:
            process_name = (
                ray_constants.WORKER_PROCESS_TYPE_RESTORE_WORKER_IDLE)
        setproctitle.setproctitle(process_name)
    if not isinstance(job_id, JobID):
        raise TypeError("The type of given job id must be JobID.")
    # All workers start out as non-actors. A worker can be turned into an actor
    # after it is created.
    worker.node = node
    worker.set_mode(mode)
    # For driver's check that the version information matches the version
    # information that the Ray cluster was started with.
    try:
        ray._private.services.check_version_info(worker.redis_client)
    except Exception as e:
        if mode == SCRIPT_MODE:
            raise e
        elif mode == WORKER_MODE:
            # Workers can't raise to the user; push the error to the driver.
            traceback_str = traceback.format_exc()
            ray._private.utils.push_error_to_driver_through_redis(
                worker.redis_client,
                ray_constants.VERSION_MISMATCH_PUSH_ERROR,
                traceback_str,
                job_id=None)
    worker.lock = threading.RLock()
    driver_name = ""
    log_stdout_file_path = ""
    log_stderr_file_path = ""
    interactive_mode = False
    if mode == SCRIPT_MODE:
        import __main__ as main
        if hasattr(main, "__file__"):
            driver_name = main.__file__
        else:
            # Interactive shells have no script file to report.
            interactive_mode = True
            driver_name = "INTERACTIVE MODE"
    # NOTE(review): `not LOCAL_MODE` negates the constant, not the current
    # mode, so this condition never depends on `mode`; it looks like
    # `mode != LOCAL_MODE` was intended here. Confirm before changing.
    elif not LOCAL_MODE:
        raise ValueError(
            "Invalid worker mode. Expected DRIVER, WORKER or LOCAL.")
    redis_address, redis_port = node.redis_address.split(":")
    gcs_options = ray._raylet.GcsClientOptions(
        redis_address,
        int(redis_port),
        node.redis_password,
    )
    if job_config is None:
        job_config = ray.job_config.JobConfig()
    if namespace is not None:
        ray._private.utils.validate_namespace(namespace)
        # The namespace field of job config may have already been set in code
        # paths such as the client.
        job_config.set_ray_namespace(namespace)
    # Make sure breakpoint() in the user's code will
    # invoke the Ray debugger if we are in a worker or actor process
    # (but not on the driver).
    if mode == WORKER_MODE:
        os.environ["PYTHONBREAKPOINT"] = "ray.util.rpdb.set_trace"
    else:
        # Add hook to suppress worker logs during breakpoint.
        os.environ["PYTHONBREAKPOINT"] = "ray.util.rpdb._driver_set_trace"
    worker.ray_debugger_external = ray_debugger_external
    serialized_job_config = job_config.serialize()
    worker.core_worker = ray._raylet.CoreWorker(
        mode, node.plasma_store_socket_name, node.raylet_socket_name, job_id,
        gcs_options, node.get_logs_dir_path(), node.node_ip_address,
        node.node_manager_port, node.raylet_ip_address, (mode == LOCAL_MODE),
        driver_name, log_stdout_file_path, log_stderr_file_path,
        serialized_job_config, node.metrics_agent_port, runtime_env_hash,
        worker_shim_pid)
    worker.gcs_client = worker.core_worker.get_gcs_client()
    # If it's a driver and it's not coming from ray client, we'll prepare the
    # environment here. If it's ray client, the environmen will be prepared
    # at the server side.
    if mode == SCRIPT_MODE and not job_config.client_job:
        working_dir_pkg.upload_runtime_env_package_if_needed(job_config)
    # Notify raylet that the core worker is ready.
    worker.core_worker.notify_raylet()
    if driver_object_store_memory is not None:
        logger.warning("`driver_object_store_memory` is deprecated"
                       " and will be removed in the future.")
    # Start the import thread
    if mode not in (RESTORE_WORKER_MODE, SPILL_WORKER_MODE, UTIL_WORKER_MODE):
        worker.import_thread = import_thread.ImportThread(
            worker, mode, worker.threads_stopped)
        worker.import_thread.start()
    # If this is a driver running in SCRIPT_MODE, start a thread to print error
    # messages asynchronously in the background. Ideally the scheduler would
    # push messages to the driver's worker service, but we ran into bugs when
    # trying to properly shutdown the driver's worker service, so we are
    # temporarily using this implementation which constantly queries the
    # scheduler for new error messages.
    if mode == SCRIPT_MODE:
        worker.listener_thread = threading.Thread(
            target=listen_error_messages_raylet,
            name="ray_listen_error_messages",
            args=(worker, worker.threads_stopped))
        worker.listener_thread.daemon = True
        worker.listener_thread.start()
        if log_to_driver:
            global_worker_stdstream_dispatcher.add_handler(
                "ray_print_logs", print_to_stdstream)
            worker.logger_thread = threading.Thread(
                target=worker.print_logs, name="ray_print_logs")
            worker.logger_thread.daemon = True
            worker.logger_thread.start()
    if mode == SCRIPT_MODE:
        # Add the directory containing the script that is running to the Python
        # paths of the workers. Also add the current directory. Note that this
        # assumes that the directory structures on the machines in the clusters
        # are the same.
        # When using an interactive shell, there is no script directory.
        if not interactive_mode:
            script_directory = os.path.abspath(os.path.dirname(sys.argv[0]))
            worker.run_function_on_all_workers(
                lambda worker_info: sys.path.insert(1, script_directory))
        # In client mode, if we use runtime envs with "working_dir", then
        # it'll be handled automatically. Otherwise, add the current dir.
        if not job_config.client_job and len(
                job_config.get_runtime_env_uris()) == 0:
            current_directory = os.path.abspath(os.path.curdir)
            worker.run_function_on_all_workers(
                lambda worker_info: sys.path.insert(1, current_directory))
        # TODO(rkn): Here we first export functions to run, then remote
        # functions. The order matters. For example, one of the functions to
        # run may set the Python path, which is needed to import a module used
        # to define a remote function. We may want to change the order to
        # simply be the order in which the exports were defined on the driver.
        # In addition, we will need to retain the ability to decide what the
        # first few exports are (mostly to set the Python path). Additionally,
        # note that the first exports to be defined on the driver will be the
        # ones defined in separate modules that are imported by the driver.
        # Export cached functions_to_run.
        for function in worker.cached_functions_to_run:
            worker.run_function_on_all_workers(function)
    worker.cached_functions_to_run = None
    # Setup tracing here
    if _internal_kv_get("tracing_startup_hook"):
        ray.util.tracing.tracing_helper._global_is_tracing_enabled = True
        if not getattr(ray, "__traced__", False):
            _setup_tracing = import_from_string(
                _internal_kv_get("tracing_startup_hook").decode("utf-8"))
            _setup_tracing()
            ray.__traced__ = True
def disconnect(exiting_interpreter=False):
    """Disconnect this worker from the raylet and object store."""
    # ``exiting_interpreter`` is accepted for symmetry with shutdown() but is
    # not used in this body.
    # Reset the list of cached remote functions and actors so that if more
    # remote functions or actors are defined and then connect is called again,
    # the remote functions will be exported. This is mostly relevant for the
    # tests.
    worker = global_worker
    if worker.connected:
        # Shutdown all of the threads that we've started. TODO(rkn): This
        # should be handled cleanly in the worker object's destructor and not
        # in this disconnect method.
        worker.threads_stopped.set()
        if hasattr(worker, "import_thread"):
            worker.import_thread.join_import_thread()
        if hasattr(worker, "listener_thread"):
            worker.listener_thread.join()
        if hasattr(worker, "logger_thread"):
            worker.logger_thread.join()
        # Clear the stop flag so a subsequent connect() can start new threads.
        worker.threads_stopped.clear()
        worker._session_index += 1
        global_worker_stdstream_dispatcher.remove_handler("ray_print_logs")
    worker.node = None  # Disconnect the worker from the node.
    worker.cached_functions_to_run = []
    worker.serialization_context_map.clear()
    try:
        ray_actor = ray.actor
    except AttributeError:
        ray_actor = None  # This can occur during program termination
    if ray_actor is not None:
        ray_actor.ActorClassMethodMetadata.reset_cache()
@contextmanager
def _changeproctitle(title, next_title):
    """Set the OS process title to ``title`` for the duration of the block,
    then set it to ``next_title`` on exit.

    No-op in LOCAL_MODE. Note that _mode() is consulted separately on entry
    and on exit rather than being cached.
    """
    if _mode() is not LOCAL_MODE:
        setproctitle.setproctitle(title)
    try:
        yield
    finally:
        if _mode() is not LOCAL_MODE:
            setproctitle.setproctitle(next_title)
@DeveloperAPI
def show_in_dashboard(message: str, key: str = "", dtype: str = "text"):
    """Display message in dashboard.

    Display message for the current task or actor in the dashboard.
    For example, this can be used to display the status of a long-running
    computation.

    Args:
        message (str): Message to be displayed.
        key (str): The key name for the message. Multiple messages under
            different keys will be displayed at the same time. Messages
            under the same key will be overridden.
        dtype (str): The type of message for rendering. One of the
            following: text, html.
    """
    worker = global_worker
    worker.check_connected()
    acceptable_dtypes = {"text", "html"}
    assert dtype in acceptable_dtypes, (
        f"dtype accepts only: {acceptable_dtypes}")
    # JSON-encode the message payload and hand it to the core worker.
    payload = json.dumps({"message": message, "dtype": dtype}).encode()
    worker.core_worker.set_webui_display(key.encode(), payload)
# One-shot latch so the "blocking ray.get inside async actor" warning in
# get() is emitted at most once per process.
blocking_get_inside_async_warned = False
@PublicAPI
@client_mode_hook
def get(object_refs: Union[ray.ObjectRef, List[ray.ObjectRef]],
        *,
        timeout: Optional[float] = None) -> Union[Any, List[Any]]:
    """Get a remote object or a list of remote objects from the object store.

    This method blocks until the object corresponding to the object ref is
    available in the local object store. If this object is not in the local
    object store, it will be shipped from an object store that has it (once the
    object has been created). If object_refs is a list, then the objects
    corresponding to each object in the list will be returned.

    Ordering for an input list of object refs is preserved for each object
    returned. That is, if an object ref to A precedes an object ref to B in the
    input list, then A will precede B in the returned list.

    This method will issue a warning if it's running inside async context,
    you can use ``await object_ref`` instead of ``ray.get(object_ref)``. For
    a list of object refs, you can use ``await asyncio.gather(*object_refs)``.

    Args:
        object_refs: Object ref of the object to get or a list of object refs
            to get.
        timeout (Optional[float]): The maximum amount of time in seconds to
            wait before returning.

    Returns:
        A Python object or a list of Python objects.

    Raises:
        GetTimeoutError: A GetTimeoutError is raised if a timeout is set and
            the get takes longer than timeout to return.
        Exception: An exception is raised if the task that created the object
            or that created one of the objects raised an exception.
    """
    worker = global_worker
    worker.check_connected()
    if hasattr(
            worker,
            "core_worker") and worker.core_worker.current_actor_is_asyncio():
        global blocking_get_inside_async_warned
        if not blocking_get_inside_async_warned:
            logger.warning("Using blocking ray.get inside async actor. "
                           "This blocks the event loop. Please use `await` "
                           "on object ref with asyncio.gather if you want to "
                           "yield execution to the event loop instead.")
            # Warn only once per process.
            blocking_get_inside_async_warned = True
    with profiling.profile("ray.get"):
        # Normalize a single ref to a one-element list; remembered so the
        # result can be unwrapped again before returning.
        is_individual_id = isinstance(object_refs, ray.ObjectRef)
        if is_individual_id:
            object_refs = [object_refs]
        if not isinstance(object_refs, list):
            raise ValueError("'object_refs' must either be an object ref "
                             "or a list of object refs.")
        # TODO(ujvl): Consider how to allow user to retrieve the ready objects.
        values, debugger_breakpoint = worker.get_objects(
            object_refs, timeout=timeout)
        for i, value in enumerate(values):
            if isinstance(value, RayError):
                if isinstance(value, ray.exceptions.ObjectLostError):
                    worker.core_worker.dump_object_store_memory_usage()
                if isinstance(value, RayTaskError):
                    # Re-raise as the exception type the task itself raised.
                    raise value.as_instanceof_cause()
                else:
                    raise value
        if is_individual_id:
            values = values[0]
        if debugger_breakpoint != b"":
            # A task hit a Ray debugger breakpoint; attach the debugger to
            # the caller's frame before returning.
            frame = sys._getframe().f_back
            rdb = ray.util.pdb.connect_ray_pdb(
                host=None,
                port=None,
                patch_stdstreams=False,
                quiet=None,
                breakpoint_uuid=debugger_breakpoint.decode()
                if debugger_breakpoint else None,
                debugger_external=worker.ray_debugger_external)
            rdb.set_trace(frame=frame)
        return values
@PublicAPI
@client_mode_hook
def put(value: Any, *,
        _owner: Optional["ray.actor.ActorHandle"] = None) -> ray.ObjectRef:
    """Store an object in the object store.

    The object may not be evicted while a reference to the returned ID exists.

    Args:
        value: The Python object to be stored.
        _owner: The actor that should own this object. This allows creating
            objects with lifetimes decoupled from that of the creating process.
            Note that the owner actor must be passed a reference to the object
            prior to the object creator exiting, otherwise the reference will
            still be lost.

    Returns:
        The object ref assigned to this value.

    Raises:
        RuntimeError: If the owner actor is no longer alive.
        TypeError: If ``_owner`` is neither None nor an ActorHandle.
        ObjectStoreFullError: If the value cannot fit in the object store.
    """
    worker = global_worker
    worker.check_connected()
    if _owner is None:
        serialize_owner_address = None
    elif isinstance(_owner, ray.actor.ActorHandle):
        # Ensure `ray.state.state.global_state_accessor` is not None
        ray.state.state._check_connected()
        # Resolve the owning actor's address from the GCS actor table.
        owner_address = gcs_utils.ActorTableData.FromString(
            ray.state.state.global_state_accessor.get_actor_info(
                _owner._actor_id)).address
        if len(owner_address.worker_id) == 0:
            # A dead actor cannot own new objects.
            raise RuntimeError(
                f"{_owner} is not alive, it's worker_id is empty!")
        serialize_owner_address = owner_address.SerializeToString()
    else:
        raise TypeError(
            f"Expect an `ray.actor.ActorHandle`, but got: {type(_owner)}")
    with profiling.profile("ray.put"):
        try:
            object_ref = worker.put_object(
                value, owner_address=serialize_owner_address)
        except ObjectStoreFullError:
            logger.info(
                "Put failed since the value was either too large or the "
                "store was full of pinned objects.")
            # Propagate the error to the caller after logging.
            raise
    return object_ref
# One-shot latch so the "blocking ray.wait inside async method" debug message
# in wait() is emitted at most once per process.
blocking_wait_inside_async_warned = False
@PublicAPI
@client_mode_hook
def wait(object_refs: List[ray.ObjectRef],
         *,
         num_returns: int = 1,
         timeout: Optional[float] = None,
         fetch_local: bool = True
         ) -> Tuple[List[ray.ObjectRef], List[ray.ObjectRef]]:
    """Return a list of IDs that are ready and a list of IDs that are not.

    If timeout is set, the function returns either when the requested number of
    IDs are ready or when the timeout is reached, whichever occurs first. If it
    is not set, the function simply waits until that number of objects is ready
    and returns that exact number of object refs.

    This method returns two lists. The first list consists of object refs that
    correspond to objects that are available in the object store. The second
    list corresponds to the rest of the object refs (which may or may not be
    ready).

    Ordering of the input list of object refs is preserved. That is, if A
    precedes B in the input list, and both are in the ready list, then A will
    precede B in the ready list. This also holds true if A and B are both in
    the remaining list.

    This method will issue a warning if it's running inside an async context.
    Instead of ``ray.wait(object_refs)``, you can use
    ``await asyncio.wait(object_refs)``.

    Args:
        object_refs (List[ObjectRef]): List of object refs for objects that may
            or may not be ready. Note that these IDs must be unique.
        num_returns (int): The number of object refs that should be returned.
        timeout (float): The maximum amount of time in seconds to wait before
            returning.
        fetch_local (bool): If True, wait for the object to be downloaded onto
            the local node before returning it as ready. If False, ray.wait()
            will not trigger fetching of objects to the local node and will
            return immediately once the object is available anywhere in the
            cluster.

    Returns:
        A list of object refs that are ready and a list of the remaining object
        IDs.

    Raises:
        TypeError: If ``object_refs`` is not a list of ObjectRefs.
        ValueError: If the refs are not unique, the timeout is negative, or
            ``num_returns`` is out of range.
    """
    worker = global_worker
    worker.check_connected()
    if hasattr(worker,
               "core_worker") and worker.core_worker.current_actor_is_asyncio(
               ) and timeout != 0:
        global blocking_wait_inside_async_warned
        if not blocking_wait_inside_async_warned:
            logger.debug("Using blocking ray.wait inside async method. "
                         "This blocks the event loop. Please use `await` "
                         "on object ref with asyncio.wait. ")
            # Warn only once per process.
            blocking_wait_inside_async_warned = True
    if isinstance(object_refs, ObjectRef):
        raise TypeError(
            "wait() expected a list of ray.ObjectRef, got a single "
            "ray.ObjectRef")
    if not isinstance(object_refs, list):
        raise TypeError("wait() expected a list of ray.ObjectRef, "
                        f"got {type(object_refs)}")
    if timeout is not None and timeout < 0:
        raise ValueError("The 'timeout' argument must be nonnegative. "
                         f"Received {timeout}")
    for object_ref in object_refs:
        if not isinstance(object_ref, ObjectRef):
            raise TypeError("wait() expected a list of ray.ObjectRef, "
                            f"got list containing {type(object_ref)}")
    # (A redundant second worker.check_connected() call previously sat here;
    # the check at the top of the function already covers it.)
    # TODO(swang): Check main thread.
    with profiling.profile("ray.wait"):
        # TODO(rkn): This is a temporary workaround for
        # https://github.com/ray-project/ray/issues/997. However, it should be
        # fixed in Arrow instead of here.
        if len(object_refs) == 0:
            return [], []
        if len(object_refs) != len(set(object_refs)):
            raise ValueError("Wait requires a list of unique object refs.")
        if num_returns <= 0:
            raise ValueError(
                "Invalid number of objects to return %d." % num_returns)
        if num_returns > len(object_refs):
            raise ValueError("num_returns cannot be greater than the number "
                             "of objects provided to ray.wait.")
        # A None timeout means wait (effectively) forever.
        timeout = timeout if timeout is not None else 10**6
        timeout_milliseconds = int(timeout * 1000)
        ready_ids, remaining_ids = worker.core_worker.wait(
            object_refs,
            num_returns,
            timeout_milliseconds,
            worker.current_task_id,
            fetch_local,
        )
        return ready_ids, remaining_ids
@PublicAPI
@client_mode_hook
def get_actor(name: str,
              namespace: Optional[str] = None) -> "ray.actor.ActorHandle":
    """Get a handle to a named actor.

    Gets a handle to an actor with the given name. The actor must
    have been created with Actor.options(name="name").remote(). This
    works for both detached & non-detached actors.

    Args:
        name: The name of the actor.
        namespace: The namespace of the actor, or None to specify the current
            namespace.

    Returns:
        ActorHandle to the actor.

    Raises:
        ValueError if the named actor does not exist.
    """
    if not name:
        raise ValueError("Please supply a non-empty value to get_actor")
    if namespace is not None:
        ray._private.utils.validate_namespace(namespace)
    # An empty namespace string means "use the current namespace".
    resolved_namespace = namespace if namespace is not None else ""
    caller = global_worker
    caller.check_connected()
    return caller.core_worker.get_named_actor_handle(name, resolved_namespace)
@PublicAPI
@client_mode_hook
def kill(actor: "ray.actor.ActorHandle", *, no_restart: bool = True):
    """Kill an actor forcefully.

    This will interrupt any running tasks on the actor, causing them to fail
    immediately. ``atexit`` handlers installed in the actor will not be run.

    If you want to kill the actor but let pending tasks finish,
    you can call ``actor.__ray_terminate__.remote()`` instead to queue a
    termination task. Any ``atexit`` handlers installed in the actor *will*
    be run in this case.

    If the actor is a detached actor, subsequent calls to get its handle via
    ray.get_actor will fail.

    Args:
        actor (ActorHandle): Handle to the actor to kill.
        no_restart (bool): Whether or not this actor should be restarted if
            it's a restartable actor.
    """
    caller = global_worker
    caller.check_connected()
    if isinstance(actor, ray.actor.ActorHandle):
        caller.core_worker.kill_actor(actor._ray_actor_id, no_restart)
    else:
        raise ValueError(
            f"ray.kill() only supported for actors. Got: {type(actor)}.")
@PublicAPI
@client_mode_hook
def cancel(object_ref: ray.ObjectRef,
           *,
           force: bool = False,
           recursive: bool = True):
    """Cancels a task according to the following conditions.

    If the specified task is pending execution, it will not be executed. If
    the task is currently executing, the behavior depends on the ``force``
    flag. When ``force=False``, a KeyboardInterrupt will be raised in Python
    and when ``force=True``, the executing task will immediately exit.
    If the task is already finished, nothing will happen.

    Only non-actor tasks can be canceled. Canceled tasks will not be
    retried (max_retries will not be respected).

    Calling ray.get on a canceled task will raise a TaskCancelledError or a
    WorkerCrashedError if ``force=True``.

    Args:
        object_ref (ObjectRef): ObjectRef returned by the task
            that should be canceled.
        force (boolean): Whether to force-kill a running task by killing
            the worker that is running the task.
        recursive (boolean): Whether to try to cancel tasks submitted by the
            task specified.

    Raises:
        TypeError: This is also raised for actor tasks.
    """
    current_worker = ray.worker.global_worker
    current_worker.check_connected()
    if isinstance(object_ref, ray.ObjectRef):
        return current_worker.core_worker.cancel_task(object_ref, force,
                                                      recursive)
    raise TypeError("ray.cancel() only supported for non-actor object refs. "
                    f"Got: {type(object_ref)}.")
def _mode(worker=global_worker):
    """This is a wrapper around worker.mode.

    We use this wrapper so that in the remote decorator, we can call _mode()
    instead of worker.mode. The difference is that when we attempt to
    serialize remote functions, we don't attempt to serialize the worker
    object, which cannot be serialized.
    """
    # Note: the default argument binds the global_worker object at function
    # definition time.
    return worker.mode
def make_decorator(num_returns=None,
                   num_cpus=None,
                   num_gpus=None,
                   memory=None,
                   object_store_memory=None,
                   resources=None,
                   accelerator_type=None,
                   max_calls=None,
                   max_retries=None,
                   max_restarts=None,
                   max_task_retries=None,
                   runtime_env=None,
                   worker=None,
                   retry_exceptions=None):
    """Build the decorator applied by ``@ray.remote``.

    Returns a ``decorator`` closure that validates the collected options
    against the kind of target it receives and wraps the target in either a
    ``RemoteFunction`` (functions / Cython functions) or a remote actor
    class (classes).

    Note: ``worker`` is accepted by this factory but is not referenced
    inside the closure.
    """
    def decorator(function_or_class):
        # Case 1: plain (or Cython) function -> remote function.
        if (inspect.isfunction(function_or_class)
                or is_cython(function_or_class)):
            # Actor-only options are rejected for functions.
            if max_restarts is not None:
                raise ValueError("The keyword 'max_restarts' is not "
                                 "allowed for remote functions.")
            if max_task_retries is not None:
                raise ValueError("The keyword 'max_task_retries' is not "
                                 "allowed for remote functions.")
            # Range checks for the function-only numeric options.
            if num_returns is not None and (not isinstance(num_returns, int)
                                            or num_returns < 0):
                raise ValueError(
                    "The keyword 'num_returns' only accepts 0 or a"
                    " positive integer")
            # max_retries == -1 means "retry forever".
            if max_retries is not None and (not isinstance(max_retries, int)
                                            or max_retries < -1):
                raise ValueError(
                    "The keyword 'max_retries' only accepts 0, -1 or a"
                    " positive integer")
            if max_calls is not None and (not isinstance(max_calls, int)
                                          or max_calls < 0):
                raise ValueError(
                    "The keyword 'max_calls' only accepts 0 or a positive"
                    " integer")
            # Positional argument order here must match the
            # RemoteFunction constructor signature.
            return ray.remote_function.RemoteFunction(
                Language.PYTHON, function_or_class, None, num_cpus, num_gpus,
                memory, object_store_memory, resources, accelerator_type,
                num_returns, max_calls, max_retries, retry_exceptions,
                runtime_env)
        # Case 2: class -> remote actor.
        if inspect.isclass(function_or_class):
            # Function-only options are rejected for actors.
            if num_returns is not None:
                raise TypeError("The keyword 'num_returns' is not "
                                "allowed for actors.")
            if max_retries is not None:
                raise TypeError("The keyword 'max_retries' is not "
                                "allowed for actors.")
            if retry_exceptions is not None:
                raise TypeError("The keyword 'retry_exceptions' is not "
                                "allowed for actors.")
            if max_calls is not None:
                raise TypeError("The keyword 'max_calls' is not "
                                "allowed for actors.")
            # -1 means restart/retry indefinitely; 0 disables it.
            if max_restarts is not None and (not isinstance(max_restarts, int)
                                             or max_restarts < -1):
                raise ValueError(
                    "The keyword 'max_restarts' only accepts -1, 0 or a"
                    " positive integer")
            if max_task_retries is not None and (not isinstance(
                    max_task_retries, int) or max_task_retries < -1):
                raise ValueError(
                    "The keyword 'max_task_retries' only accepts -1, 0 or a"
                    " positive integer")
            return ray.actor.make_actor(function_or_class, num_cpus, num_gpus,
                                        memory, object_store_memory, resources,
                                        accelerator_type, max_restarts,
                                        max_task_retries, runtime_env)
        # Anything else (lambda-like objects excluded, builtins, etc.).
        raise TypeError("The @ray.remote decorator must be applied to "
                        "either a function or to a class.")
    return decorator
@PublicAPI
def remote(*args, **kwargs):
    """Defines a remote function or an actor class.

    This can be used with no arguments to define a remote function or actor
    as follows:

    .. code-block:: python

        @ray.remote
        def f():
            return 1

        @ray.remote
        class Foo:
            def method(self):
                return 1

    It can also be used with specific keyword arguments as follows:

    .. code-block:: python

        @ray.remote(num_gpus=1, max_calls=1, num_returns=2)
        def f():
            return 1, 2

        @ray.remote(num_cpus=2, resources={"CustomResource": 1})
        class Foo:
            def method(self):
                return 1

    Remote task and actor objects returned by @ray.remote can also be
    dynamically modified with the same arguments as above using
    ``.options()`` as follows:

    .. code-block:: python

        @ray.remote(num_gpus=1, max_calls=1, num_returns=2)
        def f():
            return 1, 2
        g = f.options(num_gpus=2, max_calls=None)

        @ray.remote(num_cpus=2, resources={"CustomResource": 1})
        class Foo:
            def method(self):
                return 1
        Bar = Foo.options(num_cpus=1, resources=None)

    Running remote actors will be terminated when the actor handle to them
    in Python is deleted, which will cause them to complete any outstanding
    work and then shut down. If you want to kill them immediately, you can
    also call ``ray.kill(actor)``.

    Args:
        num_returns (int): This is only for *remote functions*. It specifies
            the number of object refs returned by
            the remote function invocation.
        num_cpus (float): The quantity of CPU cores to reserve
            for this task or for the lifetime of the actor.
        num_gpus (int): The quantity of GPUs to reserve
            for this task or for the lifetime of the actor.
        resources (Dict[str, float]): The quantity of various custom resources
            to reserve for this task or for the lifetime of the actor.
            This is a dictionary mapping strings (resource names) to floats.
        accelerator_type: If specified, requires that the task or actor run
            on a node with the specified type of accelerator.
            See `ray.accelerators` for accelerator types.
        max_calls (int): Only for *remote functions*. This specifies the
            maximum number of times that a given worker can execute
            the given remote function before it must exit
            (this can be used to address memory leaks in third-party
            libraries or to reclaim resources that cannot easily be
            released, e.g., GPU memory that was acquired by TensorFlow).
            By default this is infinite.
        max_restarts (int): Only for *actors*. This specifies the maximum
            number of times that the actor should be restarted when it dies
            unexpectedly. The minimum valid value is 0 (default),
            which indicates that the actor doesn't need to be restarted.
            A value of -1 indicates that an actor should be restarted
            indefinitely.
        max_task_retries (int): Only for *actors*. How many times to
            retry an actor task if the task fails due to a system error,
            e.g., the actor has died. If set to -1, the system will
            retry the failed task until the task succeeds, or the actor
            has reached its max_restarts limit. If set to `n > 0`, the
            system will retry the failed task up to n times, after which the
            task will throw a `RayActorError` exception upon :obj:`ray.get`.
            Note that Python exceptions are not considered system errors
            and will not trigger retries.
        max_retries (int): Only for *remote functions*. This specifies
            the maximum number of times that the remote function
            should be rerun when the worker process executing it
            crashes unexpectedly. The minimum valid value is 0,
            the default is 4 (default), and a value of -1 indicates
            infinite retries.
        runtime_env (Dict[str, Any]): Specifies the runtime environment for
            this actor or task and its children. See
            :ref:`runtime-environments` for detailed documentation. This API is
            in beta and may change before becoming stable.
        retry_exceptions (bool): Only for *remote functions*. This specifies
            whether application-level errors should be retried
            up to max_retries times.
        override_environment_variables (Dict[str, str]): (Deprecated in Ray
            1.4.0, will be removed in Ray 1.6--please use the ``env_vars``
            field of :ref:`runtime-environments` instead.) This specifies
            environment variables to override for the actor or task. The
            overrides are propagated to all child actors and tasks. This
            is a dictionary mapping variable names to their values. Existing
            variables can be overridden, new ones can be created, and an
            existing variable can be unset by setting it to an empty string.
            Note: can only be set via `.options()`.
    """
    worker = global_worker

    if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
        # This is the case where the decorator is just @ray.remote.
        return make_decorator(worker=worker)(args[0])

    # Parse the keyword arguments from the decorator.
    valid_kwargs = [
        "num_returns", "num_cpus", "num_gpus", "memory", "object_store_memory",
        "resources", "accelerator_type", "max_calls", "max_restarts",
        "max_task_retries", "max_retries", "runtime_env", "retry_exceptions"
    ]
    error_string = ("The @ray.remote decorator must be applied either "
                    "with no arguments and no parentheses, for example "
                    "'@ray.remote', or it must be applied using some of "
                    f"the arguments in the list {valid_kwargs}, for example "
                    "'@ray.remote(num_returns=2, "
                    "resources={\"CustomResource\": 1})'.")
    assert len(args) == 0 and len(kwargs) > 0, error_string
    for key in kwargs:
        assert key in valid_kwargs, error_string

    # Consistency fix: these two previously used
    # `kwargs["k"] if "k" in kwargs else None`; every other option below
    # already used kwargs.get(), which is equivalent.
    num_cpus = kwargs.get("num_cpus")
    num_gpus = kwargs.get("num_gpus")
    resources = kwargs.get("resources")
    if not isinstance(resources, dict) and resources is not None:
        raise TypeError("The 'resources' keyword argument must be a "
                        f"dictionary, but received type {type(resources)}.")
    if resources is not None:
        # CPU/GPU must be requested via the dedicated keywords.
        assert "CPU" not in resources, "Use the 'num_cpus' argument."
        assert "GPU" not in resources, "Use the 'num_gpus' argument."

    accelerator_type = kwargs.get("accelerator_type")

    # Handle other arguments.
    num_returns = kwargs.get("num_returns")
    max_calls = kwargs.get("max_calls")
    max_restarts = kwargs.get("max_restarts")
    max_task_retries = kwargs.get("max_task_retries")
    memory = kwargs.get("memory")
    object_store_memory = kwargs.get("object_store_memory")
    max_retries = kwargs.get("max_retries")
    runtime_env = kwargs.get("runtime_env")
    retry_exceptions = kwargs.get("retry_exceptions")

    return make_decorator(
        num_returns=num_returns,
        num_cpus=num_cpus,
        num_gpus=num_gpus,
        memory=memory,
        object_store_memory=object_store_memory,
        resources=resources,
        accelerator_type=accelerator_type,
        max_calls=max_calls,
        max_restarts=max_restarts,
        max_task_retries=max_task_retries,
        max_retries=max_retries,
        runtime_env=runtime_env,
        worker=worker,
        retry_exceptions=retry_exceptions)
|
mbarete.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#Modulos importados
import sqlite3 #mini gestor de base de datos, muy util para pruebas y almacenar datos de nuestro programa
import time #para hacer calculos de tiempo
import datetime #para abotener y manipular datos de fechas
import os #libreria con todo los necesario para comunicar nuestro programa con el sistema
import math
from tkinter import * #libreria base, para las clases de interfaces de escritorio
import tkinter.colorchooser as colorchooser #modulo de la libreria tkinter para seleccionar color
import threading #para poder ejecutar procesos en segundo plano, estos proceso afectan al hilo principal, variables, funciones, etc,etc.
import socket
import urllib
import requests
#se identaran las siguentes clases en variables globales
#la variables globales permiten acceder a las propiedades y metodos, de estos objetos desde cualquier lugar
global variables, tablas, BBDD, RESET, ventana
#FUNCIONES UTILES
#PARA DIFERENTES TIPOS DE PROYECTOS, FUNCIONES PARA PRUEBAS, MANEJO DE DATOS, VALIDACIONES, ETC,ETC
def okString(string, para='Clave'):
    """Sanitize *string* for use as a key/identifier.

    Strips surrounding whitespace and, when ``para == 'Clave'`` (the
    default), removes characters that are not allowed in keys (backtick,
    'ñ', underscore, quote, grave-accented vowels, dot).

    Bug fixed: the original built the cleaned string but never returned it,
    so the function always returned None.

    Args:
        string: raw text to clean.
        para: cleaning mode; character removal is applied only for 'Clave'.

    Returns:
        The cleaned string.
    """
    cleaned = string.strip()
    if para == "Clave":
        # Characters forbidden in keys.
        # NOTE(review): the original also defined unused yesKey/notUtf8
        # tables, suggesting a char->substitute mapping was planned but
        # never implemented; they were dead code and have been removed.
        not_key = ['`', 'ñ', '_', "'", 'à', 'è', 'ì', 'ò', 'ù', '.']
        for char in not_key:
            cleaned = cleaned.replace(char, '')
    return cleaned
def ordenar(myDict, orden=1, ordenarCampo=""):
    """Return a copy of *myDict* with its entries ordered by one field.

    Each value of ``myDict`` is itself a dict of fields. Entries are sorted
    by the field named ``ordenarCampo`` (falling back to the first field of
    the first entry when that name is absent). String values are compared
    case-insensitively and stripped.

    Args:
        myDict: mapping of key -> {field: value, ...}.
        orden: 1 for ascending (default), 0 for descending; any other value
            preserves the original order.
        ordenarCampo: name of the field to sort by.

    Returns:
        A new dict whose insertion order reflects the sort, or ``myDict``
        unchanged when the sort field's type is not str/int/float.

    Improvements over the original: an empty dict no longer raises
    IndexError, and the O(n^2) selection sort was replaced by the built-in
    stable O(n log n) ``sorted`` (tie order is preserved in both
    directions, matching the old strict-comparison selection).
    """
    if not myDict:
        # The original crashed with IndexError on an empty dict.
        return myDict
    primera = next(iter(myDict.values()))
    campo = ordenarCampo if ordenarCampo in primera else next(iter(primera))
    # Mirror the original's exact type-name gate (bool, Decimal, etc. are
    # deliberately not sorted, exactly as the old str(type(...)) test did).
    if type(primera[campo]).__name__ not in ('str', 'int', 'float'):
        return myDict

    def _valor(item):
        # Sort key: normalized field value of the entry.
        v = item[1][campo]
        return v.lower().strip() if isinstance(v, str) else v

    if orden == 1:
        return dict(sorted(myDict.items(), key=_valor))
    if orden == 0:
        return dict(sorted(myDict.items(), key=_valor, reverse=True))
    # Unknown 'orden': the original effectively returned input order.
    return dict(myDict)
def buscador(myDictionary,buscar=" ",exacta=0,campoClave="",clavesString=[" "],clavesList="",returnKeys=[]):
    """Filter *myDictionary*, returning the records that match a search.

    (Original author's note: this function still needs optimizing.)

    Args:
        myDictionary: dict of key -> record(dict) to search through.
        buscar: space-separated search words. A word prefixed with '-' must
            NOT match (exclusion); any other word (optionally prefixed with
            '+') must match. With the default blank string everything
            matches, so only the campoClave / returnKeys transformations
            are applied.
        exacta: 1 compares whole words for (case-insensitive) equality;
            0 performs a case-insensitive prefix match.
        campoClave: field of each record whose value becomes the key of the
            returned dict (falls back to the record's first field when
            absent).
        clavesString: record fields searched as strings (split on spaces).
        clavesList: record field searched as a list of tags.
        returnKeys: when given, each returned record keeps only these
            fields.

    Returns:
        dict keyed by the (stringified) campoClave value of each matching
        record.

    NOTE(review): the mutable default arguments (clavesString=[" "],
    returnKeys=[]) are shared across calls — left as-is here, but they
    should never be mutated. Also note that 'lista' below is not reset
    between successive search words, so matches from an earlier word carry
    over into the next pass — possibly a latent bug; behavior preserved.
    """
    # Split the query into inclusion (filtrar) and exclusion (omitir) words.
    search=buscar.strip()
    omitir=[]
    filtrar=[]
    for x in search.split(" "):
        if ("-" in x):
            omitir+=[x.replace("-",'')]
        else:
            filtrar+=[x.replace("+",'')]
    ret={}
    lista=[]
    myDict=myDictionary
    # Pass 1: keep records matching every inclusion word (myDict narrows
    # after each word).
    if filtrar:
        for search in filtrar:
            ret={}
            if exacta:
                for clave in [palabra for palabra in myDict]:
                    # Whole-word equality against the tag list, if one is
                    # configured; otherwise fall through to string fields.
                    if ([etiqu for etiqu in myDict[clave][clavesList] if (search.lower()==etiqu.lower())] if clavesList else False) :
                        lista+=[myDict[clave]]
                    else:
                        for check in [str(myDict[clave][claveString]).lower().split(' ') for claveString in clavesString]:
                            if (search.lower() in check):
                                lista+=[myDict[clave]]
            else:
                for clave in [palabra for palabra in myDict]:
                    # Prefix match against tags, then against string fields.
                    if ([etiqu for etiqu in myDict[clave][clavesList] if (search.lower()==etiqu.lower()[0:len(search.lower())])] if clavesList else False) :
                        lista+=[myDict[clave]]
                    elif ([check for check in [str(myDict[clave][claveString]).lower().split(' ') for claveString in clavesString] if (search.lower() in [ checking.lower()[0:len(search)] for checking in check])]):
                        lista+=[myDict[clave]]
            # Re-key the surviving records by campoClave (or first field).
            for ok in lista:
                pasar={}
                for key in [keyword for keyword in ok]:
                    pasar.setdefault(str(key),ok[key])
                ret.setdefault(str(ok[campoClave if (campoClave in [clave for clave in ok]) else [clave for clave in ok][0]]),pasar)
            myDict=ret
    # Pass 2: drop records matching any exclusion word (same structure as
    # above with the match conditions negated).
    if omitir:
        for search in omitir:
            ret={}
            if exacta:
                for clave in [palabra for palabra in myDict]:
                    if not ([etiqu for etiqu in myDict[clave][clavesList] if (search.lower()==etiqu.lower())] if clavesList else False) :
                        lista+=[myDict[clave]]
                    elif not ([check for check in [str(myDict[clave][claveString]).lower().split(' ') for claveString in clavesString] if (search.lower() in check)]):
                        lista+=[myDict[clave]]
            else:
                for clave in [palabra for palabra in myDict]:
                    if not ([etiqu for etiqu in myDict[clave][clavesList] if (search.lower()==etiqu.lower()[0:len(search.lower())])] if clavesList else False) :
                        lista+=[myDict[clave]]
                    elif not ([check for check in [str(myDict[clave][claveString]).lower().split(' ') for claveString in clavesString] if (search.lower() in [ checking.lower()[0:len(search)] for checking in check])]):
                        lista+=[myDict[clave]]
            for ok in lista:
                pasar={}
                for key in [keyword for keyword in ok]:
                    pasar.setdefault(key,ok[key])
                ret.setdefault(str(ok[campoClave if (campoClave in [clave for clave in ok]) else [clave for clave in ok][0]]),pasar)
            myDict=ret
    # Pass 3: project each surviving record down to returnKeys.
    if returnKeys:
        ret={}
        lista=[ myDict[clave] for clave in [palabra for palabra in myDict]]
        for ok in lista:
            pasar={}
            for key in returnKeys:
                pasar.setdefault(key,ok[key])
            # NOTE(review): unlike the earlier passes, this key is not
            # wrapped in str() — preserved as found.
            ret.setdefault(ok[campoClave if (campoClave in [clave for clave in ok]) else [clave for clave in ok][0]],pasar)
    return ret
def strToUnicode(strng):
    """Encode a value as the space-separated Unicode code points of its
    ``str()`` form (e.g. 'ab' -> '97 98')."""
    return " ".join(str(ord(ch)) for ch in str(strng))
def unicodeToStr(unicod):
    """Decode a space-separated code-point string (as produced by
    ``strToUnicode``) back into text.

    A blank / whitespace-only input is returned unchanged; the decoded
    result is stripped.
    """
    if not unicod.strip():
        return unicod
    decoded = "".join(chr(int(code)) for code in unicod.split(" "))
    return decoded.strip()
def strToVar(strng,type_class=None):
    """Parse a string back into a Python value (rough inverse of str()).

    Recursively rebuilds dicts, lists, tuples, booleans, ints, floats,
    negative numbers, ``datetime.date`` ("YYYY-MM-DD") and
    ``datetime.datetime`` ("YYYY-MM-DD HH:MM:SS.ffffff") values from their
    textual form; anything unrecognized comes back as a string (matching
    outer quotes/brackets stripped when present).

    Args:
        strng: text to parse (coerced with str()).
        type_class: optional format hint, used as the fallback format when
            automatic detection fails.
    """
    # Separator of today's ISO date string ('-'); used to spot dates below.
    divisorDate=str(datetime.date.today())[4]
    string=str(strng).strip()
    # Opening/closing delimiters used for nesting-level tracking.
    start=["{","(","[","'",'"']
    stop= ["}",")","]","'",'"']
    formato=type_class
    nivel=0
    # Numeric detection. NOTE(review): any string containing '-' is first
    # tried as a float of everything after its first character ("negativo").
    try:
        if "-" in string:
            ret=float(string[1:])
            formato="negativo"
        if ("." in string) and ("-" not in string):
            ret=float(string)
            formato="float"
        if "." not in string and ("-" not in string):
            ret=int(float(string))
            formato="int"
    except Exception as e:
        formato=type_class
    # Date/time detection, attempted only when no numeric format matched.
    if not formato:
        try:
            if (divisorDate in string):
                # "YYYY-MM-DD HH:MM:SS.ffffff" shape check on the time part.
                if (":" in string) and ( ":"==string.split(" ")[1][2]) and ( ":"==string.split(" ")[1][5]) and ( "."==string.split(" ")[1][8]):
                    formato="time"
                elif (-1 < int(string.split(divisorDate)[0])) and (-1 < int(string.split(divisorDate)[1])) and (-1 < int(string.split(divisorDate)[2])):
                    formato="date"
        except Exception as e:
            formato=type_class
    if string=="":
        return string
    elif formato == "date":
        # Returns datetime.date(year, month, day) parsed from "YYYY-MM-DD".
        ret=datetime.date(int(string.split("-")[0]),int(string.split("-")[1]),int(string.split("-")[2]))
        return ret
    elif formato == "time":
        # Returns datetime.datetime(year, month, day, hour, minute, second,
        # microsecond) sliced out of "YYYY-MM-DD HH:MM:SS.ffffff".
        ret=datetime.datetime(int(string[0:4]),int(string[5:7]),int(string[8:10]),int(string[11:13]),int(string[14:16]),int(string[17:19]),int(string[20:]))
        return ret
    elif (string[0]=="{") and (string[-1]=="}"):
        # Returns a dict: find top-level commas/colons ('nivel' tracks
        # bracket and quote nesting depth) and recurse on each fragment.
        ret ={}
        string=string[1:-1]
        comasDivisores=[-1]
        dosPuntos=[]
        comilla=0
        for x in range(0,len(string),1):
            if string[x] in start:
                if string[x] not in ['"',"'"]:
                    nivel += 1
                if string[x]==comilla:
                    # Closing quote of the currently open quoted run.
                    nivel -= 1
                    comilla = 0
                elif (string[x] in ['"',"'"]) and (comilla not in ['"',"'"]):
                    # Opening quote.
                    nivel += 1
                    comilla=string[x]
            elif string[x] in stop:
                nivel -= 1
            if (string[x]==",") and (nivel==0):
                comasDivisores.append(x)
            if (string[x]==":") and (nivel==0):
                dosPuntos.append(x)
        for x in range(1,len(comasDivisores),1):
            ret.setdefault(strToVar(string[comasDivisores[x-1]+1:dosPuntos[x-1]]),strToVar(string[dosPuntos[x-1]+1:comasDivisores[x]]))
        ret.setdefault(strToVar(string[comasDivisores[-1]+1:dosPuntos[-1]]),strToVar(string[dosPuntos[-1]+1:]))
        return ret
    elif (string[0]=="[") and (string[-1]=="]"):
        # Returns a list: same top-level-comma split, recursing per element.
        ret =[]
        string=string[1:-1]
        comasDivisores=[-1]
        comilla=0
        for x in range(0,len(string),1):
            if string[x] in start:
                if string[x] not in ['"',"'"]:
                    nivel += 1
                if string[x]==comilla:
                    nivel -= 1
                    comilla = 0
                elif (string[x] in ['"',"'"]) and (comilla not in ['"',"'"]):
                    nivel += 1
                    comilla=string[x]
            elif string[x] in stop:
                nivel -= 1
            if (string[x]==",") and (nivel==0):
                comasDivisores.append(x)
        for x in range(1,len(comasDivisores),1):
            ret.append(strToVar(string[comasDivisores[x-1]+1:comasDivisores[x]]))
        ret.append(strToVar(string[comasDivisores[-1]+1:]))
        return ret
    elif (string[0]=="(") and (string[-1]==")"):
        # Returns a tuple. NOTE(review): unlike the dict/list branches this
        # one tracks only bracket nesting, not quotes — confirm intended.
        ret =[]
        string=string[1:-1]
        comasDivisores=[-1]
        for x in range(0,len(string),1):
            if string[x] in start:
                nivel += 1
            if string[x] in stop:
                nivel -= 1
            if (string[x]==",") and (nivel==0):
                comasDivisores.append(x)
        for x in range(1,len(comasDivisores),1):
            ret.append(strToVar(string[comasDivisores[x-1]+1:comasDivisores[x]]))
        ret.append(strToVar(string[comasDivisores[-1]+1:]))
        return tuple(ret)
    elif (string=="True") or (string=="False"):
        return True if string=="True" else False
    elif (formato=="negativo"):
        return -1*float(string.replace('-',''))
    elif (formato=="float"):
        # Return a float.
        return float(string)
    elif formato=="int":
        # Return an int.
        return int(float(string))
    elif ((formato!="int") and (formato!="float") and (formato!="date") and (formato!="negativo")):
        # Fallback: return as a string, stripping matching outer delimiters.
        if (string[0] in start) and (string[-1] in stop):
            return string[1:-1]
        else:
            return string
    else:
        # Unreachable given the branches above; kept from the original.
        print("NO HAY FORMATO")
        return string
def escalarHex(h="#ffffff", factor=1.0):
    """Scale the brightness of a ``#RRGGBB`` color by *factor*.

    Each channel is multiplied by ``factor``; a channel whose scaled value
    would exceed 255 is left at its original value (matching the original
    behavior — it is NOT clamped to 255).

    Improvements: channels are parsed with ``int(..., 16)`` so uppercase
    hex digits are accepted (the old lookup table only knew lowercase and
    raised KeyError), and the output uses ``:02x`` formatting.

    Args:
        h: color in '#rrggbb' (or '#RRGGBB') form.
        factor: multiplier; >1 brightens, <1 darkens.

    Returns:
        The scaled color as a lowercase '#rrggbb' string.
    """
    rr = float(int(h[1:3], 16))  # red channel
    gg = float(int(h[3:5], 16))  # green channel
    bb = float(int(h[5:7], 16))  # blue channel
    # Scale each channel, keeping the original value on overflow past 255.
    rr = int(rr * factor) if rr * factor <= 255.0 else int(rr)
    gg = int(gg * factor) if gg * factor <= 255.0 else int(gg)
    bb = int(bb * factor) if bb * factor <= 255.0 else int(bb)
    return f"#{rr:02x}{gg:02x}{bb:02x}"
def gradient(poligono=[], x=0, y=0, height=0, width=0, rotacion=0, color1='#ffffff', color2='#000000'):
    """Compute 1-pixel line segments that draw a linear gradient.

    Interpolates from ``color1`` to ``color2`` over ``height`` strokes when
    ``rotacion == 0`` (vertical strokes marching along x) or over ``width``
    strokes otherwise (horizontal strokes marching along y).

    Improvement: colors are parsed with ``int(..., 16)`` so uppercase hex
    digits are accepted (consistent with the old lowercase-only lookup
    table being replaced elsewhere too).

    Args:
        poligono: polygon gradients are not supported; a truthy value only
            triggers a diagnostic print. The default triangle assigned when
            empty is never used below (kept from the original).
        x, y: origin of the gradient area.
        height: stroke length, and the stroke count when rotacion == 0.
        width: stroke count when rotacion != 0.
        rotacion: 0 for vertical strokes, anything else for horizontal.
        color1, color2: endpoint colors in '#rrggbb' form.

    Returns:
        List of ``[x1, y1, x2, y2, color]`` segments.
    """
    limite = height if rotacion == 0 else width
    if poligono:
        print("No Degradamos Poligonos")
    else:
        poligono = [[100, 0], [150, 50], [50, 50], [100, 0]]
    # Parse both endpoint colors into float channels.
    r1, g1, b1 = (float(int(color1[i:i + 2], 16)) for i in (1, 3, 5))
    r2, g2, b2 = (float(int(color2[i:i + 2], 16)) for i in (1, 3, 5))
    # Per-stroke channel increments.
    paso_r = (r2 - r1) / limite
    paso_g = (g2 - g1) / limite
    paso_b = (b2 - b1) / limite
    lines = []
    for i in range(limite):
        rr = int(r1 + paso_r * i)
        gg = int(g1 + paso_g * i)
        bb = int(b1 + paso_b * i)
        color = f"#{rr:02x}{gg:02x}{bb:02x}"
        if rotacion == 0:
            lines += [[x + i, y, x + i, y + height, color]]
        else:
            # NOTE(review): the rotated case also uses 'height' as the
            # stroke length (width only sets the count) — kept from the
            # original; confirm intended.
            lines += [[x, y + i, x + height, y + i, color]]
    return lines
def buscarFunciones(myFile):
    """Regenerate *myFile* as a Python module describing this script's
    top-level functions, then import and return that module.

    Scans the currently running script for lines of the form
    ``def name(args):`` and writes a module containing
    ``Funciones={'name':[name,(args)], ...}``. Functions whose header
    contains 'self' or '=' (methods / defaulted parameters) are skipped.

    NOTE(review): the import at the bottom is hard-coded to
    ``dictFunciones.Funciones`` while the output is written to *myFile* —
    this only works when myFile resolves to that module path; confirm.
    Side effects: deletes a pre-existing myFile in the CWD, reads the
    running script, writes myFile, and prints the script name and module.
    """
    # Remove a stale generated file from the current directory.
    if myFile in os.listdir():
        os.remove(myFile)
    print(os.path.basename(__file__))
    myScript=os.path.basename(__file__)
    buscar=open(myScript)
    ret=["Funciones={"]
    # Non-def lines are mapped to the sentinel "self" so they are skipped
    # by the 'self not in linea' filter below.
    for linea in [line.strip() if (("def " in line.strip()[0:4]) and (":" in line.strip()[-1])) else "self" for line in buscar]:
        # Locate the (last) parenthesized parameter list on the line.
        start=0
        stop=0
        for c in range(0,len(linea),1):
            if "("==linea[c]:
                start=c
            elif ")"==linea[c]:
                stop=c+1
        if ("self" not in linea) and ("=" not in linea):
            # linea[4:start] is the function name ("def " is 4 chars).
            ret.append("'"+linea[4:start]+"':["+linea[4:start]+','+linea[start:stop]+"]")
            ret.append(',')
    # Replace the trailing comma with the closing brace.
    ret[-1]="}"
    buscar.close()
    func=open(myFile,"w")
    for string in ret:
        func.write(string)
    func.close()
    import dictFunciones.Funciones as retorna
    print(retorna)
    return retorna
def myFuncion(inputType, nameWitget, comando):
    """Demo input-event handler: echoes a recognized test command.

    Args:
        inputType: type of the input widget (unused here).
        nameWitget: name of the widget that fired the event (unused here).
        comando: command identifier; each of the three 'prueba...' test
            commands prints its own name, anything else is ignored.
    """
    comandos_de_prueba = ("pruebaClickDerecho",
                          "pruebaClickIzquierdo",
                          "pruebaClickRueda")
    if comando in comandos_de_prueba:
        print(comando)
"""
Clases para deferentes tipos de proyectos
cesar escobar ieee ras una
"""
class mbarete(object):
    """Top-level application orchestrator.

    Combines the other classes in this module into a scalable, easy-to-grow
    structure for larger projects: discovers sub-project folders, generates
    a ``preload.py`` module that imports them, registers their widgets and
    commands, and drives screen transitions on a GUI object ``G``.

    NOTE(review): ``G`` is expected to expose ``widgets``, ``title``,
    ``update``, ``atrb``, ``command`` and ``loop`` — known only from usage
    here; confirm against the GUI class. ``directorio`` is defined
    elsewhere in this file.
    """
    def __init__(self, pwd='',baseName='',ficheroCRUD='',nombre="Proyecto Mbarete",reset=0,gitignore=[],gitBranch='master',defaultCommand=[],campoAutoincrement='id',cargarScript='mbarete.py',archivosInternos=['__pycache__','__init__.py','media','bibliografia','preload.py'],formato=[''],fullDir=1,renombrarArchivos=0,ignorar=[]):
        # NOTE(review): several defaults are mutable ([], lists) and are
        # shared across instances — left as-is; never mutate them in place.
        super(mbarete, self).__init__()
        self.reset=reset
        # Filesystem helper rooted at the project directory.
        self.ubi=directorio(pwd=pwd,baseName=baseName,formato=formato,fullDir=fullDir,renombrarArchivos=renombrarArchivos,ignorar=ignorar)
        self.archivosInternos=archivosInternos
        self.nombre=nombre
        # CRUD SQL file: explicit name, or derived from the project name.
        if ficheroCRUD:
            self.dirCRUD=self.ubi.pwd+os.path.sep+ficheroCRUD
            self.ficheroCRUD=ficheroCRUD
        else:
            self.dirCRUD=self.ubi.pwd+os.path.sep+nombre.replace(' ','_')+"_CRUD_.sql"
            self.ficheroCRUD=nombre.replace(' ','_')+"_CRUD_.sql"
        self.campoAutoincrement=campoAutoincrement
        self.cargarScript=cargarScript
        # Per-sub-project metadata, filled by getWidget().
        self.info={}
        self.subProyectoActivo=''
        self.manager=''
        self.subtransicionInicio='Inicio'
        self.defaultCommand=defaultCommand
        self.gitBranch=gitBranch
        self.gitignore=gitignore
    def start(self,G):
        """Write .gitignore, show the manager screen and enter the GUI loop."""
        # NOTE(review): this loop body only evaluates and discards the
        # value — it has no effect; possibly leftover code.
        for subProyecto in self.info:
            self.info[subProyecto]
        # Each ignore entry becomes a '*entry*' glob line.
        gitignore=open('.gitignore','w')
        for ig in self.gitignore:
            gitignore.write('*'+ig+'*'+'\n')
        gitignore.close()
        self.transicion(G,self.manager)
        G.loop()
    def getInicio(self):
        """Build the start-menu spec: one transition button per sub-project."""
        ret={}
        for subProyecto in self.info:
            ret[subProyecto]={
                'inputType':'Button',
                'command':'transicion_'+subProyecto,
                'text':self.info[subProyecto]['info']['text']
            }
        return ret
    def transicion(self,G,entra):
        """Switch the visible screen to sub-project *entra*.

        Hides every widget of the currently active sub-project, then shows
        the widgets of *entra* whose tags pass the current subtransicion
        filter, updates the window title and records the new active state.
        """
        if self.subProyectoActivo:
            for ocultar in self.info[self.subProyectoActivo]['widget']:
                G.widgets[self.info[self.subProyectoActivo]['widget'][ocultar]]['visible']=0
        for mostrar in self.info[entra]['widget']:
            # Show the widget only when at least one of its tags is in the
            # current 'aceptar' list and none of its tags is in 'ignorar'.
            if sum([1 if (aceptar in self.info[entra]['etiquetas'][mostrar]) else 0 for aceptar in self.info[entra]['subtransicion']['aceptar']]) > 0 and sum([1 if (ignorar in self.info[entra]['etiquetas'][mostrar]) else 0 for ignorar in self.info[entra]['subtransicion']['ignorar']])==0:
                G.widgets[self.info[entra]['widget'][mostrar]]['visible']=1
        G.title(self.info[entra]['subtransicion']['aceptar'][0]+' - '+self.info[entra]['info']['text']+' - '+self.nombre)
        G.update()
        G.atrb['transicion']=entra
        self.subProyectoActivo=entra
    def subtransicion(self,G,entra,etiquetas):
        """Set the tag filter for *entra* and re-run the transition.

        *etiquetas* is a space-separated tag string: '-tag' entries go to
        the 'ignorar' list, plain/'+tag' entries to 'aceptar' ('default' is
        always accepted).
        """
        e=etiquetas.strip()
        ignorar=[]
        aceptar=[]
        for x in e.split(" "):
            if ("-" in x):
                ignorar+=[x.replace("-",'').strip()]
            else:
                aceptar+=[x.replace("+",'').strip()]
        self.info[entra]['subtransicion']={'aceptar':aceptar+['default'],'ignorar':ignorar}
        G.atrb['subtransicion']=self.info[entra]['subtransicion']
        self.transicion(G,entra)
    def preload(self):
        """Generate ``preload.py``: imports every sub-project folder and a
        ``command`` helper that registers one transition callback each."""
        subProyectos=self.ubi.listaDeCarpetas(self.ubi.pwd,ignorar=self.archivosInternos)
        load=open(self.ubi.pwd+os.path.sep+'preload.py','w')
        load.write(r'proyectos={}'+'\n')
        for subP in subProyectos:
            # Prefer the configured entry script, else a <folder>.py module.
            if self.cargarScript in self.ubi.listaDeFicheros(self.ubi.pwd+os.path.sep+subP,ignorar=self.archivosInternos):
                load.write('from .'+subP+' import '+self.cargarScript[:-3]+' as '+subP+'\n')
            elif subP+'.py' in self.ubi.listaDeFicheros(self.ubi.pwd+os.path.sep+subP,ignorar=self.archivosInternos):
                load.write('from .'+subP+' import '+subP+' as '+subP+'\n')
            load.write("proyectos['"+subP+"']="+subP+'\n')
        load.write("def command(admin,G,manager):"+'\n')
        for subP in subProyectos:
            load.write(" G.command[manager+'_transicion_"+subP+"']=lambda : admin.transicion(G,'"+subP+"')"+'\n')
        load.close()
        # Usage elsewhere: from <project>.preload import proyectos
    def getWidget(self,subProyecto,widgets,info):
        """Register *widgets* under *subProyecto* and return them renamed.

        Prefixes every widget (and non-default command) name with
        '<subProyecto>_', records name/tag/command metadata in ``self.info``
        and marks all widgets hidden until a transition shows them.
        NOTE(review): mutates the passed-in *widgets* and *info* dicts in
        place.
        """
        w=widgets
        info['pwd']=self.ubi.pwd+os.path.sep+subProyecto
        metadata={'subProyecto':subProyecto,'command':{},'widget':{},'info':info,'etiquetas':{},'subtransicion':{'aceptar':[self.subtransicionInicio,'default'],'ignorar':[]}}
        for widget in w:
            # Map the original widget name to its prefixed global name.
            metadata['widget'][w[widget]['name']]=subProyecto+'_'+w[widget]['name']
            metadata['etiquetas'][w[widget]['name']] = w[widget]['etiquetas']
            w[widget]['subProyecto']=subProyecto
            w[widget]['name']=subProyecto+'_'+w[widget]['name']
            w[widget]['visible']=0
            # Table-backed widgets also receive the project-wide default
            # command inputs.
            if 'crearTabla' in w[widget]:
                if w[widget]['crearTabla']:
                    for i in self.defaultCommand:
                        w[widget]['inputs'][i]=self.defaultCommand[i]
            for i in w[widget]['inputs']:
                if (w[widget]['inputs'][i]['inputType'] in ['Button','Checkbutton']) and ('command' in w[widget]['inputs'][i]):
                    # Default commands keep their global name; others are
                    # namespaced with the sub-project prefix.
                    if w[widget]['inputs'][i]['command'] in self.defaultCommand:
                        metadata['command'][w[widget]['inputs'][i]['command']] = w[widget]['inputs'][i]['command']
                    else:
                        metadata['command'][w[widget]['inputs'][i]['command']] = subProyecto+'_'+w[widget]['inputs'][i]['command']
                        w[widget]['inputs'][i]['command']=subProyecto+'_'+w[widget]['inputs'][i]['command']
        self.info[subProyecto]=metadata
        return w
class git(object):
"""docstring for git"""
def __init__(self, branch='',userName=''):
super(git, self).__init__()
self.branch=''
self.userName=''
self.userEmail=''
self.commit=''
self.userGithub=''
self.repoGithub=''
self.branchGithub=''
self.gitignore=''
self.commitGithub=''
class microservicio(object):
    """Network helper that discovers this machine's LAN and WAN addresses.

    (Per the original author's note this class is experimental/unused; its
    goal is to let a designated server machine be reached from other
    machines on a local network.)

    NOTE: the constructor performs network I/O — a UDP "connect" to pick
    the local address and an HTTP request to ``wan_url``.
    """
    def __init__(self, wan_url='https://8.8.8.8'):
        super(microservicio, self).__init__()
        self.lan_ip = self.get_lan_ip()
        self.wan_ip = self.get_wan_ip(wan_url)

    def get_lan_ip(self):
        """Return this host's LAN IP as a string ('127.0.0.1' on failure).

        Bug fixed: ``getsockname()`` returns an ``(ip, port)`` tuple; the
        original stored the whole tuple instead of the IP string.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            # A UDP connect() sends no packets; it only makes the OS pick
            # the outgoing interface/address.
            s.connect(('10.255.255.255', 1))
            ip = s.getsockname()[0]
        except OSError:  # narrowed from the original bare 'except'
            ip = '127.0.0.1'
        finally:
            s.close()
        return ip

    def get_wan_ip(self, wan_url):
        """Fetch and return this host's public IP by reading *wan_url*."""
        # Local import: the file only does 'import urllib', which does not
        # guarantee the urllib.request submodule is loaded.
        import urllib.request
        wan_ip = urllib.request.urlopen(wan_url).read().decode('utf8')
        return wan_ip
class calculadora(object):
"""
En el plano, las coordenadas cartesianas se denominan abscisa y ordenada.
La abscisa es la coordenada horizontal y se representa habitualmente por la letra X,
mientras que la ordenada es la coordenada vertical y se representa por la Y.
a continuacion informacion de algunas librerias importantes
LIBRERIA MATH
import math
Traducido de https://www.w3schools.com/python/module_math.asp
Python tiene un módulo incorporado que puede usar para tareas matemáticas.
El módulo de matemáticas tiene un conjunto de métodos y constantes.
Constantes matemáticas
math.e Devuelve el número de Euler (2.7182 ...)
math.inf Devuelve un infinito positivo en coma flotante
math.nan Devuelve un valor NaN (no un número) de coma flotante
math.pi Devuelve PI (3.1415 ...)
math.tau Devuelve tau (6.2831 ...)
Descripción del método
math.acos () Devuelve el arco coseno de un número
math.acosh () Devuelve el coseno hiperbólico inverso de un número
math.asin () Devuelve el arco seno de un número
math.asinh () Devuelve el seno hiperbólico inverso de un número
math.atan () Devuelve el arco tangente de un número en radianes
math.atan2 () Devuelve el arco tangente de y / x en radianes
math.atanh () Devuelve la tangente hiperbólica inversa de un número
math.ceil () Redondea un número al entero más cercano
math.comb () Devuelve el número de formas de elegir k elementos de n elementos sin repetición ni orden
math.copysign () Devuelve un flotante que consta del valor del primer parámetro y el signo del segundo parámetro
math.cos () Devuelve el coseno de un número
math.cosh () Devuelve el coseno hiperbólico de un número
math.degrees () Convierte un ángulo de radianes a grados
math.dist () Devuelve la distancia euclidiana entre dos puntos (pyq), donde pyq son las coordenadas de ese punto
math.erf () Devuelve la función de error de un número
math.erfc () Devuelve la función de error complementaria de un número
math.exp () Devuelve E elevado a la potencia de x
math.expm1 () Devuelve Ex - 1
math.fabs () Devuelve el valor absoluto de un número
math.factorial () Devuelve el factorial de un número
math.floor () Redondea un número hacia abajo al entero más cercano
math.fmod () Devuelve el resto de x / y
math.frexp () Devuelve la mantisa y el exponente de un número especificado
math.fsum () Devuelve la suma de todos los elementos en cualquier iterable (tuplas, matrices, listas, etc.)
math.gamma () Devuelve la función gamma en x
math.gcd () Devuelve el máximo común divisor de dos enteros
math.hypot () Devuelve la norma euclidiana
math.isclose () Comprueba si dos valores están cerca uno del otro, o no
math.isfinite () Comprueba si un número es finito o no
math.isinf () Comprueba si un número es infinito o no
math.isnan () Comprueba si un valor es NaN (no un número) o no
math.isqrt () Redondea un número de raíz cuadrada hacia abajo al entero más cercano
math.ldexp () Devuelve el inverso de math.frexp () que es x * (2 ** i) de los números dados x e i
math.lgamma () Devuelve el valor log gamma de x
math.log () Devuelve el logaritmo natural de un número o el logaritmo de un número en base
math.log10 () Devuelve el logaritmo en base 10 de x
math.log1p () Devuelve el logaritmo natural de 1 + x
math.log2 () Devuelve el logaritmo en base 2 de x
math.perm () Devuelve el número de formas de elegir k elementos de n elementos con orden y sin repetición
math.pow () Devuelve el valor de x elevado a y
math.prod () Devuelve el producto de todos los elementos en un iterable
math.radians () Convierte un valor de grado en radianes
math.remainder () Devuelve el valor más cercano que puede hacer que el numerador sea completamente divisible por el denominador
math.sin () Devuelve el seno de un número
math.sinh () Devuelve el seno hiperbólico de un número
math.sqrt () Devuelve la raíz cuadrada de un número
math.tan () Devuelve la tangente de un número
math.tanh () Devuelve la tangente hiperbólica de un número
math.trunc () Devuelve las partes enteras truncadas de un número
LIBRERIA STATISTICS
import statistics
TRADUCIDI DE https://www.w3schools.com/python/module_statistics.asp
Python tiene un módulo incorporado que puede usar para calcular estadísticas matemáticas de datos numéricos.
El módulo de estadísticas era nuevo en Python 3.4.
Descripción del método
statistics.harmonic_mean () Calcula la media armónica (ubicación central) de los datos dados
statistics.mean () Calcula la media (promedio) de los datos dados
statistics.median () Calcula la mediana (valor medio) de los datos dados
statistics.median_grouped () Calcula la mediana de datos continuos agrupados
statistics.median_high () Calcula la mediana alta de los datos dados
statistics.median_low () Calcula la mediana baja de los datos dados
statistics.mode () Calcula el modo (tendencia central) de los datos numéricos o nominales dados
statistics.pstdev () Calcula la desviación estándar de una población completa
statistics.stdev () Calcula la desviación estándar de una muestra de datos
statistics.pvariance () Calcula la varianza de una población completa
statistics.variance () Calcula la varianza a partir de una muestra de datos
"""
def __init__(self, ec={},constantes={},extras={},config={'angulos':'radianes'}):
super(calculadora, self).__init__()
self.error=0.0000001
self.masInf=999999999999.9
self.menosInf=-999999999999.9
self.alfa=0.0
self.factoresDeEscala=[1.0,1.0]
self.trasladarOrigen=[0.0,0.0]
self.escalarOrigen=[0.0,0.0]
self.rotarOrigen=[0.0,0.0]
self.historial={}
self.ec={}
self.ecuaciones=[]
self.constantes={'e':math.e,'pi':math.pi,'g':9.8182}
self.extras=extras
self.operadores=['w(','sen(','cos(','tg(','log(','ln(','lambert(','dy(','sec(','cosec(','cotag(','arcsen(','arccos(','arctg(','round(','floor(','ceil(','signo(','abs(']
self.simbolos=['*','(',')','/','+','-','.','%']
def update(self, ec={},constantes={},extras={}):
if constantes:
for C in constantes:
self.constantes[C]=constantes[C]
def setEcuacion(self, nombre, string='', variable='x',constantes={},extras={}):
if constantes:
self.update(constantes=constantes)
self.ec[nombre]=self.strToMath(string=string, variable=variable)
self.ecuaciones=[ecu for ecu in self.ec]
print("Se agrego Exitosamente: '",nombre,"':",self.ec[nombre](variable,p=1))
def inversa(self,ordenada,f=None,error=None):
ordenada=float(ordenada)
mayor=0.0
menor=0.0
abscisa=0.0
while f(mayor)<ordenada:
mayor += 10.0
while f(menor)>ordenada:
menor -= 1.0
while ((ordenada-f(abscisa))**(2))**(1/2) > error:
if ordenada<f((mayor+menor)/2.0):
mayor=(mayor+menor)/2.0
elif ordenada>f((mayor+menor)/2.0):
menor=(mayor+menor)/2.0
abscisa=(mayor+menor)/2.0
return abscisa
 def strToMath(self,string='',variable='x',dy=0,p=0,c=None,decimales=4,signo=None,v=0,composicion=0):
  """Recursively compile an infix expression string into a callable.

  Returns a closure f(x, dy=..., p=..., decimales=..., mostrarSigno=...) that
  evaluates the expression at x (p=0), pretty-prints it as a string (p=1),
  or does the same for its symbolic derivative (dy=1).  Recognised pieces:
  the literal ``variable``, names in ``self.constantes``, numeric literals,
  function names from ``self.operadores``, previously registered equations
  in ``self.ecuaciones``, and the infix operators + - * / % **.

  NOTE(review): several branches ('w(', 'dy(', 'arcsen(', 'arccos(',
  'arctg(', 'signo(', 'entero(', 'decimal(') are stubs, and the 'round('
  branch references ``defecto``/``math.round`` which do not exist here, so a
  compiled round() raises at evaluation time — confirm before relying on it.
  """
  if not v:
   print('validando',string,composicion)
   v=1
  composicion += 1
  nivel=0
  esSuma=0
  signoSuma=[0]
  esProducto=0
  signoProducto=[0]
  esDivision=0
  signoDivision=[0]
  esExponente=0
  signoExponente=[0]
  esResto=0
  signoResto=[0]
  operador=1
  monomio=1
  parentesis=1
  string=string.strip()
  # first pass: decide whether the whole string is a signed monomial and/or
  # fully wrapped in one pair of parentheses (nivel tracks paren depth)
  for x in range(0,len(string),1):
   if string[x]=='(':
    nivel += 1
   if string[x]==')':
    nivel -= 1
   if string[x] in '-+' and nivel==0:
    if x>0:
     monomio=0
   if string[x] in '-+*/%' and nivel==0:
    if x>0:
     parentesis=0
  if monomio:
   if string[0] in '+' and nivel==0:
    sig= 1.0
    string=string[1:]
   elif string[0] in '-' and nivel==0:
    sig=-1.0
    string=string[1:]
   else:
    sig= 1.0
   string=string.strip()
  else:
   sig=1.0
  if parentesis:
   if ('(' in string[0]) and (')' in string[-1]):
    string=string[1:-1]
    string=string.strip()
  # second pass: repeat after stripping the sign / outer parentheses
  monomio=1
  parentesis=1
  string=string.strip()
  for x in range(0,len(string),1):
   if string[x]=='(':
    nivel += 1
   if string[x]==')':
    nivel -= 1
   if string[x] in '-+' and nivel==0:
    if x>0:
     monomio=0
   if string[x] in '-+*/%' and nivel==0:
    if x>0:
     parentesis=0
  if monomio:
   if string[0] in '+' and nivel==0:
    sig= 1.0*sig
    string=string[1:]
   elif string[0] in '-' and nivel==0:
    sig=-1.0*sig
    string=string[1:]
   string=string.strip()
  if parentesis:
   if ('(' in string[0]) and (')' in string[-1]):
    string=string[1:-1]
    string=string.strip()
  # third pass: record the positions of every top-level infix operator
  for x in range(0,len(string),1):
   if string[x]=='(':
    nivel += 1
   if string[x]==')':
    nivel -= 1
   if string[x] in '-+' and nivel==0:
    if x>0:
     esSuma=1
     signoSuma += [x]
     if not monomio:
      operador=0
   if (string[x] == '*') and ( '*' != string[x+1]) and ( '*' != string[x-1]) and nivel==0:
    esProducto=1
    signoProducto += [x]
    operador=0
   if string[x] in '/' and nivel==0:
    esDivision=1
    signoDivision += [x]
    operador=0
   if (string[x] == '*') and ( '*' == string[x+1]) and nivel==0:
    esExponente=1
    signoExponente += [x]
    operador=0
   if (string[x] == '%') and nivel==0:
    esResto=1
    signoResto += [x]
    operador=0
  if operador:
   # no top-level arithmetic: try named functions / registered equations
   x=0
   coincide=[op for op in self.operadores+self.ecuaciones if op in (string if len(op)<len(string) else '')]
   if coincide:
    # comas collects top-level comma positions (used by log's optional base)
    comas=[0]
    for x in range(0,len(string),1):
     if string[x]=='(':
      nivel += 1
     if string[x]==')':
      nivel -= 1
     if string[x] in ',' and nivel==0:
      comas += [x]
    if string[:len('w(')] in 'w(' and nivel==0:
     pass
    if string[:len('dy(')] in 'dy(' and nivel==0:
     pass
    if string[:len('log(')] in 'log(' and nivel==0:
     #math.log(x,base)
     print('log',string)
     parteReal=self.strToMath(string=string[len('log'):comas[1]],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
     if len(comas)==1:
      base=self.strToMath(string='10.0',dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
     else:
      base=self.strToMath(string=string[comas[1]+1:-1],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
     def logaritmoNatural(x,dy=dy,p=p,decimales=decimales,signo=sig,mostrarSigno=0,parteReal=parteReal,base=base):
      if mostrarSigno:
       s='+' if signo>0.0 else '-'
      else:
       s=''
      if dy:
       if p:
        numerador='(('+parteReal(x,p=p,dy=1,decimales=decimales,mostrarSigno=1)+'/'+parteReal(x,p=p,dy=0,decimales=decimales,mostrarSigno=1)+')-('+parteReal(x,p=p,dy=1,decimales=decimales,mostrarSigno=1)+'/'+parteReal(x,p=p,dy=0,decimales=decimales,mostrarSigno=1)+'))'
        return s+'('+numerador+'/'+parteReal(x,p=p,dy=0,decimales=decimales,mostrarSigno=1)+')'
       else:
        numerador=signo*((parteReal(x,p=p,dy=1,decimales=decimales,mostrarSigno=1)/parteReal(x,p=p,dy=0,decimales=decimales,mostrarSigno=1))-(base(x,p=p,dy=1,decimales=decimales,mostrarSigno=1)/base(x,p=p,dy=0,decimales=decimales,mostrarSigno=1)))
        return numerador/((math.log(base(x,p=p,dy=1,decimales=decimales,mostrarSigno=1)))**2)
      else:
       if p:
        return s+'ln('+parteReal(x,p=p,decimales=decimales,mostrarSigno=1)+','+base(x,p=p,decimales=decimales,mostrarSigno=1)+')'
       else:
        return signo*math.log(parteReal(x),base(x))
     return logaritmoNatural
    if string[:len('ln(')] in 'ln(' and nivel==0:
     #math.log(x,base)
     print('ln',string)
     parteReal=self.strToMath(string=string[len('ln'):],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
     def logaritmoNatural(x,dy=dy,p=p,decimales=decimales,signo=sig,mostrarSigno=0,parteReal=parteReal):
      if mostrarSigno:
       s='+' if signo>0.0 else '-'
      else:
       s=''
      if dy:
       if p:
        return s+'('+parteReal(x,p=p,dy=1,decimales=decimales,mostrarSigno=1)+'/'+parteReal(x,p=p,dy=0,decimales=decimales,mostrarSigno=1)+')'
       else:
        return signo*(parteReal(x,p=p,dy=1,decimales=decimales,mostrarSigno=1)/parteReal(x,p=p,dy=0,decimales=decimales,mostrarSigno=1))
      else:
       if p:
        return s+'ln('+parteReal(x,p=p,decimales=decimales,mostrarSigno=1)+')'
       else:
        return signo*math.log(parteReal(x))
     return logaritmoNatural
    if string[:len('abs(')] in 'abs(' and nivel==0:
     #math.fabs(-66.43)
     print('abs',string)
     valor=self.strToMath(string=string[len(''):],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
     def valorAbsoluto(x,dy=dy,p=p,decimales=decimales,signo=sig,mostrarSigno=0,u=valor):
      if mostrarSigno:
       s='+' if signo>0.0 else '-'
      else:
       s=''
      if dy:
       if p:
        return s+'(('+valor(x,p=p,dy=0,decimales=decimales,mostrarSigno=1)+'/abs('+valor(x,p=p,dy=0,decimales=decimales,mostrarSigno=1)+'))*('+valor(x,p=p,dy=1,decimales=decimales,mostrarSigno=1)+'))'
       else:
        return signo*((valor(x,p=p,dy=0,decimales=decimales)/math.fabs(valor(x,p=p,dy=0,decimales=decimales)))*valor(x,p=p,dy=1,decimales=decimales))
      else:
       if p:
        return s+'abs('+valor(x,p=p,decimales=decimales,mostrarSigno=1)+')'
       else:
        return signo*math.fabs(valor(x))
     return valorAbsoluto
    if string[:len('tg(')] in 'tg(' and nivel==0:
     #math.tan()
     print('tg',string)
     radian=self.strToMath(string=string[len('tg'):],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
     def tangente(x,dy=dy,p=p,decimales=decimales,signo=sig,mostrarSigno=0,radian=radian):
      if mostrarSigno:
       s='+' if signo>0.0 else '-'
      else:
       s=''
      if dy:
       if p:
        return s+'((1+tg('+radian(x,dy=0,p=p,decimales=decimales,mostrarSigno=1)+')**2)*('+radian(x,dy=dy,p=p,decimales=decimales,mostrarSigno=1)+'))'
       else:
        return signo*(1+math.tan(radian(x))**2)*radian(x,dy=dy)
      else:
       if p:
        return s+'tg('+radian(x,dy=dy,p=p,decimales=decimales,mostrarSigno=1)+')'
       else:
        return signo*math.tan(radian(x))
     return tangente
    if string[:len('sen(')] in 'sen(' and nivel==0:
     #math.sin()
     print('sen',string)
     radian=self.strToMath(string=string[len('sen'):],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
     def seno(x,dy=dy,p=p,decimales=decimales,signo=sig,mostrarSigno=0,radian=radian):
      if mostrarSigno:
       s='+' if signo>0.0 else '-'
      else:
       s=''
      if dy:
       if p:
        return s+'cos('+radian(x,dy=0,p=p,decimales=decimales,mostrarSigno=1)+')*('+radian(x,dy=dy,p=p,decimales=decimales,mostrarSigno=1)+')'
       else:
        return signo*math.cos(radian(x))*radian(x,dy=dy)
      else:
       if p:
        return s+'sen('+radian(x,dy=dy,p=p,decimales=decimales,mostrarSigno=1)+')'
       else:
        return signo*math.sin(radian(x))
     return seno
    if string[:len('cos(')] in 'cos(' and nivel==0:
     #math.cos()
     print('cos',string)
     radian=self.strToMath(string=string[len('cos'):],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
     def coseno(x,dy=dy,p=p,decimales=decimales,signo=sig,mostrarSigno=0,radian=radian):
      if mostrarSigno:
       s='+' if signo>0.0 else '-'
      else:
       s=''
      if dy:
       if p:
        # derivative of cos flips the sign, hence the inverted s here
        s=('-' if signo>0.0 else '+') if mostrarSigno else ''
        return s+'sen('+radian(x,dy=0,p=p,decimales=decimales,mostrarSigno=1)+')*('+radian(x,dy=dy,p=p,decimales=decimales,mostrarSigno=1)+')'
       else:
        return -1*signo*math.sin(radian(x))*radian(x,dy=dy,p=p,decimales=decimales,mostrarSigno=1)
      else:
       if p:
        return s+'cos('+radian(x,dy=dy,p=p,decimales=decimales,mostrarSigno=1)+')'
       else:
        return signo*math.cos(radian(x))
     return coseno
    if string[:len('arcsen(')] in 'arcsen(' and nivel==0:
     #math.asin()
     pass
    if string[:len('arccos(')] in 'arccos(' and nivel==0:
     #math.acos()
     pass
    if string[:len('arctg(')] in 'arctg(' and nivel==0:
     #math.atan()
     pass
    if string[:len('signo(')] in 'signo(' and nivel==0:
     pass
    if string[:len('entero(')] in 'entero(' and nivel==0:
     pass
    if string[:len('decimal(')] in 'decimal(' and nivel==0:
     pass
    if string[:len('round(')] in 'round(' and nivel==0:
     print('round',string)
     redondeo=self.strToMath(string=string[len('round'):],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
     def redondear(x,dy=dy,p=p,decimales=decimales,signo=sig,mostrarSigno=0,redondeo=redondeo):
      if mostrarSigno:
       s='+' if signo>=0.0 else '-'
      else:
       s=''
      if dy:
       if p:
        return '0.0'
       else:
        return 0.0
      else:
       # NOTE(review): 'defecto' is undefined here (should be 'redondeo')
       # and math.round does not exist — this branch raises when called.
       if p:
        return s+'round('+defecto(x,p=p,decimales=decimales,mostrarSigno=1)+')'
       else:
        return signo*math.round(defecto(x))
     return redondear
    if string[:len('floor(')] in 'floor(' and nivel==0:
     print('floor',string)
     defecto=self.strToMath(string=string[len('floor'):],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
     def redondearHaciaAbajo(x,dy=dy,p=p,decimales=decimales,signo=sig,mostrarSigno=0,defecto=defecto):
      if mostrarSigno:
       s='+' if signo>0.0 else '-'
      else:
       s=''
      if dy:
       if p:
        return '0.0'
       else:
        return 0.0
      else:
       if p:
        return s+'floor('+defecto(x,p=p,decimales=decimales,mostrarSigno=1)+')'
       else:
        return signo*math.floor(defecto(x))
     return redondearHaciaAbajo
    if string[:len('ceil(')] in 'ceil(' and nivel==0:
     print('ceil',string)
     exceso=self.strToMath(string=string[len('ceil'):],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
     def redondearHaciaArriba(x,dy=dy,p=p,decimales=decimales,signo=sig,mostrarSigno=0,exceso=exceso):
      if mostrarSigno:
       s='+' if signo>0.0 else '-'
      else:
       s=''
      if dy:
       if p:
        return '0.0'
       else:
        return 0.0
      else:
       if p:
        return s+'ceil('+exceso(x,p=p,decimales=decimales,mostrarSigno=1)+')'
       else:
        return signo*math.ceil(exceso(x))
     return redondearHaciaArriba
    if [op for op in self.ecuaciones if op in (string if len(op)<len(string) else '')] and nivel==0:
     # composition with a previously registered equation: f(g(x))
     miEcuacion=''
     for op in self.ecuaciones:
      if op in (string[:len(op)] if len(op)<len(string) else ''):
       miEcuacion=op
     print(miEcuacion,string)
     myF=self.ec[miEcuacion]
     ecuacionInterna=self.strToMath(string=string[len(miEcuacion):],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
     def f(x,dy=dy,p=p,decimales=decimales,signo=sig,mostrarSigno=0,miEcuacion=miEcuacion,myF=myF,ecuacionInterna=ecuacionInterna):
      #f(x,dy=dy,p=p,decimales=decimales,mostrarSigno=0)
      if mostrarSigno:
       s='+' if signo>0.0 else '-'
      else:
       s=''
      if dy:
       if p:
        return s+'('+myF(ecuacionInterna(x,p=p,dy=0,decimales=decimales,mostrarSigno=1),p=p,dy=1,decimales=decimales,mostrarSigno=0)+')*('+ecuacionInterna(x,p=p,dy=0,decimales=decimales,mostrarSigno=1)+')'
       else:
        ret = myF(ecuacionInterna(x,p=p,dy=0,decimales=decimales,mostrarSigno=1),p=p,dy=1,decimales=decimales,mostrarSigno=0)*ecuacionInterna(x,p=p,dy=0,decimales=decimales,mostrarSigno=1)
        return signo*ret
      else:
       if p:
        return s+myF('('+ecuacionInterna(x,p=p,decimales=decimales,mostrarSigno=0)+')',p=p,decimales=decimales)
       else:
        return signo*myF(ecuacionInterna(x))
     return f
   else:
    esConstante=1
    """
    if string[:len('')] in '' and nivel==0:
     print('',string)
     =strToMath(string=string[len(''):],dy=dy,p=p,decimales=decimales,v=v)
     def op(x,dy=dy,p=p,decimales=decimales,signo=sig,mostrarSigno=0):
      #f(x,dy=dy,p=p,decimales=decimales,mostrarSigno=0)
      if mostrarSigno:
       s='+' if signo>0.0 else '-'
      else:
       s=''
      if dy:
       if p:
        return s
       else:
        ret =
        return signo*ret
      else:
       if p:
        return s
       else:
        return signo*
     return op
    """
  else:
   # the string is a bare constant: a named one or a numeric literal
   c=None
   if string in self.constantes:
    c=self.constantes[string]
   elif sum([1 for l in string if ((48<=ord(l) and ord(l)<=57) or (ord(l)==46))])==len(string):
    c=float(string)
   if c:
    print('constante',c)
    def constante(x,dy=dy,p=p,c=c,decimales=decimales,signo=sig,mostrarSigno=0):
     if mostrarSigno:
      s='+' if signo>0.0 else '-'
     else:
      s=''
     if dy:
      if p:
       return '0.0'
      else:
       return 0
     else:
      if p:
       c=str(c)
       #[:decimales if len(c)>decimales else None]
       return s+c
      else:
       return c*signo
    return constante
  # the string is exactly the free variable
  if string==variable:
   print('variable',string,sig)
   def variable(x,dy=dy,p=p,decimales=decimales,signo=sig,mostrarSigno=0):
    if mostrarSigno:
     s='+' if signo>=0.0 else '-'
    else:
     s=''
    if dy:
     if p:
      return '1.0'
     else:
      return 1.0
    else:
     if p:
      return s+str(x)[:decimales]
     else:
      return x*signo
   return variable
  else:
   #parentheses, exponents/radicals, multiplication/division, addition/subtraction
   if esSuma:
    print('suma',string,signoSuma)
    if len(signoSuma)==1:
     sumandos=[self.strToMath(string=string[1:],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)]
    else:
     sumandos=[]
     for sumando in range(0,len(signoSuma)-1,1):
      sumandos+=[self.strToMath(string=string[signoSuma[sumando]:signoSuma[sumando+1]],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)]
     sumandos+=[self.strToMath(string=string[signoSuma[-1]:],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)]
    def suma(x,dy=dy,p=p,decimales=decimales,sumandos=sumandos,signo=sig,mostrarSigno=0):
     if mostrarSigno:
      s='+' if signo>0.0 else '-'
     else:
      s=''
     if dy:
      if p:
       ret = s
       for sumando in sumandos:
        ret += sumando(x,p=p,dy=dy,decimales=decimales,mostrarSigno=1)
       return ret
      else:
       return signo*sum([sumando(x,dy=dy) for sumando in sumandos])
     else:
      if p:
       ret = ''
       for sumando in sumandos:
        ret += sumando(x,p=p,decimales=decimales,mostrarSigno=1)
       return ret
      else:
       ret = 0.0
       for sumando in sumandos:
        ret += sumando(x)
       return signo*ret
    return suma
   elif esDivision:
    print('division',string,signoDivision)
    signoDivision+=[]
    numerador=self.strToMath(string=string[0:signoDivision[1]],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
    denominador=self.strToMath(string=string[signoDivision[1]+1:],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
    def division(x,dy=dy,p=p,decimales=decimales,numerador=numerador,denominador=denominador,signo=sig,mostrarSigno=0):
     if mostrarSigno:
      s='+' if signo>0.0 else '-'
     else:
      s=''
     if dy:
      if p:
       return s+'(('+numerador(x,p=p,dy=1,decimales=decimales)+')*('+denominador(x,p=p,dy=0,decimales=decimales)+')-('+numerador(x,p=p,dy=0,decimales=decimales)+')*('+denominador(x,p=p,dy=1,decimales=decimales)+'))/(('+denominador(x,p=p,dy=0,decimales=decimales)+')**2)'
      else:
       return signo*((numerador(x,p=p,dy=1,decimales=decimales)*denominador(x,p=p,dy=0,decimales=decimales))-(numerador(x,p=p,dy=0,decimales=decimales)*denominador(x,p=p,dy=1,decimales=decimales)))/(denominador(x,p=p,dy=0,decimales=decimales)**2)
     else:
      if p:
       return s+'('+numerador(x,p=p,dy=0,decimales=decimales)+')/('+denominador(x,p=p,dy=0,decimales=decimales)+')'
      else:
       return signo*numerador(x,dy=0,decimales=decimales)/denominador(x,dy=0,decimales=decimales)
    return division
   elif esResto:
    print('resto',string,signoResto)
    signoResto+=[]
    numerador=self.strToMath(string=string[0:signoResto[1]],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
    denominador=self.strToMath(string=string[signoResto[1]+1:],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
    def restoPorDefecto(x,dy=dy,p=p,decimales=decimales,numerador=numerador,denominador=denominador,signo=sig,mostrarSigno=0):
     if mostrarSigno:
      s='+' if signo>0.0 else '-'
     else:
      s=''
     if dy:
      if p:
       return ''
      else:
       return None
     else:
      if p:
       return s+'('+numerador(x,p=p,dy=0,decimales=decimales)+'%'+denominador(x,p=p,dy=0,decimales=decimales)+')'
      else:
       return signo*numerador(x,dy=0,decimales=decimales)%denominador(x,dy=0,decimales=decimales)
    return restoPorDefecto
   elif esProducto:
    print('producto',string,signoProducto)
    factores=[]
    for factor in range(0,len(signoProducto)-1,1):
     factores+=[self.strToMath(string=string[signoProducto[factor]+(1 if 0<factor else 0 ):signoProducto[factor+1]],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)]
    factores+=[self.strToMath(string=string[signoProducto[-1]+1:],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)]
    def producto(x,dy=dy,p=p,decimales=decimales,signo=sig,factores=factores,mostrarSigno=0):
     if mostrarSigno:
      s='+' if signo>0.0 else '-'
     else:
      s=''
     if dy:
      # product rule: sum over each factor differentiated once
      if p:
       ret=s+'('
       factor='('
       for derivar in range(0,len(factores),1):
        factor=factores[derivar](x,dy=1,p=p,decimales=decimales)
        for escalar in range(0,len(factores),1):
         if not (derivar == escalar):
          factor += '*'+factores[escalar](x,dy=0,p=p,decimales=decimales)
        ret += factor+')+'
       return ret[:-1]+')'
      else:
       ret=0.0
       factor=1.0
       for derivar in range(0,len(factores),1):
        factor=factores[derivar](x,dy=1,p=p,decimales=decimales)
        for escalar in range(0,len(factores),1):
         if not (derivar == escalar):
          factor*=factores[escalar](x,dy=0,p=p,decimales=decimales)
        ret += factor
       return signo*ret
     else:
      if p:
       ret = s+'('+factores[0](x,dy=0,p=p,decimales=decimales)
       for factor in factores[1:]:
        ret += '*'+factor(x,dy=0,p=p,decimales=decimales)
       return ret+')'
      else:
       ret = 1.0
       for factor in factores:
        ret *= factor(x,dy=0,p=0)
       return signo*ret
    return producto
   elif esExponente:
    print('exponente',string,signoExponente)
    signoExponente+=[]
    base=self.strToMath(string=string[0:signoExponente[1]],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
    exponente=self.strToMath(string=string[signoExponente[1]+2:],dy=dy,p=p,decimales=decimales,v=v,composicion=composicion)
    def potencia(x,dy=dy,p=p,decimales=decimales,signo=sig,base=base,exponente=exponente,mostrarSigno=0):
     if mostrarSigno:
      s='+' if signo>0.0 else '-'
     else:
      s=''
     if dy:
      if p:
       return s+'((('+exponente(x,dy=0,p=p,decimales=decimales)+'*('+base(x,dy=0,p=p,decimales=decimales)+'**('+exponente(x,dy=0,p=p,decimales=decimales)+'-1))*'+base(x,dy=1,p=p,decimales=decimales)+') + ('+exponente(x,dy=1,p=p,decimales=decimales)+'*('+base(x,dy=0,p=p,decimales=decimales)+'**'+exponente(x,dy=0,p=p,decimales=decimales)+')*ln('+base(x,dy=0,p=p,decimales=decimales)+'))))'
      else:
       ret = exponente(x,dy=0,p=p,decimales=decimales)*(base(x,dy=0,p=p,decimales=decimales)**(exponente(x,dy=0,p=p,decimales=decimales)-1))*base(x,dy=1,p=p,decimales=decimales) + exponente(x,dy=1,p=p,decimales=decimales)*(base(x,dy=0,p=p,decimales=decimales)**exponente(x,dy=0,p=p,decimales=decimales))*math.log(base(x,dy=0,p=p,decimales=decimales))
       return signo*ret
     else:
      if p:
       return s+base(x,p=p,decimales=decimales)+'**('+exponente(x,p=p,decimales=decimales,mostrarSigno=1)+')'
      else:
       return signo*base(x)**exponente(x)
    return potencia
def escalar(self, o=None,factor=None):
if not o:
o=self.escalarOrigen
if not factor:
factor=self.factoresDeEscala
def ecuacionEscalada(abscisa,factor=factor,o=o,f=self.f,extras=self.extras):
ordenada=factor[0]*(f(((abscisa-o[0])/factor[0])+o[0],extras=extras)-o[1])+o[1]
return ordenada
self.extras['string'] = '('+str(factor[0])+'*('+self.extras['string'].replace('x','(((x-('+str(o[0])+'))/'+str(factor[0])+')'+str(o[0])+')')+'-('+str(o[1])+'))+('+o[1]+'))'
self.escalarOrigen=o
self.factoresDeEscala=factor
self.f = ecuacionEscalada
self.historial[len(self.historial)] = {'escalar':{'o':o,'factor':factor}}
def trasladar(self, o=None):
if not o:
o=self.trasladarOrigen
def ecuacionTrasladada(abscisa,o=o,f=self.f,extras=self.extras):
ordenada=f(abscisa+o[0],extras=extras)-o[1]
return ordenada
self.extras['string'] = '('+self.extras['string'].replace('x','(x+'+str(o[0])+')')+'-('+str(o[1])+'))'
self.f = ecuacionTrasladada
self.trasladarOrigen=o
self.historial[len(self.historial)] = {'trasladar':{'o':o}}
def rotar(self, o=None,alfa=None):
if not o:
o=self.rotarOrigen
if not alfa:
alfa=self.alfa
#sobre el punto 'o', rotara el angulo 'alfa', en sentido antiorario
self.extras['alfa']=alfa
def ecuacionRotada(abscisa,alfa=alfa,o=o,f=self.f,extras=self.extras):
abscisa = self.inversa(abscisa,f= lambda abscisa : (abscisa-o[0])*math.cos(alfa)-(f(abscisa,extras=extras)-o[1])*math.sin(alfa)+o[0])
ordenada = (abscisa-o[0])*math.sen(alfa)+(f(abscisa,extras=extras)-o[1])*math.cos(alfa)+o[1]
return ordenada
#self.extras['string'] = '(((x-('+str(o[0])+'))*('+str(math.sin(alfa))+'))+(('+self.extras['string']+'-('+str(o[1])+'))*('+str(math.cos(alfa))+'))+('+str(o[1])+'))'
self.f = ecuacionRotada
self.alfa=alfa
self.rotarOrigen=o
self.historial[len(self.historial)] = {'rotar':{'o':o,'alfa':alfa}}
 def deshacer(self):
  # TODO: undo is not implemented — self.historial records every transform,
  # but replaying/reverting it is still pending; this only announces itself.
  print('deshacer')
 def rehacer(self):
  # TODO: redo is not implemented — placeholder counterpart of deshacer().
  print('rehacer')
class directorio(object):
 """Recursive directory indexer.

 Walks ``pwd`` (or the current working directory), builds a nested dict of
 sub-folders and files filtered by extension category, prints the tree, and
 assigns every accepted file an integer ID in ``self.index``.

 Constructor parameters:
  pwd               -- full path to scan ('C:\\rutacompleta\\carpetadelprograma').
  baseName          -- folder name below cwd to scan ('carpetadelprograma').
  formato           -- categories to keep: ['video','audio','imagen','script',
                       'documento']; empty/unknown categories accept everything.
  fullDir           -- store absolute paths (1) or paths relative to the root (0).
  renombrarArchivos -- when 1, rename files whose names contain non-latin
                       characters, avoiding later read/write errors.
  ignorar           -- file/folder names to skip entirely.

 Bugs fixed: formato() could fall off the end and return None; carpetas()
 compared a one-element list against the format list so no file ever matched
 a requested format; mutable default arguments in __init__.
 """
 def __init__(self,pwd='',baseName='',formato=None,fullDir=1,renombrarArchivos=0,ignorar=None):
  super(directorio, self).__init__()
  # fresh per-instance defaults (avoid shared mutable default arguments)
  if formato is None:
   formato=['']
  if ignorar is None:
   ignorar=[]
  self.s=os.path.sep
  if baseName:
   self.baseName=baseName
   self.pwd= pwd if pwd else (os.getcwd()+self.s+self.baseName)
  else:
   self.baseName=os.getcwd().split(self.s)[-1]
   self.pwd=os.getcwd()
  # names starting with these get a leading space prepended when renaming
  self.prefijos=['N','U']
  self.ID=0
  self.index={}
  self.full=fullDir
  self.renombrarArchivos=renombrarArchivos
  self.ignorar=ignorar
  self.formatos=self.formato(formato)
  self.directorio=self.carpetas(self.pwd,self.baseName)
  self.tabla(self.directorio,0,self.pwd)
  if self.renombrarArchivos:
   self.habilitar()
 def tabla(self,d,nivel,pwd):
  """Print the folder tree and register every accepted file in self.index."""
  tab=" "
  print(tab*nivel+self.s+d['baseName'])
  for carpeta in d['SubCarpetas']:
   if not carpeta in self.ignorar:
    self.tabla(d['SubCarpetas'][carpeta], nivel+1, pwd+self.s+d['SubCarpetas'][carpeta]['baseName'])
  for f in d['ficheros']:
   if not d['ficheros'][f]['name'] in self.ignorar:
    self.index[self.ID]=d['ficheros'][f]
    self.ID += 1
 def formato(self,formato):
  """Map category names to a flat list of file extensions.

  Returns [] when no known category is requested, which (being falsy) makes
  carpetas() accept every file.  Fixed: the function previously fell off the
  end (an expression ``['']`` without ``return``), returning None.
  """
  formatos=[]
  if 'video' in [f.lower() for f in formato]:
   formatos += ['.mp4','.avi','.webm','.mkv','.rmvb','.vob','.wmv']
  if 'audio' in [f.lower() for f in formato]:
   formatos += ['.mp3','.wav','.ogg','.m4a','.wav','.aud']
  if 'imagen' in [f.lower() for f in formato]:
   formatos += ['.jpeg','.jpg','.png','.ico','.bmp','.gif']
  if 'script' in [f.lower() for f in formato]:
   formatos += ['.sl','.ino','.c','.h','.xml','.java','.php','.html','.cmd','.cpp','.py','.js','.css','.txt']
  if 'documento' in [f.lower() for f in formato]:
   formatos += ['.pdf','.doc','.docx','.csv','.txt']
  if formatos:
   return formatos
  return []
 def strToClave(self,nombre):
  """Return ``nombre`` stripped of accents and punctuation, for use as a dict key."""
  clave=nombre
  filtro=[
  ('ú','u'),
  ('á','a'),
  ('é','e'),
  ('í','i'),
  ('ó','o'),
  ('ú','u'),
  ('_',''),
  ('-',''),
  ('¡',''),
  ('!',''),
  (' ',' '),
  (' ',''),
  ('@',''),
  ('|',''),
  ('º',''),
  ('!',''),
  ('·',''),
  ('$',''),
  ('%',''),
  ('&',''),
  ('/',''),
  ('(',''),
  (')',''),
  ('¬',''),
  ('~',''),
  ('¡',''),
  ('´',''),
  ('ç',''),
  ('`',''),
  ('-',''),
  ('+',''),
  ('-',''),
  ('*',''),
  ('[',''),
  (']',''),
  ('{',''),
  ('}',''),
  ('"',''),
  ('<',''),
  ('>',''),
  ('=',''),
  ('?',''),
  ('¿','')
  ]
  for change in filtro:
   clave=clave.replace(change[0],change[1])
  return clave
 def carpetas(self,pwd,baseName):
  """Recursively build {'baseName','SubCarpetas','ficheros'} for ``pwd``.

  Each accepted file is stored as {'name','pwd','tags'} where tags combine
  the parent folder names, the words of the file name, and the extension.
  """
  dirs = self.listaDeCarpetas(pwd,validar=1)
  ret = [archivo for archivo in os.listdir(pwd) if ((os.path.isfile(pwd+self.s+archivo)) and (not archivo in self.ignorar))]
  fichero = {}
  for archivo in ret:
   extencion=['.'+archivo.split('.')[-1].lower()]
   # fix: compare the extension string itself, not the one-element list
   if (not self.formatos) or (extencion[0] in self.formatos):
    palabras=archivo[:-len(extencion[0])].lower().split(' ')
    carpetasPadre=pwd[len(self.pwd):].lower().split(self.s)
    fichero[self.strToClave(archivo)] = {
    'name':archivo,
    'pwd':pwd[0 if self.full else len(self.pwd):]+self.s+archivo,
    'tags':carpetasPadre+ palabras+extencion
    }
  sub={}
  if dirs:
   for file in dirs:
    sub[self.strToClave(file)]=self.carpetas(pwd+self.s+file,file)
  return {'baseName':baseName,'SubCarpetas':sub,'ficheros':fichero}
 def listaDeCarpetas(self,pwd,ignorar=None,validar=0):
  """List sub-folder names of ``pwd``, skipping ignored names.

  With ``validar`` set, folders that cannot be listed are dropped again.
  Folders whose name starts with a reserved prefix are (optionally) renamed
  with a leading space when renombrarArchivos is active.
  """
  if ignorar is None:
   ignorar=[]
  if not pwd:
   pwd=self.pwd
  dirs=[]
  for archivo in os.listdir(pwd):
   if (not archivo in self.ignorar) and (not archivo in ignorar):
    if archivo[0] in self.prefijos:
     if self.renombrarArchivos:
      os.rename(pwd+self.s+archivo,pwd+self.s+' '+archivo)
      archivo=' '+archivo
    try:
     if (not os.path.isfile(pwd+self.s+archivo)):
      dirs+=[archivo]
      if validar:
       try:
        os.listdir(pwd+self.s+archivo)
       except OSError:
        # unreadable folder: undo the append above
        dirs=dirs[:-1]
    except Exception:
     raise
  return dirs
 def listaDeFicheros(self,pwd,ignorar=None):
  """Return the file names directly inside ``pwd``, minus ignored ones."""
  if ignorar is None:
   ignorar=[]
  return [fichero for fichero in os.listdir(pwd) if ((os.path.isfile(pwd+self.s+fichero)) and (not fichero in ignorar) )]
 def habilitar(self):
  """Rename indexed files whose names contain accented/out-of-range characters.

  Accents are transliterated; any character with ord > 256 becomes '_';
  names starting with a reserved prefix get a leading space.  self.index is
  updated in place to reflect the new names/paths.
  NOTE(review): the ord > 256 cutoff keeps latin-1 punctuation — confirm
  whether plain ASCII (>= 128) was intended.
  """
  filtro=[
  ('à','a'),
  ('è','e'),
  ('ì','i'),
  ('ò','o'),
  ('ù','u'),
  ('á','a'),
  ('é','e'),
  ('í','i'),
  ('ó','o'),
  ('ú','u'),
  ('Ò','O'),
  ('À','A'),
  ('È','E'),
  ('Ì','I'),
  ('Ù','U'),
  ('Ú','U'),
  ('Ó','O'),
  ('Í','I'),
  ('É','E'),
  ('Á','A')
  ]
  for f in self.index:
   pasar=self.index[f]['name']
   for letra in filtro:
    pasar=pasar.replace(letra[0],letra[1])
   ok=pasar
   for letra in pasar:
    if ord(letra) > 256:
     ok=ok.replace(letra,'_')
   if ok[0] in self.prefijos:
    ok = ' '+ok
   if ok != self.index[f]['name']:
    if not os.path.isfile(str(self.index[f]['pwd'].replace(self.index[f]['name'],ok))):
     os.rename(str(self.index[f]['pwd']),str(self.index[f]['pwd'].replace(self.index[f]['name'],ok)))
     self.index[f]['pwd']=self.index[f]['pwd'].replace(self.index[f]['name'],ok)
     self.index[f]['name']=ok
class audio(object):
 """
 One background playback channel (PyAudio + wave).

 Each instance owns a single channel that plays one sound at a time; to play
 several sounds simultaneously, create several instances.
 Example:
  canal_DO=audio("Flauta DO.wav","DO",pedal=0,tecla='A')
  canal_RE=audio("Flauta RE.wav","RE",pedal=0,tecla='S')
  canal_MI=audio("Flauta MI.wav","MI",pedal=0,tecla='D')

 Bugs fixed: ``pyaudio``/``wave`` were imported only into __init__'s local
 scope but referenced later from callback()/play() (NameError); the
 constructor hard-coded ``self.pedal=0`` instead of using the argument; and
 Off() stopped the sound *when* the pedal was held instead of when it wasn't.
 """
 def __init__(self, archivo,nota,pedal,tecla=''):
  super(audio, self).__init__()
  import pyaudio #drives the audio ports; requires 'portAudio' on the system
  import wave #reads the sound files, .wav by default
  # keep module references: the local imports vanish after __init__ returns
  self._pyaudio=pyaudio
  self._wave=wave
  self.tecla=tecla #optional keyboard key that triggers this channel
  self.archivo=archivo #.wav file to play
  self.nota=nota #note name, when used as an instrument
  self.pedal=pedal #truthy: keep sounding after Off() (was hard-coded to 0)
  self.wav = wave.open(self.archivo, 'rb')
  self.Audio=pyaudio.PyAudio()
  self.stream=None
  self.presionado=0
 def activo(self):
  """Return 1/True while the stream is playing, 0 otherwise."""
  if self.stream:
   return self.stream.is_active()
  else:
   return 0
 def callback(self,in_data, frame_count, time_info, status):
  """PyAudio stream callback: feed the next frames from the wav file."""
  data = self.wav.readframes(frame_count)
  return (data, self._pyaudio.paContinue)
 def play(self):
  """(Re)open the wav file and start streaming it asynchronously."""
  self.wav = self._wave.open(self.archivo, 'rb')
  self.stream = self.Audio.open(format=self.Audio.get_format_from_width(self.wav.getsampwidth()),
  channels=self.wav.getnchannels(),
  rate=self.wav.getframerate(),
  output=True,
  stream_callback=self.callback)
  self.stream.start_stream()
 def stop(self):
  """Stop and close the stream and the wav file; safe to call repeatedly."""
  try:
   if self.stream.is_active():
    self.stream.stop_stream()
   self.stream.close()
   self.wav.close()
  except Exception:
   print(self.nota,"ya esta cerrado")
 def terminate(self):
  """Stop playback and release the PyAudio engine."""
  self.stop()
  self.Audio.terminate()
 def On(self):
  """Key/sensor pressed: restart the sound from the beginning."""
  self.presionado=1
  self.stop()
  self.play()
 def Off(self):
  """Key/sensor released: stop unless the pedal keeps the sound going."""
  self.presionado=0
  # pedal held -> let the note ring out; otherwise cut it (logic was inverted)
  if self.stream and self.stream.is_active() and not self.pedal:
   self.stop()
 def wait(self):
  """Block until the current sound finishes playing."""
  while self.stream.is_active():
   time.sleep(0.1)
class CRUD(object):
    """
    import sqlite3, os
    Small helper that manages a SQLite database.
    Expected shape of the table dictionary:
    tablas={
        'nombreTabla':{'campoID':['campoID','campo1','campo2','campo3','campo4','campo5']}
    }
    SECURITY NOTE(review): every SQL statement is built by string formatting;
    values only pass through strToUnicode(), so table and column names must
    come from trusted code, never from user input.
    """
    def __init__(self, tabla, campoAutoincrement='id', dirCRUD="myMbareteCRUD.sql", reset=0):
        """
        tabla: {table_name: [column definitions]} managed by this instance.
        campoAutoincrement: a full column definition (if it contains a space) or a
            bare name expanded to '<name> integer not null primary key autoincrement'.
        dirCRUD: file holding the SQLite database ('myMbareteCRUD.sql' by default).
        reset=1 deletes that file and re-creates every table in *tabla*; reset=0
            keeps the existing file (reset after changing a table's structure).
        """
        self.campoAutoincrement=campoAutoincrement if (' ' in campoAutoincrement) else campoAutoincrement+' integer not null primary key autoincrement'
        self.reset=reset
        self.dirCRUD=dirCRUD
        # Remember the file extension (default '.sql') so exports can strip it.
        if not '.' in self.dirCRUD.split(os.path.sep)[-1]:
            self.extencionCRUD='.sql'
        else:
            self.extencionCRUD='.'+(self.dirCRUD.split(os.path.sep)[-1]).split('.')[-1]
        self.tablas=tabla
        if self.reset:
            if (os.path.exists(self.dirCRUD)):
                os.remove(self.dirCRUD)
            for tabl in self.tablas:
                self.CrearTabla(tabl,self.tablas[tabl])
    def CrearTabla(self,nombreTabla,columnas,autoincrement=0,dirCRUD=''):
        """Create *nombreTabla* if it does not exist.

        columnas: list like ["campo1 TEXT","campo2 TEXT"].
        autoincrement=1 prepends the configured autoincrement column definition.
        Example SQL emitted: CREATE TABLE IF NOT EXISTS t (campo1 TEXT, campo2 TEXT)
        """
        if not dirCRUD:
            dirCRUD=self.dirCRUD
        if self.reset == False:
            # Remember the layout so SelectAll/exportarTablas can map columns later.
            self.tablas.setdefault(nombreTabla,[self.campoAutoincrement]+columnas if autoincrement else columnas)
        self.colum="("+", ".join(str(col) for col in columnas)+")"
        self.comandoSQL="CREATE TABLE IF NOT EXISTS "+str(nombreTabla)+" "+self.colum
        self.con = sqlite3.connect(dirCRUD)
        self.cursor = self.con.cursor()
        self.cursor.execute(self.comandoSQL)
        self.con.commit()
        self.cursor.close()
        self.con.close()  # FIX: the connection used to be left open (leak)
    def SelectAll(self,nombreTabla,typeSalida="dict",campoClave="id",dirCRUD=''):
        """Return every row of *nombreTabla*.

        typeSalida="dict" (default): {key: {campo: valor, ...}} keyed by *campoClave*
        (falls back to the first column when campoClave is unknown).
        Any other value: [header_row] + data rows.
        Values pass through unicodeToStr/strToVar except the autoincrement column.
        """
        if not dirCRUD:
            dirCRUD=self.dirCRUD
        campos=[key.split(' ')[0] for key in self.tablas[nombreTabla]]
        # Column indexes that must NOT be deserialised (the autoincrement id).
        noModificar=[self.tablas[nombreTabla].index(self.campoAutoincrement)] if (self.campoAutoincrement in self.tablas[nombreTabla]) else []
        self.con = sqlite3.connect(dirCRUD)
        self.cursor = self.con.cursor()
        self.cursor.execute("SELECT * FROM '%s'"%(nombreTabla))
        self.con.commit()
        filas=[[strToVar(unicodeToStr(fila[n])) if (not n in noModificar) else fila[n] for n in range(0,len(fila),1)] for fila in self.cursor.fetchall()]
        if typeSalida=="dict":
            ret={}
            for registro in filas:
                pasar={}
                for clave in campos:
                    pasar.setdefault(clave,registro[campos.index(clave)])
                ret.setdefault(registro[campos.index(campoClave if (campoClave in campos) else campos[0])],pasar)
        else:
            # "list" and any unknown typeSalida both return header + rows.
            ret=[campos]+filas
        self.cursor.close()
        self.con.close()  # FIX: leak
        return ret
    def Cargar(self,nombreTabla,columnas,valores,dirCRUD=''):
        """INSERT one row: columnas[i] (only the name before the first space is
        used) receives valores[i], serialised with strToUnicode."""
        if not dirCRUD:
            dirCRUD=self.dirCRUD
        self.colum="("+",".join(str(col.split(' ')[0]) for col in columnas)+")"
        self.val="("+",".join("'"+strToUnicode(v)+"'" for v in valores)+")"
        self.comandoSQL="insert into "+str(nombreTabla)+" "+self.colum+" values "+self.val
        self.con = sqlite3.connect(dirCRUD)
        self.cursor = self.con.cursor()
        self.cursor.execute(self.comandoSQL)
        self.con.commit()
        self.cursor.close()
        self.con.close()  # FIX: leak
    def Modificar(self,nombreTabla,columnas,valores,dirCRUD=''):
        """Upsert: delete the row whose first column equals valores[0], then
        insert the new row.  (Original note: 'sin terminar' — unfinished.)"""
        if not dirCRUD:
            dirCRUD=self.dirCRUD
        self.colum="("+",".join(str(col.split(' ')[0]) for col in columnas)+")"
        self.val="("+",".join("'"+strToUnicode(v)+"'" for v in valores)+")"
        self.comandoSQL="insert into "+str(nombreTabla)+" "+self.colum+" values "+self.val
        self.con = sqlite3.connect(dirCRUD)
        self.cursor = self.con.cursor()
        self.cursor.execute("DELETE FROM %s WHERE %s='%s'"%(nombreTabla,columnas[0].split(' ')[0],strToUnicode(valores[0])))
        self.con.commit()
        self.cursor.execute(self.comandoSQL)
        self.con.commit()
        self.cursor.close()
        self.con.close()  # FIX: leak
    def Elimina(self,nombreTabla,columnas,valores,dirCRUD=''):
        """Delete the row whose first column equals valores[0]."""
        if not dirCRUD:
            dirCRUD=self.dirCRUD
        self.con = sqlite3.connect(dirCRUD)
        self.cursor = self.con.cursor()
        self.cursor.execute("DELETE FROM %s WHERE %s='%s'"%(nombreTabla,columnas[0].split(' ')[0],strToUnicode(valores[0])))
        self.con.commit()
        self.cursor.close()
        self.con.close()  # FIX: leak
    def exportarTablas(self,tabla=[],formato="shell",campoClave="",file="",dirCRUD=''):
        """Dump tables to the console ("shell"), a .csv file or a .py module.

        tabla: list of table names (empty = every known table).
        file: output path without extension (defaults next to the database file).
        Note: *formato* is matched with substring tests ('csv' in '.csv'), kept
        as-is for compatibility with existing callers.
        """
        if not dirCRUD:
            dirCRUD=self.dirCRUD
        ret={}
        self.con = sqlite3.connect(dirCRUD)
        self.cursor = self.con.cursor()
        # The per-table gathering is identical whether an explicit list or every
        # known table is exported, so iterate either collection uniformly.
        for clave in (tabla if tabla else self.tablas):
            ret.setdefault(clave,[[campo.split(" ")[0] for campo in self.tablas[clave]]])
            noModificar=[self.tablas[clave].index(self.campoAutoincrement)] if (self.campoAutoincrement in self.tablas[clave]) else []
            self.cursor.execute("SELECT * FROM '%s'"%(clave))
            self.resultado = self.cursor.fetchall()
            self.con.commit()
            for i in range(0,len(self.resultado),1):
                registro=list(self.resultado[i][0:])
                self.resultado[i]=[strToVar(unicodeToStr(registro[n])) if (not n in noModificar) else registro[n] for n in range(0,len(registro),1)]
            ret[clave]+=self.resultado
        self.cursor.close()
        self.con.close()  # FIX: leak
        f=''  # FIX: unknown formats no longer raise NameError at the final print
        if formato.lower() in "shell consola":
            f='en consola'
            for clave in ret:
                print('Nombre de la tabla:',clave,'\n','Numero de Registros:',len(ret[clave]))
                for i in ret[clave]:
                    print(" ",i)
        elif formato.lower() in ".csv":
            f=(file if file else self.dirCRUD[:-len(self.extencionCRUD)])+".csv"
            file=open(f,"w")
            for clave in ret:
                file.write('Nombre de la tabla:'+";"+str(clave)+'\n')
                file.write('Numero de Registros:'+";"+str(len(ret[clave]))+'\n')
                for i in ret[clave]:
                    registro=""
                    for x in i:
                        registro+=';'+str(x)
                    file.write(" "+";"+str(registro)+""+'\n')
                file.write('\n'+'\n'+'\n'+'\n'+'\n')
            file.close()  # FIX: the csv handle used to be left open
        elif formato.lower() in ".py":
            f=(file if file else self.dirCRUD[:-len(self.extencionCRUD)])+".py"
            file=open(f,"w")
            file.write(str(r'#!/usr/bin/env python'+'\n'))
            file.write(str(r'# -*- coding: latin-1 -*-'+'\n'))
            for clave in ret:
                campos=[campo.split(" ")[0] for campo in self.tablas[clave]]
                myClave=campoClave if campoClave in campos else campos[0]
                if 1<len(ret[clave]):
                    file.write(str(clave)+'={\n')
                    for reg in range(1,len(ret[clave]),1):
                        file.write(" '"+str(ret[clave][reg][campos.index(myClave)])+"':{\n")
                        for x in range(0,len(ret[clave][reg])-1,1):
                            file.write(" '"+str(campos[x])+"':"+( '"'+ret[clave][reg][x]+'"' if "str" in str(type(ret[clave][reg][x])) else str(ret[clave][reg][x]) )+",\n")
                        file.write(" '"+str(campos[-1])+"':"+( '"'+ret[clave][reg][-1]+'"' if "str" in str(type(ret[clave][reg][-1])) else str(ret[clave][reg][-1]) )+"\n")
                        file.write(" }\n" if len(ret[clave][1:])==reg else " },\n" )
                    file.write('}\n'+'\n')
            file.close()
        print("Fue exportado Exitosamente ,",f)
    def command(self,comandoSQL,ret=0,dirCRUD=''):
        """Execute one arbitrary SQL statement; return the fetched rows when *ret* is truthy."""
        if not dirCRUD:
            dirCRUD=self.dirCRUD
        self.con = sqlite3.connect(dirCRUD)
        self.cursor = self.con.cursor()
        self.cursor.execute(comandoSQL)
        self.con.commit()
        rtrn=self.cursor.fetchall()
        self.cursor.close()
        self.con.close()  # FIX: leak
        if ret:
            return rtrn
class GUI(object):
"""
Clase de una GUI basado en Tkinter para crear y editar una GUI SIMPLE,
y posiblemente se pueda exportar una version .html con la logica en javascript
en un .js y sus estilos en un .css
"""
#titulo="Mbarete Proyect",
#bbdd={},
#comandos={},
#tablas={},
#reset=0,
#dirCRUD='',
#pwd=''
#el paramatro recibido en 'bbdd={}' debe ser un gestor de bases de datos de la clase CRUD, en otras palabras debe ser un objeto de la clase 'CRUD', y eso debe declararse antes de inicializar esta clase para poder pasarle a 'GUI'
    def __init__(self,titulo="Mbarete Proyect",bbdd={},comandos={},tablas={},reset=0,dirCRUD='',pwd='', campoAutoincrement='id'):
        """Build the Tk window, its scrollable root frame and the CRUD backend.

        titulo: window title.
        bbdd: an existing CRUD instance to reuse (its dirCRUD wins); otherwise a
            new CRUD is created at *dirCRUD*.
        comandos: extra {name: callable-or-[fn, args]} entries merged into self.command.
        tablas: extra table definitions merged into the built-in ones.
        reset: truthy -> recreate the database and seed defaults from mbarete.myVars;
            falsy -> recover 'inputType' and 'atributos' from the existing database.
        pwd: working directory (defaults to os.getcwd()).
        NOTE(review): the mutable defaults (bbdd={}, comandos={}, tablas={}) are
        shared across calls; they are only read here, so this stays latent.
        """
        if pwd:
            self.pwd=pwd
        else:
            self.pwd=os.getcwd()
        # Built-in bookkeeping tables; user tables are merged on top below.
        self.tablas={
            'atributos':["clave TEXT","valor TEXT"],
            'inputType':["clave TEXT","valor TEXT"],
            'Vars':["clave TEXT","valor TEXT"]
        }
        if tablas:
            for clave in tablas:
                self.tablas.setdefault(clave,tablas[clave])
        if dirCRUD:
            self.dirCRUD=dirCRUD
        else:
            self.dirCRUD=self.pwd+os.path.sep+titulo.replace(' ','_')+"_CRUD_.sql"
        if bbdd:
            self.Sql=bbdd
            self.dirCRUD=self.Sql.dirCRUD
        else:
            self.Sql=CRUD(self.tablas, campoAutoincrement='id', dirCRUD=self.dirCRUD, reset=reset)
        self.variablesEnSql=[]
        self.gitignore=[]
        self.menus={}
        self.Vars={}
        if reset:
            # Fresh database: persist the default input configuration and window
            # attributes shipped with the 'mbarete' module.
            from mbarete import myVars as externo
            self.INPUTS_CONFIG=self.crearVariable("inputType",externo.inputsDefault)
            self.atrb=self.crearVariable("atributos",externo.atributos)
            del (externo)
            for tabla in self.tablas:
                if not tabla in self.Sql.tablas:
                    self.Sql.CrearTabla(tabla,self.tablas[tabla])
                    #print(tabla)
                    #print(self.Sql.tablas)
        else:
            # Existing database: recover the persisted configuration dictionaries.
            self.INPUTS_CONFIG=self.recuperarVariable("inputType")
            self.atrb=self.recuperarVariable("atributos")
        self.atrb['bbdd']=self.dirCRUD
        self.atrb['campoAutoincrement']=self.Sql.campoAutoincrement
        # widgetType entries created automatically by and for this class.
        self.defaultWidgets=['Tk','myFrame','scrollCanvas','myCanvas','scrollbar','panel','Frame']
        # widget types automatically connected to IntVar()/StringVar()/DoubleVar()/BooleanVar().
        self.widgetConectadoaVars=['Radiobutton','Checkbutton','Entry']
        # Holds every widget (tkinter, canvas, ttk, ...).  Each entry stores the
        # widget plus the extra information needed to configure/manipulate it.
        self.widgets={}
        self.widgets.setdefault('tk',
            {
            'widget':Tk(),
            'inputType':'Tk'
            }
        )
        self.title(titulo)
        # Run self.onkey on every <Key> press while the window has focus.
        self.widgets['tk']['widget'].bind("<Key>",self.onkey)
        # Anonymous <MouseWheel> handler: scroll the main canvas vertically.
        self.widgets['tk']['widget'].bind("<MouseWheel>",lambda event: self.widgets['canvas']['widget'].yview_scroll(int(-1*(event.delta/120)), "units"))
        # Run self.onclick on every mouse button press.
        self.widgets['tk']['widget'].bind("<Button>",self.onclick)
        # Run self.responsive whenever 'tk' changes: window size/position or any
        # .config call on the 'tk' (window) widget.
        self.widgets['tk']['widget'].bind("<Configure>",self.responsive)
        self.widgets.setdefault('f',{
            'widget':Frame(self.widgets['tk']['widget']),
            'inputType':'myFrame',
            'padre':'tk'
        })
        # Canvas used as the scrollable area.
        self.widgets.setdefault('canvas',{
            'widget':Canvas(self.widgets['f']['widget']),
            'inputType':'myCanvas',
            'padre':'f'
        })
        # Vertical scrollbar.
        self.widgets.setdefault('yscrollbar',{
            'widget':Scrollbar(self.widgets['f']['widget'], orient='vertical',command=self.widgets['canvas']['widget'].yview),
            'inputType':'scrollbar',
            'padre':'f'
        })
        # Horizontal scrollbar.
        self.widgets.setdefault('xscrollbar',{
            'widget':Scrollbar(self.widgets['f']['widget'], orient='horizontal',command=self.widgets['canvas']['widget'].xview),
            'inputType':'scrollbar',
            'padre':'f'
        })
        # Frame scrolled through 'canvas' ('myFrame' lives inside 'canvas', which
        # in turn is driven by the X and Y scrollbars).
        self.widgets.setdefault('myFrame',{
            'widget':Frame(self.widgets['canvas']['widget'],width=self.atrb['ancho'],height=self.atrb['alto'],bg=self.atrb['fondo']),
            'inputType':'myFrame'
        })
        self.widgets['myFrame']['widget'].bind("<Configure>",lambda event: self.widgets['canvas']['widget'].configure(scrollregion=self.widgets['canvas']['widget'].bbox("all")))
        self.widgets['canvas']['widget'].create_window((0, 0),window=self.widgets['myFrame']['widget'],anchor="nw")
        self.widgets['canvas']['widget'].configure(yscrollcommand=self.widgets['yscrollbar']['widget'].set,xscrollcommand=self.widgets['xscrollbar']['widget'].set)
        self.widgets['xscrollbar']['widget'].pack(side='bottom', fill='x')
        self.widgets['yscrollbar']['widget'].pack(side='right', fill='y')
        self.widgets['f']['widget'].pack(expand=1, fill='both')
        self.widgets['canvas']['widget'].pack(side="left" , fill="both", expand=True)
        self.widgets['tk']['widget'].geometry("%sx%s+%s+%s"%(str(self.atrb['ancho']),str(self.atrb['alto']),str(self.atrb['Xexterior']),str(self.atrb['Yexterior'])))
        self.widgets['tk']['widget'].update()
        #print(buscarFunciones("dictFunciones.py"))
        # Clickable screen areas, one list per mouse button:
        # each entry is [x0, x1, y0, y1, commandName] (see onclick).
        self.clickDerecho=[
            [0,500,0,500,"pruebaClickDerecho"]
        ]
        self.clickIzquierdo=[
            [0,500,0,500,"pruebaClickIzquierdo"]
        ]
        self.clickRueda=[
            [0,500,0,500,"pruebaClickRueda"]
        ]
        # Widget descriptors for the built-in buttons.
        self.defaultCommand={
            'GUI_destroy':{'inputType':'Button','command':'GUI_destroy','text':'Salir'},
            'GUI_Aceptar':{'inputType':'Button','command':'GUI_Aceptar','text':'ACEPTAR'},
            'GUI_Guardar':{'inputType':'Button','command':'GUI_Guardar','text':'GUARDAR'},
            'GUI_Borrar':{'inputType':'Button','command':'GUI_Borrar','text':'BORRAR'},
            'GUI_Leer':{'inputType':'Button','command':'GUI_Leer','text':'Leer'},
            'GUI_Exportar_PY':{'inputType':'Button','command':'GUI_Exportar_PY','text':'Exportar a '+self.atrb['titulo']+'.py'},
            'GUI_Exportar_CSV':{'inputType':'Button','command':'GUI_Exportar_CSV','text':'Exportar a '+self.atrb['titulo']+'.csv'}
        }
        # Name -> callable (or [fn, args]) used by buttons and click areas;
        # user-supplied 'comandos' are merged below without overriding these.
        self.command={
            'pruebaClickDerecho':[myFuncion,(self.atrb['titulo'],self.atrb['bbdd'],"pruebaClickDerecho")],
            'pruebaClickIzquierdo':[myFuncion,(self.atrb['titulo'],self.atrb['bbdd'],"pruebaClickIzquierdo")],
            'pruebaClickRueda':[myFuncion,(self.atrb['titulo'],self.atrb['bbdd'],"pruebaClickRueda")],
            'GUI_destroy':lambda : self.widgets['tk']['widget'].destroy(),
            'GUI_Aceptar':lambda : print('Aceptar ',self.atrb['frameActivo']),
            'GUI_Guardar':lambda : self.comandoGuardar(),
            'GUI_Borrar':lambda : print('Borrar ',self.widgets['tk']['text']),
            'GUI_Leer':lambda : self.comandoLeer(),
            'GUI_Exportar_PY':lambda : self.comandoExportar(campoClave='id',file='',formato='.py',tabla=[]),
            'GUI_Exportar_CSV':lambda : self.comandoExportar(campoClave='id',file='',formato='.csv',tabla=[])
        }
        if comandos:
            for clave in comandos:
                self.command.setdefault(clave,comandos[clave])
def title(self,titulo):
self.atrb['titulo']=titulo
self.widgets['tk']['widget'].title(self.atrb['titulo'])
def loop(self):
self.update()
self.widgets['tk']['widget'].update()
self.widgets['tk']['widget'].mainloop()
def updateVar(self,name,arg2,mod):
#print(name,arg2,mod)
nombreSinPadre=name.replace(self.widgets[name]['padre']+'_','')
if self.widgets[name]['inputType'] in ['Radiobutton']:
ok_name=nombreSinPadre.replace('_'+nombreSinPadre.split('_')[-1],'')
else:
ok_name=nombreSinPadre
self.widgets[self.widgets[name]['padre']]['value'][ok_name]=self.Vars[name].get()
def setVar(self,name,values={}):
myVars=[]
if self.widgets[name]['inputType'] in ['Frame','panel']:
for w in self.widgets:
if (self.widgets[w]['inputType'] not in self.defaultWidgets):
if (self.widgets[w]['padre'] == name) and (self.widgets[w]['inputType'] in self.widgetConectadoaVars):
myVars += [w]
for buscar in [w for w in myVars if (self.widgets[w]['inputType'] in ['Radiobutton'])]:
if buscar in self.Vars:
for v in values:
if str(name+'_'+v) in buscar:
self.Vars[buscar].set(values[v])
for v in values:
if str(name+'_'+v) in self.Vars:
self.Vars[name+'_'+v].set(values[v])
if not values:
for v in myVars:
if v in self.Vars:
self.Vars[v].set(self.INPUTS_CONFIG[self.widgets[v]['inputType']]['value'])
def onkey(self,event):
#print(event)
pass
def onclick(self,event):
for w in self.widgets:
if self.widgets[w]['widget']==event.widget:
name = w
if 'canvas' in self.widgets[w]:
if self.widgets[w]['canvas']==event.widget:
name = w
if name=='canvas':
xReal=event.x
yReal=event.y
else:
hijo=name
padre=self.widgets[name]['padre']
while padre!='tk':
if 'padre' in self.widgets[hijo]['padre']:
padre=self.widgets[hijo]['padre']
if self.widgets[hijo]['inputType'] in self.defaultWidgets:
padre='tk'
else:
hijo=padre
#print('name:',name,'hijo:',hijo,'padre:',padre)
xReal=int(self.widgets[hijo]['xPlace']+event.x)
yReal=int(self.widgets[hijo]['yPlace']+event.y)
#print(xReal,yReal)
#print(event.x,event.y,event.state,event.num,event.widget)
if 1==int(event.num):
for area in self.clickIzquierdo:
if ((area[1]>=event.x) and (area[0]<=event.x)) and ((area[3]>=event.y) and (area[2]<=event.y)):
threading.Thread(target=self.command[area[-1]][0],args=self.command[area[-1]][1]).start()
elif 2==int(event.num):
for area in self.clickRueda:
if ((area[1]>=event.x) and (area[0]<=event.x)) and ((area[3]>=event.y) and (area[2]<=event.y)):
threading.Thread(target=self.command[area[-1]][0],args=self.command[area[-1]][1]).start()
elif 3==int(event.num):
for area in self.clickDerecho:
if ((area[1]>=event.x) and (area[0]<=event.x)) and ((area[3]>=event.y) and (area[2]<=event.y)):
threading.Thread(target=self.command[area[-1]][0],args=self.command[area[-1]][1]).start()
def responsive(self,event):
self.widgets['tk']['widget'].update()
if (self.atrb['alto'] != self.widgets['tk']['widget'].winfo_height()) or (self.atrb['ancho']!= self.widgets['tk']['widget'].winfo_width()):
self.atrb['alto'] = self.widgets['tk']['widget'].winfo_height()
self.atrb['ancho']= self.widgets['tk']['widget'].winfo_width()
self.update()
def recuperarVariable(self,nombreVariable):
#funcion que recupera variables de una tabla y los retorna como una variable original de este clase
ret={}
dictReturn=self.Sql.SelectAll(nombreVariable,typeSalida='dict',campoClave='clave')
if nombreVariable in self.tablas:
for clave in dictReturn:
ret.setdefault(dictReturn[clave]['clave'],dictReturn[clave]['valor'])
return ret
def crearVariable(self,nombreVariable,variables):
#nombreVariable: nombre de la variable en el programa
#variables: variable tipo diccionario sera guardada en una tabla, y el nombre de la tabla sera 'nombreVariable'
self.tablas.setdefault(nombreVariable,["clave TEXT","valor TEXT"])
self.variablesEnSql+=[nombreVariable]
self.Sql.CrearTabla(nombreVariable,self.tablas[nombreVariable])
for clave in variables:
self.Sql.Cargar(nombreVariable,self.tablas[nombreVariable],[clave, variables[clave] ])
ret={}
dictReturn=self.Sql.SelectAll(nombreVariable,typeSalida='dict',campoClave='clave')
for clave in dictReturn:
ret.setdefault(dictReturn[clave]['clave'],dictReturn[clave]['valor'])
return ret
def guardarVariable(self,nombreVariable,variables):
for clave in variables:
self.Sql.Cargar(nombreVariable,self.tablas[nombreVariable],[clave, variables[clave]])
    def update(self):
        """Recompute the whole layout: refresh widget styles, measure the margins
        reserved for edge panels, resize frames/panels and place every visible
        widget.  Called after state changes and by the <Configure> handler."""
        self.widgets['tk']['widget'].geometry("%sx%s"%(str(self.atrb['ancho']),str(self.atrb['alto'])))
        #print("update")
        #self.widgets['myFrame']['widget'].config(width = self.atrb['ancho'], height = self.atrb['alto'])
        #self.widgets['canvas']['widget'].config(width = self.atrb['ancho'], height = self.atrb['alto'])
        self.margenSuperior=0
        self.margenInferior=0
        self.margenIzquierdo=0
        self.margenDerecho=0
        espasioAlfinal=100
        altoTotal=0
        anchoTotal=0
        # Panels already stacked on each edge (their sizes accumulate as offsets).
        sandwishSuperior=[]
        sandwishInferior=[]
        sandwishDerecho=[]
        sandwishIzquierdo=[]
        # Refresh the 'command', text and style parameters of the widgets.
        for w in self.widgets:
            if self.widgets[w]['inputType']=='Button':
                self.widgets[w]['widget'].config(
                    command=self.command[self.widgets[w]['command']],
                    text=self.widgets[w]['text'],
                    width=self.widgets[w]['width'] if self.widgets[w]['width'] else None,
                    bg=self.widgets[w]['bgColor'],
                    fg=self.widgets[w]['fontColor'],
                    font=(self.widgets[w]['fontType'],self.widgets[w]['fontSize'])
                )
            elif self.widgets[w]['inputType'] in ['Label','Checkbutton','Radiobutton']:
                self.widgets[w]['widget'].config(
                    text=self.widgets[w]['text'],
                    bg=self.widgets[w]['bgColor'],
                    fg=self.widgets[w]['fontColor'],
                    font=(self.widgets[w]['fontType'],self.widgets[w]['fontSize'])
                )
            elif self.widgets[w]['inputType'] in ['Entry']:
                self.widgets[w]['widget'].config(
                    bg=self.widgets[w]['bgColor'],
                    width=self.widgets[w]['width'] if self.widgets[w]['width'] else None,
                    fg=self.widgets[w]['fontColor'],
                    insertbackground=self.widgets[w]['fontColor'],
                    font=(self.widgets[w]['fontType'],self.widgets[w]['fontSize'])
                )
                #self.widgets[w]['padre']+self.widgets[w]['']
                #[self.widgets[w]['name']+'_LabelCanvas']
                # Keep the Entry's companion canvas label in sync (position, size
                # and a slightly smaller copy of the Entry's font/colours).
                if self.widgets[w]['name']+'_LabelCanvas' in self.widgets[self.widgets[w]['padre']]['inputs']:
                    self.widgets[self.widgets[w]['padre']]['inputs'][self.widgets[w]['name']+'_LabelCanvas']['xPlace']=self.widgets[w]['xPlace']
                    self.widgets[self.widgets[w]['padre']]['inputs'][self.widgets[w]['name']+'_LabelCanvas']['yPlace']=self.widgets[w]['yPlace']
                    self.widgets[self.widgets[w]['padre']]['inputs'][self.widgets[w]['name']+'_LabelCanvas']['ancho']=self.anchoWidget(self.widgets[w])
                    self.widgets[self.widgets[w]['padre']]['inputs'][self.widgets[w]['name']+'_LabelCanvas']['alto']=self.altoWidget(self.widgets[w])*0.5
                    self.widgets[self.widgets[w]['padre']]['inputs'][self.widgets[w]['name']+'_LabelCanvas']['fontSize']=int(self.widgets[w]['fontSize']*0.7)
                    self.widgets[self.widgets[w]['padre']]['inputs'][self.widgets[w]['name']+'_LabelCanvas']['fontType']=self.widgets[w]['fontType']
                    self.widgets[self.widgets[w]['padre']]['inputs'][self.widgets[w]['name']+'_LabelCanvas']['fontColor']=self.widgets[w]['fontColor']
                    self.widgets[self.widgets[w]['padre']]['inputs'][self.widgets[w]['name']+'_LabelCanvas']['bgColor']=self.widgets[w]['bgColor']
                    self.widgets[self.widgets[w]['padre']]['inputs'][self.widgets[w]['name']+'_LabelCanvas']['text']=self.widgets[w]['text']
        # Compute the margin space needed to host the side/top/bottom panels,
        # keyed on each panel's anchor direction ('e','o','n','s').
        for myWidget in self.widgets:
            if self.widgets[myWidget]['inputType']=='panel' and self.widgets[myWidget]['visible']:
                self.margenDerecho += ((11-self.widgets[myWidget]['fontSize'])+(self.widgets[myWidget]['width']+1)*(self.widgets[myWidget]['fontSize']-1)+(self.widgets[myWidget]['width']*self.atrb['fontSizeToCorrectorAncho'][self.widgets[myWidget]['fontType']][self.widgets[myWidget]['fontSize']])) if ('e'==self.widgets[myWidget]['anchor'][0]) else 0
                self.margenIzquierdo += ((11-self.widgets[myWidget]['fontSize'])+(self.widgets[myWidget]['width']+1)*(self.widgets[myWidget]['fontSize']-1)+(self.widgets[myWidget]['width']*self.atrb['fontSizeToCorrectorAncho'][self.widgets[myWidget]['fontType']][self.widgets[myWidget]['fontSize']])+self.atrb['scrollVerticalAncho']) if ('o'==self.widgets[myWidget]['anchor'][0]) else 0
                self.margenSuperior += (self.atrb['fontSizeToAlto'][self.widgets[myWidget]['fontType']][self.widgets[myWidget]['fontSize']]) if ('n'==self.widgets[myWidget]['anchor'][0]) else 0
                self.margenInferior += self.atrb['scrollHorizontalAlto']+(self.atrb['fontSizeToAlto'][self.widgets[myWidget]['fontType']][self.widgets[myWidget]['fontSize']]) if ('s'==self.widgets[myWidget]['anchor'][0]) else 0
        for myWidget in self.widgets:
            if self.widgets[myWidget]['inputType'] in ['Frame','panel']:
                if self.widgets[myWidget]['visible']:
                    #menus[myWidget]={'anchor':self.widgets[myWidget]['anchor'],'':self.widgets[myWidget]['']}
                    # Find the total X/Y span occupied by all widgets inside this
                    # 'frame' or 'panel'.
                    ySuma,xSuma=0,0
                    for w in self.widgets:
                        if (self.widgets[w]['inputType'] not in self.defaultWidgets):
                            if (self.widgets[w]['padre'] == myWidget):
                                posicionfinalenY=self.widgets[w]['yPlace']+self.widgets[w]['alto']+self.atrb['scrollHorizontalAlto']+self.margenSuperior + espasioAlfinal
                                ySuma = posicionfinalenY if posicionfinalenY > ySuma else ySuma
                                posicionfinalenX=self.widgets[w]['xPlace']+self.widgets[w]['ancho']+self.atrb['scrollVerticalAncho']+self.margenIzquierdo
                                xSuma = posicionfinalenX if posicionfinalenX > xSuma else xSuma
                    if self.widgets[myWidget]['inputType'] in ['Frame']:
                        # Frames fill the window between the margins; the tallest
                        # frame drives the scrollable height.
                        self.widgets[myWidget]['ancho'] =(self.atrb['ancho'])-self.margenIzquierdo-self.margenDerecho
                        self.widgets[myWidget]['alto'] = ySuma
                        self.widgets[myWidget]['yPlace'] = self.margenSuperior
                        self.widgets[myWidget]['xPlace'] = self.margenIzquierdo
                        self.atrb['frameActivo']=myWidget
                        if ySuma>altoTotal:
                            altoTotal=ySuma
                        #print(self.margenSuperior,self.margenInferior,self.margenIzquierdo,self.margenDerecho)
                        #print(ySuma,self.atrb['alto'])
                        if self.widgets['myFrame']['widget'].winfo_height()<altoTotal:
                            self.widgets['myFrame']['widget'].config(width=self.atrb['ancho'],height=altoTotal,bg=self.widgets[myWidget]['bgColor'])
                        if self.widgets['myFrame']['widget'].winfo_height()>self.widgets[myWidget]['alto']:
                            self.widgets['myFrame']['widget'].config(width=self.atrb['ancho'],height=self.atrb['alto'],bg=self.widgets[myWidget]['bgColor'])
                    if self.widgets[myWidget]['inputType'] in ['panel']:
                        # Panels are sized/positioned from their anchor: side panels
                        # ('e'/'o') are full-height strips, top/bottom ('n'/'s')
                        # full-width strips, stacked after previously placed panels.
                        self.widgets[myWidget]['ancho'] = ((11-self.widgets[myWidget]['fontSize'])+(self.widgets[myWidget]['width']+1)*(self.widgets[myWidget]['fontSize']-1)+(self.widgets[myWidget]['width']*self.atrb['fontSizeToCorrectorAncho'][self.widgets[myWidget]['fontType']][self.widgets[myWidget]['fontSize']])) if (('e'==self.widgets[myWidget]['anchor'][0]) or ('o'==self.widgets[myWidget]['anchor'][0])) else (self.atrb['ancho']-self.atrb['scrollVerticalAncho'])
                        self.widgets[myWidget]['alto'] = self.atrb['alto']-self.margenInferior-self.margenSuperior-self.atrb['scrollHorizontalAlto'] if (('e'==self.widgets[myWidget]['anchor'][0]) or ('o'==self.widgets[myWidget]['anchor'][0])) else (self.atrb['fontSizeToAlto'][self.widgets[myWidget]['fontType']][self.widgets[myWidget]['fontSize']])
                        self.widgets[myWidget]['yPlace']= self.margenSuperior if (('e'==self.widgets[myWidget]['anchor'][0]) or ('o'==self.widgets[myWidget]['anchor'][0])) else (self.atrb['alto']-self.widgets[myWidget]['alto']-self.atrb['scrollHorizontalAlto']-sum([self.widgets[ocupa]['alto'] for ocupa in sandwishInferior]) if ('s'==self.widgets[myWidget]['anchor'][0]) else sum([self.widgets[ocupa]['alto'] for ocupa in sandwishSuperior]))
                        self.widgets[myWidget]['xPlace']= 0 if (('n'==self.widgets[myWidget]['anchor'][0]) or ('s'==self.widgets[myWidget]['anchor'][0])) else (self.atrb['ancho']-self.widgets[myWidget]['ancho']-self.atrb['scrollVerticalAncho']-sum([self.widgets[ocupa]['ancho'] for ocupa in sandwishDerecho]) if ('e'==self.widgets[myWidget]['anchor'][0]) else sum([self.widgets[ocupa]['ancho'] for ocupa in sandwishIzquierdo]))
                        if ('n'==self.widgets[myWidget]['anchor'][0]):
                            sandwishSuperior += [myWidget]
                        elif ('s'==self.widgets[myWidget]['anchor'][0]):
                            sandwishInferior += [myWidget]
                        elif ('e'==self.widgets[myWidget]['anchor'][0]):
                            sandwishDerecho += [myWidget]
                        elif ('o'==self.widgets[myWidget]['anchor'][0]):
                            sandwishIzquierdo += [myWidget]
                    # Re-configure, repaint and place the container only when its
                    # geometry actually changed (or it was never placed).
                    if (self.widgets[myWidget]['ancho']!=self.widgets[myWidget]['widget'].winfo_width()) or (self.widgets[myWidget]['alto']!=self.widgets[myWidget]['widget'].winfo_height()) or (not (self.widgets[myWidget]['widget'].place_info())):
                        self.widgets[myWidget]['widget'].config(width=self.widgets[myWidget]['ancho'],height=self.widgets[myWidget]['alto'],bg=self.widgets[myWidget]['bgColor'])
                        self.widgets[myWidget]['canvas'].config(width=self.widgets[myWidget]['ancho'],height=self.widgets[myWidget]['alto'],bg=self.widgets[myWidget]['bgColor'])
                        if not (self.widgets[myWidget]['widget'].place_info()):
                            self.widgets[myWidget]['canvas'].place(x=0, y=0)
                        #gradient(poligono=[],x=0,y=0,height=0,width=0,rotacion=0,color1='#ffffff',color2='#000000')
                        if self.widgets[myWidget]['degradado']:
                            # Paint a subtle background gradient line by line.
                            for line in gradient(width=self.widgets[myWidget]['ancho'],height=self.widgets[myWidget]['alto'],color1=self.widgets[myWidget]['bgColor'],color2=escalarHex(h=self.widgets[myWidget]['bgColor'],factor=0.05)):
                                self.widgets[myWidget]['canvas'].create_line(line[0],line[1],line[2],line[3],fill=line[4])
                        for w in self.widgets[myWidget]['inputs']:
                            if self.widgets[myWidget]['inputs'][w]['inputType'] in ['LabelCanvas']:
                                self.widgets[myWidget]['canvas'].create_text(
                                    self.widgets[myWidget]['inputs'][w]['xPlace']+(self.widgets[myWidget]['inputs'][w]['ancho']/2),
                                    self.widgets[myWidget]['inputs'][w]['yPlace']-(self.widgets[myWidget]['inputs'][w]['alto']/2),
                                    fill=self.widgets[myWidget]['inputs'][w]['fontColor'],
                                    font=(self.widgets[myWidget]['inputs'][w]['fontType'],self.widgets[myWidget]['inputs'][w]['fontSize']),
                                    text=self.widgets[myWidget]['inputs'][w]['text']
                                )
                        self.widgets[myWidget]['widget'].place(x=self.widgets[myWidget]['xPlace'],y=self.widgets[myWidget]['yPlace'])
                        #self.widgets[myWidget]['canvas'].config(width=self.widgets[myWidget]['ancho'],height=self.widgets[myWidget]['alto'],bg=self.widgets[myWidget]['bgColor'])
                        #self.widgets[myWidget]['canvas'].place(x=0, y=0)
                else:
                    self.widgets[myWidget]['widget'].place_forget()
                    print(myWidget,'place_forget')
        # Finally place every ordinary (non-container) widget at its coordinates.
        for myWidget in self.widgets:
            if (self.widgets[myWidget]['inputType'] not in self.defaultWidgets):
                self.widgets[myWidget]['widget'].place(x=self.widgets[myWidget]['xPlace'],y=self.widgets[myWidget]['yPlace'])
        self.widgets['tk']['widget'].update()
        #self.widgets['canvas']['widget'].config(scrollregion=(0,0,self.widgets['canvas']['ancho'],self.widgets['canvas']['alto']))#self.widgets['canvas']['widget'].config(width = self.atrb['ancho'], height = self.atrb['alto'], scrollregion=(0,0,anchoTotal,altoTotal))
        #self.widgets['tk']['widget'].geometry("%sx%s+%s+%s"%(str(self.atrb['ancho']),str(self.atrb['alto']),str(self.atrb['Xexterior']),str(self.atrb['Yexterior'])))
def anchoWidget(self,w):
return int( (11-w['fontSize']) + ((w['width']+1)*(w['fontSize']-1)) + (w['width']*self.atrb['fontSizeToCorrectorAncho'][w['fontType']][w['fontSize']]) )
def altoWidget(self,w):
return self.atrb['fontSizeToAlto'][w['fontType']][w['fontSize']]
def comandoGuardar(self):
if self.widgets[self.atrb['frameActivo']]['crearTabla']:
print(self.atrb['frameActivo'],self.tablas[self.atrb['frameActivo']][1:],[self.widgets[self.atrb['frameActivo']]['value'][v.split(' ')[0]] for v in self.tablas[self.atrb['frameActivo']][1:]] )
self.Sql.Cargar(self.atrb['frameActivo'],self.tablas[self.atrb['frameActivo']][1:],[self.widgets[self.atrb['frameActivo']]['value'][v.split(' ')[0]] for v in self.tablas[self.atrb['frameActivo']][1:]] ,dirCRUD=self.pwd+os.path.sep+self.atrb['transicion']+os.path.sep+self.atrb['transicion']+self.Sql.extencionCRUD)
self.setVar(self.atrb['frameActivo'])
 def comandoLeer(self,campoClave='id'):
  """Dump every row of the active frame's table to stdout.

  campoClave: column used as the dictionary key of the rows returned by
  self.Sql.SelectAll.
  NOTE(review): the dump only runs when one of the current
  sub-transition's 'aceptar' labels appears in the frame's 'etiquetas'
  -- confirm this gating is intentional.
  """
  if self.widgets[self.atrb['frameActivo']]['crearTabla']:
   # Truthy list <=> at least one accepted label is present on the frame.
   if [1 for ok in self.atrb['subtransicion']['aceptar'] if (ok in self.widgets[self.atrb['frameActivo']]['etiquetas'])]:
    print(self.Sql.SelectAll(self.atrb['frameActivo'],typeSalida='dict',campoClave=campoClave,dirCRUD=self.pwd+os.path.sep+self.atrb['transicion']+os.path.sep+self.atrb['transicion']+self.Sql.extencionCRUD))
  #self.setVar(w)
 def comandoExportar(self,campoClave='id',file='',formato='',tabla=[]):
  """Export the active frame's table via self.Sql.exportarTablas.

  campoClave: key column passed through to the exporter.
  formato: output format understood by exportarTablas.
  NOTE(review): the 'file' and 'tabla' parameters are accepted but never
  used -- both the output file and the table are derived from self.atrb
  instead.  Confirm whether any caller relies on them before honouring
  or removing them.  Also note 'file' shadows the (Py2) builtin.
  """
  if self.widgets[self.atrb['frameActivo']]['crearTabla']:
   self.Sql.exportarTablas(
   tabla=[self.atrb['frameActivo']],
   formato=formato,
   campoClave=campoClave,
   file=self.pwd+os.path.sep+self.atrb['transicion']+os.path.sep+self.atrb['titulo'],
   dirCRUD=self.pwd+os.path.sep+self.atrb['transicion']+os.path.sep+self.atrb['transicion']+self.Sql.extencionCRUD
   )
  #self.setVar(w)
 def SetWidget(self,atributos={}):
  """Create one widget (and, recursively, its children) from *atributos*.

  The incoming dict is merged over the defaults registered for its
  'inputType' in self.INPUTS_CONFIG, then the matching widget is built
  (Frame/panel, Button, Label, LabelCanvas, Entry, Checkbutton,
  Radiobutton, or the virtual formIn/formOut containers) and registered
  in self.widgets under its 'name'.

  NOTE(review): the mutable default `atributos={}` is shared between
  calls, and `myWidget` aliases the shared INPUTS_CONFIG entry (it is
  mutated in place below); INPUTS_CONFIG is deliberately re-read near
  the end, which suggests the aliasing is known -- confirm before
  refactoring.
  """
  # Map of button captions to their callback spec.
  # NOTE(review): myFuncion is not defined in this part of the file; it
  # must be a module-level callback defined elsewhere -- confirm.
  myCommand={
   "Aceptar":[myFuncion,(atributos['inputType'], atributos['name'], "Aceptar")],
   "Aplicar":[myFuncion,(atributos['inputType'], atributos['name'], "Aplicar")],
   "Modificar":[myFuncion,(atributos['inputType'], atributos['name'], "Modificar")],
   "Guardar":[myFuncion,(atributos['inputType'], atributos['name'], "Guardar")],
   "Siguiente":[myFuncion,(atributos['inputType'], atributos['name'], "Siguiente")],
   "Anterior":[myFuncion,(atributos['inputType'], atributos['name'], "Anterior")],
   "Ignorar":[myFuncion,(atributos['inputType'], atributos['name'], "Ignorar")],
   "Cancelar":[myFuncion,(atributos['inputType'], atributos['name'], "Cancelar")],
   "Salir":[myFuncion,(atributos['inputType'], atributos['name'], "Salir")],
   "Validar":[myFuncion,(atributos['inputType'], atributos['name'], "Validar")]
  }
  # Merge the data received in 'atributos' over the defaults predefined in 'myVars.py'.
  myWidget= self.INPUTS_CONFIG[atributos['inputType']]
  if myWidget != 'None':
   #print('linea 1273',atributos['inputType'],myWidget)
   for clave in [clave for clave in atributos]:
    if clave in [key for key in myWidget]:
     myWidget[clave]=atributos[clave]
    else:
     myWidget.setdefault(clave,atributos[clave])
   # Default the parent widget to 'myFrame' when none (or a falsy one) was given.
   if 'padre' in myWidget:
    if not myWidget['padre']:
     myWidget['padre']='myFrame'
   else:
    myWidget['padre']='myFrame'
  else:
   myWidget={'inputType':'' }
  """
  esta parte del codigo genera el 'Frame Tkinter' los inputs y outputs dentro de este
  A cada 'Frame Tkinter' se le asignara un widget padre que le heredara el comportamiento deseado,
  los widget padre solo son 'tk' y 'myFrame', estos widget son declarados en la funcion __init__ de esta clase,
  COMPORTAMIENTOS:
  'tk' es el objeto Tk() de tkinter, y sus hijos se comportaran de forma ESTATICA dentro del programa, los hijos de 'tk' no se mueven o desplazan dentro de la ventana del Programa, o sea no seran controlados por las 'barras de desplazamiento'(Scrollbar)
  'myFrame' es el Frame() de tkinter, pero esta 'posicionado' dentro de un 'Canvas()' que este a su ves es controlado por las 'barras de desplazamiento'(Scrollbar), y los hijos de 'myFrame' tambien seran controlados por las 'barras de desplazamiento'(Scrollbar)
  POSICIONAMIENTO:
  'tk' sera el padre del 'Frame Tkinter' si le pasamos inputType='panel', y los hijos de 'Frame Tkinter' seran posicionados con .place(x=posicion real en pantalla,y=posicion real en pantalla)
  'myframe' sera el padre del 'Frame Tkinter' si le pasamos inputType='Frame', como 'myFrame' puede tener una dimencion deferente de 'tk', este debe posicionar los widgets dentro del 'Frame Tkinter' calculando su posicion dentro 'myFrame'
  """
  # (Block above, kept verbatim: 'Frame' children scroll with myFrame's
  # canvas/scrollbars; 'panel' children are static children of the Tk root.)
  if myWidget['inputType'] in ['Frame','panel']:
   if myWidget['inputType']=='Frame':
    myWidget['widget']=Frame(
     self.widgets['myFrame']['widget'],
     width=myWidget['ancho'],
     height=myWidget['alto'],
     bd=0,
     highlightthickness=0
    )
    myWidget['padre']='myFrame'
   if myWidget['inputType']=='panel':
    myWidget['widget']=Frame(
     self.widgets['tk']['widget'],
     width=myWidget['ancho'],
     height=myWidget['alto'],
     bg=myWidget['bgColor']
    )
    myWidget['padre']='tk'
   # Backing canvas used for gradients and LabelCanvas texts.
   myWidget['canvas']=Canvas(myWidget['widget'],bd=0,highlightthickness=0)
   myWidget['value']={}
   self.widgets.setdefault(myWidget['name'],myWidget)
   if 'inputs'in myWidget:
    # Lay out the frame's children top-to-bottom, creating each one
    # through a recursive SetWidget call.
    inputs={}
    x,y=0,0
    saltoX,saltoY=0,0
    if 'text' in myWidget:
     inputs[myWidget['name']+'_Label']={
      'inputType':'Label',
      'padre':myWidget['name'],
      'name':myWidget['name']+'_Label',
      'xPlace':x,
      'yPlace':y,
      'ancho':self.anchoWidget(myWidget),
      'alto':self.altoWidget(myWidget)*0.3+2,
      'fontSize':int(myWidget['fontSize']),
      'fontType':myWidget['fontType'],
      'fontColor':myWidget['fontColor'],
      'bgColor':myWidget['bgColor'],
      'text':myWidget['text']}
     self.SetWidget(atributos=inputs[myWidget['name']+'_Label'])
    for btn in myWidget['inputs']:
     # Advance the cursor: a pending saltoX/saltoY (set by the previous
     # multi-line child) wins over the default step.
     if saltoX>0:
      x += saltoX
      saltoX=0
     else:
      x += self.anchoWidget(myWidget) if (('n'==myWidget['anchor'][0]) or ('s'==myWidget['anchor'][0])) else 0
     if saltoY>0:
      y += saltoY
      saltoY=0
     else:
      y += self.altoWidget(myWidget)+self.atrb['espacioVerticalEntreWidgets']
     # Inherit unset attributes from the parent frame.
     myWidget['inputs'][btn].setdefault('name',myWidget['name']+'_'+btn)
     myWidget['inputs'][btn].setdefault('padre',myWidget['name'])
     myWidget['inputs'][btn].setdefault('width',myWidget['width'])
     myWidget['inputs'][btn].setdefault('fontSize',myWidget['fontSize'])
     myWidget['inputs'][btn].setdefault('fontType',myWidget['fontType'])
     myWidget['inputs'][btn].setdefault('fontColor',(escalarHex(h=myWidget['fontColor'],factor=1.0/0.9) if myWidget['inputs'][btn]['inputType']=='Entry' else myWidget['fontColor']))
     myWidget['inputs'][btn].setdefault('bgColor',(escalarHex(h=myWidget['bgColor'],factor=0.9) if myWidget['inputs'][btn]['inputType']=='Entry' else myWidget['bgColor']))
     myWidget['inputs'][btn].setdefault('fgColor',myWidget['fgColor'])
     myWidget['inputs'][btn].setdefault('xPlace',x)
     if myWidget['inputs'][btn]['inputType'] in ['Entry']:
      myWidget['inputs'][btn].setdefault('yPlace',y+(self.altoWidget(myWidget['inputs'][btn]) if 'text' in myWidget['inputs'][btn] else 0))
     else:
      myWidget['inputs'][btn].setdefault('yPlace',y)
     if myWidget['inputs'][btn]['inputType'] in ['Radiobutton']:
      myWidget['inputs'][btn]['alto']=self.altoWidget(myWidget['inputs'][btn])*(len(myWidget['inputs'][btn]['radios'])+1)
      saltoY=myWidget['inputs'][btn]['alto']
     if myWidget['inputs'][btn]['inputType'] in ['Entry']:
      myWidget['inputs'][btn]['alto']=self.altoWidget(myWidget['inputs'][btn])*2
      saltoY=myWidget['inputs'][btn]['alto']
     self.SetWidget(atributos=myWidget['inputs'][btn])
     if myWidget['inputs'][btn]['inputType']=='Entry' and ('text' in myWidget['inputs'][btn]) :
      # Entries get a canvas-drawn caption above them.
      inputs[myWidget['name']+'_'+btn+'_LabelCanvas']={
       'inputType':'LabelCanvas',
       'padre':myWidget['name'],
       'name':myWidget['name']+'_'+btn+'_LabelCanvas',
       'xPlace':x,
       'yPlace':y,
       'ancho':self.anchoWidget(myWidget),
       'alto':self.altoWidget(myWidget)*0.3+10,
       'fontSize':int(myWidget['fontSize']*0.7),
       'fontType':myWidget['fontType'],
       'fontColor':myWidget['fontColor'],
       'bgColor':myWidget['bgColor'],
       'text':myWidget['inputs'][btn]['text']}
      #self.SetWidget(atributos=inputs[myWidget['name']+'_'+btn+'_LabelCanvas'])
    self.widgets[myWidget['name']]['inputs']=myWidget['inputs']
    for i in inputs:
     self.widgets[myWidget['name']]['inputs'][i]=inputs[i]
  # formIn/formOut are virtual containers: registered, but no Tk widget.
  elif myWidget['inputType']=='formIn':
   self.widgets.setdefault(myWidget['name'],myWidget)
  elif myWidget['inputType']=='formOut':
   self.widgets.setdefault(myWidget['name'],myWidget)
  elif myWidget['inputType']=='Button':
   myWidget['widget']=Button(
    self.widgets[myWidget['padre']]['widget'],
    text=myWidget['text'],
    width=myWidget['width'] if myWidget['width'] else None,
    bg=myWidget['bgColor'],
    fg=myWidget['fontColor'],
    font=(myWidget['fontType'],myWidget['fontSize']),
    command=self.command[myWidget['command']]
   )
   myWidget['ancho']=self.anchoWidget(myWidget)
   myWidget['alto']=self.altoWidget(myWidget)
   self.widgets.setdefault(myWidget['name'],myWidget)
  elif myWidget['inputType']=='Label':
   myWidget['widget']=Label(
    self.widgets[myWidget['padre']]['widget'],
    text=myWidget['text'],
    bg=myWidget['bgColor'],
    fg=myWidget['fontColor'],
    font=(myWidget['fontType'],myWidget['fontSize']))
   myWidget['ancho']=self.anchoWidget(myWidget)
   myWidget['alto']=self.altoWidget(myWidget)
   self.widgets.setdefault(myWidget['name'],myWidget)
  elif myWidget['inputType']=='LabelCanvas':
   # Drawn directly onto the parent frame's canvas; not registered in self.widgets.
   self.widgets[myWidget['padre']]['canvas'].create_text(myWidget['xPlace']+(myWidget['ancho']/2),myWidget['yPlace']+(myWidget['alto']/2),fill=myWidget['fontColor'],font=(myWidget['fontType'],myWidget['fontSize']), text=myWidget['text'])
  elif myWidget['inputType']=='Entry':
   if not 'value' in myWidget:
    myWidget['value']=''
   # Pick the Tkinter variable class matching the declared output type.
   if myWidget['typeSalida'] in ['str','correo','date','nombre']:
    self.Vars[myWidget['name']]=StringVar(value=str(myWidget['value']),name=myWidget['name'])
   elif myWidget['typeSalida'] in ['int','edad']:
    self.Vars[myWidget['name']]=IntVar(value=int(float(myWidget['value'])),name=myWidget['name'])
   elif myWidget['typeSalida'] in ['float','moneda','magnitud']:
    self.Vars[myWidget['name']]=DoubleVar(value=float(myWidget['value']),name=myWidget['name'])
   elif myWidget['typeSalida'] in ['Boolean','bool']:
    self.Vars[myWidget['name']]=BooleanVar(value=bool(myWidget['value']),name=myWidget['name'])
   else:
    self.Vars[myWidget['name']]=StringVar(value=str(myWidget['value']),name=myWidget['name'])
   myWidget['widget']=Entry(
    self.widgets[myWidget['padre']]['widget'],
    textvariable=self.Vars[myWidget['name']],
    width=myWidget['width'] if myWidget['width'] else None,
    fg=myWidget['fontColor'],
    insertbackground=myWidget['fontColor'],
    bg=myWidget['bgColor'],
    font=(myWidget['fontType'],myWidget['fontSize'])
   )
   # Keep the widget's stored value in sync on every write to the variable.
   self.Vars[myWidget['name']].trace('w',lambda name,arg2,mod : self.updateVar(name,arg2,mod))
   self.widgets.setdefault(myWidget['name'],myWidget)
   self.updateVar(myWidget['name'],' ','w')
  elif myWidget['inputType']=='Checkbutton':
   if not 'value' in myWidget:
    myWidget['value']=''
   self.Vars[myWidget['name']]=BooleanVar(value=bool(myWidget['value']),name=myWidget['name'])
   myWidget['widget']=Checkbutton(
    self.widgets[myWidget['padre']]['widget'],
    text=myWidget['text'],
    bg=myWidget['bgColor'],
    fg=myWidget['fontColor'],
    variable=self.Vars[myWidget['name']],
    font=(myWidget['fontType'],myWidget['fontSize'])
   )
   self.Vars[myWidget['name']].trace('w',lambda name,arg2,mod : self.updateVar(name,arg2,mod))
   myWidget['ancho']=self.anchoWidget(myWidget)
   myWidget['alto']=self.altoWidget(myWidget)
   self.widgets.setdefault(myWidget['name'],myWidget)
   self.updateVar(myWidget['name'],' ','w')
  elif myWidget['inputType']=='Radiobutton':
   # One shared StringVar, named after the first radio option.
   p=myWidget['name']+'_'+[k for k in myWidget['radios']][0]
   #print([k for k in myWidget['radios']][0],p)
   self.Vars[p]=StringVar(value=str(myWidget['value']),name=p)
   x , y = 0 , 0
   if 'text' in myWidget:
    self.SetWidget(atributos={
     'inputType':'Label',
     'padre':myWidget['padre'],
     'name':myWidget['name']+'_Label',
     'xPlace':myWidget['xPlace']+x,
     'yPlace':myWidget['yPlace']+y,
     'ancho':self.anchoWidget(myWidget),
     'alto':self.altoWidget(myWidget),
     'fontSize':myWidget['fontSize'],
     'fontType':myWidget['fontType'],
     'fontColor':myWidget['fontColor'],
     'bgColor':myWidget['bgColor'],
     'text':myWidget['text']})
    x=self.atrb['fontSizeToAlto'][myWidget['fontType']][myWidget['fontSize']]
   if 'radios' in myWidget:
    # Stack one Radiobutton per option below the caption.
    for r in myWidget['radios']:
     y += (self.atrb['fontSizeToAlto'][myWidget['fontType']][myWidget['fontSize']])
     myWidget['radios'][r]['inputType']=myWidget['inputType']
     myWidget['radios'][r]['padre']=myWidget['padre']
     myWidget['radios'][r]['name']=myWidget['name']+'_'+str(r)
     myWidget['radios'][r]['xPlace']=myWidget['xPlace']+x
     myWidget['radios'][r]['yPlace']=myWidget['yPlace']+y
     myWidget['radios'][r]['bgColor']=myWidget['bgColor']
     myWidget['radios'][r]['fontColor']=myWidget['fontColor']
     myWidget['radios'][r]['fontType']=myWidget['fontType']
     myWidget['radios'][r]['fontSize']=myWidget['fontSize']
     myWidget['radios'][r]['ancho']=self.anchoWidget(myWidget)
     myWidget['radios'][r]['alto']=self.altoWidget(myWidget)
     myWidget['radios'][r]['widget']=Radiobutton(
      self.widgets[myWidget['padre']]['widget'],
      text=myWidget['radios'][r]['text'],
      bg=myWidget['radios'][r]['bgColor'],
      fg=myWidget['radios'][r]['fontColor'],
      variable=self.Vars[p],
      value=r,
      font=(myWidget['radios'][r]['fontType'],myWidget['radios'][r]['fontSize']))
     if 'command' in myWidget['radios'][r]:
      myWidget['radios'][r]['widget'].config(command=self.command[myWidget['radios'][r]['command']])
     self.widgets.setdefault(myWidget['radios'][r]['name'],myWidget['radios'][r])
   self.Vars[p].trace('w',lambda name,arg2,mod : self.updateVar(name,arg2,mod))
   #myWidget['ancho']=((11-myWidget['fontSize'])+(myWidget['width']+1)*(myWidget['fontSize']-1)+(myWidget['width']*self.atrb['fontSizeToCorrectorAncho'][myWidget['fontType']][myWidget['fontSize']]))
   #myWidget['alto']=(self.atrb['fontSizeToAlto'][myWidget['fontType']][myWidget['fontSize']])
   #self.widgets.setdefault(myWidget['name'],myWidget)
   self.updateVar(p,' ','w')
  # Reload defaults: myWidget aliased (and mutated) the INPUTS_CONFIG entry above.
  self.INPUTS_CONFIG=self.recuperarVariable("inputType")
  #print(myWidget['name'],self.widgets[myWidget['name']]['name'])
  if 'crearTabla' in myWidget:
   if (myWidget['crearTabla']) and (not myWidget['name'] in self.tablas):
    # First value-bearing input becomes the UNIQUE text key column.
    campos = [i for i in myWidget['inputs'] if (myWidget['inputs'][i]['inputType'] in self.widgetConectadoaVars) ]
    self.tablas[myWidget['name']]=[self.Sql.campoAutoincrement]+[campos[0]+' text not null UNIQUE']+campos[1:]
    print(myWidget['name'],self.tablas[myWidget['name']])
    self.Sql.CrearTabla(myWidget['name'],self.tablas[myWidget['name']],dirCRUD=self.pwd+os.path.sep+myWidget['subProyecto']+os.path.sep+myWidget['subProyecto']+self.Sql.extencionCRUD)
  del myWidget
class geometria(object):
 """Small 2-D/3-D vector-geometry helper.

 Points and vectors are lists [x, y, z]; the @corrector decorator pads
 two-component lists (or tuples) with a trailing 0.0 so 2-D callers work
 transparently.  Angle helpers treat their arguments as vectors from the
 origin.
 """
 def __init__(self):
  super(geometria, self).__init__()
 def corrector(f):
  """Decorator: coerce short list/tuple args into 3-component float lists.

  Bug fix: the original tested `"list" in type(a)`, which raises
  TypeError('argument of type "type" is not iterable') on every
  decorated call; isinstance() is what was intended.
  """
  def corregido(*arg,**kwargs):
   ret = []
   for a in arg:
    if isinstance(a, (list, tuple)) and (len(a) < 3):
     # Pad to three components, coercing to float.
     ret.append([float(c) for c in a] + [0.0] * (3 - len(a)))
    else:
     ret.append(a)
   return f(*tuple(ret), **kwargs)
  return corregido
 @corrector
 def colineales(self,A,B,C,decimales=4):
  """Return True when points A, B and C are collinear (to `decimales` places).

  Bug fix: the original compared the slopes AB[0]/AB[1] and AC[0]/AC[1],
  raising ZeroDivisionError for horizontal segments; the cross product
  of AB and AC (zero iff collinear) is used instead, which also works in 3-D.
  """
  AB=self.resta(A,B)
  AC=self.resta(A,C)
  cruz=[AB[1]*AC[2]-AB[2]*AC[1],
        AB[2]*AC[0]-AB[0]*AC[2],
        AB[0]*AC[1]-AB[1]*AC[0]]
  return all(round(c, decimales) == 0 for c in cruz)
 @corrector
 def modulo(self,A):
  """Euclidean norm (length) of vector A."""
  return (((A[0])**2)+((A[1])**2)+((A[2])**2))**(1/2)
 @corrector
 def vectorUnitario(self,A):
  """Unit vector in the direction of A (A must be non-zero)."""
  m=self.modulo(A)
  return [A[0]/m,A[1]/m,A[2]/m]
 def hypotenusa(self,catOp,catAd):
  """Hypotenuse of a right triangle with legs catOp and catAd."""
  return (((catAd)**(2))+((catOp)**(2)))**(1/2)
 @corrector
 def dist(self,A,B):
  """Distance between points A and B."""
  return (((A[0]-B[0])**2)+((A[1]-B[1])**2)+((A[2]-B[2])**2))**(1/2)
 @corrector
 def coseno(self,A,B):
  """Cosine of the angle between vectors A and B.

  Bug fix: for a zero vector the original returned 3.1416/2 (~1.57),
  which is outside [-1, 1] and made angRad()/ang() crash inside
  math.acos; 0.0 (a right angle) is returned instead.  The result is
  also clamped to [-1, 1] to absorb floating-point rounding.
  """
  divisor=(((((A[0])**2)+((A[1])**2)+((A[2])**2))**(1/2))*((((B[0])**2)+((B[1])**2)+((B[2])**2))**(1/2)))
  if divisor!=0.0:
   cos=(A[0]*B[0]+A[1]*B[1]+A[2]*B[2])/divisor
   return max(-1.0, min(1.0, cos))
  else:
   return 0.0
 @corrector
 def angRad(self,A,B):
  """Angle between vectors A and B, in radians."""
  return math.acos(self.coseno(A,B))
 @corrector
 def ang(self,A,B):
  """Angle between vectors A and B, in degrees."""
  return math.degrees(math.acos(self.coseno(A,B)))
 @corrector
 def resta(self,A,B):
  """Vector AB = B - A (B expressed with A as the new origin)."""
  return [B[0]-A[0],B[1]-A[1],B[2]-A[2]]
 @corrector
 def suma(self,A,B):
  """Component-wise sum A + B."""
  return [B[0]+A[0],B[1]+A[1],B[2]+A[2]]
 @corrector
 def medio(self,A,B):
  """Midpoint of segment AB (the original comment said "A+B", which was wrong)."""
  return [(B[0]+A[0])/2,(B[1]+A[1])/2,(B[2]+A[2])/2]
 @corrector
 def alt(self,A,B,C):
  """Foot of the altitude from A onto line BC (BC taken as the base of triangle ABC)."""
  ab=self.dist(A,B)
  bc=self.dist(B,C)
  BA=self.resta(B,A)
  BC=self.resta(B,C)
  cosB=self.coseno(BA,BC)
  # Project BA onto BC: walk |BA|*cos(B) along the unit vector of BC, from B.
  x=((BC[0]/bc)*(cosB*ab))+B[0]
  y=((BC[1]/bc)*(cosB*ab))+B[1]
  z=((BC[2]/bc)*(cosB*ab))+B[2]
  return [x,y,z]
 @corrector
 def rotar(self,rad,P):
  """Rotate point P counter-clockwise by `rad` radians about the origin (2-D)."""
  x=P[0]*math.cos(rad)-P[1]*math.sin(rad)
  y=P[0]*math.sin(rad)+P[1]*math.cos(rad)
  return x,y
 @corrector
 def trasladar(self,O,P):
  """Translate point P by offset O (component-wise)."""
  x=P[0]+O[0]
  y=P[1]+O[1]
  z=P[2]+O[2]
  return x, y ,z
|
dx_groups.py | #!/usr/bin/env python
# Adam Bowen - Aug 2017
# Description:
# This script will allow you to easily manage groups in Delphix
#
# Requirements
# pip install docopt delphixpy
# The below doc follows the POSIX compliant standards and allows us to use
# this doc to also define our arguments for the script.
"""Description
Usage:
dx_groups.py (--group_name <name> [--add | --delete])
[--engine <identifier> | --all]
[--debug] [--parallel <n>] [--poll <n>]
[--config <path_to_file>] [--logdir <path_to_file>]
dx_groups.py (--list)
[--engine <identifier> | --all]
[--debug] [--parallel <n>] [--poll <n>]
[--config <path_to_file>] [--logdir <path_to_file>]
dx_groups.py -h | --help | -v | --version
Description
Examples:
dx_groups.py --debug --config delphixpy-examples/dxtools_1.conf --group_name Test --add
dx_groups.py --config delphixpy-examples/dxtools_1.conf --group_name Test --delete
dx_groups.py --list
Options:
--group_name <name> The name of the group
--add Add the identified group
--delete Delete the identified group
--engine <type> Alt Identifier of Delphix engine in dxtools.conf.
--all Run against all engines.
--debug Enable debug logging
--parallel <n> Limit number of jobs to maxjob
--poll <n> The number of seconds to wait between job polls
[default: 10]
--config <path_to_file> The path to the dxtools.conf file
[default: ./dxtools.conf]
--logdir <path_to_file> The path to the logfile you want to use.
[default: ./dx_skel.log]
-h --help Show this screen.
-v --version Show version.
"""
from __future__ import print_function
VERSION = "v.0.0.002"
import sys
from os.path import basename
from time import sleep
from time import time
from docopt import docopt
from delphixpy.v1_8_0.exceptions import HttpError
from delphixpy.v1_8_0.exceptions import JobError
from delphixpy.v1_8_0.exceptions import RequestError
from delphixpy.v1_8_0.web import group
from delphixpy.v1_8_0.web import job
from delphixpy.v1_8_0.web.vo import Group
from lib.DlpxException import DlpxException
from lib.DxLogging import logging_est
from lib.DxLogging import print_debug
from lib.DxLogging import print_exception
from lib.DxLogging import print_info
from lib.GetReferences import find_all_objects
from lib.GetReferences import find_obj_by_name
from lib.GetSession import GetSession
def add_group(group_name):
    """
    Create a new group named *group_name* on the engine; exit(1) on failure.
    """
    new_group = Group()
    new_group.name = group_name
    try:
        group.create(dx_session_obj.server_session, new_group)
        print("Attempting to create {}".format(group_name))
    except (DlpxException, RequestError) as err:
        print_exception(
            "\nERROR: Creating the group {} "
            "encountered an error:\n{}".format(group_name, err)
        )
        sys.exit(1)
def delete_group(group_name):
    """
    Delete the group named *group_name* from the engine; exit(1) on failure.
    """
    target = find_obj_by_name(dx_session_obj.server_session, group, group_name)
    try:
        group.delete(dx_session_obj.server_session, target.reference)
        print("Attempting to delete {}".format(group_name))
    except (DlpxException, RequestError) as err:
        print_exception(
            "\nERROR: Deleting the group {} "
            "encountered an error:\n{}".format(group_name, err)
        )
        sys.exit(1)
def list_groups():
    """
    Print the name of every group on the engine, one per line.
    """
    for grp in find_all_objects(dx_session_obj.server_session, group):
        print("Group: {}".format(grp.name))
def run_async(func):
    """
    Decorator that makes *func* run in its own thread (asynchronously).

    Based on http://code.activestate.com/recipes/576684-simple-threading-decorator/

    The wrapped callable starts a threading.Thread executing *func* with
    the given arguments and immediately returns the Thread object, so the
    caller can later join it:

        @run_async
        def task1(): ...
        @run_async
        def task2(): ...

        t1 = task1()
        t2 = task2()
        t1.join()
        t2.join()
    """
    from functools import wraps
    from threading import Thread

    @wraps(func)
    def launcher(*args, **kwargs):
        worker = Thread(target=func, args=args, kwargs=kwargs)
        worker.start()
        return worker

    return launcher
@run_async
def main_workflow(engine):
    """
    Run the requested group operation (--add / --delete / --list) against
    one Delphix Engine, then poll its jobs until they finish.

    Decorated with @run_async so each engine is processed in its own
    thread; *engine* is one engine dictionary from dxtools.conf.
    """
    try:
        # Setup the connection to the Delphix Engine
        dx_session_obj.serversess(
            engine["ip_address"], engine["username"], engine["password"]
        )
    except DlpxException as e:
        # Bug fix: the original formatted arguments["--target"], an option
        # this script's usage block never defines, so the handler itself
        # raised KeyError and hid the real connection error.
        print_exception(
            "\nERROR: Engine {} encountered an error while "
            "processing the group operation:\n{}\n".format(engine["hostname"], e)
        )
        sys.exit(1)
    thingstodo = ["thingtodo"]
    try:
        with dx_session_obj.job_mode(single_thread):
            while len(dx_session_obj.jobs) > 0 or len(thingstodo) > 0:
                if len(thingstodo) > 0:
                    if arguments["--add"]:
                        add_group(arguments["--group_name"])
                    elif arguments["--delete"]:
                        delete_group(arguments["--group_name"])
                    elif arguments["--list"]:
                        list_groups()
                    thingstodo.pop()
                # get all the jobs, then inspect them
                i = 0
                # Snapshot the keys: entries are deleted inside the loop,
                # and mutating a dict while iterating its live keys() view
                # raises RuntimeError on Python 3.
                for j in list(dx_session_obj.jobs.keys()):
                    job_obj = job.get(
                        dx_session_obj.server_session, dx_session_obj.jobs[j]
                    )
                    print_debug(job_obj)
                    print_info(
                        "{}: Group: {}".format(engine["hostname"], job_obj.job_state)
                    )
                    if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
                        # If the job is in a non-running state, remove it
                        # from the running jobs list.
                        del dx_session_obj.jobs[j]
                    elif job_obj.job_state in "RUNNING":
                        # If the job is in a running state, increment the
                        # running job count.
                        i += 1
                print_info("{}: {:d} jobs running.".format(engine["hostname"], i))
                # If we have running jobs, pause before repeating the
                # checks.
                if len(dx_session_obj.jobs) > 0:
                    sleep(float(arguments["--poll"]))
    except (HttpError, RequestError, JobError, DlpxException) as e:
        print_exception("ERROR: Could not complete group " "operation: {}".format(e))
def run_job():
    """
    Launch main_workflow asynchronously against the requested engine(s).

    With --all, every engine in dxtools.conf gets its own thread; with
    --engine, only the named engine; otherwise the engine flagged
    default=true in dxtools.conf.  Blocks until every thread finishes.

    Raises DlpxException when the named engine is missing or no default
    engine exists.
    """
    # Threads created for each engine, joined at the end.
    threads = []
    engine = None
    if arguments["--all"]:
        print_info("Executing against all Delphix Engines in the dxtools.conf")
        try:
            # For each server in the dxtools.conf...
            for delphix_engine in dx_session_obj.dlpx_engines:
                # Bug fix: the original did dx_session_obj[delphix_engine];
                # GetSession is not subscriptable -- the engine dicts live
                # in dx_session_obj.dlpx_engines (as used below).
                engine = dx_session_obj.dlpx_engines[delphix_engine]
                # Create a new thread and add it to the list.
                threads.append(main_workflow(engine))
        except DlpxException as e:
            print("Error encountered in run_job():\n{}".format(e))
            sys.exit(1)
    elif arguments["--all"] is False:
        # Else if the --engine argument was given, test to see if the engine
        # exists in dxtools.conf
        if arguments["--engine"]:
            try:
                engine = dx_session_obj.dlpx_engines[arguments["--engine"]]
                print_info(
                    "Executing against Delphix Engine: {}\n".format(
                        (arguments["--engine"])
                    )
                )
            except (DlpxException, RequestError, KeyError) as e:
                raise DlpxException(
                    "\nERROR: Delphix Engine {} cannot be "
                    "found in {}. Please check your value "
                    "and try again. Exiting.\n".format(
                        arguments["--engine"], config_file_path
                    )
                )
        else:
            # Else search for a default engine in the dxtools.conf
            for delphix_engine in dx_session_obj.dlpx_engines:
                if dx_session_obj.dlpx_engines[delphix_engine]["default"] == "true":
                    engine = dx_session_obj.dlpx_engines[delphix_engine]
                    print_info(
                        "Executing against the default Delphix Engine "
                        "in the dxtools.conf: {}".format(
                            dx_session_obj.dlpx_engines[delphix_engine]["hostname"]
                        )
                    )
                    break
        # Identity test (`is None`) instead of the original `== None`.
        if engine is None:
            raise DlpxException("\nERROR: No default engine found. Exiting")
        # run the job against the engine
        threads.append(main_workflow(engine))
    # For each thread in the list, join so we wait for all threads to
    # complete before moving on.
    for each in threads:
        each.join()
def time_elapsed():
    """
    Return the minutes elapsed (rounded to one decimal place) since the
    script started.  Relies on the module-global `time_start` set in main().
    """
    elapsed_seconds = time() - time_start
    return round(elapsed_seconds / 60, 1)
def main(arguments):
    """
    Script entry point: establish the session, parse dxtools.conf,
    dispatch run_job() and report elapsed time.

    arguments: the dict produced by docopt from the usage text at the top
    of the file.  Exit codes: 1 for config/connection/unknown errors,
    3 for a failed Delphix job.
    """
    # We want to be able to call on these variables anywhere in the script.
    global single_thread
    global usebackup  # NOTE(review): never assigned or read in view -- possibly dead.
    global time_start
    global config_file_path
    global dx_session_obj
    global debug
    if arguments["--debug"]:
        debug = True
    try:
        dx_session_obj = GetSession()
        logging_est(arguments["--logdir"])
        print_debug(arguments)
        time_start = time()
        single_thread = False
        config_file_path = arguments["--config"]
        # Parse the dxtools.conf and put it into a dictionary
        dx_session_obj.get_config(config_file_path)
        # This is the function that will handle processing main_workflow for
        # all the servers.
        run_job()
        elapsed_minutes = time_elapsed()
        print_info(
            "script took {:.2f} minutes to get this far.".format(elapsed_minutes)
        )
    # Here we handle what we do when the unexpected happens
    except DlpxException as e:
        print_exception(
            "script encountered an error while processing the"
            "config file:\n{}".format(e)
        )
    except SystemExit as e:
        """
        This is what we use to handle our sys.exit(#)
        """
        sys.exit(e)
    except HttpError as e:
        """
        We use this exception handler when our connection to Delphix fails
        """
        print_exception(
            "Connection failed to the Delphix Engine"
            "Please check the ERROR message:\n{}".format(e)
        )
        sys.exit(1)
    except JobError as e:
        """
        We use this exception handler when a job fails in Delphix so that
        we have actionable data
        """
        elapsed_minutes = time_elapsed()
        print_exception("A job failed in the Delphix Engine")
        print_info(
            "{} took {:.2f} minutes to get this far\n{}".format(
                basename(__file__), elapsed_minutes, e
            )
        )
        sys.exit(3)
    except KeyboardInterrupt:
        """
        We use this exception handler to gracefully handle ctrl+c exits
        """
        print_debug("You sent a CTRL+C to interrupt the process")
        elapsed_minutes = time_elapsed()
        print_info(
            "{} took {:.2f} minutes to get this far\n".format(
                basename(__file__), elapsed_minutes
            )
        )
    # NOTE(review): bare except -- catches absolutely everything (including
    # GeneratorExit); consider narrowing to `except Exception`.
    except:
        """
        Everything else gets caught here
        """
        print_exception(sys.exc_info()[0])
        elapsed_minutes = time_elapsed()
        print_info(
            "{} took {:.2f} minutes to get this far\n".format(
                basename(__file__), elapsed_minutes
            )
        )
        sys.exit(1)
if __name__ == "__main__":
    # Parse our arguments from the docopt usage block at the top of the script.
    arguments = docopt(__doc__, version=basename(__file__) + " " + VERSION)
    # Feed our arguments to the main function, and off we go!
    main(arguments)
|
bot.py | # -*- coding: utf-8 -*-
import YYK
import requests
from YYK.lib.curve.ttypes import *
from datetime import datetime
import time,datetime,random,sys,re,os,json,subprocess,codecs,threading,glob
# Primary (command) account; all commands are issued through `cl`.
# Each login(qr=True) presents a QR code that must be scanned from the
# LINE mobile app before loginResult() completes, so startup is interactive.
cl = YYK.LINE()
cl.login(qr=True)
cl.loginResult()
# Ten additional helper/protect accounts (ki .. ki10), logged in the same way.
ki = YYK.LINE()
ki.login(qr=True)
ki.loginResult()
ki2 = YYK.LINE()
ki2.login(qr=True)
ki2.loginResult()
ki3 = YYK.LINE()
ki3.login(qr=True)
ki3.loginResult()
ki4 = YYK.LINE()
ki4.login(qr=True)
ki4.loginResult()
ki5 = YYK.LINE()
ki5.login(qr=True)
ki5.loginResult()
ki6 = YYK.LINE()
ki6.login(qr=True)
ki6.loginResult()
ki7 = YYK.LINE()
ki7.login(qr=True)
ki7.loginResult()
ki8 = YYK.LINE()
ki8.login(qr=True)
ki8.loginResult()
ki9 = YYK.LINE()
ki9.login(qr=True)
ki9.loginResult()
ki10 = YYK.LINE()
ki10.login(qr=True)
ki10.loginResult()
# Python 2 only: re-expose setdefaultencoding and force UTF-8 so the
# emoji/Unicode-heavy strings in this script don't raise encode errors.
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage ="""—(••÷[ [̲̲̅̅PROTECTBOT] ]÷••)—
••¤(`×[¤ c͓̽o͓̽m͓̽m͓̽a͓̽n͓̽d͓̽ ¤]×´)¤••
☫[Mid @] Cek mid by tag
☫[Gift-Giftt] Fake Gift
☫[Mid] My mid
☫[All mid] All bot mid
☫[Mybio:「Text」] Update bio
☫[MyName:「Text」] Update name
☫[Allname:「Text」] Update name all bot
☫[Allbio:「Text」] Update all bio bot
☫[Mid:「mid」] Send Contact by mid
☫[Blocklist] Block contact list
☫[Creator] Creator bot
☫[Pesan set:「Text」]
☫[All bot] Contact semua bot
☫[Respon] Cek respon bot
☫[Ping] Spam chat
☫[999+ 「Text」] Spam chat 999+
☫[Bot say] Semua bot mengatakan hal yang diperintahkan
☫[Welcome] Greetings member baru
☫[Sp/Speed] Cek speed respon bot
☫[Absen] Absen semua bot
☫[Tagall] Tag semua member grup
☫[Kick:「mid」] Kick by mid
☫[Invite:「mid」] Invite by mid
☫[Cancel] Cancel all invitation
☫[Oqr/Cqr] Open/Close QR
☫[Banlist] Banned List
☫[Ginfo] Grup info
☫[All join] Semua bot join
☫[All out] Semua bot out
☫[Gn 「Nama Grup」] Ganti nama grup
☫[Clone 1-10] Meniru contact
☫[Clone all] Meniru contact pada semua bot
☫[Url] Link grup
☫[Kick ] Kick by tag
☫[Ban ] Ban by tag
☫[Unban ] Unban by tag
☫[Ban:] Ban by mid
☫[Unban:] Unban by mid
☫[Ratakan] Musnahkan grup
☫[Set] Cek status command protect/command auto
i͓̽{•--» p͓̽r͓̽o͓̽t͓̽e͓̽c͓̽t͓̽ c͓̽o͓̽m͓̽m͓̽a͓̽n͓̽d͓̽ «--•}i͓̽
☫[Protect 「On/Off」]
☫[Qr 「On/Off」]
☫[Cancel 「On/Off」]
☫[Invite 「On/Off」]
☫[Group Cancel:] Cancel member invitation berikutnya
i͓̽{•--» a͓̽u͓̽t͓̽o͓̽ c͓̽o͓̽m͓̽m͓̽a͓̽n͓̽d͓̽ «--•}i͓̽
☫[Auto add 「On/Off」]
☫[Auto cancel 「On/Off」]
☫[Auto leave 「On/Off」]
☫[Auto join 「On/Off」]
☫[Share 「On/Off」]
☫[Contact 「On/Off」]
(っ◔◡◔)っ ♥ HOPE YOU LIKE IT ♥
"""
# Every logged-in client, primary first; used to broadcast actions to all bots.
KAC=[cl,ki,ki2,ki3,ki4,ki5,ki6,ki7,ki8,ki9,ki10]
# Cache each account's mid (LINE user id) so handlers can recognise the bots.
mid = cl.getProfile().mid
kimid = ki.getProfile().mid
ki2mid = ki2.getProfile().mid
ki3mid = ki3.getProfile().mid
ki4mid = ki4.getProfile().mid
ki5mid = ki5.getProfile().mid
ki6mid = ki6.getProfile().mid
ki7mid = ki7.getProfile().mid
ki8mid = ki8.getProfile().mid
ki9mid = ki9.getProfile().mid
ki10mid = ki10.getProfile().mid
Bots=[mid,kimid,ki2mid,ki3mid,ki4mid,ki5mid,ki6mid,ki7mid,ki8mid,ki9mid,ki10mid]
# Hard-coded mids allowed to drive the bot (admin == admsa here).
admin = ["udc7ce8646bbe9a2c6b32ff4d0ba586e1","u74927467b80b399ca6e5c9082dd910b9"]
admsa = ["udc7ce8646bbe9a2c6b32ff4d0ba586e1","u74927467b80b399ca6e5c9082dd910b9"]
# Global feature switches and per-feature state, toggled at runtime by chat
# commands (protect flags, auto add/join, greeting message, blacklists, ...).
wait = {
'contact':False,
'autoJoin':False,
'autoCancel':{"on":True,"members":3},
'leaveRoom':True,
'timeline':True,
'autoAdd':True,
'message':"""Thanks for adding me as a friend
≫ If i not answer just spam≪
≫ Slow respon at 8pm till 6pm ≪
Ready:
≫ bot protect ≪
≫ SelfBot ≪
(っ◔◡◔)っ """,
"lang":"JP",
"comment":"Auto Like by @",
"commentOn":True,
"commentBlack":{},
"likeOn":True,
"wblack":False,
"dblack":False,
"clock":False,
"cName":"",
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"protect":False,
"cancelprotect":False,
"inviteprotect":False,
"linkprotect":False,
}
# Read-receipt tracking state: read point, reader names, timestamps per group.
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
# Profile-mimic ("clone") feature state.
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
setTime = {}
# NOTE(review): this immediately rebinds setTime to the dict stored inside
# wait2, discarding the empty dict created on the previous line.
setTime = wait2['setTime']
def cms(string, commands): #/XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...
    """Return True when *string* exactly matches one of *commands*.

    Bug fix: the original also looped over a list of prefix characters
    ("+", "@", "/", ...) but never used the loop variable, so it merely
    repeated the identical membership test 13 times.  The dead outer loop
    is removed; if prefix matching (e.g. "/cmd") was the intent, that
    still needs to be implemented explicitly.
    """
    for command in commands:
        if string == command:
            return True
    return False
def upload_tempimage(client):
    '''
    Upload a picture of a kitten. We don't ship one, so get creative!

    NOTE(review): `album` and `image_path` are not defined anywhere in
    this part of the file -- they must be module globals set elsewhere,
    otherwise this raises NameError.  `client` looks like an Imgur-style
    client exposing upload_from_path -- confirm before reuse.
    '''
    config = {
        'album': album,
        'name': 'bot auto upload',
        'title': 'bot auto upload',
        'description': 'bot auto upload'
    }
    print("Uploading image... ")
    image = client.upload_from_path(image_path, config=config, anon=False)
    print("Done")
    print()
    return image
def sendMessage(to, text, contentMetadata={}, contentType=0):
    """Build a Message addressed to *to* and bump the per-chat request counter.

    NOTE(review): the constructed Message is never handed to any client --
    no send call is made -- and `Message`, `profile` and `messageReq` are
    not defined in this part of the file.  This looks like an incomplete
    copy of a library helper; confirm whether it is ever called.
    """
    mes = Message()
    mes.to, mes.from_ = to, profile.mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    # messageReq tracks a per-recipient sequence number starting at 0.
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def sendImage(self, to_, path):
    """Send the local image file at *path* to *to_* via LINE's OBS upload
    endpoint, then return True.

    Bug fixed: the original opened *path* twice (once for the upload payload,
    once just to measure its size with ``len(open(path,'rb').read())``) and
    never closed either handle — a file-descriptor leak on every call.  The
    file is now read exactly once inside a context manager.

    :param to_: recipient mid
    :param path: local filesystem path of the image
    :raises Exception: if the upload endpoint does not answer 201
    """
    M = Message(to=to_, text=None, contentType = 1)
    M.contentMetadata = None
    M.contentPreview = None
    M2 = self._client.sendMessage(0,M)
    M_id = M2.id
    # Read once; reuse the bytes for both the size field and the upload body.
    with open(path, 'rb') as fh:
        image_data = fh.read()
    files = {
        'file': image_data,
    }
    params = {
        'name': 'media',
        'oid': M_id,
        'size': len(image_data),
        'type': 'image',
        'ver': '1.0',
    }
    data = {
        'params': json.dumps(params)
    }
    r = self.post_content('https://obs-sg.line-apps.com/talk/m/upload.nhn', data=data, files=files)
    if r.status_code != 201:
        raise Exception('Upload image failure.')
    return True
def sendImageWithURL(self, to_, url):
    """Download the image at *url* to a temp file and send it via
    :func:`sendImage`, retrying the send once on failure.

    Bug fixed: the downloaded (binary) image was written with mode ``'w'``
    (text mode), which corrupts binary data — and on Python 3 raises a
    TypeError when ``shutil.copyfileobj`` writes bytes.  The file is now
    opened in ``'wb'``.

    :param to_: recipient mid
    :param url: HTTP(S) URL of the image to fetch
    :raises Exception: if the download does not answer 200, or if both
        send attempts fail (the second attempt's exception is re-raised)
    """
    # NOTE(review): randint(0, 9) gives only 10 possible temp names, so
    # concurrent calls can collide; kept as-is to preserve behavior.
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        with open(path, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
    else:
        raise Exception('Download image failure.')
    try:
        self.sendImage(to_, path)
    except:
        # One blind retry for transient upload failures; surface the error
        # only if the retry also fails.
        try:
            self.sendImage(to_, path)
        except Exception as e:
            raise e
def cloneContactProfile(self, mid):
    """Copy another user's display name, status message and picture onto
    this account's own profile, and return the update result.

    :param mid: mid of the contact whose profile should be cloned
    """
    target = self.getContact(mid)
    own = self.getProfile()
    own.displayName = target.displayName
    own.statusMessage = target.statusMessage
    own.pictureStatus = target.pictureStatus
    # Picture must be pushed through the dedicated endpoint before the
    # profile-field update is submitted.
    self.updateDisplayPicture(own.pictureStatus)
    return self.updateProfile(own)
def NOTIFIED_READ_MESSAGE(op):
    """Record who has read messages in chats being tracked for read receipts.

    When a read event arrives for a chat present in ``wait2['readPoint']``,
    the reader's display name is appended (once) to that chat's
    ``readMember`` string and stored in its ``ROM`` map.  All failures are
    swallowed, matching the original best-effort behavior.
    """
    try:
        chat_id = op.param1
        if chat_id not in wait2['readPoint']:
            return
        reader = cl.getContact(op.param2).displayName
        if reader in wait2['readMember'][chat_id]:
            return
        # NOTE(review): the "�9�9" prefixes are mojibake inherited from the
        # original source (likely a corrupted bullet glyph); preserved as-is.
        wait2['readMember'][chat_id] += "\n�9�9" + reader
        wait2['ROM'][chat_id][op.param2] = "�9�9" + reader
    except:
        pass
def bot(op):
try:
if op.type == 0:
return
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 19:
if mid in op.param3:
wait["blacklist"][op.param2] = True
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == ["udc7ce8646bbe9a2c6b32ff4d0ba586e1","u74927467b80b399ca6e5c9082dd910b9"]:
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
G = cl.getGroup(list_[1])
G.preventJoinByTicket = True
cl.updateGroup(G)
except:
cl.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"sudah masuk daftar hitam")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"Itu tidak berkomentar")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Done")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"Tidak ada dalam daftar hitam")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"sudah masuk daftar hitam")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"Done")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Done")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"Done")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "menempatkan URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text == "Help":
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpMessage)
elif ("Gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif ("Bot1 gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.name = msg.text.replace("Bot1 gn ","")
ki.updateGroup(X)
else:
ki.sendText(msg.to,"It can't be used besides the group.")
elif ("Bot2 gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.name = msg.text.replace("Bot2 gn ","")
ki2.updateGroup(X)
else:
ki2.sendText(msg.to,"It can't be used besides the group.")
elif ("Bot3 gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.name = msg.text.replace("Bot3 gn ","")
ki3.updateGroup(X)
else:
ki3.sendText(msg.to,"It can't be used besides the group.")
elif ("Bot4 gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.name = msg.text.replace("Bot4 gn ","")
ki4.updateGroup(X)
else:
ki4.sendText(msg.to,"It can't be used besides the group.")
elif ("Bot5 gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.name = msg.text.replace("Bot5 gn ","")
ki5.updateGroup(X)
else:
ki5.sendText(msg.to,"It can't be used besides the group.")
elif ("Bot6 gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.name = msg.text.replace("Bot6 gn ","")
ki6.updateGroup(X)
else:
ki6.sendText(msg.to,"It can't be used besides the group.")
elif ("Bot7 gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.name = msg.text.replace("Bot7 gn ","")
ki7.updateGroup(X)
else:
ki7.sendText(msg.to,"It can't be used besides the group.")
elif ("Bot8 gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.name = msg.text.replace("Bot8 gn ","")
ki8.updateGroup(X)
else:
ki8.sendText(msg.to,"It can't be used besides the group.")
elif ("Bot9 gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.name = msg.text.replace("Bot9 gn ","")
ki9.updateGroup(X)
else:
ki9.sendText(msg.to,"It can't be used besides the group.")
elif ("Bot10 gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.name = msg.text.replace("Bot10 gn ","")
ki9.updateGroup(X)
else:
ki9.sendText(msg.to,"It can't be used besides the group.")
elif "Kick:" in msg.text:
midd = msg.text.replace("Kick:","")
cl.kickoutFromGroup(msg.to,[midd])
elif 'Invite: ' in msg.text:
key = msg.text[-33:]
cl.findAndAddContactsByMid(key)
cl.inviteIntoGroup(msg.to, [key])
contact = cl.getContact(key)
elif msg.text == 'My bot':
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki2mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki3mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki4mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki5mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki6mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki7mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki8mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki9mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki10mid}
cl.sendMessage(msg)
elif msg.text == 'Absen bot':
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki2mid}
ki2.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki3mid}
ki3.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki4mid}
ki4.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki5mid}
ki5.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki6mid}
ki6.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki7mid}
ki7.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki8mid}
ki8.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki9mid}
ki9.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki10mid}
ki10.sendMessage(msg)
elif msg.text == 'Bot':
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
elif msg.text == 'Bot1':
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
ki.sendMessage(msg)
elif msg.text == 'Bot2':
msg.contentType = 13
msg.contentMetadata = {'mid': ki2mid}
ki2.sendMessage(msg)
elif msg.text == 'Bot3':
msg.contentType = 13
msg.contentMetadata = {'mid': ki3mid}
ki3.sendMessage(msg)
elif msg.text == 'Bot4':
msg.contentType = 13
msg.contentMetadata = {'mid': ki4mid}
ki4.sendMessage(msg)
elif msg.text == 'Bot5':
msg.contentType = 13
msg.contentMetadata = {'mid': ki5mid}
ki5.sendMessage(msg)
elif msg.text == 'Bot6':
msg.contentType = 13
msg.contentMetadata = {'mid': ki6mid}
ki6.sendMessage(msg)
elif msg.text == 'Bot7':
msg.contentType = 13
msg.contentMetadata = {'mid': ki7mid}
ki7.sendMessage(msg)
elif msg.text == 'Bot8':
msg.contentType = 13
msg.contentMetadata = {'mid': ki8mid}
ki8.sendMessage(msg)
elif msg.text == 'Bot9':
msg.contentType = 13
msg.contentMetadata = {'mid': ki9mid}
ki9.sendMessage(msg)
elif msg.text == 'Bot10':
msg.contentType = 13
msg.contentMetadata = {'mid': ki10mid}
ki10.sendMessage(msg)
elif msg.text == 'Gift':
msg.contentType = 9
msg.contentMetadata={'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '5',
'STKPKGID': '1380280'}
msg.text = None
cl.sendMessage(msg)
elif msg.text == 'Giftt':
msg.contentType = 9
msg.contentMetadata={'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '5',
'STKPKGID': '1319678'}
msg.text = None
cl.sendMessage(msg)
elif msg.text == 'Cancel':
if msg.toType == 2:
group = cl.getGroup(msg.to)
if group.invitee is not None:
gInviMids = [contact.mid for contact in group.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tidak ada undangan")
else:
cl.sendText(msg.to,"Invitan tidak ada")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tidak ada undangan")
else:
cl.sendText(msg.to,"Invitan tidak ada")
elif msg.text == 'Oqr':
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.preventJoinByTicket = False
cl.updateGroup(group)
if wait["lang"] == "JP":
ki.sendText(msg.to,"QR Dibuka")
else:
ki.sendText(msg.to,"URL open")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"It can not be used outside the group")
else:
ki.sendText(msg.to,"Can not be used for groups other than")
elif msg.text == 'Cqr':
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.preventJoinByTicket = True
cl.updateGroup(group)
if wait["lang"] == "JP":
ki2.sendText(msg.to,"QR Ditutup")
else:
ki2.sendText(msg.to,"QR Ditutup")
else:
if wait["lang"] == "JP":
ki2.sendText(msg.to,"It can not be used outside the group")
else:
ki2.sendText(msg.to,"Can not be used for groups other than")
elif msg.text.lower() == 'inv:gcreator':
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
gcmid = ginfo.creator.mid
except:
gcmid = "Error"
if wait["lang"] == "JP":
cl.inviteIntoGroup(msg.to,[gcmid])
else:
cl.inviteIntoGroup(msg.to,[gcmid])
elif msg.text.lower() == 'ginfo':
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
msg.contentType = 13
msg.contentMetadata = {'mid': ginfo.creator.mid}
cl.sendText(msg.to,"[Nama Grup]\n" + str(ginfo.name) + "\n\n[Group Id]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\nAnggota:" + str(len(ginfo.members)) + "\nInvitation:" + sinvitee + "")
cl.sendMessage(msg)
elif msg.text == 'Mid':
cl.sendText(msg.to,mid)
elif msg.text.lower() == 'kb mid':
ki.sendText(msg.to,kimid)
elif msg.text.lower() == 'kb2 mid':
ki2.sendText(msg.to,ki2mid)
elif msg.text.lower() == 'kb3 mid':
ki3.sendText(msg.to,ki3mid)
elif msg.text.lower() == 'kb4 mid':
ki4.sendText(msg.to,ki4mid)
elif msg.text.lower() == 'kb5 mid':
ki5.sendText(msg.to,ki5mid)
elif "All mid" == msg.text:
ki.sendText(msg.to,kimid)
ki2.sendText(msg.to,ki2mid)
ki3.sendText(msg.to,ki3mid)
ki4.sendText(msg.to,ki4mid)
ki5.sendText(msg.to,ki5mid)
ki6.sendText(msg.to,ki6mid)
ki7.sendText(msg.to,ki7mid)
ki8.sendText(msg.to,ki8mid)
ki9.sendText(msg.to,ki9mid)
ki10.sendText(msg.to,ki10mid)
ki11.sendText(msg.to,ki11mid)
ki12.sendText(msg.to,ki12mid)
ki13.sendText(msg.to,ki13mid)
ki14.sendText(msg.to,ki14mid)
ki15.sendText(msg.to,ki15mid)
ki16.sendText(msg.to,ki16mid)
ki17.sendText(msg.to,ki16mid)
elif "TL:" in msg.text:
tl_text = msg.text.replace("TL:","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif "Allname:" in msg.text:
string = msg.text.replace("All:","")
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki2.getProfile()
profile.displayName = string
ki2.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki3.getProfile()
profile.displayName = string
ki3.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki4.getProfile()
profile.displayName = string
ki4.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki5.getProfile()
profile.displayName = string
ki5.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki6.getProfile()
profile.displayName = string
ki6.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki7.getProfile()
profile.displayName = string
ki7.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki8.getProfile()
profile.displayName = string
ki8.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki9.getProfile()
profile.displayName = string
ki9.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki10.getProfile()
profile.displayName = string
ki10.updateProfile(profile)
cl.sendText(msg.to,"Nama berubah menjadi " + string + "")
elif "Allbio:" in msg.text:
string = msg.text.replace("Allbio:","")
if len(string.decode('utf-8')) <= 500:
profile = ki.getProfile()
profile.statusMessage = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki2.getProfile()
profile.statusMessage = string
ki2.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki3.getProfile()
profile.statusMessage = string
ki3.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki4.getProfile()
profile.statusMessage = string
ki4.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki5.getProfile()
profile.statusMessage = string
ki5.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki6.getProfile()
profile.statusMessage = string
ki6.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki7.getProfile()
profile.statusMessage = string
ki7.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki8.getProfile()
profile.statusMessage = string
ki8.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki9.getProfile()
profile.statusMessage = string
ki9.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki10.getProfile()
profile.statusMessage = string
ki10.updateProfile(profile)
cl.sendText(msg.to,"Bio berubah menjadi " + string + "")
elif "Myname:" in msg.text:
string = msg.text.replace("Myname:","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Update Names Menjadi : " + string + "")
#---------------------------------------------------------
elif "1name:" in msg.text:
string = msg.text.replace("1name:","")
if len(string.decode('utf-8')) <= 30:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendText(msg.to,"Update Names Menjadi :" + string + "")
#--------------------------------------------------------
elif "2name:" in msg.text:
string = msg.text.replace("2name:","")
if len(string.decode('utf-8')) <= 30:
profile = ki2.getProfile()
profile.displayName = string
ki2.updateProfile(profile)
ki2.sendText(msg.to,"Update Names Menjadi :" + string + "")
#--------------------------------------------------------
elif "3name:" in msg.text:
string = msg.text.replace("3name:","")
if len(string.decode('utf-8')) <= 30:
profile = ki3.getProfile()
profile.displayName = string
ki3.updateProfile(profile)
ki3.sendText(msg.to,"Update Names Menjadi :" + string + "")
#--------------------------------------------------------
elif "4name:" in msg.text:
string = msg.text.replace("4name:","")
if len(string.decode('utf-8')) <= 30:
profile = ki4.getProfile()
profile.displayName = string
ki4.updateProfile(profile)
ki4.sendText(msg.to,"Update Names Menjadi :" + string + "")
elif "Mybio:" in msg.text:
string = msg.text.replace("Mybio:","")
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Update Bio👉" + string + "")
#--------------------------------------------------------
elif "5name:" in msg.text:
string = msg.text.replace("5name:","")
if len(string.decode('utf-8')) <= 30:
profile = ki5.getProfile()
profile.displayName = string
ki5.updateProfile(profile)
ki5.sendText(msg.to,"Update Names Menjadi :" + string + "")
#--------------------------------------------------------
elif "6name:" in msg.text:
string = msg.text.replace("6name:","")
if len(string.decode('utf-8')) <= 30:
profile = ki6.getProfile()
profile.displayName = string
ki6.updateProfile(profile)
ki6.sendText(msg.to,"Update Names Menjadi :" + string + "")
#--------------------------------------------------------
elif "7name:" in msg.text:
string = msg.text.replace("7name:","")
if len(string.decode('utf-8')) <= 30:
profile = ki7.getProfile()
profile.displayName = string
ki7.updateProfile(profile)
ki7.sendText(msg.to,"Update Names Menjadi :" + string + "")
#--------------------------------------------------------
elif "8name:" in msg.text:
string = msg.text.replace("8name:","")
if len(string.decode('utf-8')) <= 30:
profile = ki8.getProfile()
profile.displayName = string
ki8.updateProfile(profile)
ki8.sendText(msg.to,"Update Names Menjadi :" + string + "")
#--------------------------------------------------------
elif "9name:" in msg.text:
string = msg.text.replace("9name:","")
if len(string.decode('utf-8')) <= 30:
profile = ki9.getProfile()
profile.displayName = string
ki9.updateProfile(profile)
ki9.sendText(msg.to,"Update Names Menjadi :" + string + "")
#--------------------------------------------------------
elif "10name:" in msg.text:
string = msg.text.replace("10name:","")
if len(string.decode('utf-8')) <= 30:
profile = ki10.getProfile()
profile.displayName = string
ki10.updateProfile(profile)
ki10.sendText(msg.to,"Update Names Menjadi :" + string + "")
#--------------------------------------------------------
elif "11names:" in msg.text:
string = msg.text.replace("11names:","")
if len(string.decode('utf-8')) <= 30:
profile = ki11.getProfile()
profile.displayName = string
ki11.updateProfile(profile)
ki11.sendText(msg.to,"Update Names Menjadi :" + string + "")
elif "12names:" in msg.text:
string = msg.text.replace("12names:","")
if len(string.decode('utf-8')) <= 30:
profile = ki12.getProfile()
profile.displayName = string
ki12.updateProfile(profile)
ki12.sendText(msg.to,"Update Names Menjadi :" + string + "")
elif "13names:" in msg.text:
string = msg.text.replace("13names:","")
if len(string.decode('utf-8')) <= 30:
profile = ki13.getProfile()
profile.displayName = string
ki13.updateProfile(profile)
ki13.sendText(msg.to,"Update Names Menjadi :" + string + "")
elif "14names:" in msg.text:
string = msg.text.replace("14names:","")
if len(string.decode('utf-8')) <= 30:
profile = ki14.getProfile()
profile.displayName = string
ki14.updateProfile(profile)
ki14.sendText(msg.to,"Update Names Menjadi :" + string + "")
elif "15names:" in msg.text:
string = msg.text.replace("15names:","")
if len(string.decode('utf-8')) <= 30:
profile = ki15.getProfile()
profile.displayName = string
ki15.updateProfile(profile)
ki15.sendText(msg.to,"Update Names Menjadi :" + string + "")
elif "16names:" in msg.text:
string = msg.text.replace("16names:","")
if len(string.decode('utf-8')) <= 30:
profile = ki16.getProfile()
profile.displayName = string
ki16.updateProfile(profile)
ki16.sendText(msg.to,"Update Names Menjadi :" + string + "")
elif "17names:" in msg.text:
string = msg.text.replace("17names:","")
if len(string.decode('utf-8')) <= 30:
profile = ki17.getProfile()
profile.displayName = string
ki17.updateProfile(profile)
ki17.sendText(msg.to,"Update Names Menjadi :" + string + "")
elif "18names:" in msg.text:
string = msg.text.replace("18names:","")
if len(string.decode('utf-8')) <= 30:
profile = ki18.getProfile()
profile.displayName = string
ki18.updateProfile(profile)
ki18.sendText(msg.to,"Update Names Menjadi :" + string + "")
#--------------------------------------------------------
elif "Mid:" in msg.text:
mmid = msg.text.replace("Mid:","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
elif msg.text == 'Contact on':
if wait["contact"] == True:
if wait["lang"] == "JP":
ki.sendText(msg.to,"contact set to on")
else:
ki.sendText(msg.to,"contact already on")
else:
wait["contact"] = True
if wait["lang"] == "JP":
ki.sendText(msg.to,"contact set to on")
else:
ki.sendText(msg.to,"contact already on")
elif msg.text == 'Contact on':
if wait["contact"] == False:
if wait["lang"] == "JP":
ki2.sendText(msg.to,"contact set to off")
else:
ki2.sendText(msg.to,"contact already off")
else:
wait["contact"] = False
if wait["lang"] == "JP":
ki2.sendText(msg.to,"contact set to off")
else:
ki2.sendText(msg.to,"contact already off")
elif msg.text == 'Protect on':
if wait["protect"] == True:
if wait["lang"] == "JP":
ki3.sendText(msg.to,"Protection set to on")
else:
ki3.sendText(msg.to,"Protection already on")
else:
wait["protect"] = True
if wait["lang"] == "JP":
ki3.sendText(msg.to,"Protection set to on")
else:
ki3.sendText(msg.to,"Protection already on")
elif msg.text == 'Qr on':
if wait["linkprotect"] == True:
if wait["lang"] == "JP":
ki4.sendText(msg.to,"Protection Qr set to on")
else:
ki4.sendText(msg.to,"Protection Qr already on")
else:
wait["linkprotect"] = True
if wait["lang"] == "JP":
ki4.sendText(msg.to,"Protection Qr set to on")
else:
ki4.sendText(msg.to,"Protection Qr already on")
elif msg.text == 'Invite on':
if wait["inviteprotect"] == True:
if wait["lang"] == "JP":
ki5.sendText(msg.to,"Protection Invite set to on")
else:
ki5.sendText(msg.to,"Protection Invite already on")
else:
wait["inviteprotect"] = True
if wait["lang"] == "JP":
ki5.sendText(msg.to,"Protection Invite set to on")
else:
ki5.sendText(msg.to,"Protection Invite already on")
elif msg.text == 'Cancel on':
if wait["cancelprotect"] == True:
if wait["lang"] == "JP":
ki6.sendText(msg.to,"Cancel Protection set to on")
else:
ki6.sendText(msg.to,"Cancel Protection already on")
else:
wait["cancelprotect"] = True
if wait["lang"] == "JP":
ki6.sendText(msg.to,"Cancel Protection set to on")
else:
ki6.sendText(msg.to,"Cancel Protection already on")
elif msg.text == 'Auto join on':
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
ki7.sendText(msg.to,"Autojoin set to on")
else:
ki7.sendText(msg.to,"Autojoin already on")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
ki7.sendText(msg.to,"Autojoin set to on")
else:
ki7.sendText(msg.to,"Autojoin already on")
elif msg.text.lower() == 'blocklist':
blockedlist = cl.getBlockedContactIds()
cl.sendText(msg.to, "Please wait...")
kontak = cl.getContacts(blockedlist)
num=1
msgs="User Blocked List\n"
for ids in kontak:
msgs+="\n%i. %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n\nTotal %i blocked user(s)" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text == 'Auto join off':
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
ki8.sendText(msg.to,"Autojoin set to off")
else:
ki8.sendText(msg.to,"Autojoin already off")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
ki8.sendText(msg.to,"Autojoin set to off")
else:
ki8.sendText(msg.to,"Autojoin already off")
elif msg.text == 'Protect off':
if wait["protect"] == False:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Protection set to off")
else:
ki.sendText(msg.to,"Protection already off")
else:
wait["protect"] = False
if wait["lang"] == "JP":
ki.sendText(msg.to,"Protection set to off")
else:
ki.sendText(msg.to,"Protection already off")
elif msg.text == 'Qr off':
if wait["linkprotect"] == False:
if wait["lang"] == "JP":
ki2.sendText(msg.to,"Protection Qr set to off")
else:
ki2.sendText(msg.to,"Protection Qr already off")
else:
wait["linkprotect"] = False
if wait["lang"] == "JP":
ki2.sendText(msg.to,"Protection Qr set to off")
else:
ki2.sendText(msg.to,"Protection Qr already off")
elif msg.text == 'Invite off':
if wait["inviteprotect"] == False:
if wait["lang"] == "JP":
ki3.sendText(msg.to,"Protection Invite set to off")
else:
ki3.sendText(msg.to,"Protection Invite already off")
else:
wait["inviteprotect"] = False
if wait["lang"] == "JP":
ki3.sendText(msg.to,"Protection Invite set to off")
else:
ki3.sendText(msg.to,"Protection Invite already off")
elif msg.text == 'Cancel off':
if wait["cancelprotect"] == False:
if wait["lang"] == "JP":
ki4.sendText(msg.to,"Cancel Protection Invite set to off")
else:
ki4.sendText(msg.to,"Cancel Protection Invite already off")
else:
wait["cancelprotect"] = False
if wait["lang"] == "JP":
ki4.sendText(msg.to,"Cancel Protection Invite set to off")
else:
ki4.sendText(msg.to,"Cancel Protection Invite already off")
elif "Group cancel:" in msg.text:
try:
strnum = msg.text.replace("Group cancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Itu off undangan ditolak👈\nSilakan kirim dengan menentukan jumlah orang ketika Anda menghidupkan")
else:
cl.sendText(msg.to,"Off undangan ditolak👈Sebutkan jumlah terbuka ketika Anda ingin mengirim")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + "Kelompok berikut yang diundang akan ditolak secara otomatis")
else:
cl.sendText(msg.to,strnum + "The team declined to create the following automatic invitation")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Nilai tidak benar")
else:
cl.sendText(msg.to,"Weird value🛡")
elif msg.text == 'Auto leave on':
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
ki5.sendText(msg.to,"Auto Leave room set to on")
else:
ki5.sendText(msg.to,"Auto Leave room already on")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
ki5.sendText(msg.to,"Auto Leave room set to on")
else:
ki5.sendText(msg.to,"Auto Leave room already on")
elif msg.text == 'Auto leave off':
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
ki6.sendText(msg.to,"Auto Leave room set to off")
else:
ki6.sendText(msg.to,"Auto Leave room already off")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
ki6.sendText(msg.to,"Auto Leave room set to off")
else:
ki6.sendText(msg.to,"Auto Leave room already off")
elif msg.text == 'Share on':
if wait["timeline"] == True:
if wait["lang"] == "JP":
ki7.sendText(msg.to,"Share set to on")
else:
ki7.sendText(msg.to,"Share already on")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
ki.sendText(msg.to,"Share set to on")
else:
cl.sendText(msg.to,"Share already on")
elif msg.text == 'Share off':
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Share set to off")
else:
cl.sendText(msg.to,"Share already off")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Share set to off")
else:
cl.sendText(msg.to,"Share already off")
elif msg.text.lower() == 'set':
md = ""
if wait["contact"] == True: md+="Contact:on \n"
else: md+="Contact:off\n"
if wait["autoJoin"] == True: md+="Auto Join:on \n"
else: md +="Auto Join:off\n"
if wait["autoCancel"]["on"] == True:md+="Auto cancel:" + str(wait["autoCancel"]["members"]) + "\n"
else: md+= "Group cancel:off \n"
if wait["leaveRoom"] == True: md+="Auto leave:on \n"
else: md+="Auto leave:off \n"
if wait["timeline"] == True: md+="Share:on \n"
else:md+="Share:off \n"
if wait["autoAdd"] == True: md+="Auto add:on \n"
else:md+="Auto add:off \n"
if wait["protect"] == True: md+="Protect:on \n"
else:md+="Protect:off \n"
if wait["linkprotect"] == True: md+="Link Protect:on \n"
else:md+="Link Protect:off \n"
if wait["inviteprotect"] == True: md+="Invitation Protect:on \n"
else:md+="Invitation Protect:off \n"
if wait["cancelprotect"] == True: md+="Cancel Protect:on \n"
else:md+="Cancel Protect:off \n"
cl.sendText(msg.to,md)
msg.contentType = 13
        # "creator"/"Creator": send the admin's contact card (mid = admsa) via bot ki10,
        # sandwiched between two (mojibake-corrupted) taunt texts.
        elif cms(msg.text,["creator","Creator"]):
            msg.contentType = 13
            msg.contentMetadata = {'mid': admsa}
            ki10.sendText(msg.to,"downdowndown����downdowndowndown")
            ki10.sendMessage(msg)
            ki10.sendText(msg.to,"upupup������upupupup")
        # "Album:<groupId>": list every album title with its photo count for that group.
        elif "Album:" in msg.text:
            gid = msg.text.replace("Album:","")
            album = cl.getAlbum(gid)
            if album["result"]["items"] == []:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Tidak ada album")
                else:
                    cl.sendText(msg.to,"Dalam album tidak")
            else:
                if wait["lang"] == "JP":
                    mg = "Berikut ini adalah album dari target"
                else:
                    mg = "Berikut ini adalah subjek dari album"
                for y in album["result"]["items"]:
                    if "photoCount" in y:
                        mg += str(y["title"]) + ":" + str(y["photoCount"]) + "\n"
                    else:
                        # Album entry without a photoCount field.
                        mg += str(y["title"]) + ":0 Pieces\n"
                cl.sendText(msg.to,mg)
        # "group id": list "[name]:id" for every group the main client has joined.
        elif msg.text.lower() == 'group id':
            gid = cl.getGroupIdsJoined()
            h = ""
            for i in gid:
                h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
            cl.sendText(msg.to,h)
        # "All bot": each bot account ki..ki18 posts its own contact card by reusing
        # the incoming msg object (contentType 13 = contact message).
        elif msg.text == 'All bot':
            msg.contentType = 13
            msg.contentMetadata = {'mid': kimid}
            ki.sendMessage(msg)
            msg.contentType = 13
            msg.contentMetadata = {'mid': ki2mid}
            ki2.sendMessage(msg)
            msg.contentType = 13
            msg.contentMetadata = {'mid': ki3mid}
            ki3.sendMessage(msg)
            msg.contentType = 13
            msg.contentMetadata = {'mid': ki4mid}
            ki4.sendMessage(msg)
            msg.contentType = 13
            msg.contentMetadata = {'mid': ki5mid}
            ki5.sendMessage(msg)
            msg.contentType = 13
            msg.contentMetadata = {'mid': ki6mid}
            ki6.sendMessage(msg)
            msg.contentType = 13
            msg.contentMetadata = {'mid': ki7mid}
            ki7.sendMessage(msg)
            msg.contentType = 13
            msg.contentMetadata = {'mid': ki8mid}
            ki8.sendMessage(msg)
            msg.contentType = 13
            msg.contentMetadata = {'mid': ki9mid}
            ki9.sendMessage(msg)
            msg.contentType = 13
            msg.contentMetadata = {'mid': ki10mid}
            ki10.sendMessage(msg)
            msg.contentType = 13
            msg.contentMetadata = {'mid': ki11mid}
            ki11.sendMessage(msg)
            msg.contentType = 13
            msg.contentMetadata = {'mid': ki12mid}
            ki12.sendMessage(msg)
            msg.contentType = 13
            msg.contentMetadata = {'mid': ki13mid}
            ki13.sendMessage(msg)
            msg.contentType = 13
            msg.contentMetadata = {'mid': ki14mid}
            ki14.sendMessage(msg)
            msg.contentType = 13
            msg.contentMetadata = {'mid': ki15mid}
            ki15.sendMessage(msg)
            msg.contentType = 13
            msg.contentMetadata = {'mid': ki16mid}
            ki16.sendMessage(msg)
            msg.contentType = 13
            msg.contentMetadata = {'mid': ki17mid}
            ki17.sendMessage(msg)
            msg.contentType = 13
            msg.contentMetadata = {'mid': ki18mid}
            ki18.sendMessage(msg)
            # NOTE(review): the confirmation texts don't match the action performed
            # (they talk about leaving groups / declining invitations) — likely
            # copy-pasted from another branch; verify intent.
            if wait["lang"] == "JP":
                cl.sendText(msg.to,"Bot Sudah Keluar Di semua grup")
            else:
                cl.sendText(msg.to,"He declined all invitations")
        # "gcancel": reject every pending group invitation of the main client.
        elif msg.text.lower() == 'gcancel':
            gid = cl.getGroupIdsInvited()
            for i in gid:
                cl.rejectGroupInvitation(i)
            if wait["lang"] == "JP":
                cl.sendText(msg.to,"Aku menolak semua undangan")
            else:
                cl.sendText(msg.to,"He declined all invitations")
        # "Hapus:<groupId>": delete every album in that group, confirming per album.
        elif "Hapus:" in msg.text:
            gid = msg.text.replace("Hapus:","")
            albums = cl.getAlbum(gid)["result"]["items"]
            i = 0
            if albums != []:
                for album in albums:
                    cl.deleteAlbum(gid,album["gid"])
                    # NOTE(review): confirmation is sent *before* i is incremented,
                    # so the first message reports "0".
                    cl.sendText(msg.to,str(i) + "Soal album telah dihapus")
                    i += 1
            if wait["lang"] == "JP":
                cl.sendText(msg.to,str(i) + "Soal album telah dihapus")
            else:
                cl.sendText(msg.to,str(i) + "Hapus kesulitan album")
        # "add on": enable the auto-add-friend-back flag in the shared wait dict.
        elif msg.text.lower() == 'add on':
            if wait["autoAdd"] == True:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Auto add set to on")
                else:
                    cl.sendText(msg.to,"Auto add already on")
            else:
                wait["autoAdd"] = True
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Auto add set to on")
                else:
                    cl.sendText(msg.to,"Auto add already on")
        # "add off": disable the auto-add flag.
        elif msg.text.lower() == 'add off':
            if wait["autoAdd"] == False:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Auto add set to off")
                else:
                    cl.sendText(msg.to,"Auto add already off")
            else:
                wait["autoAdd"] = False
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Auto add set to off")
                else:
                    cl.sendText(msg.to,"Auto add already off")
        # "Pesan set:<text>": set the auto-add greeting message.
        elif "Pesan set:" in msg.text:
            wait["message"] = msg.text.replace("Pesan set:","")
            cl.sendText(msg.to,"We changed the message")
        # "pesan cek": show the current auto-add greeting (both language paths identical).
        elif msg.text.lower() == 'pesan cek':
            if wait["lang"] == "JP":
                cl.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait["message"])
            else:
                cl.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait["message"])
        # "Come Set:<text>": set the auto-comment text (rejects empty/whitespace input).
        elif "Come Set:" in msg.text:
            c = msg.text.replace("Come Set:","")
            if c in [""," ","\n",None]:
                cl.sendText(msg.to,"Merupakan string yang tidak bisa diubah")
            else:
                wait["comment"] = c
                cl.sendText(msg.to,"Ini telah diubah👈\n\n" + c)
        # "Com on"/"Com:on"/"Comment on": enable auto-commenting.
        # NOTE(review): the two confirmation strings below are mojibake (corrupted
        # Japanese/Chinese); restoring them needs the original encoding.
        elif msg.text in ["Com on","Com:on","Comment on"]:
            if wait["commentOn"] == True:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Aku berada di")
                else:
                    cl.sendText(msg.to,"To open")
            else:
                wait["commentOn"] = True
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"オンã«ã—ã¾ã—ãŸ")
                else:
                    cl.sendText(msg.to,"è¦äº†å¼€")
        # "Come off": disable auto-commenting.
        elif msg.text in ["Come off"]:
            if wait["commentOn"] == False:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Hal ini sudah off")
                else:
                    cl.sendText(msg.to,"It is already turned off")
            else:
                wait["commentOn"] = False
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Off")
                else:
                    cl.sendText(msg.to,"To turn off")
        # "Com"/"Comment": display the current auto-comment text.
        elif msg.text in ["Com","Comment"]:
            cl.sendText(msg.to,"Auto komentar saat ini telah ditetapkan sebagai berikut:👈\n\n" + str(wait["comment"]))
        # "url": (group chats only, toType 2) open the join-by-ticket gate and post
        # the group's invite link. NOTE(review): the gate is left open afterwards.
        elif msg.text.lower() == 'url':
            if msg.toType == 2:
                g = cl.getGroup(msg.to)
                if g.preventJoinByTicket == True:
                    g.preventJoinByTicket = False
                    cl.updateGroup(g)
                gurl = cl.reissueGroupTicket(msg.to)
                cl.sendText(msg.to,"line://ti/g/" + gurl)
            else:
                if wait["lang"] == "JP":
                    cl.sendText(msg.to,"Hal ini tidak dapat digunakan di luar kelompok")
                else:
                    cl.sendText(msg.to,"Tidak dapat digunakan untuk kelompok selain")
        # "url1": same as "url" but performed through bot ki.
        elif msg.text.lower() == 'url1':
            if msg.toType == 2:
                g = cl.getGroup(msg.to)
                if g.preventJoinByTicket == True:
                    g.preventJoinByTicket = False
                    ki.updateGroup(g)
                gurl = ki.reissueGroupTicket(msg.to)
                ki.sendText(msg.to,"line://ti/g/" + gurl)
            else:
                if wait["lang"] == "JP":
                    ki.sendText(msg.to,"Hal ini tidak dapat digunakan di luar kelompok")
                else:
                    ki.sendText(msg.to,"Tidak dapat digunakan untuk kelompok selain")
        # "gurl <groupId>": post the invite link of an arbitrary group id.
        # NOTE(review): the match is lower-cased ('gurl ') but the replace uses
        # "Gurl ", so a lowercase command leaves the prefix in gid — verify.
        elif 'gurl ' in msg.text.lower():
            if msg.toType == 2:
                gid = msg.text.replace("Gurl ","")
                gurl = cl.reissueGroupTicket(gid)
                cl.sendText(msg.to,"line://ti/g/" + gurl)
            else:
                cl.sendText(msg.to,"Tidak dapat digunakan untuk kelompok selain")
        # "Com Bl": arm "next contact received goes onto the comment blacklist".
        elif msg.text in ["Com Bl"]:
            wait["wblack"] = True
            cl.sendText(msg.to,"Please send contacts from the person you want to add to the blacklist🛡")
        # "Com hapus Bl": arm "next contact received is removed from the comment blacklist".
        elif msg.text in ["Com hapus Bl"]:
            wait["dblack"] = True
            cl.sendText(msg.to,"Please send contacts from the person you want to add from the blacklist🛡")
        # "Com Bl cek": list display names currently on the comment blacklist.
        elif msg.text in ["Com Bl cek"]:
            if wait["commentBlack"] == {}:
                cl.sendText(msg.to,"Nothing in the blacklist🛡")
            else:
                cl.sendText(msg.to,"The following is a blacklist")
                mc = ""
                for mi_d in wait["commentBlack"]:
                    mc += "・" +cl.getContact(mi_d).displayName + "\n"
                cl.sendText(msg.to,mc)
        # "jam on": enable the clock feature — append the current HH:MM to the
        # display name (prefix kept in wait["cName"]).
        elif msg.text.lower() == 'jam on':
            if wait["clock"] == True:
                cl.sendText(msg.to,"Jam already on")
            else:
                wait["clock"] = True
                now2 = datetime.now()
                nowT = datetime.strftime(now2,"༺%H:%M༻")
                profile = cl.getProfile()
                profile.displayName = wait["cName"] + nowT
                cl.updateProfile(profile)
                cl.sendText(msg.to,"Jam set on")
        # "jam off": disable the clock feature (does not restore the name).
        elif msg.text.lower() == 'jam off':
            if wait["clock"] == False:
                cl.sendText(msg.to,"Jam already off")
            else:
                wait["clock"] = False
                cl.sendText(msg.to,"Jam set off")
        # "Jam say:<name>": set the clock name prefix (max 30 chars).
        # n.decode("utf-8") — Python 2 byte-string handling.
        elif "Jam say:" in msg.text:
            n = msg.text.replace("Jam say:","")
            if len(n.decode("utf-8")) > 30:
                cl.sendText(msg.to,"terlalu lama")
            else:
                wait["cName"] = n
                cl.sendText(msg.to,"Nama Jam Berubah menjadi:" + n)
        # "update": re-stamp the display name with the current time (clock must be on).
        elif msg.text.lower() == 'update':
            if wait["clock"] == True:
                now2 = datetime.now()
                nowT = datetime.strftime(now2,"༺%H:%M༻")
                profile = cl.getProfile()
                profile.displayName = wait["cName"] + nowT
                cl.updateProfile(profile)
                cl.sendText(msg.to,"Diperbarui")
            else:
                cl.sendText(msg.to,"Silahkan Aktifkan Jam")
        # "Nk @name": open the group's ticket gate, pull bot ki18 in by ticket, and
        # have it kick every member whose display name contains the target string;
        # finally close the gate again.
        elif "Nk " in msg.text:
            nk0 = msg.text.replace("Nk ","")
            nk1 = nk0.lstrip()
            nk2 = nk1.replace("@","")
            nk3 = nk2.rstrip()
            _name = nk3
            gs = cl.getGroup(msg.to)
            ginfo = cl.getGroup(msg.to)
            gs.preventJoinByTicket = False
            cl.updateGroup(gs)
            invsend = 0
            Ticket = cl.reissueGroupTicket(msg.to)
            ki18.acceptGroupInvitationByTicket(msg.to,Ticket)
            time.sleep(0.01)
            targets = []
            for s in gs.members:
                if _name in s.displayName:
                    targets.append(s.mid)
            if targets == []:
                sendMessage(msg.to,"user does not exist")
                pass
            else:
                for target in targets:
                    try:
                        ki18.kickoutFromGroup(msg.to,[target])
                        # NOTE(review): 'g' is not bound in this branch — this print
                        # likely NameErrors (or uses a stale global); verify.
                        print (msg.to,[g.mid])
                    except:
                        ki18.leaveGroup(msg.to)
            gs = cl.getGroup(msg.to)
            gs.preventJoinByTicket = True
            cl.updateGroup(gs)
            # NOTE(review): preventJoinByTicket is a bool here — calling it raises
            # TypeError, so the second updateGroup below is dead code; verify.
            gs.preventJoinByTicket(gs)
            cl.updateGroup(gs)
#-----------------------------------------------------------
        # "Kick @mention": kick every mentioned user via a randomly chosen bot from KAC.
        # SECURITY(review): eval() on message metadata executes attacker-controlled
        # text — should be ast.literal_eval/json at minimum.
        elif ("Kick " in msg.text):
            targets = []
            key = eval(msg.contentMetadata["MENTION"])
            key["MENTIONEES"][0]["M"]
            for x in key["MENTIONEES"]:
                targets.append(x["M"])
            for target in targets:
                try:
                    random.choice(KAC).kickoutFromGroup(msg.to,[target])
                except:
                    random.choice(KAC).sendText(msg.to,"Error")
        # "1kick @mention": same, always via bot ki.
        elif ("1kick " in msg.text):
            targets = []
            key = eval(msg.contentMetadata["MENTION"])
            key["MENTIONEES"][0]["M"]
            for x in key["MENTIONEES"]:
                targets.append(x["M"])
            for target in targets:
                try:
                    ki.kickoutFromGroup(msg.to,[target])
                except:
                    ki.sendText(msg.to,"Error")
        # "2kick @mention": via bot ki2.
        elif ("2kick " in msg.text):
            targets = []
            key = eval(msg.contentMetadata["MENTION"])
            key["MENTIONEES"][0]["M"]
            for x in key["MENTIONEES"]:
                targets.append(x["M"])
            for target in targets:
                try:
                    ki2.kickoutFromGroup(msg.to,[target])
                except:
                    ki2.sendText(msg.to,"Error")
        # "3kick @mention": via bot ki3.
        elif ("3kick " in msg.text):
            targets = []
            key = eval(msg.contentMetadata["MENTION"])
            key["MENTIONEES"][0]["M"]
            for x in key["MENTIONEES"]:
                targets.append(x["M"])
            for target in targets:
                try:
                    ki3.kickoutFromGroup(msg.to,[target])
                except:
                    ki3.sendText(msg.to,"Error")
        # "4kick @mention": via bot ki4.
        # NOTE(review): error path uses ki5, not ki4 — probable copy-paste slip.
        elif ("4kick " in msg.text):
            targets = []
            key = eval(msg.contentMetadata["MENTION"])
            key["MENTIONEES"][0]["M"]
            for x in key["MENTIONEES"]:
                targets.append(x["M"])
            for target in targets:
                try:
                    ki4.kickoutFromGroup(msg.to,[target])
                except:
                    ki5.sendText(msg.to,"Error")
        # "5kick @mention": via bot ki5.
        elif ("5kick " in msg.text):
            targets = []
            key = eval(msg.contentMetadata["MENTION"])
            key["MENTIONEES"][0]["M"]
            for x in key["MENTIONEES"]:
                targets.append(x["M"])
            for target in targets:
                try:
                    ki5.kickoutFromGroup(msg.to,[target])
                except:
                    ki5.sendText(msg.to,"Error")
#-----------------------------------------------------------
#-----------------------------------------------------------
        # "Ban @name": blacklist every group member whose display name equals the
        # target exactly (in-memory only; not persisted here).
        elif "Ban @" in msg.text:
            if msg.toType == 2:
                _name = msg.text.replace("Ban @","")
                _nametarget = _name.rstrip()
                gs = cl.getGroup(msg.to)
                targets = []
                for g in gs.members:
                    if _nametarget == g.displayName:
                        targets.append(g.mid)
                if targets == []:
                    cl.sendText(msg.to,_nametarget + " Not Found")
                else:
                    for target in targets:
                        try:
                            wait["blacklist"][target] = True
                            random.choice(KAC).sendText(msg.to,_nametarget + " Succes Add to Blacklist")
                        except:
                            random.choice(KAC).sendText(msg.to,"Error")
        # "Test @mention": send a theme "gift" message (contentType 9) per target.
        # SECURITY(review): eval() on message metadata — attacker-controlled.
        elif ("Test " in msg.text):
            targets = []
            key = eval(msg.contentMetadata["MENTION"])
            key["MENTIONEES"][0]["M"]
            for x in key["MENTIONEES"]:
                targets.append(x["M"])
            for target in targets:
                try:
                    msg.contentType = 9
                    msg.contentMetadata={'PRDID': '89131c1a-e549-4bd5-9e60-e24de0d2e252',
                                         'PRDTYPE': 'THEME',
                                         'MSGTPL': '10'}
                    msg.text = None
                    cl.sendMessage(msg)
                    cl.sendMessage(msg,target)
                except:
                    cl.sendText(msg.to,"Gift send to member")
        # "Unban @name": remove an exact-display-name match from the blacklist.
        elif "Unban @" in msg.text:
            if msg.toType == 2:
                _name = msg.text.replace("Unban @","")
                _nametarget = _name.rstrip()
                gs = cl.getGroup(msg.to)
                targets = []
                for g in gs.members:
                    if _nametarget == g.displayName:
                        targets.append(g.mid)
                if targets == []:
                    cl.sendText(msg.to,_nametarget + " Not Found")
                else:
                    for target in targets:
                        try:
                            del wait["blacklist"][target]
                            random.choice(KAC).sendText(msg.to,_nametarget + " Delete From Blacklist")
                        except:
                            random.choice(KAC).sendText(msg.to,_nametarget + " Not In Blacklist")
        # "Ban:<name>": substring-match blacklist; persists wait["blacklist"] to
        # st2__b.json on every hit. NOTE(review): the file handle is never closed.
        elif "Ban:" in msg.text:
            nk0 = msg.text.replace("Ban:","")
            nk1 = nk0.lstrip()
            nk2 = nk1.replace("","")
            nk3 = nk2.rstrip()
            _name = nk3
            gs = cl.getGroup(msg.to)
            targets = []
            for s in gs.members:
                if _name in s.displayName:
                    targets.append(s.mid)
            if targets == []:
                sendMessage(msg.to,"user does not exist")
                pass
            else:
                for target in targets:
                    try:
                        wait["blacklist"][target] = True
                        f=codecs.open('st2__b.json','w','utf-8')
                        json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
                        cl.sendText(msg.to,_name + " Succes Add to Blacklist")
                    except:
                        cl.sendText(msg.to,"Error")
        # "Unban:<name>": substring-match removal, mirroring "Ban:" incl. persistence.
        elif "Unban:" in msg.text:
            nk0 = msg.text.replace("Unban:","")
            nk1 = nk0.lstrip()
            nk2 = nk1.replace("","")
            nk3 = nk2.rstrip()
            _name = nk3
            gs = cl.getGroup(msg.to)
            targets = []
            for s in gs.members:
                if _name in s.displayName:
                    targets.append(s.mid)
            if targets == []:
                sendMessage(msg.to,"user does not exist")
                pass
            else:
                for target in targets:
                    try:
                        del wait["blacklist"][target]
                        f=codecs.open('st2__b.json','w','utf-8')
                        json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
                        cl.sendText(msg.to,_name + " Delete From Blacklist")
                    except:
                        cl.sendText(msg.to,_name + " Not In Blacklist")
#-----------------------------------------------------------
#-----------------------------------------------------------
        # "Offline<mid>": add the raw remainder of the text as a blacklist key.
        elif "Offline" in msg.text:
            midd = msg.text.replace("Offline","")
            wait["blacklist"][midd] = True
            cl.sendText(msg.to,"Target Lock")
        # "Sc off": announce and terminate the whole process.
        elif cms(msg.text,["Sc off"]):
            cl.sendText(msg.to,"Script Off")
            exit(1)
        # "#leave": attempt a sys.exit(); the bare except swallows SystemExit,
        # so this is effectively a no-op — NOTE(review): verify intent.
        elif "#leave" in msg.text:
            try:
                import sys
                sys.exit()
            except:
                pass
        # "Absen": roll call — each bot ki..ki10 reports its display name.
        elif msg.text == 'Absen':
            profile = ki.getProfile()
            text = profile.displayName + " Hadir brodouble thumbs upHar Har"
            ki.sendText(msg.to, text)
            profile = ki2.getProfile()
            text = profile.displayName + " Hadir brodouble thumbs upHar Har"
            ki2.sendText(msg.to, text)
            profile = ki3.getProfile()
            text = profile.displayName + " Hadir brodouble thumbs upHar Har"
            ki3.sendText(msg.to, text)
            profile = ki4.getProfile()
            text = profile.displayName + " Hadir brodouble thumbs upHar Har"
            ki4.sendText(msg.to, text)
            profile = ki5.getProfile()
            text = profile.displayName + " Hadir brodouble thumbs upHar Har"
            ki5.sendText(msg.to, text)
            profile = ki6.getProfile()
            text = profile.displayName + " Hadir brodouble thumbs upHar Har"
            ki6.sendText(msg.to, text)
            profile = ki7.getProfile()
            text = profile.displayName + " Hadir brodouble thumbs upHar Har"
            ki7.sendText(msg.to, text)
            profile = ki8.getProfile()
            text = profile.displayName + " Hadir brodouble thumbs upHar Har"
            ki8.sendText(msg.to, text)
            profile = ki9.getProfile()
            text = profile.displayName + " Hadir brodouble thumbs upHar Har"
            ki9.sendText(msg.to, text)
            profile = ki10.getProfile()
            text = profile.displayName + " Hadir brodouble thumbs upHar Har"
            ki10.sendText(msg.to, text)
        #-----------------------------------------------------------speed
        # "Ban": arm "next contact received is blacklisted".
        elif msg.text in ["Ban"]:
            wait["wblacklist"] = True
            random.choice(KAC).sendText(msg.to,"Send Contact")
        # "Unban": arm "next contact received is un-blacklisted".
        elif msg.text in ["Unban"]:
            wait["dblacklist"] = True
            random.choice(KAC).sendText(msg.to,"Send Contact")
        # "mcheck": list display names of everyone on the blacklist.
        elif msg.text.lower() == 'mcheck':
            if wait["blacklist"] == {}:
                random.choice(KAC).sendText(msg.to," Nothing in the blacklist")
            else:
                random.choice(KAC).sendText(msg.to," following is a blacklist")
                mc = ""
                for mi_d in wait["blacklist"]:
                    mc += ">" +cl.getContact(mi_d).displayName + "\n"
                ki7.sendText(msg.to,mc)
        # "Banlist": list blacklisted users present in the current group.
        # NOTE(review): the lambda parameter shadows the builtin 'str'.
        elif msg.text == 'Banlist':
            if msg.toType == 2:
                group = cl.getGroup(msg.to)
                gMembMids = [contact.mid for contact in group.members]
                matched_list = []
                for tag in wait["blacklist"]:
                    matched_list+=filter(lambda str: str == tag, gMembMids)
                cocoa = ""
                for mm in matched_list:
                    cocoa += ">" +cl.getContact(mm).displayName + "\n"
                random.choice(KAC).sendText(msg.to,cocoa + "Blacklist User")
        # "Kill ban": kick every blacklisted member present in the current group.
        # (the bare 'return' here confirms this chain lives inside a function)
        elif msg.text == 'Kill ban':
            if msg.toType == 2:
                group = ki.getGroup(msg.to)
                gMembMids = [contact.mid for contact in group.members]
                matched_list = []
                for tag in wait["blacklist"]:
                    matched_list+=filter(lambda str: str == tag, gMembMids)
                if matched_list == []:
                    random.choice(KAC).sendText(msg.to,"Daftar hitam pengguna tidak memiliki")
                    return
                for jj in matched_list:
                    try:
                        random.choice(KAC).kickoutFromGroup(msg.to,[jj])
                        print (msg.to,[jj])
                    except:
                        pass
        # "Cancel": revoke every pending invitation of the current group.
        elif msg.text == 'Cancel':
            if msg.toType == 2:
                group = cl.getGroup(msg.to)
                gMembMids = [contact.mid for contact in group.invitee]
                for _mid in gMembMids:
                    cl.cancelGroupInvitation(msg.to,[_mid])
                cl.sendText(msg.to,"I pretended to cancel and canceled")
        # "Spam album:<gid33chars><name>": create an album in the given group;
        # first 33 chars are the group id, the rest (minus one char) the title.
        elif "Spam album:" in msg.text:
            try:
                albumtags = msg.text.replace("Spam album:","")
                gid = albumtags[:33]
                name = albumtags.replace(albumtags[:34],"")
                cl.createAlbum(gid,name)
                cl.sendText(msg.to,"We created an album" + name)
            except:
                cl.sendText(msg.to,"Error")
#-----------------------------------------------
#-----------------------------------------------
        # "All join": open the ticket gate, pull every bot ki..ki18 into the group
        # by ticket, then close the gate. Python 2 print statements throughout.
        elif msg.text == 'All join':
            G = cl.getGroup(msg.to)
            ginfo = cl.getGroup(msg.to)
            G.preventJoinByTicket = False
            cl.updateGroup(G)
            invsend = 0
            Ticket = cl.reissueGroupTicket(msg.to)
            ki.acceptGroupInvitationByTicket(msg.to,Ticket)
            time.sleep(0.01)
            ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
            time.sleep(0.01)
            ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
            time.sleep(0.01)
            ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
            time.sleep(0.01)
            ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
            time.sleep(0.01)
            ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
            time.sleep(0.01)
            ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
            time.sleep(0.01)
            ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
            time.sleep(0.01)
            ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
            time.sleep(0.01)
            ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
            time.sleep(0.01)
            ki11.acceptGroupInvitationByTicket(msg.to,Ticket)
            time.sleep(0.01)
            ki12.acceptGroupInvitationByTicket(msg.to,Ticket)
            time.sleep(0.01)
            ki13.acceptGroupInvitationByTicket(msg.to,Ticket)
            time.sleep(0.01)
            ki14.acceptGroupInvitationByTicket(msg.to,Ticket)
            time.sleep(0.01)
            ki15.acceptGroupInvitationByTicket(msg.to,Ticket)
            time.sleep(0.01)
            ki16.acceptGroupInvitationByTicket(msg.to,Ticket)
            time.sleep(0.01)
            ki17.acceptGroupInvitationByTicket(msg.to,Ticket)
            time.sleep(0.01)
            ki18.acceptGroupInvitationByTicket(msg.to,Ticket)
            time.sleep(0.01)
            G = random.choice(KAC).getGroup(msg.to)
            ginfo = random.choice(KAC).getGroup(msg.to)
            G.preventJoinByTicket = True
            ki.updateGroup(G)
            print "kicker ok"
            # NOTE(review): preventJoinByTicket is a bool — calling it raises
            # TypeError, making the updateGroup after it dead code (same pattern
            # repeats in every "<n> in" branch below); verify.
            G.preventJoinByTicket(G)
            ki.updateGroup(G)
        #-----------------------------------------------
        # "1 in".."10 in": same gate-open/join-by-ticket/gate-close dance for a
        # single numbered bot each.
        elif "1 in" in msg.text:
            G = cl.getGroup(msg.to)
            ginfo = cl.getGroup(msg.to)
            G.preventJoinByTicket = False
            cl.updateGroup(G)
            invsend = 0
            Ticket = cl.reissueGroupTicket(msg.to)
            ki.acceptGroupInvitationByTicket(msg.to,Ticket)
            G = cl.getGroup(msg.to)
            ginfo = cl.getGroup(msg.to)
            G.preventJoinByTicket = True
            ki.updateGroup(G)
            print "kicker ok"
            G.preventJoinByTicket(G)
            ki.updateGroup(G)
        #-----------------------------------------------
        elif "2 in" in msg.text:
            G = cl.getGroup(msg.to)
            ginfo = cl.getGroup(msg.to)
            G.preventJoinByTicket = False
            cl.updateGroup(G)
            invsend = 0
            Ticket = cl.reissueGroupTicket(msg.to)
            ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
            G = cl.getGroup(msg.to)
            ginfo = cl.getGroup(msg.to)
            G.preventJoinByTicket = True
            ki2.updateGroup(G)
            print "kicker ok"
            G.preventJoinByTicket(G)
            ki2.updateGroup(G)
        #-----------------------------------------------
        # NOTE(review): this branch joins ki3 but closes the gate via ki2 —
        # probable copy-paste slip.
        elif "3 in" in msg.text:
            G = cl.getGroup(msg.to)
            ginfo = cl.getGroup(msg.to)
            G.preventJoinByTicket = False
            cl.updateGroup(G)
            invsend = 0
            Ticket = cl.reissueGroupTicket(msg.to)
            ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
            G = cl.getGroup(msg.to)
            ginfo = cl.getGroup(msg.to)
            G.preventJoinByTicket = True
            ki2.updateGroup(G)
            print "kicker ok"
            G.preventJoinByTicket(G)
            ki2.updateGroup(G)
        #-----------------------------------------------
        # NOTE(review): joins ki4 but updates via ki3 — probable copy-paste slip.
        elif "4 in" in msg.text:
            G = cl.getGroup(msg.to)
            ginfo = cl.getGroup(msg.to)
            G.preventJoinByTicket = False
            cl.updateGroup(G)
            invsend = 0
            Ticket = cl.reissueGroupTicket(msg.to)
            ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
            G = cl.getGroup(msg.to)
            ginfo = cl.getGroup(msg.to)
            G.preventJoinByTicket = True
            ki3.updateGroup(G)
            print "kicker ok"
            G.preventJoinByTicket(G)
            ki3.updateGroup(G)
        #-----------------------------------------------
        elif "5 in" in msg.text:
            G = cl.getGroup(msg.to)
            ginfo = cl.getGroup(msg.to)
            G.preventJoinByTicket = False
            cl.updateGroup(G)
            invsend = 0
            Ticket = cl.reissueGroupTicket(msg.to)
            ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
            G = cl.getGroup(msg.to)
            ginfo = cl.getGroup(msg.to)
            G.preventJoinByTicket = True
            ki5.updateGroup(G)
            print "kicker ok"
            G.preventJoinByTicket(G)
            ki5.updateGroup(G)
        #-----------------------------------------------
        elif "6 in" in msg.text:
            G = cl.getGroup(msg.to)
            ginfo = cl.getGroup(msg.to)
            G.preventJoinByTicket = False
            cl.updateGroup(G)
            invsend = 0
            Ticket = cl.reissueGroupTicket(msg.to)
            ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
            G = cl.getGroup(msg.to)
            ginfo = cl.getGroup(msg.to)
            G.preventJoinByTicket = True
            ki6.updateGroup(G)
            print "kicker ok"
            G.preventJoinByTicket(G)
            ki6.updateGroup(G)
        #-----------------------------------------------
        elif "7 in" in msg.text:
            G = cl.getGroup(msg.to)
            ginfo = cl.getGroup(msg.to)
            G.preventJoinByTicket = False
            cl.updateGroup(G)
            invsend = 0
            Ticket = cl.reissueGroupTicket(msg.to)
            ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
            G = cl.getGroup(msg.to)
            ginfo = cl.getGroup(msg.to)
            G.preventJoinByTicket = True
            ki7.updateGroup(G)
            print "kicker ok"
            G.preventJoinByTicket(G)
            ki7.updateGroup(G)
        #-----------------------------------------------
        elif "8 in" in msg.text:
            G = cl.getGroup(msg.to)
            ginfo = cl.getGroup(msg.to)
            G.preventJoinByTicket = False
            cl.updateGroup(G)
            invsend = 0
            Ticket = cl.reissueGroupTicket(msg.to)
            ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
            G = cl.getGroup(msg.to)
            ginfo = cl.getGroup(msg.to)
            G.preventJoinByTicket = True
            ki8.updateGroup(G)
            print "kicker ok"
            G.preventJoinByTicket(G)
            ki8.updateGroup(G)
        #-----------------------------------------------
        elif "9 in" in msg.text:
            G = cl.getGroup(msg.to)
            ginfo = cl.getGroup(msg.to)
            G.preventJoinByTicket = False
            cl.updateGroup(G)
            invsend = 0
            Ticket = cl.reissueGroupTicket(msg.to)
            ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
            G = cl.getGroup(msg.to)
            ginfo = cl.getGroup(msg.to)
            G.preventJoinByTicket = True
            ki9.updateGroup(G)
            print "kicker ok"
            G.preventJoinByTicket(G)
            ki9.updateGroup(G)
        # NOTE(review): 'k10' below is undefined (should presumably be ki10) —
        # this branch raises NameError after the join; verify.
        elif "10 in" in msg.text:
            G = cl.getGroup(msg.to)
            ginfo = cl.getGroup(msg.to)
            G.preventJoinByTicket = False
            cl.updateGroup(G)
            invsend = 0
            Ticket = cl.reissueGroupTicket(msg.to)
            ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
            G = cl.getGroup(msg.to)
            ginfo = cl.getGroup(msg.to)
            G.preventJoinByTicket = True
            k10.updateGroup(G)
            print "kicker ok"
            G.preventJoinByTicket(G)
            ki10.updateGroup(G)
#-----------------------------------------------
        # "All out": all bots ki..ki10 leave the current group (group chats only);
        # any failure aborts the remaining leaves silently.
        elif msg.text == 'All out':
            if msg.toType == 2:
                ginfo = cl.getGroup(msg.to)
                try:
                    ki.sendText(msg.to,"double thumbs upHar Har Byee Bye " + str(ginfo.name) + "")
                    ki.leaveGroup(msg.to)
                    ki2.leaveGroup(msg.to)
                    ki3.leaveGroup(msg.to)
                    ki4.leaveGroup(msg.to)
                    ki5.leaveGroup(msg.to)
                    ki6.leaveGroup(msg.to)
                    ki7.leaveGroup(msg.to)
                    ki8.leaveGroup(msg.to)
                    ki9.leaveGroup(msg.to)
                    ki10.leaveGroup(msg.to)
                except:
                    pass
        #-----------------------------------------------
        # "1 out".."10 out": a single numbered bot leaves the current group.
        elif "1 out" in msg.text:
            if msg.toType == 2:
                ginfo = cl.getGroup(msg.to)
                try:
                    ki.leaveGroup(msg.to)
                except:
                    pass
        #-----------------------------------------------
        elif "2 out" in msg.text:
            if msg.toType == 2:
                ginfo = cl.getGroup(msg.to)
                try:
                    ki2.leaveGroup(msg.to)
                except:
                    pass
        #-----------------------------------------------
        elif "3 out" in msg.text:
            if msg.toType == 2:
                ginfo = cl.getGroup(msg.to)
                try:
                    ki3.leaveGroup(msg.to)
                except:
                    pass
        #-----------------------------------------------
        elif "4 out" in msg.text:
            if msg.toType == 2:
                ginfo = cl.getGroup(msg.to)
                try:
                    ki4.leaveGroup(msg.to)
                except:
                    pass
        #-----------------------------------------------
        elif "5 out" in msg.text:
            if msg.toType == 2:
                ginfo = cl.getGroup(msg.to)
                try:
                    ki5.leaveGroup(msg.to)
                except:
                    pass
        #-----------------------------------------------
        elif "6 out" in msg.text:
            if msg.toType == 2:
                ginfo = cl.getGroup(msg.to)
                try:
                    ki6.leaveGroup(msg.to)
                except:
                    pass
        #-----------------------------------------------
        elif "7 out" in msg.text:
            if msg.toType == 2:
                ginfo = cl.getGroup(msg.to)
                try:
                    ki7.leaveGroup(msg.to)
                except:
                    pass
        #-----------------------------------------------
        elif "8 out" in msg.text:
            if msg.toType == 2:
                ginfo = cl.getGroup(msg.to)
                try:
                    ki8.leaveGroup(msg.to)
                except:
                    pass
        #-----------------------------------------------
        elif "9 out" in msg.text:
            if msg.toType == 2:
                ginfo = cl.getGroup(msg.to)
                try:
                    ki9.leaveGroup(msg.to)
                except:
                    pass
        elif "10 out" in msg.text:
            if msg.toType == 2:
                ginfo = cl.getGroup(msg.to)
                try:
                    ki10.leaveGroup(msg.to)
                except:
                    pass
#-----------------------------------------------MISC-----------------------------------------------#
        # "Sp"/"Speed"/"speed": crude round-trip latency check — measures the
        # duration of one sendText call.
        elif msg.text in ["Sp","Speed","speed"]:
            start = time.time()
            cl.sendText(msg.to, "Kalem...")
            elapsed_time = time.time() - start
            cl.sendText(msg.to, "%sseconds" % (elapsed_time))
        # "Wc": welcome message with the group name and creator's display name.
        elif "Wc" in msg.text:
            ginfo = cl.getGroup(msg.to)
            cl.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name))
            cl.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
        # "Hi <text>": echo the text via bot ki9.
        elif "Hi " in msg.text:
            bctxt = msg.text.replace("Hi ","")
            ki9.sendText(msg.to,(bctxt))
        # "Bot say <text>": echo the text from every bot ki..ki17.
        elif "Bot say " in msg.text:
            bctxt = msg.text.replace("Bot say ","")
            ki.sendText(msg.to,(bctxt))
            ki2.sendText(msg.to,(bctxt))
            ki3.sendText(msg.to,(bctxt))
            ki4.sendText(msg.to,(bctxt))
            ki5.sendText(msg.to,(bctxt))
            ki6.sendText(msg.to,(bctxt))
            ki7.sendText(msg.to,(bctxt))
            ki8.sendText(msg.to,(bctxt))
            ki9.sendText(msg.to,(bctxt))
            ki10.sendText(msg.to,(bctxt))
            ki11.sendText(msg.to,(bctxt))
            ki12.sendText(msg.to,(bctxt))
            ki13.sendText(msg.to,(bctxt))
            ki14.sendText(msg.to,(bctxt))
            ki15.sendText(msg.to,(bctxt))
            ki16.sendText(msg.to,(bctxt))
            ki17.sendText(msg.to,(bctxt))
        # "Say <text>": echo from one randomly chosen bot.
        elif "Say " in msg.text:
            bctxt = msg.text.replace("Say ","")
            random.choice(KAC).sendText(msg.to,(bctxt))
        # "Spam <text>": main client sends the text ten times.
        elif "Spam " in msg.text:
            bctxt = msg.text.replace("Spam ","")
            cl.sendText(msg.to,(bctxt))
            cl.sendText(msg.to,(bctxt))
            cl.sendText(msg.to,(bctxt))
            cl.sendText(msg.to,(bctxt))
            cl.sendText(msg.to,(bctxt))
            cl.sendText(msg.to,(bctxt))
            cl.sendText(msg.to,(bctxt))
            cl.sendText(msg.to,(bctxt))
            cl.sendText(msg.to,(bctxt))
            cl.sendText(msg.to,(bctxt))
        # "Sendspam" (admin only): each bot ki..ki9 sends a theme-gift message
        # three times (27 messages total).
        elif msg.text in ["Sendspam","sendspam"]:
            if msg.from_ in admin:
                msg.contentType = 9
                msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
                'PRDTYPE': 'THEME',
                'MSGTPL': '5'}
                msg.text = None
                ki.sendMessage(msg)
                ki2.sendMessage(msg)
                ki3.sendMessage(msg)
                ki4.sendMessage(msg)
                ki5.sendMessage(msg)
                ki6.sendMessage(msg)
                ki7.sendMessage(msg)
                ki8.sendMessage(msg)
                ki9.sendMessage(msg)
                ki.sendMessage(msg)
                ki2.sendMessage(msg)
                ki3.sendMessage(msg)
                ki4.sendMessage(msg)
                ki5.sendMessage(msg)
                ki6.sendMessage(msg)
                ki7.sendMessage(msg)
                ki8.sendMessage(msg)
                ki9.sendMessage(msg)
                ki.sendMessage(msg)
                ki2.sendMessage(msg)
                ki3.sendMessage(msg)
                ki4.sendMessage(msg)
                ki5.sendMessage(msg)
                ki6.sendMessage(msg)
                ki7.sendMessage(msg)
                ki8.sendMessage(msg)
                ki9.sendMessage(msg)
        # "999+ <text>" (admin only): each account spams the text 100 times.
        # NOTE(review): each getAllContactIds() result is immediately overwritten
        # by t = 100, so those calls are wasted round-trips.
        elif "999+ " in msg.text:
            if msg.from_ in admin:
                bctxt = msg.text.replace("999+ ", "")
                t = cl.getAllContactIds()
                t = 100
                while(t):
                    cl.sendText(msg.to, (bctxt))
                    t-=1
                t = ki.getAllContactIds()
                t = 100
                while(t):
                    ki.sendText(msg.to, (bctxt))
                    t-=1
                t = ki2.getAllContactIds()
                t = 100
                while(t):
                    ki2.sendText(msg.to, (bctxt))
                    t-=1
                t = ki3.getAllContactIds()
                t = 100
                while(t):
                    ki3.sendText(msg.to, (bctxt))
                    t-=1
                t = ki4.getAllContactIds()
                t = 100
                while(t):
                    ki4.sendText(msg.to, (bctxt))
                    t-=1
                t = ki5.getAllContactIds()
                t = 100
                while(t):
                    ki5.sendText(msg.to, (bctxt))
                    t-=1
                t = ki6.getAllContactIds()
                t = 100
                while(t):
                    ki6.sendText(msg.to, (bctxt))
                    t-=1
                t = ki7.getAllContactIds()
                t = 100
                while(t):
                    ki7.sendText(msg.to, (bctxt))
                    t-=1
                t = ki8.getAllContactIds()
                t = 100
                while(t):
                    ki8.sendText(msg.to, (bctxt))
                    t-=1
                t = ki9.getAllContactIds()
                t = 100
                while(t):
                    ki9.sendText(msg.to, (bctxt))
                    t-=1
        # "ig": canned joke reply.
        elif msg.text.lower() == 'ig':
            cl.sendText(msg.to,"Hei dude,😎\nPlease don't follow your dreams.💀\n😘")
        # "ping": liveness check — bots ki..ki9 each reply "Ping ".
        elif msg.text.lower() == 'ping':
            ki.sendText(msg.to,"Ping ")
            ki2.sendText(msg.to,"Ping ")
            ki3.sendText(msg.to,"Ping ")
            ki4.sendText(msg.to,"Ping ")
            ki5.sendText(msg.to,"Ping ")
            ki6.sendText(msg.to,"Ping ")
            ki7.sendText(msg.to,"Ping ")
            ki8.sendText(msg.to,"Ping ")
            ki9.sendText(msg.to,"Ping ")
#-----------------------------------------------TAG ALL-----------------------------------------------#
        # "Tagall" (admin only): build a MENTION metadata blob that @-mentions every
        # group member ("@nrik" placeholders, 5-char spans) and send it as one message.
        elif msg.text in ["Tagall"]:
            if msg.from_ in admin:
                group = cl.getGroup(msg.to)
                nama = [contact.mid for contact in group.members]
                cb = ""
                cb2 = ""
                strt = int(0)
                akh = int(0)
                for md in nama:
                    # Each mention entry records its start/end offsets in the text.
                    akh = akh + int(5)
                    cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
                    strt = strt + int(6)
                    akh = akh + 1
                    cb2 += "@nrik\n"
                # Drop the trailing comma from the JSON fragment.
                cb = (cb[:int(len(cb)-1)])
                msg.contentType = 0
                msg.text = cb2
                msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
                try:
                    cl.sendMessage(msg)
                except Exception as error:
                    print error
        #-------------------------------------------------------------
        # "Steal dp @name": send the profile picture of each member whose display
        # name matches exactly, via the LINE CDN URL.
        elif "Steal dp @" in msg.text:
            print "[Command]dp executing"
            _name = msg.text.replace("Steal dp @","")
            _nametarget = _name.rstrip(' ')
            gs = cl.getGroup(msg.to)
            targets = []
            for g in gs.members:
                if _nametarget == g.displayName:
                    targets.append(g.mid)
            if targets == []:
                cl.sendText(msg.to,"Contact not found")
            else:
                for target in targets:
                    try:
                        contact = cl.getContact(target)
                        path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
                        cl.sendImageWithURL(msg.to, path)
                    except:
                        pass
                print "[Command]dp executed"
        #-------------------------------------------------------------
        #-----------------------------------------------TAG ALL-----------------------------------------------#
        # "Respon" (admin only): canned roll-call banter from bots ki..ki10.
        elif "Respon" in msg.text:
            if msg.from_ in admin:
                ki.sendText(msg.to,"I'm here")
                ki2.sendText(msg.to,"Iya kk aku disini")
                ki3.sendText(msg.to,"Aku juga ^_^")
                ki4.sendText(msg.to,"Aku juga dong")
                ki5.sendText(msg.to,"Aku juga lohhHar Har")
                ki6.sendText(msg.to,"Iya kk aku masih hadir")
                ki7.sendText(msg.to,"Kalian Jelek")
                ki8.sendText(msg.to,"Kamu yang jelek anjing")
                ki9.sendText(msg.to,"Kok kamu anjing ya")
                ki10.sendText(msg.to,"Sudah jangan berantem kakak ┑( ̄▽ ̄)┍")
        #-----------------------------------------------GRUP LIST-----------------------------------------------#
        # "Glist" (admin only): list every joined group with its member count.
        elif "Glist" in msg.text:
            if msg.from_ in admin:
                gid = cl.getGroupIdsJoined()
                h = ""
                for i in gid:
                    h += "=> %s \n" % (cl.getGroup(i).name + " | Members : [ " + str(len (cl.getGroup(i).members))+" ]")
                cl.sendText(msg.to, "#[List Grup]# \n"+ h +"Total Group : " +"[ "+str(len(gid))+" ]")
        #-----------------------------------------------GRUP LIST-----------------------------------------------#
        # "Gcreator": send the group creator's name and contact card.
        elif msg.text in ["Gcreator"]:
            if msg.toType == 2:
                msg.contentType = 13
                ginfo = ki2.getGroup(msg.to)
                gCreator = ginfo.creator.mid
                try:
                    msg.contentMetadata = {'mid': gCreator}
                    gCreator1 = ginfo.creator.displayName
                except:
                    # NOTE(review): on failure gCreator1 stays unbound, so the
                    # sendText below would NameError; verify.
                    gCreator = "Error"
                random.choice(KAC).sendText(msg.to, "Grup Creator : " + gCreator1)
                random.choice(KAC).sendMessage(msg)
        # "Mid @name" (admin only): reply with the mid of each exact-name match.
        elif "Mid @" in msg.text:
            if msg.from_ in admin:
                _name = msg.text.replace("Mid @","")
                _nametarget = _name.rstrip(' ')
                gs = cl.getGroup(msg.to)
                for g in gs.members:
                    if _nametarget == g.displayName:
                        cl.sendText(msg.to, g.mid)
                    else:
                        pass
        # "Cek @mention": reply with the first mentioned user's mid.
        # SECURITY(review): eval() on message metadata — attacker-controlled.
        elif ("Cek " in msg.text):
            key = eval(msg.contentMetadata["MENTION"])
            key1 = key["MENTIONEES"][0]["M"]
            mi = cl.getContact(key1)
            cl.sendText(msg.to,"[Mid] \n\n" + key1)
        # "Backup"/"backup": restore the saved profile (picture + fields) from the
        # global 'backup' snapshot.
        elif msg.text in ["Backup","backup"]:
            try:
                cl.updateDisplayPicture(backup.pictureStatus)
                cl.updateProfile(backup)
                cl.sendText(msg.to, "Telah kembali semula")
            except Exception as e:
                cl.sendText(msg.to, str(e))
        # "Clone all @mention": every account (cl + ki..ki10) copies the mentioned
        # user's profile. NOTE(review): gate is "msg.from_ in admsa" here while the
        # numbered clones use "in admin" — verify which is intended.
        # SECURITY(review): eval() on message metadata in all clone branches.
        elif "Clone all " in msg.text:
            if msg.from_ in admsa:
                targets = []
                key = eval(msg.contentMetadata["MENTION"])
                key["MENTIONEES"][0]["M"]
                for x in key["MENTIONEES"]:
                    targets.append(x["M"])
                for target in targets:
                    try:
                        cl.CloneContactProfile(target)
                        ki.CloneContactProfile(target)
                        ki2.CloneContactProfile(target)
                        ki3.CloneContactProfile(target)
                        ki4.CloneContactProfile(target)
                        ki5.CloneContactProfile(target)
                        ki6.CloneContactProfile(target)
                        ki7.CloneContactProfile(target)
                        ki8.CloneContactProfile(target)
                        ki9.CloneContactProfile(target)
                        ki10.CloneContactProfile(target)
                        cl.sendText(msg.to,"Clone Success")
                        ki.sendText(msg.to,"Clone Success")
                        ki2.sendText(msg.to,"Clone Success")
                        ki3.sendText(msg.to,"Clone Success")
                        ki4.sendText(msg.to,"Clone Success")
                        ki5.sendText(msg.to,"Clone Success")
                        ki6.sendText(msg.to,"Clone Success")
                        ki7.sendText(msg.to,"Clone Success")
                        ki8.sendText(msg.to,"Clone Success")
                        ki9.sendText(msg.to,"Clone Success")
                        ki10.sendText(msg.to,"Clone Success")
                    except Exception as e:
                        cl.sendText(msg.to,"Clone Fail")
                        print e
        # "Clone @mention": only the main client copies the mentioned profile.
        elif "Clone " in msg.text:
            if msg.from_ in admsa:
                targets = []
                key = eval(msg.contentMetadata["MENTION"])
                key["MENTIONEES"][0]["M"]
                for x in key["MENTIONEES"]:
                    targets.append(x["M"])
                for target in targets:
                    try:
                        cl.CloneContactProfile(target)
                        cl.sendText(msg.to,"Clone Success")
                    except Exception as e:
                        cl.sendText(msg.to,"Clone Fail")
                        print e
        # "1clone".."7clone" (admin only): one numbered bot clones the mentioned profile.
        elif "1clone " in msg.text:
            if msg.from_ in admin:
                targets = []
                key = eval(msg.contentMetadata["MENTION"])
                for x in key["MENTIONEES"]:
                    targets.append(x["M"])
                for target in targets:
                    try:
                        ki.CloneContactProfile(target)
                        ki.sendText(msg.to,"Clone Success")
                    except Exception as e:
                        ki.sendText(msg.to,"Clone Fail")
                        print "Clone Execute"
        elif "2clone " in msg.text:
            if msg.from_ in admin:
                targets = []
                key = eval(msg.contentMetadata["MENTION"])
                for x in key["MENTIONEES"]:
                    targets.append(x["M"])
                for target in targets:
                    try:
                        ki2.CloneContactProfile(target)
                        ki2.sendText(msg.to,"Clone Success")
                    except Exception as e:
                        ki2.sendText(msg.to,"Clone Fail")
                        print "Clone Execute"
        elif "3clone " in msg.text:
            if msg.from_ in admin:
                targets = []
                key = eval(msg.contentMetadata["MENTION"])
                for x in key["MENTIONEES"]:
                    targets.append(x["M"])
                for target in targets:
                    try:
                        ki3.CloneContactProfile(target)
                        ki3.sendText(msg.to,"Clone Success")
                    except Exception as e:
                        ki3.sendText(msg.to,"Clone Fail")
                        print "Clone Execute"
        elif "4clone " in msg.text:
            if msg.from_ in admin:
                targets = []
                key = eval(msg.contentMetadata["MENTION"])
                for x in key["MENTIONEES"]:
                    targets.append(x["M"])
                for target in targets:
                    try:
                        ki4.CloneContactProfile(target)
                        ki4.sendText(msg.to,"Clone Success")
                    except Exception as e:
                        ki4.sendText(msg.to,"Clone Fail")
                        print "Clone Execute"
        elif "5clone " in msg.text:
            if msg.from_ in admin:
                targets = []
                key = eval(msg.contentMetadata["MENTION"])
                for x in key["MENTIONEES"]:
                    targets.append(x["M"])
                for target in targets:
                    try:
                        ki5.CloneContactProfile(target)
                        ki5.sendText(msg.to,"Clone Success")
                    except Exception as e:
                        ki5.sendText(msg.to,"Clone Fail")
                        print "Clone Execute"
        elif "6clone " in msg.text:
            if msg.from_ in admin:
                targets = []
                key = eval(msg.contentMetadata["MENTION"])
                for x in key["MENTIONEES"]:
                    targets.append(x["M"])
                for target in targets:
                    try:
                        ki6.CloneContactProfile(target)
                        ki6.sendText(msg.to,"Clone Success")
                    except Exception as e:
                        ki6.sendText(msg.to,"Clone Fail")
                        print "Clone Execute"
        elif "7clone " in msg.text:
            if msg.from_ in admin:
                targets = []
                key = eval(msg.contentMetadata["MENTION"])
                for x in key["MENTIONEES"]:
                    targets.append(x["M"])
                for target in targets:
                    try:
                        ki7.CloneContactProfile(target)
                        ki7.sendText(msg.to,"Clone Success")
                    except Exception as e:
                        ki7.sendText(msg.to,"Clone Fail")
                        print "Clone Execute"
elif "8clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki8.CloneContactProfile(target)
ki8.sendText(msg.to,"Clone Success")
except Exception as e:
ki8.sendText(msg.to,"Clone Fail")
print "Clone Execute"
elif "9clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki9.CloneContactProfile(target)
ki9.sendText(msg.to,"Clone Success")
except Exception as e:
ki9.sendText(msg.to,"Clone Fail")
print "Clone Execute"
elif "10clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki10.CloneContactProfile(target)
ki10.sendText(msg.to,"Clone Success")
except Exception as e:
ki10.sendText(msg.to,"Clone Fail")
print "Clone Execute"
            elif ("Bye " in msg.text):
                # Admin-only: kick every mentioned user from the group.
                if msg.from_ in admin:
                    # NOTE(review): eval() on message metadata is unsafe on
                    # untrusted input.
                    key = eval(msg.contentMetadata["MENTION"])
                    # No-op lookup; raises IndexError if MENTIONEES is empty.
                    key["MENTIONEES"][0]["M"]
                    targets = []
                    for x in key["MENTIONEES"]:
                        targets.append(x["M"])
                    for target in targets:
                        try:
                            cl.kickoutFromGroup(msg.to,[target])
                        except:
                            # Best-effort: ignore kick failures (no permission,
                            # member already gone, rate limit).
                            pass
elif "Ratakan" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Ratakan","")
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
gs = ki3.getGroup(msg.to)
gs = ki4.getGroup(msg.to)
gs = ki5.getGroup(msg.to)
gs = ki6.getGroup(msg.to)
gs = ki7.getGroup(msg.to)
gs = ki8.getGroup(msg.to)
gs = ki9.getGroup(msg.to)
gs = ki10.getGroup(msg.to)
cl.sendText(msg.to,"Perintah dilaksanakan")
cl.sendText(msg.to,"Goodbye all ^_^")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Not found.")
else:
for target in targets:
if target not in Bots and admin:
try:
klist=[cl,ki,ki2,ki3,ki4,ki5,ki6,ki7,ki8,ki9,ki10]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
cl.sendText(msg.to,"Done")
print "[Command]Cleanse executed"
elif ".Youtube " in msg.text:
query = msg.text.replace(".Youtube ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
for a in soup.select('.yt-lockup-title > a[title]'):
if '&List' not in a['href']:
cl.sendText(msg.to,'http://www.youtube.com' + a['href'] + a['title'])
            elif ".Xvideo " in msg.text:
                # Search xvideos and post result links.
                # NOTE(review): this is a copy-paste of the YouTube handler —
                # '.yt-lockup-title > a[title]' is YouTube's CSS selector and
                # 'search_query' is YouTube's query parameter, so this loop
                # almost certainly yields nothing on xvideos.  Needs the
                # site's real markup/params to fix; flagged rather than
                # guess-fixed.
                query = msg.text.replace(".Xvideo ","")
                with requests.session() as s:
                    s.headers['user-agent'] = 'Mozilla/5.0'
                    url = 'https://www.xvideos.com/result'
                    params = {'search_query': query}
                    r = s.get(url, params=params)
                    soup = BeautifulSoup(r.content, 'html5lib')
                    for a in soup.select('.yt-lockup-title > a[title]'):
                        if '&List' not in a['href']:
                            cl.sendText(msg.to,'https://www.xvideos.com' + a['href'] + a['title'])
elif "Mimic:" in msg.text:
if msg.from_ in admin:
cmd = msg.text.replace("Mimic:","")
if cmd == "on":
if mimic["status"] == False:
mimic["status"] = True
cl.sendText(msg.to,"Mimic on")
ki.sendText(msg.to,"Mimic on")
else:
cl.sendText(msg.to,"Mimic already on")
ki.sendText(msg.to,"Mimic already on")
elif cmd == "off":
if mimic["status"] == True:
mimic["status"] = False
cl.sendText(msg.to,"Mimic off")
ki.sendText(msg.to,"Mimic off")
else:
cl.sendText(msg.to,"Mimic already off")
ki.sendText(msg.to,"Mimic already off")
elif "add:" in cmd:
target0 = msg.text.replace("Mimic:add:","")
target1 = target0.lstrip()
target2 = target1.replace("@","")
target3 = target2.rstrip()
_name = target3
gInfo = cl.getGroup(msg.to)
gInfo = ki.getGroup(msg.to)
targets = []
for a in gInfo.members:
if _name == a.displayName:
targets.append(a.mid)
if targets == []:
cl.sendText(msg.to,"No targets")
ki.sendText(msg.to,"No targets")
else:
for target in targets:
try:
mimic["target"][target] = True
cl.sendText(msg.to,"Success added target")
ki.sendText(msg.to,"Success added target")
#cl.sendMessageWithMention(msg.to,target)
break
except:
cl.sendText(msg.to,"Failed")
ki.sendText(msg.to,"Failed")
break
elif "del:" in cmd:
target0 = msg.text.replace("Mimic:del:","")
target1 = target0.lstrip()
target2 = target1.replace("@","")
target3 = target2.rstrip()
_name = target3
gInfo = cl.getGroup(msg.to)
gInfo = ki.getGroup(msg.to)
targets = []
for a in gInfo.members:
if _name == a.displayName:
targets.append(a.mid)
if targets == []:
cl.sendText(msg.to,"No targets")
ki.sendText(msg.to,"No targets")
else:
for target in targets:
try:
del mimic["target"][target]
cl.sendText(msg.to,"Success deleted target")
ki.sendText(msg.to,"Success deleted target")
#cl.sendMessageWithMention(msg.to,target)
break
except:
cl.sendText(msg.to,"Failed!")
ki.sendText(msg.to,"Failed!")
break
elif cmd == "ListTarget":
if mimic["target"] == {}:
cl.sendText(msg.to,"No target")
ki.sendText(msg.to,"No target")
else:
lst = "<<Lit Target>>"
total = len(mimic["target"])
for a in mimic["target"]:
if mimic["target"][a] == True:
stat = "On"
else:
stat = "Off"
lst += "\n->" + cl.getContact(mi_d).displayName + ki.getContact(mi_d).displayName +" | " + stat
cl.sendText(msg.to,lst + "\nTotal:" + total)
ki.sendText(msg.to,lst + "\nTotal:" + total)
        if op.type == 19:
            # Kick protection: when someone is removed from a group, a random
            # bot account kicks the kicker (op.param2) back and re-invites the
            # victim (op.param3).
            # NOTE(review): `op.param2 not in Bots and admin` parses as
            # `(op.param2 not in Bots) and admin`; the admin list is truthy, so
            # this never shields admins — presumably `op.param2 not in admin`
            # was intended.  Left as-is here.
            if op.param2 not in Bots and admin:
                random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
                random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
            else:
                pass
        # Recovery path: if one of our own accounts was kicked, a partner
        # account reopens the group via ticket and re-admits everyone.
        if op.type == 19:
            try:
if op.param3 in mid:
if op.param2 in kimid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
else:
G = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
ki.updateGroup(G)
wait["blacklist"][op.param2] = True
elif op.param3 in kimid:
if op.param2 in ki2mid:
G = ki2.getGroup(op.param1)
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
else:
G = ki2.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
elif op.param3 in ki3mid:
if op.param2 in ki2mid:
G = ki2.getGroup(op.param1)
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
else:
G = cl.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
elif op.param3 in ki2mid:
if op.param2 in ki3mid:
G = ki3.getGroup(op.param1)
G.preventJoinByTicket = False
ki3.updateGroup(G)
Ticket = ki3.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki3.updateGroup(G)
else:
G = cl.getGroup(op.param1)
ki3.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki3.updateGroup(G)
Ticket = ki3.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki3.updateGroup(G)
elif op.param3 in ki4mid:
if op.param2 in ki5mid:
G = ki5.getGroup(op.param1)
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
else:
G = ki5.getGroup(op.param1)
ki5.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
elif op.param3 in ki5mid:
if op.param2 in ki4mid:
G = ki4.getGroup(op.param1)
G.preventJoinByTicket = False
ki4.updateGroup(G)
Ticket = ki4.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki4.updateGroup(G)
else:
G = ki4.getGroup(op.param1)
ki4.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki4.updateGroup(G)
Ticket = ki4.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki4.updateGroup(G)
elif op.param3 in ki6mid:
if op.param2 in ki5mid:
G = ki5.getGroup(op.param1)
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
else:
G = ki5.getGroup(op.param1)
ki5.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
elif op.param3 in ki7mid:
if op.param2 in ki6mid:
G = ki6.getGroup(op.param1)
G.preventJoinByTicket = False
ki6.updateGroup(G)
Ticket = ki6.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki6.updateGroup(G)
else:
G = ki6.getGroup(op.param1)
ki6.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki6.updateGroup(G)
Ticket = ki6.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki6.updateGroup(G)
elif op.param3 in ki8mid:
if op.param2 in ki7mid:
G = ki7.getGroup(op.param1)
G.preventJoinByTicket = False
ki7.updateGroup(G)
Ticket = ki7.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki7.updateGroup(G)
else:
G = ki7.getGroup(op.param1)
ki7.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki7.updateGroup(G)
Ticket = ki7.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki7.updateGroup(G)
elif op.param3 in ki9mid:
if op.param2 in ki8mid:
G = ki8.getGroup(op.param1)
G.preventJoinByTicket = False
ki8.updateGroup(G)
Ticket = ki8.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki8.updateGroup(G)
else:
G = ki8.getGroup(op.param1)
ki8.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki8.updateGroup(G)
Ticket = ki8.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki8.updateGroup(G)
elif op.param3 in ki10mid:
if op.param2 in ki9mid:
G = ki9.getGroup(op.param1)
G.preventJoinByTicket = False
ki9.updateGroup(G)
Ticket = ki9.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki9.updateGroup(G)
else:
G = ki9.getGroup(op.param1)
ki9.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki9.updateGroup(G)
Ticket = ki9.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki9.updateGroup(G)
elif op.param3 in ki11mid:
if op.param2 in ki10mid:
G = ki10.getGroup(op.param1)
G.preventJoinByTicket = False
ki10.updateGroup(G)
Ticket = ki10.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki10.updateGroup(G)
else:
G = ki10.getGroup(op.param1)
ki10.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki10.updateGroup(G)
Ticket = ki10.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki10.updateGroup(G)
elif op.param3 in ki12mid:
if op.param2 in ki11mid:
G = ki11.getGroup(op.param1)
G.preventJoinByTicket = False
ki11.updateGroup(G)
Ticket = ki11.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki11.updateGroup(G)
else:
G = ki11.getGroup(op.param1)
ki11.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki11.updateGroup(G)
Ticket = ki11.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki11.updateGroup(G)
elif op.param3 in ki13mid:
if op.param2 in ki12mid:
G = ki12.getGroup(op.param1)
G.preventJoinByTicket = False
ki12.updateGroup(G)
Ticket = ki12.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki12.updateGroup(G)
else:
G = ki12.getGroup(op.param1)
ki12.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki12.updateGroup(G)
Ticket = ki12.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki12.updateGroup(G)
elif op.param3 in ki14mid:
if op.param2 in ki13mid:
G = ki13.getGroup(op.param1)
G.preventJoinByTicket = False
ki13.updateGroup(G)
Ticket = ki13.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki13.updateGroup(G)
else:
G = ki13.getGroup(op.param1)
ki13.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki13.updateGroup(G)
Ticket = ki13.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki13.updateGroup(G)
elif op.param3 in ki15mid:
if op.param2 in ki14mid:
G = ki14.getGroup(op.param1)
G.preventJoinByTicket = False
ki14.updateGroup(G)
Ticket = ki14.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki14.updateGroup(G)
else:
G = ki14.getGroup(op.param1)
ki14.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki14.updateGroup(G)
Ticket = ki14.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki14.updateGroup(G)
elif op.param3 in ki16mid:
if op.param2 in ki15mid:
G = ki15.getGroup(op.param1)
G.preventJoinByTicket = False
ki15.updateGroup(G)
Ticket = ki15.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki15.updateGroup(G)
else:
G = ki15.getGroup(op.param1)
ki15.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki15.updateGroup(G)
Ticket = ki15.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
ki11.acceptGroupInvitationByTicket(op.param1,Ticket)
ki12.acceptGroupInvitationByTicket(op.param1,Ticket)
ki13.acceptGroupInvitationByTicket(op.param1,Ticket)
ki14.acceptGroupInvitationByTicket(op.param1,Ticket)
ki15.acceptGroupInvitationByTicket(op.param1,Ticket)
ki16.acceptGroupInvitationByTicket(op.param1,Ticket)
ki17.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki15.updateGroup(G)
except:
pass
if op.type == 17:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
if wait["protect"] == True:
if wait["blacklist"][op.param2] == True:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
# random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
# pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
# random.choice(KAK).kickoutFromGroup(op.param1,[op.param2])
except:
pass
elif op.param2 not in Bots:
random.choice(KAC).sendText(op.param1,"Welcome. Don't Play Bots. I can kick you!")
else:
pass
if op.type == 19:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["protect"] == True:
wait ["blacklist"][op.param2] = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.type == 13:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.cancelGroupInvitation(op.param1,[op.param3])
cl.kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["cancelprotect"] == True:
cl.cancelGroupInvitation(op.param1,[contact.mid for contact in cl.getGroup(op.param1).invitee])
if op.type == 11:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["linkprotect"] == True:
wait ["blacklist"][op.param2] = True
G =ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
cl.sendText(op.param1,"Jangan maenin QR!")
else:
cl.sendText(op.param1,"")
if op.type == 5:
if wait["autoAdd"] == True:
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 17:
if op.param1 in Bots:
group = random.choice(KAC).getGroup(op.param1)
cb = Message()
cb.to = op.param1
cb.text = "Ada member baru gaes namanya " + random.choice(KAC).getContact(op.param2).displayName + "ლ(╹◡╹ლ)"
random.choice(KAC).sendMessage(cb)
cb2 = Message()
cb2.to = op.param1
cb2.text = "Welcome " + random.choice(KAC).getContact(op.param2).displayName + " di grup [" + group.name + "]"
cl.sendMessage(cb2)
cb3 = Message()
cb3.to = op.param1
cb3.text = "Semoga betah ya (◠‿◠)" + "\n\nCreator Grup=> " + group.creator.displayName
random.choice(KAC).sendMessage(cb3)
if op.type == 15:
if op.param2 in Bots:
return
cl.sendText(op.param1, "(っ◔◡◔)っ Goodbye")
random.choice(KAC).sendText(op.param1, "Jangan balik lagi (づ ̄ ³ ̄)づ")
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "ツ" + Name
wait2['ROM'][op.param1][op.param2] = "ツ" + Name
else:
cl.sendText
except:
pass
if op.type == 26:
msg = op.message
try:
if msg.contentType == 0:
try:
if msg.to in wait2['readPoint']:
if msg.from_ in wait2["ROM"][msg.to]:
del wait2["ROM"][msg.to][msg.from_]
else:
pass
except:
pass
else:
pass
except KeyboardInterrupt:
sys.exit(0)
except Exception as error:
print error
print ("\n\nRECEIVE_MESSAGE\n\n")
return
if op.type == 59:
print op
except Exception as error:
print error
def a2(now=None):
    """Return True while the current minute is NOT a multiple of ten.

    Used as a "keep waiting" predicate: returns False exactly on minutes
    "00", "10", "20", "30", "40", "50".

    now -- optional datetime to test against; defaults to datetime.now()
           (parameter added for testability, backward compatible).

    Bug fixed: the original sliced ``nowT[14:]`` on the two-character
    "%M" string, which is always "" and therefore never matched, so the
    function unconditionally returned True.
    """
    if now is None:
        now = datetime.now()
    minute = datetime.strftime(now, "%M")
    return minute not in ("00", "10", "20", "30", "40", "50")
def autolike():
    # Background worker: polls the primary account's timeline and, when
    # enabled via wait['likeOn'] / wait['commentOn'], auto-likes and
    # auto-comments every not-yet-liked post using multiple accounts.
    # NOTE(review): the like section uses `ki1`, but the comment section and
    # the rest of the file use `ki`..`ki17`; if `ki1` is undefined, the
    # NameError is silently swallowed by the bare `except` below — confirm
    # the intended account variable.
    count = 1
    while True:
        try:
            for posts in ki.activity(1)['result']['posts']:
                if posts['postInfo']['liked'] is False:
                    if wait['likeOn'] == True:
                        # 1001 is presumably the like/reaction type code
                        # expected by the API — confirm against the client lib.
                        cl.like(posts['userInfo']['writerMid'],posts['postInfo']['postId'], 1001)
                        ki1.like(posts['userInfo']['writerMid'],posts['postInfo']['postId'], 1001)
                        ki2.like(posts['userInfo']['writerMid'],posts['postInfo']['postId'], 1001)
                        ki3.like(posts['userInfo']['writerMid'],posts['postInfo']['postId'], 1001)
                        ki4.like(posts['userInfo']['writerMid'],posts['postInfo']['postId'], 1001)
                        ki5.like(posts['userInfo']['writerMid'],posts['postInfo']['postId'], 1001)
                        ki6.like(posts['userInfo']['writerMid'],posts['postInfo']['postId'], 1001)
                        ki7.like(posts['userInfo']['writerMid'],posts['postInfo']['postId'], 1001)
                        ki8.like(posts['userInfo']['writerMid'],posts['postInfo']['postId'], 1001)
                        ki9.like(posts['userInfo']['writerMid'],posts['postInfo']['postId'], 1001)
                        ki10.like(posts['userInfo']['writerMid'],posts['postInfo']['postId'], 1001)
                        print "Like"
                    if wait['commentOn'] == True:
                        if posts['userInfo']['writerMid'] in wait['commentBlack']:
                            # Author is blacklisted for auto-comments; skip.
                            pass
                        else:
                            cl.comment(posts['userInfo']['writerMid'],posts['postInfo']['postId'],wait['comment'])
                            ki.comment(posts['userInfo']['writerMid'],posts['postInfo']['postId'],wait['comment'])
                            ki2.comment(posts['userInfo']['writerMid'],posts['postInfo']['postId'],wait['comment'])
                            ki3.comment(posts['userInfo']['writerMid'],posts['postInfo']['postId'],wait['comment'])
                            ki4.comment(posts['userInfo']['writerMid'],posts['postInfo']['postId'],wait['comment'])
                            ki5.comment(posts['userInfo']['writerMid'],posts['postInfo']['postId'],wait['comment'])
                            ki6.comment(posts['userInfo']['writerMid'],posts['postInfo']['postId'],wait['comment'])
                            ki7.comment(posts['userInfo']['writerMid'],posts['postInfo']['postId'],wait['comment'])
                            ki8.comment(posts['userInfo']['writerMid'],posts['postInfo']['postId'],wait['comment'])
                            ki9.comment(posts['userInfo']['writerMid'],posts['postInfo']['postId'],wait['comment'])
                            ki10.comment(posts['userInfo']['writerMid'],posts['postInfo']['postId'],wait['comment'])
        except:
            # Any failure (network, missing key, undefined name) counts
            # toward the abort threshold; after ~50 errors the process exits.
            count += 1
            if(count == 50):
                sys.exit(0)
            else:
                pass
# Start the auto-like worker; daemon so it dies with the main thread.
thread2 = threading.Thread(target=autolike)
thread2.daemon = True
thread2.start()
def nameUpdate():
    """Background worker: while wait["clock"] is enabled, append the current
    time (as "(H:M)" decorated text) to the configured display name
    wait["cName"], refreshing roughly every 10 minutes.

    Fixes: the original slept only inside the `if`/`try`, so a disabled
    clock — or any exception raised before the sleep — turned this loop
    into a CPU-burning busy spin. The sleep now always runs. The bare
    `except` is also narrowed to `except Exception`.
    """
    while True:
        try:
            if wait["clock"] == True:
                now2 = datetime.now()
                nowT = datetime.strftime(now2,"༺%H:%M༻")
                profile = cl.getProfile()
                profile.displayName = wait["cName"] + nowT
                cl.updateProfile(profile)
        except Exception:
            # Best-effort: profile updates may fail transiently; try again
            # on the next cycle rather than killing the worker thread.
            pass
        # Sleep unconditionally so neither a disabled clock nor an error
        # path spins the loop without pause.
        time.sleep(600)
# Start the clock-name worker. This rebinds `thread2`; the autolike thread
# keeps running — only the variable reference is replaced.
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
# Main polling loop: fetch up to 5 pending operations and dispatch each to
# bot(). Runs forever on the main thread.
while True:
    try:
        Ops = cl.fetchOps(cl.Poll.rev, 5)
    except EOFError:
        raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
    for Op in Ops:
        if (Op.type != OpType.END_OF_OPERATION):
            # Advance the revision cursor so the same ops are not re-fetched.
            cl.Poll.rev = max(cl.Poll.rev, Op.revision)
            bot(Op)
|
44_2_multithread_GIL.py | from threading import Thread
def do_sth():
    # Pure-Python busy loop: never blocks and never releases the GIL
    # voluntarily, making it a worst case for CPython threading.
    while True:
        pass
# Two worker threads plus the main thread all spin in do_sth(); under
# CPython's GIL they interleave on a single core rather than run in parallel.
Thread(target=do_sth).start()
Thread(target=do_sth).start()
do_sth()
"""
三个进程占满了八核CPU中的其中一核心。因此,多线程不可以并行,只能并发,交替处理问题.
"""
"""
我们编写的Python代码是通过Python解释器来执行的。通常使用的Python解释器是官方提供的CPython。CPython中有一个GIL(Global Interpreter Lock,全局解释器锁),其作用相当于Lock,任何线程在执行前必须先获得GIL,一个线程在获得GIL后其它线程就不能执行,直到该线程释放GIL。因此,GIL保证了同一时刻只有一个线程可以执行,从而导致Python中的多线程不能实现并行。 PYPY JPYTHON解释器就不会
"""
|
test_http.py | import gzip
import io
import json
import logging
import threading
import unittest
from spectator import Registry
from spectator.http import HttpClient
try:
from BaseHTTPServer import HTTPServer
from BaseHTTPServer import BaseHTTPRequestHandler
except ImportError:
# python3
from http.server import HTTPServer
from http.server import BaseHTTPRequestHandler
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
class HttpTest(unittest.TestCase):
    """Integration tests for HttpClient against a local gzip-echo server.

    Each test POSTs a payload whose "status" field tells RequestHandler
    which HTTP status to answer with, then asserts that exactly one
    "http.req.complete" timer was recorded with the matching status tags.

    Improvements over the original: the six near-identical test bodies are
    deduplicated into _check_post(), and the server thread is daemonized so
    a crashed test cannot leave the process hanging at exit.
    """

    def setUp(self):
        # Port 0 lets the OS pick a free port; it is read back for the URI.
        self._server = HTTPServer(("localhost", 0), RequestHandler)
        self._uri = "http://localhost:{}/path".format(self._server.server_port)
        t = threading.Thread(target=self._server.serve_forever)
        # Daemonize so interpreter shutdown is never blocked on this thread.
        t.daemon = True
        t.start()

    def tearDown(self):
        self._server.shutdown()
        self._server.server_close()

    def _check_post(self, payload, status, status_code):
        """POST payload and assert one completed request with given tags."""
        r = Registry()
        client = HttpClient(r)
        client.post_json(self._uri, payload, retry_delay=0)
        tags = {
            "mode": "http-client",
            "method": "POST",
            "client": "spectator-py",
            "status": status,
            "statusCode": status_code
        }
        t = r.timer("http.req.complete", tags)
        self.assertEqual(t.count(), 1)

    def test_do_post_ok(self):
        self._check_post('{"status": 200}', "2xx", "200")

    def test_do_post_404(self):
        self._check_post('{"status": 404}', "4xx", "404")

    def test_do_post_429(self):
        self._check_post('{"status": 429}', "4xx", "429")

    def test_do_post_503(self):
        self._check_post('{"status": 503}', "5xx", "503")

    def test_do_post_bad_json(self):
        # Truncated JSON makes the server answer 400.
        self._check_post('{"status": ', "4xx", "400")

    def test_do_post_encode(self):
        # A dict payload exercises the client's own JSON encoding.
        self._check_post({"status": 202}, "2xx", "202")

    def test_do_post_network_error(self):
        # Stop the server up front so the request fails at the socket level;
        # tearDown runs again afterwards, matching the original flow.
        self.tearDown()
        self._check_post("{}", "URLError", "URLError")
class RequestHandler(BaseHTTPRequestHandler):
    """Test endpoint: reads a gzipped JSON body and replies with the HTTP
    status named in its "status" field, echoing the payload back gzipped.
    Malformed requests get a plain-text 400."""

    @staticmethod
    def _compress(entity):
        # Gzip-encode a text payload into bytes.
        sink = io.BytesIO()
        with gzip.GzipFile(fileobj=sink, mode="w") as gz:
            gz.write(entity.encode('utf-8'))
        return sink.getvalue()

    def do_POST(self):
        try:
            size = int(self.headers['Content-Length'])
            body = io.BytesIO(self.rfile.read(size))
            # The client always gzips its request bodies; decode then parse.
            data = json.loads(gzip.GzipFile(fileobj=body).read().decode())
            self.send_response(data["status"])
            self.send_header('Content-Encoding', 'gzip')
            self.end_headers()
            self.wfile.write(self._compress("received: {}".format(data)))
        except Exception as e:
            self.send_response(400)
            self.end_headers()
            reply = "error processing request: {}".format(e)
            self.wfile.write(reply.encode('utf-8'))

    def log_message(self, format, *args):
        # Silence per-request logging noise during test runs.
        pass
|
test_io.py | """Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import assert_python_ok, run_python_until_end
from test.support import FakePath
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
    if '__pypy__' in sys.builtin_module_names:
        raise ImportError # don't use ctypes, missing ctypes.resize()
    import ctypes
except ImportError:
    def byteslike(*pos, **kw):
        # Fallback: array('b') is bytes-like yet lacks str/sequence methods.
        return array.array("b", bytes(*pos, **kw))
else:
    def byteslike(*pos, **kw):
        """Create a bytes-like object having no string or sequence methods"""
        data = bytes(*pos, **kw)
        obj = EmptyStruct()
        # Grow the zero-field struct to the payload size, then copy the
        # bytes in through a writable memoryview.
        ctypes.resize(obj, len(data))
        memoryview(obj).cast("B")[:] = data
        return obj
    class EmptyStruct(ctypes.Structure):
        # Deliberately field-less; resized at runtime by byteslike().
        pass
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
    """A RawIO implementation without read(), so as to exercise the default
    RawIO.read() which calls readinto()."""

    def __init__(self, read_stack=()):
        self._read_stack = list(read_stack)
        self._write_stack = []
        self._reads = 0
        self._extraneous_reads = 0

    def write(self, b):
        payload = bytes(b)
        self._write_stack.append(payload)
        return len(payload)

    def writable(self):
        return True

    def fileno(self):
        return 42

    def readable(self):
        return True

    def seekable(self):
        return True

    def seek(self, pos, whence):
        return 0  # wrong but we gotta return something

    def tell(self):
        return 0  # same comment as above

    def readinto(self, buf):
        self._reads += 1
        # Nothing queued: record the extraneous read and report EOF.
        if not self._read_stack:
            self._extraneous_reads += 1
            return 0
        chunk = self._read_stack[0]
        # A queued None simulates a "would block" raw read.
        if chunk is None:
            del self._read_stack[0]
            return None
        room = len(buf)
        if len(chunk) <= room:
            # Whole chunk fits: consume it.
            del self._read_stack[0]
            buf[:len(chunk)] = chunk
            return len(chunk)
        # Chunk is larger than the buffer: deliver a prefix, keep the rest.
        buf[:] = chunk[:room]
        self._read_stack[0] = chunk[room:]
        return room

    def truncate(self, pos=None):
        return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
    # C-implementation flavour: mixes the mock into io.RawIOBase.
    pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
    # Pure-Python flavour: mixes the mock into _pyio.RawIOBase.
    pass
class MockRawIO(MockRawIOWithoutRead):
    """Mock raw stream that also implements read(), popping queued chunks."""

    def read(self, n=None):
        self._reads += 1
        # Pop the next queued chunk; once exhausted, count the extra read
        # and report EOF, exactly as the readinto() path does.
        if self._read_stack:
            return self._read_stack.pop(0)
        self._extraneous_reads += 1
        return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
    # C-implementation flavour of MockRawIO.
    pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
    # Pure-Python flavour of MockRawIO.
    pass
class MisbehavedRawIO(MockRawIO):
    """Mock whose raw operations lie about their results, for testing that
    the buffered layers detect and reject inconsistent raw streams."""

    def write(self, b):
        # Claim to have written twice as much as was actually accepted.
        return 2 * super().write(b)

    def read(self, n=None):
        # Return the queued data duplicated.
        return 2 * super().read(n)

    def seek(self, pos, whence):
        return -123

    def tell(self):
        return -456

    def readinto(self, buf):
        super().readinto(buf)
        # Claim five buffers' worth of data was delivered.
        return 5 * len(buf)
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
    # C-implementation flavour of MisbehavedRawIO.
    pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
    # Pure-Python flavour of MisbehavedRawIO.
    pass
class CloseFailureIO(MockRawIO):
    """Mock whose first close() raises OSError; later closes are no-ops."""

    closed = 0

    def close(self):
        # Already closed: swallow repeated close() calls silently.
        if self.closed:
            return
        self.closed = 1
        raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
    # C-implementation flavour of CloseFailureIO.
    pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
    # Pure-Python flavour of CloseFailureIO.
    pass
class MockFileIO:
    """Mixin that records the size of every read()/readinto() result in
    self.read_history (None for a would-block read). Must be combined with
    a BytesIO-like base class that provides the actual read methods."""

    def __init__(self, data):
        self.read_history = []
        super().__init__(data)

    def read(self, n=None):
        chunk = super().read(n)
        self.read_history.append(None if chunk is None else len(chunk))
        return chunk

    def readinto(self, b):
        count = super().readinto(b)
        self.read_history.append(count)
        return count
class CMockFileIO(MockFileIO, io.BytesIO):
    # C-implementation flavour: history-recording mixin over io.BytesIO.
    pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
    # Pure-Python flavour over _pyio.BytesIO.
    pass
class MockUnseekableIO:
    """Mixin that reports and enforces non-seekability. Subclasses must
    provide the UnsupportedOperation attribute (io or _pyio flavour)."""

    def _unsupported(self):
        raise self.UnsupportedOperation("not seekable")

    def seekable(self):
        return False

    def seek(self, *args):
        self._unsupported()

    def tell(self, *args):
        self._unsupported()

    def truncate(self, *args):
        self._unsupported()
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
    # C-implementation flavour; raises io.UnsupportedOperation.
    UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
    # Pure-Python flavour; raises _pyio.UnsupportedOperation.
    UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
    """Mock writer that simulates a non-blocking stream: after block_on(c),
    the write that starts with c "would block" (returns None) and clears
    the blocker; a write containing c later on succeeds only up to it."""

    def __init__(self):
        self._write_stack = []
        self._blocker_char = None

    def pop_written(self):
        written = b"".join(self._write_stack)
        self._write_stack[:] = []
        return written

    def block_on(self, char):
        """Block when a given char is encountered."""
        self._blocker_char = char

    def readable(self):
        return True

    def seekable(self):
        return True

    def writable(self):
        return True

    def write(self, b):
        payload = bytes(b)
        if self._blocker_char:
            try:
                cut = payload.index(self._blocker_char)
            except ValueError:
                pass
            else:
                if cut > 0:
                    # Accept only the data preceding the first blocker.
                    self._write_stack.append(payload[:cut])
                    return cut
                # Blocker at position 0: clear it and signal "would block".
                self._blocker_char = None
                return None
        self._write_stack.append(payload)
        return len(payload)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
    # C-implementation flavour; pairs with io.BlockingIOError.
    BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
    # Pure-Python flavour; pairs with _pyio.BlockingIOError.
    BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
    def setUp(self):
        # Remove any stale test file left over from a previous run.
        support.unlink(support.TESTFN)
    def tearDown(self):
        # Clean up the test file created during the test.
        support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b" world\n\n\n")
self.assertEqual(f.write(buffer), 9)
buffer[:] = b"*" * 9 # Overwrite our copy of the data
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(bytes(data), b" worl")
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(byteslike(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
f.seek(0)
data = byteslike(5)
self.assertEqual(f.readinto1(data), 5)
self.assertEqual(bytes(data), b"hello")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
try:
self.assertEqual(f.seek(self.LARGE), self.LARGE)
except (OverflowError, ValueError):
self.skipTest("no largefile support")
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_optional_abilities(self):
# Test for OSError when optional APIs are not supported
# The purpose of this test is to try fileno(), reading, writing and
# seeking operations with various objects that indicate they do not
# support these operations.
def pipe_reader():
[r, w] = os.pipe()
os.close(w) # So that read() is harmless
return self.FileIO(r, "r")
def pipe_writer():
[r, w] = os.pipe()
self.addCleanup(os.close, r)
# Guarantee that we can write into the pipe without blocking
thread = threading.Thread(target=os.read, args=(r, 100))
thread.start()
self.addCleanup(thread.join)
return self.FileIO(w, "w")
def buffered_reader():
return self.BufferedReader(self.MockUnseekableIO())
def buffered_writer():
return self.BufferedWriter(self.MockUnseekableIO())
def buffered_random():
return self.BufferedRandom(self.BytesIO())
def buffered_rw_pair():
return self.BufferedRWPair(self.MockUnseekableIO(),
self.MockUnseekableIO())
def text_reader():
class UnseekableReader(self.MockUnseekableIO):
writable = self.BufferedIOBase.writable
write = self.BufferedIOBase.write
return self.TextIOWrapper(UnseekableReader(), "ascii")
def text_writer():
class UnseekableWriter(self.MockUnseekableIO):
readable = self.BufferedIOBase.readable
read = self.BufferedIOBase.read
return self.TextIOWrapper(UnseekableWriter(), "ascii")
tests = (
(pipe_reader, "fr"), (pipe_writer, "fw"),
(buffered_reader, "r"), (buffered_writer, "w"),
(buffered_random, "rws"), (buffered_rw_pair, "rw"),
(text_reader, "r"), (text_writer, "w"),
(self.BytesIO, "rws"), (self.StringIO, "rws"),
)
for [test, abilities] in tests:
if test is pipe_writer and not threading:
continue # Skip subtest that uses a background thread
with self.subTest(test), test() as obj:
readable = "r" in abilities
self.assertEqual(obj.readable(), readable)
writable = "w" in abilities
self.assertEqual(obj.writable(), writable)
if isinstance(obj, self.TextIOBase):
data = "3"
elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
data = b"3"
else:
self.fail("Unknown base class")
if "f" in abilities:
obj.fileno()
else:
self.assertRaises(OSError, obj.fileno)
if readable:
obj.read(1)
obj.read()
else:
self.assertRaises(OSError, obj.read, 1)
self.assertRaises(OSError, obj.read)
if writable:
obj.write(data)
else:
self.assertRaises(OSError, obj.write, data)
if sys.platform.startswith("win") and test in (
pipe_reader, pipe_writer):
# Pipes seem to appear as seekable on Windows
continue
seekable = "s" in abilities
self.assertEqual(obj.seekable(), seekable)
if seekable:
obj.tell()
obj.seek(0)
else:
self.assertRaises(OSError, obj.tell)
self.assertRaises(OSError, obj.seek, 0)
if writable and seekable:
obj.truncate()
obj.truncate(0)
else:
self.assertRaises(OSError, obj.truncate)
self.assertRaises(OSError, obj.truncate, 0)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w')
bytes_fn = bytes(fn_with_NUL, 'ascii')
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertRaises(ValueError, self.open, bytes_fn, 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_readline_nonsizeable(self):
# Issue #30061
# Crash when readline() returns an object without __len__
class R(self.IOBase):
def readline(self):
return None
self.assertRaises((TypeError, StopIteration), next, R())
def test_next_nonsizeable(self):
# Issue #30061
# Crash when __next__() returns an object without __len__
class R(self.IOBase):
def __next__(self):
return None
self.assertRaises(TypeError, R().readlines, 1)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
# On Windows and Mac OSX this test consumes large resources; It takes
# a long time to build the >2 GiB file and takes >2 GiB of disk space
# therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with support.check_warnings(('', ResourceWarning)):
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
def check(f):
with f:
self.assertEqual(f.write(a), n)
f.writelines((a,))
check(self.BytesIO())
check(self.FileIO(support.TESTFN, "w"))
check(self.BufferedWriter(self.MockRawIO()))
check(self.BufferedRandom(self.MockRawIO()))
check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()))
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with support.check_warnings(('', ResourceWarning)):
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2GB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite failed flush
# and that flush() is called before file closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(support.TESTFN, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(support.TESTFN, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default limited RawIOBase.read(n) implementation (which
# calls readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
fd = os.open(support.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_bad_opener_negative_1(self):
# Issue #27066.
def badopener(fname, flags):
return -1
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -1')
def test_bad_opener_other_negative(self):
# Issue #27066.
def badopener(fname, flags):
return -2
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -2')
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
def test_invalid_newline(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
def test_buffered_readinto_mixin(self):
# Test the implementation provided by BufferedIOBase
class Stream(self.BufferedIOBase):
def read(self, size):
return b"12345"
read1 = read
stream = Stream()
for method in ("readinto", "readinto1"):
with self.subTest(method):
buffer = byteslike(5)
self.assertEqual(getattr(stream, method)(buffer), 5)
self.assertEqual(bytes(buffer), b"12345")
def test_fspath_support(self):
def check_path_succeeds(path):
with self.open(path, "w") as f:
f.write("egg\n")
with self.open(path, "r") as f:
self.assertEqual(f.read(), "egg\n")
check_path_succeeds(FakePath(support.TESTFN))
check_path_succeeds(FakePath(support.TESTFN.encode('utf-8')))
with self.open(support.TESTFN, "w") as f:
bad_path = FakePath(f.fileno())
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(None)
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(FloatingPointError)
with self.assertRaises(FloatingPointError):
self.open(bad_path, 'w')
# ensure that refcounting is correct with some error conditions
with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
self.open(FakePath(support.TESTFN), 'rwxa')
def test_RawIOBase_readall(self):
# Exercise the default unlimited RawIOBase.read() and readall()
# implementations.
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.read(), b"abcdefg")
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.readall(), b"abcdefg")
def test_BufferedIOBase_readinto(self):
# Exercise the default BufferedIOBase.readinto() and readinto1()
# implementations (which call read() or read1() internally).
class Reader(self.BufferedIOBase):
def __init__(self, avail):
self.avail = avail
def read(self, size):
result = self.avail[:size]
self.avail = self.avail[size:]
return result
def read1(self, size):
"""Returns no more than 5 bytes at once"""
return self.read(min(size, 5))
tests = (
# (test method, total data available, read buffer size, expected
# read size)
("readinto", 10, 5, 5),
("readinto", 10, 6, 6), # More than read1() can return
("readinto", 5, 6, 5), # Buffer larger than total available
("readinto", 6, 7, 6),
("readinto", 10, 0, 0), # Empty buffer
("readinto1", 10, 5, 5), # Result limited to single read1() call
("readinto1", 10, 6, 5), # Buffer larger than read1() can return
("readinto1", 5, 6, 5), # Buffer larger than total available
("readinto1", 6, 7, 5),
("readinto1", 10, 0, 0), # Empty buffer
)
UNUSED_BYTE = 0x81
for test in tests:
with self.subTest(test):
method, avail, request, result = test
reader = Reader(bytes(range(avail)))
buffer = bytearray((UNUSED_BYTE,) * request)
method = getattr(reader, method)
self.assertEqual(method(buffer), result)
self.assertEqual(len(buffer), request)
self.assertSequenceEqual(buffer[:result], range(result))
unused = (UNUSED_BYTE,) * (request - result)
self.assertSequenceEqual(buffer[result:], unused)
self.assertEqual(len(reader.avail), avail - result)
def test_close_assert(self):
class R(self.IOBase):
def __setattr__(self, name, value):
pass
def flush(self):
raise OSError()
f = R()
# This would cause an assertion failure.
self.assertRaises(OSError, f.close)
class CIOTest(IOTest):
    """C (_io) flavour of IOTest, plus C-implementation-only checks."""
    def test_IOBase_finalize(self):
        # Issue #12149: segmentation fault on _PyIOBase_finalize when both a
        # class which inherits IOBase and an object of this class are caught
        # in a reference cycle and close() is already in the method cache.
        class MyIO(self.IOBase):
            def close(self):
                pass
        # create an instance to populate the method cache
        MyIO()
        obj = MyIO()
        obj.obj = obj
        wr = weakref.ref(obj)
        del MyIO
        del obj
        support.gc_collect()
        self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
    """Pure-Python (_pyio) flavour of IOTest."""
    pass
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
    """Check that the C (_io) and pure-Python (_pyio) RawIOBase classes
    expose the same set of methods, in both directions."""
    def test_RawIOBase_io_in_pyio_match(self):
        """Test that pyio RawIOBase class has all c RawIOBase methods"""
        mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
                                               ignore=('__weakref__',))
        self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods')
    def test_RawIOBase_pyio_in_io_match(self):
        """Test that c RawIOBase class has all pyio RawIOBase methods"""
        mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
        self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
    # Tests common to BufferedReader, BufferedWriter and BufferedRandom
    # Mixed into concrete TestCase classes that define self.tp (the
    # buffered class under test) and the Mock* raw-IO helpers.
    def test_detach(self):
        # detach() hands back the raw stream exactly once.
        raw = self.MockRawIO()
        buf = self.tp(raw)
        self.assertIs(buf.detach(), raw)
        self.assertRaises(ValueError, buf.detach)
        repr(buf)  # Should still work
    def test_fileno(self):
        # fileno() is forwarded to the raw object (MockRawIO returns 42).
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        self.assertEqual(42, bufio.fileno())
    def test_invalid_args(self):
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        # Invalid whence
        self.assertRaises(ValueError, bufio.seek, 0, -1)
        self.assertRaises(ValueError, bufio.seek, 0, 9)
    def test_override_destructor(self):
        # Destruction calls __del__, then close, then flush, with the
        # subclass overrides all honoured.
        tp = self.tp
        record = []
        class MyBufferedIO(tp):
            def __del__(self):
                record.append(1)
                try:
                    f = super().__del__
                except AttributeError:
                    pass
                else:
                    f()
            def close(self):
                record.append(2)
                super().close()
            def flush(self):
                record.append(3)
                super().flush()
        rawio = self.MockRawIO()
        bufio = MyBufferedIO(rawio)
        del bufio
        support.gc_collect()
        self.assertEqual(record, [1, 2, 3])
    def test_context_manager(self):
        # Test usability as a context manager
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        def _with():
            with bufio:
                pass
        _with()
        # bufio should now be closed, and using it a second time should raise
        # a ValueError.
        self.assertRaises(ValueError, _with)
    def test_error_through_destructor(self):
        # Test that the exception state is not modified by a destructor,
        # even if close() fails.
        rawio = self.CloseFailureIO()
        def f():
            self.tp(rawio).xyzzy
        with support.captured_output("stderr") as s:
            self.assertRaises(AttributeError, f)
        s = s.getvalue().strip()
        if s:
            # The destructor *may* have printed an unraisable error, check it
            self.assertEqual(len(s.splitlines()), 1)
            self.assertTrue(s.startswith("Exception OSError: "), s)
            self.assertTrue(s.endswith(" ignored"), s)
    def test_repr(self):
        # repr() includes the raw stream's name when it has one, with
        # str and bytes names formatted differently.
        raw = self.MockRawIO()
        b = self.tp(raw)
        clsname = "%s.%s" % (self.tp.__module__, self.tp.__qualname__)
        self.assertEqual(repr(b), "<%s>" % clsname)
        raw.name = "dummy"
        self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
        raw.name = b"dummy"
        self.assertEqual(repr(b), "<%s name=b'dummy'>" % clsname)
    def test_recursive_repr(self):
        # Issue #25455
        raw = self.MockRawIO()
        b = self.tp(raw)
        with support.swap_attr(raw, 'name', b):
            try:
                repr(b)  # Should not crash
            except RuntimeError:
                pass
    def test_flush_error_on_close(self):
        # Test that buffered file is closed despite failed flush
        # and that flush() is called before file closed.
        raw = self.MockRawIO()
        closed = []
        def bad_flush():
            closed[:] = [b.closed, raw.closed]
            raise OSError()
        raw.flush = bad_flush
        b = self.tp(raw)
        self.assertRaises(OSError, b.close) # exception not swallowed
        self.assertTrue(b.closed)
        self.assertTrue(raw.closed)
        self.assertTrue(closed)      # flush() called
        self.assertFalse(closed[0])  # flush() called before file closed
        self.assertFalse(closed[1])
        raw.flush = lambda: None  # break reference loop
    def test_close_error_on_close(self):
        # When both flush() and close() fail, the close() error wins and
        # the flush() error is chained as __context__.
        raw = self.MockRawIO()
        def bad_flush():
            raise OSError('flush')
        def bad_close():
            raise OSError('close')
        raw.close = bad_close
        b = self.tp(raw)
        b.flush = bad_flush
        with self.assertRaises(OSError) as err: # exception not swallowed
            b.close()
        self.assertEqual(err.exception.args, ('close',))
        self.assertIsInstance(err.exception.__context__, OSError)
        self.assertEqual(err.exception.__context__.args, ('flush',))
        self.assertFalse(b.closed)
    def test_nonnormalized_close_error_on_close(self):
        # Issue #21677
        raw = self.MockRawIO()
        def bad_flush():
            raise non_existing_flush
        def bad_close():
            raise non_existing_close
        raw.close = bad_close
        b = self.tp(raw)
        b.flush = bad_flush
        with self.assertRaises(NameError) as err: # exception not swallowed
            b.close()
        self.assertIn('non_existing_close', str(err.exception))
        self.assertIsInstance(err.exception.__context__, NameError)
        self.assertIn('non_existing_flush', str(err.exception.__context__))
        self.assertFalse(b.closed)
    def test_multi_close(self):
        # close() is idempotent; flush() afterwards raises ValueError.
        raw = self.MockRawIO()
        b = self.tp(raw)
        b.close()
        b.close()
        b.close()
        self.assertRaises(ValueError, b.flush)
    def test_unseekable(self):
        bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
        self.assertRaises(self.UnsupportedOperation, bufio.tell)
        self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
    def test_readonly_attributes(self):
        raw = self.MockRawIO()
        buf = self.tp(raw)
        x = self.MockRawIO()
        with self.assertRaises(AttributeError):
            buf.raw = x
class SizeofTest:
    """Mixin checking that sys.getsizeof() accounts for the internal
    buffer and that close() releases it (CPython-only accounting)."""
    @support.cpython_only
    def test_sizeof(self):
        bufsize1 = 4096
        bufsize2 = 8192
        rawio = self.MockRawIO()
        bufio = self.tp(rawio, buffer_size=bufsize1)
        # Object size minus buffer should be constant across buffer sizes.
        size = sys.getsizeof(bufio) - bufsize1
        rawio = self.MockRawIO()
        bufio = self.tp(rawio, buffer_size=bufsize2)
        self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
    @support.cpython_only
    def test_buffer_freeing(self) :
        bufsize = 4096
        rawio = self.MockRawIO()
        bufio = self.tp(rawio, buffer_size=bufsize)
        size = sys.getsizeof(bufio) - bufsize
        bufio.close()
        # After close() the internal buffer must have been released.
        self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
    def test_constructor(self):
        # __init__ may be called again to re-target the buffered object;
        # invalid buffer sizes are rejected.
        rawio = self.MockRawIO([b"abc"])
        bufio = self.tp(rawio)
        bufio.__init__(rawio)
        bufio.__init__(rawio, buffer_size=1024)
        bufio.__init__(rawio, buffer_size=16)
        self.assertEqual(b"abc", bufio.read())
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        rawio = self.MockRawIO([b"abc"])
        bufio.__init__(rawio)
        self.assertEqual(b"abc", bufio.read())
    def test_uninitialized(self):
        # Using an object created via __new__ without __init__ must raise,
        # not crash; a later __init__ makes it usable.
        bufio = self.tp.__new__(self.tp)
        del bufio
        bufio = self.tp.__new__(self.tp)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               bufio.read, 0)
        bufio.__init__(self.MockRawIO())
        self.assertEqual(bufio.read(0), b'')
    def test_read(self):
        # read(None) and read(n) both gather the full chunked content.
        for arg in (None, 7):
            rawio = self.MockRawIO((b"abc", b"d", b"efg"))
            bufio = self.tp(rawio)
            self.assertEqual(b"abcdefg", bufio.read(arg))
        # Invalid args
        self.assertRaises(ValueError, bufio.read, -2)
    def test_read1(self):
        # read1() performs at most one raw read; _reads counts raw calls.
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"a", bufio.read(1))
        self.assertEqual(b"b", bufio.read1(1))
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(b"c", bufio.read1(100))
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(b"d", bufio.read1(100))
        self.assertEqual(rawio._reads, 2)
        self.assertEqual(b"efg", bufio.read1(100))
        self.assertEqual(rawio._reads, 3)
        self.assertEqual(b"", bufio.read1(100))
        self.assertEqual(rawio._reads, 4)
        # Invalid args
        self.assertRaises(ValueError, bufio.read1, -1)
    def test_readinto(self):
        # readinto() fills the buffer across raw chunk boundaries; at EOF
        # it returns a short count and leaves trailing bytes untouched.
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        b = bytearray(2)
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ab")
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"cd")
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ef")
        self.assertEqual(bufio.readinto(b), 1)
        self.assertEqual(b, b"gf")
        self.assertEqual(bufio.readinto(b), 0)
        self.assertEqual(b, b"gf")
        # A None from the raw stream (would-block) stops the fill early.
        rawio = self.MockRawIO((b"abc", None))
        bufio = self.tp(rawio)
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ab")
        self.assertEqual(bufio.readinto(b), 1)
        self.assertEqual(b, b"cb")
    def test_readinto1(self):
        # readinto1() drains the internal buffer first and performs at
        # most one additional raw read.
        buffer_size = 10
        rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
        bufio = self.tp(rawio, buffer_size=buffer_size)
        b = bytearray(2)
        self.assertEqual(bufio.peek(3), b'abc')
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(bufio.readinto1(b), 2)
        self.assertEqual(b, b"ab")
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(bufio.readinto1(b), 1)
        self.assertEqual(b[:1], b"c")
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(bufio.readinto1(b), 2)
        self.assertEqual(b, b"de")
        self.assertEqual(rawio._reads, 2)
        b = bytearray(2*buffer_size)
        self.assertEqual(bufio.peek(3), b'fgh')
        self.assertEqual(rawio._reads, 3)
        self.assertEqual(bufio.readinto1(b), 6)
        self.assertEqual(b[:6], b"fghjkl")
        self.assertEqual(rawio._reads, 4)
    def test_readinto_array(self):
        buffer_size = 60
        data = b"a" * 26
        rawio = self.MockRawIO((data,))
        bufio = self.tp(rawio, buffer_size=buffer_size)
        # Create an array with element size > 1 byte
        b = array.array('i', b'x' * 32)
        assert len(b) != 16
        # Read into it. We should get as many *bytes* as we can fit into b
        # (which is more than the number of elements)
        n = bufio.readinto(b)
        self.assertGreater(n, len(b))
        # Check that old contents of b are preserved
        bm = memoryview(b).cast('B')
        self.assertLess(n, len(bm))
        self.assertEqual(bm[:n], data[:n])
        self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
    def test_readinto1_array(self):
        # Same as test_readinto_array, but via readinto1().
        buffer_size = 60
        data = b"a" * 26
        rawio = self.MockRawIO((data,))
        bufio = self.tp(rawio, buffer_size=buffer_size)
        # Create an array with element size > 1 byte
        b = array.array('i', b'x' * 32)
        assert len(b) != 16
        # Read into it. We should get as many *bytes* as we can fit into b
        # (which is more than the number of elements)
        n = bufio.readinto1(b)
        self.assertGreater(n, len(b))
        # Check that old contents of b are preserved
        bm = memoryview(b).cast('B')
        self.assertLess(n, len(bm))
        self.assertEqual(bm[:n], data[:n])
        self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
    def test_readlines(self):
        # readlines() honours the optional size hint.
        def bufio():
            rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
            return self.tp(rawio)
        self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
        self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
        self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
    def test_buffering(self):
        # Check how many raw reads of which size a sequence of buffered
        # reads triggers, for several buffer sizes.
        data = b"abcdefghi"
        dlen = len(data)
        tests = [
            # [buffer size, buffered read sizes, expected raw read sizes]
            [ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
            [ 100, [ 3, 3, 3],     [ dlen ]    ],
            [   4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
        ]
        for bufsize, buf_read_sizes, raw_read_sizes in tests:
            rawio = self.MockFileIO(data)
            bufio = self.tp(rawio, buffer_size=bufsize)
            pos = 0
            for nbytes in buf_read_sizes:
                self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
                pos += nbytes
            # this is mildly implementation-dependent
            self.assertEqual(rawio.read_history, raw_read_sizes)
    def test_read_non_blocking(self):
        # Inject some None's in there to simulate EWOULDBLOCK
        rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcd", bufio.read(6))
        self.assertEqual(b"e", bufio.read(1))
        self.assertEqual(b"fg", bufio.read())
        self.assertEqual(b"", bufio.peek(1))
        # A would-block with no buffered data propagates as None...
        self.assertIsNone(bufio.read())
        # ...and real EOF then reads as b"".
        self.assertEqual(b"", bufio.read())
        rawio = self.MockRawIO((b"a", None, None))
        self.assertEqual(b"a", rawio.readall())
        self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
    @unittest.skipUnless(threading, 'Threading required for this test.')
    @support.requires_resource('cpu')
    def test_threads(self):
        # Twenty threads read concurrently from one shared BufferedReader;
        # the union of everything they read must neither duplicate nor
        # lose any byte of the input.
        try:
            # Write out many bytes with exactly the same number of 0's,
            # 1's... 255's. This will help us check that concurrent reading
            # doesn't duplicate or forget contents.
            N = 1000
            l = list(range(256)) * N
            random.shuffle(l)
            s = bytes(bytearray(l))
            with self.open(support.TESTFN, "wb") as f:
                f.write(s)
            with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
                bufio = self.tp(raw, 8)
                errors = []
                results = []
                def f():
                    try:
                        # Intra-buffer read then buffer-flushing read
                        for n in cycle([1, 19]):
                            s = bufio.read(n)
                            if not s:
                                break
                            # list.append() is atomic
                            results.append(s)
                    except Exception as e:
                        errors.append(e)
                        raise
                threads = [threading.Thread(target=f) for x in range(20)]
                with support.start_threads(threads):
                    time.sleep(0.02) # yield
                self.assertFalse(errors,
                    "the following exceptions were caught: %r" % errors)
                s = b''.join(results)
                # Every byte value must appear exactly N times overall.
                for i in range(256):
                    c = bytes(bytearray([i]))
                    self.assertEqual(s.count(c), N)
        finally:
            support.unlink(support.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
    def test_no_extraneous_read(self):
        # Issue #9550; when the raw IO object has satisfied the read request,
        # we should not issue any additional reads, otherwise it may block
        # (e.g. socket).
        bufsize = 16
        # Try request sizes straddling the buffer size boundary.
        for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
            rawio = self.MockRawIO([b"x" * n])
            bufio = self.tp(rawio, bufsize)
            self.assertEqual(bufio.read(n), b"x" * n)
            # Simple case: one raw read is enough to satisfy the request.
            self.assertEqual(rawio._extraneous_reads, 0,
                             "failed for {}: {} != 0".format(n, rawio._extraneous_reads))
            # A more complex case where two raw reads are needed to satisfy
            # the request.
            rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
            bufio = self.tp(rawio, bufsize)
            self.assertEqual(bufio.read(n), b"x" * n)
            self.assertEqual(rawio._extraneous_reads, 0,
                             "failed for {}: {} != 0".format(n, rawio._extraneous_reads))
def test_read_on_closed(self):
# Issue #23796
b = io.BufferedReader(io.BytesIO(b"12"))
b.read(1)
b.close()
self.assertRaises(ValueError, b.peek)
self.assertRaises(ValueError, b.read1, 1)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
    # Runs the shared BufferedReaderTest suite against the C implementation
    # (io.BufferedReader), plus checks specific to the C version.
    tp = io.BufferedReader
    def test_constructor(self):
        BufferedReaderTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2GB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                bufio.__init__, rawio, sys.maxsize)
    def test_initialization(self):
        # Invalid buffer sizes must be rejected and leave the object unusable.
        rawio = self.MockRawIO([b"abc"])
        bufio = self.tp(rawio)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.read)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.read)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        self.assertRaises(ValueError, bufio.read)
    def test_misbehaved_io_read(self):
        rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading different, so that
        # checking this is not so easy.
        self.assertRaises(OSError, bufio.read, 10)
    def test_garbage_collection(self):
        # C BufferedReader objects are collected.
        # The Python version has __del__, so it ends into gc.garbage instead
        self.addCleanup(support.unlink, support.TESTFN)
        with support.check_warnings(('', ResourceWarning)):
            rawio = self.FileIO(support.TESTFN, "w+b")
            f = self.tp(rawio)
            f.f = f  # reference cycle, so only GC (not refcounting) frees it
            wr = weakref.ref(f)
            del f
            support.gc_collect()
        self.assertIsNone(wr(), wr)
    def test_args_error(self):
        # Issue #17275
        with self.assertRaisesRegex(TypeError, "BufferedReader|__init__"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
    # Runs the shared BufferedReaderTest suite against the pure Python
    # implementation (_pyio.BufferedReader).
    tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
    # Shared tests for BufferedWriter semantics; concrete subclasses set
    # `tp` to either the C or the pure Python implementation.
    write_mode = "wb"
    def test_constructor(self):
        # __init__ may be called again to re-initialize; invalid buffer
        # sizes are rejected without corrupting the object.
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        bufio.__init__(rawio)
        bufio.__init__(rawio, buffer_size=1024)
        bufio.__init__(rawio, buffer_size=16)
        self.assertEqual(3, bufio.write(b"abc"))
        bufio.flush()
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        bufio.__init__(rawio)
        self.assertEqual(3, bufio.write(b"ghi"))
        bufio.flush()
        self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
    def test_uninitialized(self):
        # A writer created with __new__ but not __init__ must fail cleanly.
        bufio = self.tp.__new__(self.tp)
        del bufio
        bufio = self.tp.__new__(self.tp)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               bufio.write, b'')
        bufio.__init__(self.MockRawIO())
        self.assertEqual(bufio.write(b''), 0)
    def test_detach_flush(self):
        # detach() flushes buffered data to the raw stream.
        raw = self.MockRawIO()
        buf = self.tp(raw)
        buf.write(b"howdy!")
        self.assertFalse(raw._write_stack)
        buf.detach()
        self.assertEqual(raw._write_stack, [b"howdy!"])
    def test_write(self):
        # Write to the buffered IO but don't overflow the buffer.
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.write(b"abc")
        self.assertFalse(writer._write_stack)
        buffer = bytearray(b"def")
        bufio.write(buffer)
        buffer[:] = b"***" # Overwrite our copy of the data
        bufio.flush()
        self.assertEqual(b"".join(writer._write_stack), b"abcdef")
    def test_write_overflow(self):
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        contents = b"abcdefghijklmnop"
        for n in range(0, len(contents), 3):
            bufio.write(contents[n:n+3])
        flushed = b"".join(writer._write_stack)
        # At least (total - 8) bytes were implicitly flushed, perhaps more
        # depending on the implementation.
        self.assertTrue(flushed.startswith(contents[:-8]), flushed)
    def check_writes(self, intermediate_func):
        # Lots of writes, test the flushed output is as expected.
        # intermediate_func is run between writes to exercise interactions
        # (flush, seek, truncate, ...) with a partially filled buffer.
        contents = bytes(range(256)) * 1000
        n = 0
        writer = self.MockRawIO()
        bufio = self.tp(writer, 13)
        # Generator of write sizes: repeat each N 15 times then proceed to N+1
        def gen_sizes():
            for size in count(1):
                for i in range(15):
                    yield size
        sizes = gen_sizes()
        while n < len(contents):
            size = min(next(sizes), len(contents) - n)
            self.assertEqual(bufio.write(contents[n:n+size]), size)
            intermediate_func(bufio)
            n += size
        bufio.flush()
        self.assertEqual(contents, b"".join(writer._write_stack))
    def test_writes(self):
        self.check_writes(lambda bufio: None)
    def test_writes_and_flushes(self):
        self.check_writes(lambda bufio: bufio.flush())
    def test_writes_and_seeks(self):
        # Absolute seeks around the current position between writes.
        def _seekabs(bufio):
            pos = bufio.tell()
            bufio.seek(pos + 1, 0)
            bufio.seek(pos - 1, 0)
            bufio.seek(pos, 0)
        self.check_writes(_seekabs)
        # Relative seeks between writes.
        def _seekrel(bufio):
            pos = bufio.seek(0, 1)
            bufio.seek(+1, 1)
            bufio.seek(-1, 1)
            bufio.seek(pos, 0)
        self.check_writes(_seekrel)
    def test_writes_and_truncates(self):
        self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
    def test_write_non_blocking(self):
        # A raw stream signalling EWOULDBLOCK mid-write: the writer buffers
        # what it can and raises BlockingIOError once the buffer is full.
        raw = self.MockNonBlockWriterIO()
        bufio = self.tp(raw, 8)
        self.assertEqual(bufio.write(b"abcd"), 4)
        self.assertEqual(bufio.write(b"efghi"), 5)
        # 1 byte will be written, the rest will be buffered
        raw.block_on(b"k")
        self.assertEqual(bufio.write(b"jklmn"), 5)
        # 8 bytes will be written, 8 will be buffered and the rest will be lost
        raw.block_on(b"0")
        try:
            bufio.write(b"opqrwxyz0123456789")
        except self.BlockingIOError as e:
            written = e.characters_written
        else:
            self.fail("BlockingIOError should have been raised")
        self.assertEqual(written, 16)
        self.assertEqual(raw.pop_written(),
            b"abcdefghijklmnopqrwxyz")
        self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
        s = raw.pop_written()
        # Previously buffered bytes were flushed
        self.assertTrue(s.startswith(b"01234567A"), s)
    def test_write_and_rewind(self):
        raw = io.BytesIO()
        bufio = self.tp(raw, 4)
        self.assertEqual(bufio.write(b"abcdef"), 6)
        self.assertEqual(bufio.tell(), 6)
        bufio.seek(0, 0)
        self.assertEqual(bufio.write(b"XY"), 2)
        bufio.seek(6, 0)
        self.assertEqual(raw.getvalue(), b"XYcdef")
        self.assertEqual(bufio.write(b"123456"), 6)
        bufio.flush()
        self.assertEqual(raw.getvalue(), b"XYcdef123456")
    def test_flush(self):
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.write(b"abc")
        bufio.flush()
        self.assertEqual(b"abc", writer._write_stack[0])
    def test_writelines(self):
        l = [b'ab', b'cd', b'ef']
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.writelines(l)
        bufio.flush()
        self.assertEqual(b''.join(writer._write_stack), b'abcdef')
    def test_writelines_userlist(self):
        # writelines() must accept any iterable of bytes, not just list.
        l = UserList([b'ab', b'cd', b'ef'])
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.writelines(l)
        bufio.flush()
        self.assertEqual(b''.join(writer._write_stack), b'abcdef')
    def test_writelines_error(self):
        # Non-bytes items (including str) are rejected with TypeError.
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
        self.assertRaises(TypeError, bufio.writelines, None)
        self.assertRaises(TypeError, bufio.writelines, 'abc')
    def test_destructor(self):
        # Dropping the last reference flushes buffered data.
        writer = self.MockRawIO()
        bufio = self.tp(writer, 8)
        bufio.write(b"abc")
        del bufio
        support.gc_collect()
        self.assertEqual(b"abc", writer._write_stack[0])
    def test_truncate(self):
        # Truncate implicitly flushes the buffer.
        self.addCleanup(support.unlink, support.TESTFN)
        with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
            bufio = self.tp(raw, 8)
            bufio.write(b"abcdef")
            self.assertEqual(bufio.truncate(3), 3)
            self.assertEqual(bufio.tell(), 6)
        with self.open(support.TESTFN, "rb", buffering=0) as f:
            self.assertEqual(f.read(), b"abc")
    def test_truncate_after_write(self):
        # Ensure that truncate preserves the file position after
        # writes longer than the buffer size.
        # Issue: https://bugs.python.org/issue32228
        self.addCleanup(support.unlink, support.TESTFN)
        with self.open(support.TESTFN, "wb") as f:
            # Fill with some buffer
            f.write(b'\x00' * 10000)
        buffer_sizes = [8192, 4096, 200]
        for buffer_size in buffer_sizes:
            with self.open(support.TESTFN, "r+b", buffering=buffer_size) as f:
                f.write(b'\x00' * (buffer_size + 1))
                # After write write_pos and write_end are set to 0
                f.read(1)
                # read operation makes sure that pos != raw_pos
                f.truncate()
                self.assertEqual(f.tell(), buffer_size + 2)
    @unittest.skipUnless(threading, 'Threading required for this test.')
    @support.requires_resource('cpu')
    def test_threads(self):
        try:
            # Write out many bytes from many threads and test they were
            # all flushed.
            N = 1000
            contents = bytes(range(256)) * N
            sizes = cycle([1, 19])
            n = 0
            queue = deque()
            while n < len(contents):
                size = next(sizes)
                queue.append(contents[n:n+size])
                n += size
            del contents
            # We use a real file object because it allows us to
            # exercise situations where the GIL is released before
            # writing the buffer to the raw streams. This is in addition
            # to concurrency issues due to switching threads in the middle
            # of Python code.
            with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
                bufio = self.tp(raw, 8)
                errors = []
                def f():
                    try:
                        while True:
                            try:
                                s = queue.popleft()
                            except IndexError:
                                return
                            bufio.write(s)
                    except Exception as e:
                        errors.append(e)
                        raise
                threads = [threading.Thread(target=f) for x in range(20)]
                with support.start_threads(threads):
                    time.sleep(0.02) # yield
                self.assertFalse(errors,
                    "the following exceptions were caught: %r" % errors)
                bufio.close()
            # Every byte value must have been flushed exactly N times.
            with self.open(support.TESTFN, "rb") as f:
                s = f.read()
            for i in range(256):
                self.assertEqual(s.count(bytes([i])), N)
        finally:
            support.unlink(support.TESTFN)
    def test_misbehaved_io(self):
        rawio = self.MisbehavedRawIO()
        bufio = self.tp(rawio, 5)
        self.assertRaises(OSError, bufio.seek, 0)
        self.assertRaises(OSError, bufio.tell)
        self.assertRaises(OSError, bufio.write, b"abcdef")
    def test_max_buffer_size_removal(self):
        # The legacy max_buffer_size third argument was removed.
        with self.assertRaises(TypeError):
            self.tp(self.MockRawIO(), 8, 12)
    def test_write_error_on_close(self):
        # An error while flushing during close() propagates, but the
        # stream is still marked closed.
        raw = self.MockRawIO()
        def bad_write(b):
            raise OSError()
        raw.write = bad_write
        b = self.tp(raw)
        b.write(b'spam')
        self.assertRaises(OSError, b.close) # exception not swallowed
        self.assertTrue(b.closed)
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
    # Runs the shared BufferedWriterTest suite against the C implementation
    # (io.BufferedWriter), plus checks specific to the C version.
    tp = io.BufferedWriter
    def test_constructor(self):
        BufferedWriterTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2GB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                bufio.__init__, rawio, sys.maxsize)
    def test_initialization(self):
        # Invalid buffer sizes must be rejected and leave the object unusable.
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.write, b"def")
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.write, b"def")
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        self.assertRaises(ValueError, bufio.write, b"def")
    def test_garbage_collection(self):
        # C BufferedWriter objects are collected, and collecting them flushes
        # all data to disk.
        # The Python version has __del__, so it ends into gc.garbage instead
        self.addCleanup(support.unlink, support.TESTFN)
        with support.check_warnings(('', ResourceWarning)):
            rawio = self.FileIO(support.TESTFN, "w+b")
            f = self.tp(rawio)
            f.write(b"123xxx")
            f.x = f  # reference cycle, so only GC (not refcounting) frees it
            wr = weakref.ref(f)
            del f
            support.gc_collect()
        self.assertIsNone(wr(), wr)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"123xxx")
    def test_args_error(self):
        # Issue #17275
        with self.assertRaisesRegex(TypeError, "BufferedWriter|__init__"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
    # Runs the shared BufferedWriterTest suite against the pure Python
    # implementation (_pyio.BufferedWriter).
    tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
    # Tests for BufferedRWPair, which combines an independent reader and
    # writer into one object.  Concrete subclasses set `tp`.
    def test_constructor(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.closed)
    def test_uninitialized(self):
        # A pair created with __new__ but not __init__ must fail cleanly.
        pair = self.tp.__new__(self.tp)
        del pair
        pair = self.tp.__new__(self.tp)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               pair.read, 0)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               pair.write, b'')
        pair.__init__(self.MockRawIO(), self.MockRawIO())
        self.assertEqual(pair.read(0), b'')
        self.assertEqual(pair.write(b''), 0)
    def test_detach(self):
        # detach() makes no sense for a pair wrapping two streams.
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertRaises(self.UnsupportedOperation, pair.detach)
    def test_constructor_max_buffer_size_removal(self):
        # The legacy max_buffer_size fourth argument was removed.
        with self.assertRaises(TypeError):
            self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
    def test_constructor_with_not_readable(self):
        class NotReadable(MockRawIO):
            def readable(self):
                return False
        self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
    def test_constructor_with_not_writeable(self):
        class NotWriteable(MockRawIO):
            def writable(self):
                return False
        self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
    def test_read(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        self.assertEqual(pair.read(3), b"abc")
        self.assertEqual(pair.read(1), b"d")
        self.assertEqual(pair.read(), b"ef")
        pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
        self.assertEqual(pair.read(None), b"abc")
    def test_readlines(self):
        pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
        self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
        self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
        self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
    def test_read1(self):
        # .read1() is delegated to the underlying reader object, so this test
        # can be shallow.
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        self.assertEqual(pair.read1(3), b"abc")
    def test_readinto(self):
        for method in ("readinto", "readinto1"):
            with self.subTest(method):
                pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
                data = byteslike(b'\0' * 5)
                self.assertEqual(getattr(pair, method)(data), 5)
                self.assertEqual(bytes(data), b"abcde")
    def test_write(self):
        w = self.MockRawIO()
        pair = self.tp(self.MockRawIO(), w)
        pair.write(b"abc")
        pair.flush()
        buffer = bytearray(b"def")
        pair.write(buffer)
        buffer[:] = b"***" # Overwrite our copy of the data
        pair.flush()
        self.assertEqual(w._write_stack, [b"abc", b"def"])
    def test_peek(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        self.assertTrue(pair.peek(3).startswith(b"abc"))
        self.assertEqual(pair.read(3), b"abc")
    def test_readable(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertTrue(pair.readable())
    def test_writeable(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertTrue(pair.writable())
    def test_seekable(self):
        # BufferedRWPairs are never seekable, even if their readers and writers
        # are.
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.seekable())
    # .flush() is delegated to the underlying writer object and has been
    # tested in the test_write method.
    def test_close_and_closed(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.closed)
        pair.close()
        self.assertTrue(pair.closed)
    def test_reader_close_error_on_close(self):
        # If only the reader's close() fails, the writer is still closed.
        def reader_close():
            reader_non_existing
        reader = self.MockRawIO()
        reader.close = reader_close
        writer = self.MockRawIO()
        pair = self.tp(reader, writer)
        with self.assertRaises(NameError) as err:
            pair.close()
        self.assertIn('reader_non_existing', str(err.exception))
        self.assertTrue(pair.closed)
        self.assertFalse(reader.closed)
        self.assertTrue(writer.closed)
    def test_writer_close_error_on_close(self):
        # If only the writer's close() fails, the reader is still closed.
        def writer_close():
            writer_non_existing
        reader = self.MockRawIO()
        writer = self.MockRawIO()
        writer.close = writer_close
        pair = self.tp(reader, writer)
        with self.assertRaises(NameError) as err:
            pair.close()
        self.assertIn('writer_non_existing', str(err.exception))
        self.assertFalse(pair.closed)
        self.assertTrue(reader.closed)
        self.assertFalse(writer.closed)
    def test_reader_writer_close_error_on_close(self):
        # If both close() calls fail, both errors surface (chained).
        def reader_close():
            reader_non_existing
        def writer_close():
            writer_non_existing
        reader = self.MockRawIO()
        reader.close = reader_close
        writer = self.MockRawIO()
        writer.close = writer_close
        pair = self.tp(reader, writer)
        with self.assertRaises(NameError) as err:
            pair.close()
        self.assertIn('reader_non_existing', str(err.exception))
        self.assertIsInstance(err.exception.__context__, NameError)
        self.assertIn('writer_non_existing', str(err.exception.__context__))
        self.assertFalse(pair.closed)
        self.assertFalse(reader.closed)
        self.assertFalse(writer.closed)
    def test_isatty(self):
        # A pair is a tty if either side is a tty.
        class SelectableIsAtty(MockRawIO):
            def __init__(self, isatty):
                MockRawIO.__init__(self)
                self._isatty = isatty
            def isatty(self):
                return self._isatty
        pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
        self.assertFalse(pair.isatty())
        pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
        self.assertTrue(pair.isatty())
        pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
        self.assertTrue(pair.isatty())
        pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
        self.assertTrue(pair.isatty())
    def test_weakref_clearing(self):
        brw = self.tp(self.MockRawIO(), self.MockRawIO())
        ref = weakref.ref(brw)
        brw = None
        ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
    # Runs the BufferedRWPairTest suite against the C implementation.
    tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
    # Runs the BufferedRWPairTest suite against the pure Python implementation.
    tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
    # BufferedRandom supports both reading and writing, so it inherits the
    # full reader and writer suites plus tests for mixed read/write usage.
    read_mode = "rb+"
    write_mode = "wb+"
    def test_constructor(self):
        BufferedReaderTest.test_constructor(self)
        BufferedWriterTest.test_constructor(self)
    def test_uninitialized(self):
        BufferedReaderTest.test_uninitialized(self)
        BufferedWriterTest.test_uninitialized(self)
    def test_read_and_write(self):
        raw = self.MockRawIO((b"asdf", b"ghjk"))
        rw = self.tp(raw, 8)
        self.assertEqual(b"as", rw.read(2))
        rw.write(b"ddd")
        rw.write(b"eee")
        self.assertFalse(raw._write_stack) # Buffer writes
        self.assertEqual(b"ghjk", rw.read())
        self.assertEqual(b"dddeee", raw._write_stack[0])
    def test_seek_and_tell(self):
        raw = self.BytesIO(b"asdfghjkl")
        rw = self.tp(raw)
        self.assertEqual(b"as", rw.read(2))
        self.assertEqual(2, rw.tell())
        rw.seek(0, 0)
        self.assertEqual(b"asdf", rw.read(4))
        rw.write(b"123f")
        rw.seek(0, 0)
        self.assertEqual(b"asdf123fl", rw.read())
        self.assertEqual(9, rw.tell())
        rw.seek(-4, 2)
        self.assertEqual(5, rw.tell())
        rw.seek(2, 1)
        self.assertEqual(7, rw.tell())
        self.assertEqual(b"fl", rw.read(11))
        rw.flush()
        self.assertEqual(b"asdf123fl", raw.getvalue())
        # seek() must reject float positions.
        self.assertRaises(TypeError, rw.seek, 0.0)
    def check_flush_and_read(self, read_func):
        # Interleave writes with reads performed via read_func and check
        # that flush() keeps positions and contents consistent.
        raw = self.BytesIO(b"abcdefghi")
        bufio = self.tp(raw)
        self.assertEqual(b"ab", read_func(bufio, 2))
        bufio.write(b"12")
        self.assertEqual(b"ef", read_func(bufio, 2))
        self.assertEqual(6, bufio.tell())
        bufio.flush()
        self.assertEqual(6, bufio.tell())
        self.assertEqual(b"ghi", read_func(bufio))
        raw.seek(0, 0)
        raw.write(b"XYZ")
        # flush() resets the read buffer
        bufio.flush()
        bufio.seek(0, 0)
        self.assertEqual(b"XYZ", read_func(bufio, 3))
    def test_flush_and_read(self):
        self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
    def test_flush_and_readinto(self):
        def _readinto(bufio, n=-1):
            b = bytearray(n if n >= 0 else 9999)
            n = bufio.readinto(b)
            return bytes(b[:n])
        self.check_flush_and_read(_readinto)
    def test_flush_and_peek(self):
        def _peek(bufio, n=-1):
            # This relies on the fact that the buffer can contain the whole
            # raw stream, otherwise peek() can return less.
            b = bufio.peek(n)
            if n != -1:
                b = b[:n]
            bufio.seek(len(b), 1)
            return b
        self.check_flush_and_read(_peek)
    def test_flush_and_write(self):
        raw = self.BytesIO(b"abcdefghi")
        bufio = self.tp(raw)
        bufio.write(b"123")
        bufio.flush()
        bufio.write(b"45")
        bufio.flush()
        bufio.seek(0, 0)
        self.assertEqual(b"12345fghi", raw.getvalue())
        self.assertEqual(b"12345fghi", bufio.read())
    def test_threads(self):
        BufferedReaderTest.test_threads(self)
        BufferedWriterTest.test_threads(self)
    def test_writes_and_peek(self):
        def _peek(bufio):
            bufio.peek(1)
        self.check_writes(_peek)
        def _peek(bufio):
            pos = bufio.tell()
            bufio.seek(-1, 1)
            bufio.peek(1)
            bufio.seek(pos, 0)
        self.check_writes(_peek)
    def test_writes_and_reads(self):
        def _read(bufio):
            bufio.seek(-1, 1)
            bufio.read(1)
        self.check_writes(_read)
    def test_writes_and_read1s(self):
        def _read1(bufio):
            bufio.seek(-1, 1)
            bufio.read1(1)
        self.check_writes(_read1)
    def test_writes_and_readintos(self):
        def _read(bufio):
            bufio.seek(-1, 1)
            bufio.readinto(bytearray(1))
        self.check_writes(_read)
    def test_write_after_readahead(self):
        # Issue #6629: writing after the buffer was filled by readahead should
        # first rewind the raw stream.
        for overwrite_size in [1, 5]:
            raw = self.BytesIO(b"A" * 10)
            bufio = self.tp(raw, 4)
            # Trigger readahead
            self.assertEqual(bufio.read(1), b"A")
            self.assertEqual(bufio.tell(), 1)
            # Overwriting should rewind the raw stream if it needs so
            bufio.write(b"B" * overwrite_size)
            self.assertEqual(bufio.tell(), overwrite_size + 1)
            # If the write size was smaller than the buffer size, flush() and
            # check that rewind happens.
            bufio.flush()
            self.assertEqual(bufio.tell(), overwrite_size + 1)
            s = raw.getvalue()
            self.assertEqual(s,
                b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
    def test_write_rewind_write(self):
        # Various combinations of reading / writing / seeking backwards / writing again
        def mutate(bufio, pos1, pos2):
            assert pos2 >= pos1
            # Fill the buffer
            bufio.seek(pos1)
            bufio.read(pos2 - pos1)
            bufio.write(b'\x02')
            # This writes earlier than the previous write, but still inside
            # the buffer.
            bufio.seek(pos1)
            bufio.write(b'\x01')
        b = b"\x80\x81\x82\x83\x84"
        for i in range(0, len(b)):
            for j in range(i, len(b)):
                raw = self.BytesIO(b)
                bufio = self.tp(raw, 100)
                mutate(bufio, i, j)
                bufio.flush()
                expected = bytearray(b)
                expected[j] = 2
                expected[i] = 1
                self.assertEqual(raw.getvalue(), expected,
                                 "failed result for i=%d, j=%d" % (i, j))
    def test_truncate_after_read_or_write(self):
        raw = self.BytesIO(b"A" * 10)
        bufio = self.tp(raw, 100)
        self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
        self.assertEqual(bufio.truncate(), 2)
        self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
        self.assertEqual(bufio.truncate(), 4)
    def test_misbehaved_io(self):
        BufferedReaderTest.test_misbehaved_io(self)
        BufferedWriterTest.test_misbehaved_io(self)
    def test_interleaved_read_write(self):
        # Test for issue #12213
        with self.BytesIO(b'abcdefgh') as raw:
            with self.tp(raw, 100) as f:
                f.write(b"1")
                self.assertEqual(f.read(1), b'b')
                f.write(b'2')
                self.assertEqual(f.read1(1), b'd')
                f.write(b'3')
                buf = bytearray(1)
                f.readinto(buf)
                self.assertEqual(buf, b'f')
                f.write(b'4')
                self.assertEqual(f.peek(1), b'h')
                f.flush()
                self.assertEqual(raw.getvalue(), b'1b2d3f4h')
        with self.BytesIO(b'abc') as raw:
            with self.tp(raw, 100) as f:
                self.assertEqual(f.read(1), b'a')
                f.write(b"2")
                self.assertEqual(f.read(1), b'c')
                f.flush()
                self.assertEqual(raw.getvalue(), b'a2c')
    def test_interleaved_readline_write(self):
        with self.BytesIO(b'ab\ncdef\ng\n') as raw:
            with self.tp(raw) as f:
                f.write(b'1')
                self.assertEqual(f.readline(), b'b\n')
                f.write(b'2')
                self.assertEqual(f.readline(), b'def\n')
                f.write(b'3')
                self.assertEqual(f.readline(), b'\n')
                f.flush()
                self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
    # You can't construct a BufferedRandom over a non-seekable stream.
    test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
    # Runs the BufferedRandomTest suite against the C implementation
    # (io.BufferedRandom), plus checks specific to the C version.
    tp = io.BufferedRandom
    def test_constructor(self):
        BufferedRandomTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2GB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                bufio.__init__, rawio, sys.maxsize)
    def test_garbage_collection(self):
        CBufferedReaderTest.test_garbage_collection(self)
        CBufferedWriterTest.test_garbage_collection(self)
    def test_args_error(self):
        # Issue #17275
        with self.assertRaisesRegex(TypeError, "BufferedRandom|__init__"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
    # Runs the BufferedRandomTest suite against the pure Python implementation.
    tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
    """
    For testing seek/tell behavior with a stateful, buffering decoder.
    Input is a sequence of words.  Words may be fixed-length (length set
    by input) or variable-length (period-terminated).  In variable-length
    mode, extra periods are ignored.  Possible words are:
      - 'i' followed by a number sets the input length, I (maximum 99).
        When I is set to 0, words are space-terminated.
      - 'o' followed by a number sets the output length, O (maximum 99).
      - Any other word is converted into a word followed by a period on
        the output.  The output word consists of the input word truncated
        or padded out with hyphens to make its length equal to O.  If O
        is 0, the word is output verbatim without truncating or padding.
    I and O are initially set to 1.  When I changes, any buffered input is
    re-scanned according to the new I.  EOF also terminates the last word.
    """
    def __init__(self, errors='strict'):
        codecs.IncrementalDecoder.__init__(self, errors)
        self.reset()
    def __repr__(self):
        return '<SID %x>' % id(self)
    def reset(self):
        # Word lengths start at 1, with no partial word buffered.
        self.i = 1
        self.o = 1
        self.buffer = bytearray()
    def getstate(self):
        # XOR each length with 1 so a freshly reset decoder reports flags == 0.
        return bytes(self.buffer), (self.i ^ 1) * 100 + (self.o ^ 1)
    def setstate(self, state):
        pending, flags = state
        self.buffer = bytearray(pending)
        i, o = divmod(flags, 100)
        self.i = i ^ 1
        self.o = o ^ 1
    def decode(self, input, final=False):
        output = ''
        for byte in input:
            if self.i == 0:
                # Variable-length mode: a period ends the word; a period
                # with nothing buffered is ignored.
                if byte != ord('.'):
                    self.buffer.append(byte)
                elif self.buffer:
                    output += self.process_word()
            else:
                # Fixed-length mode: emit a word after every self.i bytes.
                self.buffer.append(byte)
                if len(self.buffer) == self.i:
                    output += self.process_word()
        if final and self.buffer:
            # EOF flushes whatever is still buffered as a final word.
            output += self.process_word()
        return output
    def process_word(self):
        # Consume the buffered word and return its decoded output ('' for
        # the control words that merely change I or O).
        word = self.buffer
        self.buffer = bytearray()
        if word[0] == ord('i'):
            self.i = min(99, int(word[1:] or 0)) # set input length
            return ''
        if word[0] == ord('o'):
            self.o = min(99, int(word[1:] or 0)) # set output length
            return ''
        text = word.decode('ascii')
        if len(text) < self.o:
            text += '-'*self.o # pad out with hyphens
        if self.o:
            text = text[:self.o] # truncate to output length
        return text + '.'
    # When codecEnabled is true, lookupTestDecoder (registered below as a
    # codec search function) exposes this decoder as 'test_decoder'.
    codecEnabled = False
    @classmethod
    def lookupTestDecoder(cls, name):
        if cls.codecEnabled and name == 'test_decoder':
            latin1 = codecs.lookup('latin-1')
            return codecs.CodecInfo(
                name='test_decoder', encode=latin1.encode, decode=None,
                incrementalencoder=None,
                streamreader=None, streamwriter=None,
                incrementaldecoder=cls)
# Register the decoder above as a codec search function so tests can look it
# up by name ('test_decoder').  It stays inert until a test sets
# StatefulIncrementalDecoder.codecEnabled = True.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
    """
    Make sure the StatefulIncrementalDecoder actually works.
    """
    # Each entry is (input bytes, final flag, expected decoded output).
    test_cases = [
        # I=1, O=1 (fixed-length input == fixed-length output)
        (b'abcd', False, 'a.b.c.d.'),
        # I=0, O=0 (variable-length input, variable-length output)
        (b'oiabcd', True, 'abcd.'),
        # I=0, O=0 (should ignore extra periods)
        (b'oi...abcd...', True, 'abcd.'),
        # I=0, O=6 (variable-length input, fixed-length output)
        (b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
        # I=2, O=6 (fixed-length input < fixed-length output)
        (b'i.i2.o6xyz', True, 'xy----.z-----.'),
        # I=6, O=3 (fixed-length input > fixed-length output)
        (b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
        # I=0, then 3; O=29, then 15 (with longer output)
        (b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
            'a----------------------------.' +
            'b----------------------------.' +
            'cde--------------------------.' +
            'abcdefghijabcde.' +
            'a.b------------.' +
            '.c.------------.' +
            'd.e------------.' +
            'k--------------.' +
            'l--------------.' +
            'm--------------.')
    ]
    def test_decoder(self):
        # Try a few one-shot test cases.
        for input, eof, output in self.test_cases:
            d = StatefulIncrementalDecoder()
            self.assertEqual(d.decode(input, eof), output)
        # Also test an unfinished decode, followed by forcing EOF.
        d = StatefulIncrementalDecoder()
        self.assertEqual(d.decode(b'oiabcd'), '')
        self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
    def tearDown(self):
        # Remove any scratch file the test left behind.
        support.unlink(support.TESTFN)
    def test_constructor(self):
        # __init__ may be called again to re-configure encoding and newline
        # handling; invalid newline arguments are rejected.
        r = self.BytesIO(b"\xc3\xa9\n\n")
        b = self.BufferedReader(r, 1000)
        t = self.TextIOWrapper(b)
        t.__init__(b, encoding="latin-1", newline="\r\n")
        self.assertEqual(t.encoding, "latin-1")
        self.assertEqual(t.line_buffering, False)
        t.__init__(b, encoding="utf-8", line_buffering=True)
        self.assertEqual(t.encoding, "utf-8")
        self.assertEqual(t.line_buffering, True)
        self.assertEqual("\xe9\n", t.readline())
        self.assertRaises(TypeError, t.__init__, b, newline=42)
        self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_uninitialized(self):
    # An instance created via __new__ (without __init__) must not crash
    # on deallocation, repr() or read().
    t = self.TextIOWrapper.__new__(self.TextIOWrapper)
    del t
    t = self.TextIOWrapper.__new__(self.TextIOWrapper)
    self.assertRaises(Exception, repr, t)
    self.assertRaisesRegex((ValueError, AttributeError),
                           'uninitialized|has no attribute',
                           t.read, 0)
    # After a proper __init__ the object becomes usable.
    t.__init__(self.MockRawIO())
    self.assertEqual(t.read(0), '')
def test_non_text_encoding_codecs_are_rejected(self):
    """Codecs not flagged as text encodings are refused up front.

    See http://bugs.python.org/issue20404: "hex" is a bytes-to-bytes
    codec, so the constructor must raise LookupError.
    """
    stream = self.BufferedWriter(self.BytesIO())
    with self.assertRaisesRegex(LookupError, "is not a text encoding"):
        self.TextIOWrapper(stream, encoding="hex")
def test_detach(self):
    # detach() returns the underlying buffer and severs the wrapper.
    r = self.BytesIO()
    b = self.BufferedWriter(r)
    t = self.TextIOWrapper(b)
    self.assertIs(t.detach(), b)

    t = self.TextIOWrapper(b, encoding="ascii")
    t.write("howdy")
    self.assertFalse(r.getvalue())  # still buffered, nothing flushed yet
    t.detach()                      # detaching flushes pending output
    self.assertEqual(r.getvalue(), b"howdy")
    self.assertRaises(ValueError, t.detach)  # second detach is an error
    # Operations independent of the detached stream should still work
    repr(t)
    self.assertEqual(t.encoding, "ascii")
    self.assertEqual(t.errors, "strict")
    self.assertFalse(t.line_buffering)
def test_repr(self):
    # repr() must reflect name/mode/encoding as they become available,
    # and must not fail after the buffer has been detached.
    raw = self.BytesIO("hello".encode("utf-8"))
    b = self.BufferedReader(raw)
    t = self.TextIOWrapper(b, encoding="utf-8")
    modname = self.TextIOWrapper.__module__
    self.assertEqual(repr(t),
                     "<%s.TextIOWrapper encoding='utf-8'>" % modname)
    raw.name = "dummy"
    self.assertEqual(repr(t),
                     "<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
    t.mode = "r"
    self.assertEqual(repr(t),
                     "<%s.TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
    raw.name = b"dummy"
    self.assertEqual(repr(t),
                     "<%s.TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
    t.buffer.detach()
    repr(t)  # Should not raise an exception
def test_recursive_repr(self):
    # Issue #25455: a raw stream whose .name refers back to the wrapper
    # must not send repr() into unbounded recursion.
    raw = self.BytesIO()
    t = self.TextIOWrapper(raw)
    with support.swap_attr(raw, 'name', t):
        try:
            repr(t)  # Should not crash
        except RuntimeError:
            pass  # a recursion error is acceptable; a crash is not
def test_line_buffering(self):
    # With line_buffering=True, output is flushed on '\n' and '\r'.
    r = self.BytesIO()
    b = self.BufferedWriter(r, 1000)
    t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
    t.write("X")
    self.assertEqual(r.getvalue(), b"")  # No flush happened
    t.write("Y\nZ")
    self.assertEqual(r.getvalue(), b"XY\nZ")  # All got flushed
    t.write("A\rB")
    self.assertEqual(r.getvalue(), b"XY\nZA\rB")  # '\r' flushes too
def test_default_encoding(self):
    old_environ = dict(os.environ)
    try:
        # try to get a user preferred encoding different than the current
        # locale encoding to check that TextIOWrapper() uses the current
        # locale encoding and not the user preferred encoding
        for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
            if key in os.environ:
                del os.environ[key]

        current_locale_encoding = locale.getpreferredencoding(False)
        b = self.BytesIO()
        t = self.TextIOWrapper(b)
        self.assertEqual(t.encoding, current_locale_encoding)
    finally:
        # Restore the environment exactly as it was.
        os.environ.clear()
        os.environ.update(old_environ)
@support.cpython_only
def test_device_encoding(self):
    # Issue 15989: a fileno() outside the C int/unsigned int range must
    # raise OverflowError instead of crashing.
    import _testcapi
    b = self.BytesIO()
    b.fileno = lambda: _testcapi.INT_MAX + 1
    self.assertRaises(OverflowError, self.TextIOWrapper, b)
    b.fileno = lambda: _testcapi.UINT_MAX + 1
    self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
    """The encoding attribute is always set to a resolvable codec."""
    buf = self.BytesIO()
    # An explicit encoding is reported verbatim.
    wrapper = self.TextIOWrapper(buf, encoding="utf-8")
    self.assertEqual(wrapper.encoding, "utf-8")
    # Even the implicit default must name a real codec.
    wrapper = self.TextIOWrapper(buf)
    self.assertIsNotNone(wrapper.encoding)
    codecs.lookup(wrapper.encoding)
def test_encoding_errors_reading(self):
    # (1) default error handler is strict
    b = self.BytesIO(b"abc\n\xff\n")
    t = self.TextIOWrapper(b, encoding="ascii")
    self.assertRaises(UnicodeError, t.read)
    # (2) explicit strict
    b = self.BytesIO(b"abc\n\xff\n")
    t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
    self.assertRaises(UnicodeError, t.read)
    # (3) ignore drops the undecodable byte
    b = self.BytesIO(b"abc\n\xff\n")
    t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
    self.assertEqual(t.read(), "abc\n\n")
    # (4) replace substitutes U+FFFD
    b = self.BytesIO(b"abc\n\xff\n")
    t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
    self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
    # (1) default error handler is strict
    b = self.BytesIO()
    t = self.TextIOWrapper(b, encoding="ascii")
    self.assertRaises(UnicodeError, t.write, "\xff")
    # (2) explicit strict
    b = self.BytesIO()
    t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
    self.assertRaises(UnicodeError, t.write, "\xff")
    # (3) ignore drops the unencodable character
    b = self.BytesIO()
    t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
                           newline="\n")
    t.write("abc\xffdef\n")
    t.flush()
    self.assertEqual(b.getvalue(), b"abcdef\n")
    # (4) replace substitutes '?'
    b = self.BytesIO()
    t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
                           newline="\n")
    t.write("abc\xffdef\n")
    t.flush()
    self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
    # Exhaustive check of newline translation on reading across
    # encodings, buffer sizes and read styles.
    input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]

    # Each entry: newline argument -> expected line split.
    tests = [
        [ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
        [ '', input_lines ],
        [ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
        [ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
        [ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
    ]
    encodings = (
        'utf-8', 'latin-1',
        'utf-16', 'utf-16-le', 'utf-16-be',
        'utf-32', 'utf-32-le', 'utf-32-be',
    )

    # Try a range of buffer sizes to test the case where \r is the last
    # character in TextIOWrapper._pending_line.
    for encoding in encodings:
        # XXX: str.encode() should return bytes
        data = bytes(''.join(input_lines).encode(encoding))
        for do_reads in (False, True):
            for bufsize in range(1, 10):
                for newline, exp_lines in tests:
                    bufio = self.BufferedReader(self.BytesIO(data), bufsize)
                    textio = self.TextIOWrapper(bufio, newline=newline,
                                                encoding=encoding)
                    if do_reads:
                        # Reconstruct lines from 2-char reads plus
                        # readline() for the remainder of each line.
                        got_lines = []
                        while True:
                            c2 = textio.read(2)
                            if c2 == '':
                                break
                            self.assertEqual(len(c2), 2)
                            got_lines.append(c2 + textio.readline())
                    else:
                        got_lines = list(textio)

                    for got_line, exp_line in zip(got_lines, exp_lines):
                        self.assertEqual(got_line, exp_line)
                    self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
    # readline() splitting for each supported newline argument; the
    # embedded NUL byte must pass through untouched.
    testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
    normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
    for newline, expected in [
        (None, normalized.decode("ascii").splitlines(keepends=True)),
        ("", testdata.decode("ascii").splitlines(keepends=True)),
        ("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
        ("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
        ("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
        ]:
        buf = self.BytesIO(testdata)
        txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
        self.assertEqual(txt.readlines(), expected)
        txt.seek(0)
        self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
    # Newline translation on writing: only '\n' is translated; literal
    # '\r' characters pass through untouched.
    testdict = {
        "": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
        "\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
        "\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
        "\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
    }
    # newline=None translates to os.linesep.
    tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
    for newline, expected in tests:
        buf = self.BytesIO()
        txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
        txt.write("AAA\nB")
        txt.write("BB\nCCC\n")
        txt.write("X\rY\r\nZ")
        txt.flush()
        self.assertEqual(buf.closed, False)
        self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
    # Dropping the last reference must flush pending text into the
    # underlying buffer before it is closed.
    l = []
    base = self.BytesIO
    class MyBytesIO(base):
        def close(self):
            # Capture the final contents at close time.
            l.append(self.getvalue())
            base.close(self)
    b = MyBytesIO()
    t = self.TextIOWrapper(b, encoding="ascii")
    t.write("abc")
    del t
    support.gc_collect()
    self.assertEqual([b"abc"], l)
def test_override_destructor(self):
    # A subclass __del__ must run, and finalization must still call the
    # (overridden) close() and flush(), in that order.
    record = []
    class MyTextIO(self.TextIOWrapper):
        def __del__(self):
            record.append(1)
            try:
                f = super().__del__
            except AttributeError:
                pass
            else:
                f()
        def close(self):
            record.append(2)
            super().close()
        def flush(self):
            record.append(3)
            super().flush()
    b = self.BytesIO()
    t = MyTextIO(b, encoding="ascii")
    del t
    support.gc_collect()
    self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
    # Test that the exception state is not modified by a destructor,
    # even if close() fails.
    rawio = self.CloseFailureIO()
    def f():
        # The AttributeError propagates; the wrapper is finalized on
        # the way out.
        self.TextIOWrapper(rawio).xyzzy
    with support.captured_output("stderr") as s:
        self.assertRaises(AttributeError, f)
    s = s.getvalue().strip()
    if s:
        # The destructor *may* have printed an unraisable error, check it
        self.assertEqual(len(s.splitlines()), 1)
        self.assertTrue(s.startswith("Exception OSError: "), s)
        self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API

def test_basic_io(self):
    # Round-trip write/read/seek/tell across chunk sizes and encodings.
    for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
        for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le":
            f = self.open(support.TESTFN, "w+", encoding=enc)
            f._CHUNK_SIZE = chunksize
            self.assertEqual(f.write("abc"), 3)
            f.close()
            f = self.open(support.TESTFN, "r+", encoding=enc)
            f._CHUNK_SIZE = chunksize
            self.assertEqual(f.tell(), 0)
            self.assertEqual(f.read(), "abc")
            cookie = f.tell()
            self.assertEqual(f.seek(0), 0)
            self.assertEqual(f.read(None), "abc")  # read(None) == read()
            f.seek(0)
            self.assertEqual(f.read(2), "ab")
            self.assertEqual(f.read(1), "c")
            self.assertEqual(f.read(1), "")
            self.assertEqual(f.read(), "")
            self.assertEqual(f.tell(), cookie)
            self.assertEqual(f.seek(0), 0)
            self.assertEqual(f.seek(0, 2), cookie)  # seek to end
            self.assertEqual(f.write("def"), 3)
            self.assertEqual(f.seek(cookie), cookie)
            self.assertEqual(f.read(), "def")
            if enc.startswith("utf"):
                self.multi_line_test(f, enc)
            f.close()
def multi_line_test(self, f, enc):
    # Write lines of many sizes, then verify that readline() returns
    # them with the same tell() positions recorded while writing.
    f.seek(0)
    f.truncate()
    sample = "s\xff\u0fff\uffff"  # code points of varying encoded length
    wlines = []
    for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
        chars = []
        for i in range(size):
            chars.append(sample[i % len(sample)])
        line = "".join(chars) + "\n"
        wlines.append((f.tell(), line))
        f.write(line)
    f.seek(0)
    rlines = []
    while True:
        pos = f.tell()
        line = f.readline()
        if not line:
            break
        rlines.append((pos, line))
    self.assertEqual(rlines, wlines)
def test_telling(self):
    # tell() positions must round-trip across readline(); tell() while
    # iterating must raise.
    f = self.open(support.TESTFN, "w+", encoding="utf-8")
    p0 = f.tell()
    f.write("\xff\n")
    p1 = f.tell()
    f.write("\xff\n")
    p2 = f.tell()
    f.seek(0)
    self.assertEqual(f.tell(), p0)
    self.assertEqual(f.readline(), "\xff\n")
    self.assertEqual(f.tell(), p1)
    self.assertEqual(f.readline(), "\xff\n")
    self.assertEqual(f.tell(), p2)
    f.seek(0)
    for line in f:
        self.assertEqual(line, "\xff\n")
        # tell() is not usable while iteration is in progress.
        self.assertRaises(OSError, f.tell)
    self.assertEqual(f.tell(), p2)
    f.close()
def test_seeking(self):
    # A multibyte character straddling the decoder chunk boundary must
    # still decode correctly after a sized read() at the boundary.
    chunk_size = _default_chunk_size()
    prefix_size = chunk_size - 2
    u_prefix = "a" * prefix_size
    prefix = bytes(u_prefix.encode("utf-8"))
    self.assertEqual(len(u_prefix), len(prefix))  # ASCII: 1 byte per char
    u_suffix = "\u8888\n"
    suffix = bytes(u_suffix.encode("utf-8"))
    line = prefix + suffix
    with self.open(support.TESTFN, "wb") as f:
        f.write(line*2)
    with self.open(support.TESTFN, "r", encoding="utf-8") as f:
        s = f.read(prefix_size)
        self.assertEqual(s, str(prefix, "ascii"))
        self.assertEqual(f.tell(), prefix_size)
        self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
    # Regression test for a specific bug
    data = b'\xe0\xbf\xbf\n'
    with self.open(support.TESTFN, "wb") as f:
        f.write(data)
    with self.open(support.TESTFN, "r", encoding="utf-8") as f:
        f._CHUNK_SIZE  # Just test that it exists
        f._CHUNK_SIZE = 2
        # With a tiny chunk size the 3-byte character is split across
        # chunks; readline() then tell() must not fail.
        f.readline()
        f.tell()
def test_seek_and_tell(self):
    # Test seek/tell using the StatefulIncrementalDecoder.
    # Make test faster by doing smaller seeks
    CHUNK_SIZE = 128

    def test_seek_and_tell_with_data(data, min_pos=0):
        """Tell/seek to various points within a data stream and ensure
        that the decoded data returned by read() is consistent."""
        f = self.open(support.TESTFN, 'wb')
        f.write(data)
        f.close()
        # Reference decode of the whole stream.
        f = self.open(support.TESTFN, encoding='test_decoder')
        f._CHUNK_SIZE = CHUNK_SIZE
        decoded = f.read()
        f.close()

        for i in range(min_pos, len(decoded) + 1): # seek positions
            for j in [1, 5, len(decoded) - i]: # read lengths
                f = self.open(support.TESTFN, encoding='test_decoder')
                self.assertEqual(f.read(i), decoded[:i])
                cookie = f.tell()
                self.assertEqual(f.read(j), decoded[i:i + j])
                # Seeking back to the cookie must restore decoder state.
                f.seek(cookie)
                self.assertEqual(f.read(), decoded[i:])
                f.close()

    # Enable the test decoder.
    StatefulIncrementalDecoder.codecEnabled = 1

    # Run the tests.
    try:
        # Try each test case.
        for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
            test_seek_and_tell_with_data(input)

        # Position each test case so that it crosses a chunk boundary.
        for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
            offset = CHUNK_SIZE - len(input)//2
            prefix = b'.'*offset
            # Don't bother seeking into the prefix (takes too long).
            min_pos = offset*2
            test_seek_and_tell_with_data(prefix + input, min_pos)

    # Ensure our test decoder won't interfere with subsequent tests.
    finally:
        StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
    # BOM-producing encodings must emit the BOM only once even across
    # multiple write() calls.
    data = "1234567890"
    tests = ("utf-16",
             "utf-16-le",
             "utf-16-be",
             "utf-32",
             "utf-32-le",
             "utf-32-be")
    for encoding in tests:
        buf = self.BytesIO()
        f = self.TextIOWrapper(buf, encoding=encoding)
        # Check if the BOM is written only once (see issue1753).
        f.write(data)
        f.write(data)
        f.seek(0)
        self.assertEqual(f.read(), data * 2)
        f.seek(0)
        self.assertEqual(f.read(), data * 2)
        self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
    """read() raises OSError when the buffer reports readable()==False."""
    class NotReadable(self.BytesIO):
        def readable(self):
            return False

    wrapper = self.TextIOWrapper(NotReadable())
    self.assertRaises(OSError, wrapper.read)
def test_read_one_by_one(self):
    """Single-character reads still apply newline translation."""
    wrapper = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
    pieces = []
    while True:
        ch = wrapper.read(1)
        if not ch:
            break
        pieces.append(ch)
    self.assertEqual("".join(pieces), "AA\nBB")
def test_readlines(self):
    """readlines() honors its hint; None or no argument mean no limit."""
    txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
    expected = ["AA\n", "BB\n", "CC"]
    self.assertEqual(txt.readlines(), expected)
    txt.seek(0)
    self.assertEqual(txt.readlines(None), expected)
    txt.seek(0)
    # A hint of 5 stops after the line that pushes the total past it.
    self.assertEqual(txt.readlines(5), expected[:2])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
    # make sure "\r\n" straddles 128 char boundary.
    txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
    reads = ""
    while True:
        c = txt.read(128)
        if not c:
            break
        reads += c
    # The split "\r\n" must still be translated to a single "\n".
    self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
    """writelines() concatenates the strings without separators."""
    sink = self.BytesIO()
    wrapper = self.TextIOWrapper(sink)
    wrapper.writelines(['ab', 'cd', 'ef'])
    wrapper.flush()
    self.assertEqual(sink.getvalue(), b'abcdef')
def test_writelines_userlist(self):
    """writelines() accepts any iterable of strings, e.g. a UserList."""
    sink = self.BytesIO()
    wrapper = self.TextIOWrapper(sink)
    wrapper.writelines(UserList(['ab', 'cd', 'ef']))
    wrapper.flush()
    self.assertEqual(sink.getvalue(), b'abcdef')
def test_writelines_error(self):
    """Non-string items — ints, None, bytes — are rejected."""
    wrapper = self.TextIOWrapper(self.BytesIO())
    for bad in ([1, 2, 3], None, b'abc'):
        self.assertRaises(TypeError, wrapper.writelines, bad)
def test_issue1395_1(self):
    # Reading the shared test data one character at a time must still
    # yield the newline-normalized text.
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    chunks = []
    while True:
        ch = txt.read(1)
        if not ch:
            break
        chunks.append(ch)
    self.assertEqual("".join(chunks), self.normalized)
def test_issue1395_2(self):
    # Same as issue1395_1, with 4-char reads and a tiny internal chunk
    # size so chunk boundaries fall inside newline sequences.
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    txt._CHUNK_SIZE = 4
    chunks = []
    while True:
        piece = txt.read(4)
        if not piece:
            break
        chunks.append(piece)
    self.assertEqual("".join(chunks), self.normalized)
def test_issue1395_3(self):
    # Mixing read() and readline() with a tiny chunk size must still
    # reproduce the normalized text.
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    txt._CHUNK_SIZE = 4
    reads = txt.read(4)
    reads += txt.read(4)
    reads += txt.readline()
    reads += txt.readline()
    reads += txt.readline()
    self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
    # A bounded read followed by a read-to-EOF covers the whole stream.
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    txt._CHUNK_SIZE = 4
    reads = txt.read(4)
    reads += txt.read()
    self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
    # Seeking back to a tell() cookie must restore the decoder state so
    # the next read continues correctly.
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    txt._CHUNK_SIZE = 4
    reads = txt.read(4)
    pos = txt.tell()
    txt.seek(0)
    txt.seek(pos)
    self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
    """seekable() is forwarded from the underlying buffer."""
    raw = self.BytesIO(self.testdata)
    wrapper = self.TextIOWrapper(raw, encoding="ascii")
    self.assertEqual(wrapper.seekable(), raw.seekable())
def test_append_bom(self):
    # The BOM is not written again when appending to a non-empty file
    filename = support.TESTFN
    for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
        with self.open(filename, 'w', encoding=charset) as f:
            f.write('aaa')
            pos = f.tell()
        with self.open(filename, 'rb') as f:
            self.assertEqual(f.read(), 'aaa'.encode(charset))

        with self.open(filename, 'a', encoding=charset) as f:
            f.write('xxx')
        with self.open(filename, 'rb') as f:
            # Appended bytes follow the original BOM; no second BOM.
            self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
    # Same test, but when seeking manually
    filename = support.TESTFN
    for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
        with self.open(filename, 'w', encoding=charset) as f:
            f.write('aaa')
            pos = f.tell()
        with self.open(filename, 'r+', encoding=charset) as f:
            f.seek(pos)
            f.write('zzz')
            f.seek(0)
            f.write('bbb')
        with self.open(filename, 'rb') as f:
            # Overwriting from position 0 must not duplicate the BOM.
            self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_seek_append_bom(self):
    # Same test, but first seek to the start and then to the end
    filename = support.TESTFN
    for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
        with self.open(filename, 'w', encoding=charset) as f:
            f.write('aaa')
        with self.open(filename, 'a', encoding=charset) as f:
            f.seek(0)
            f.seek(0, self.SEEK_END)
            f.write('xxx')
        with self.open(filename, 'rb') as f:
            self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
    # The errors attribute reflects the handler chosen at open() time.
    with self.open(support.TESTFN, "w") as f:
        self.assertEqual(f.errors, "strict")  # default handler
    with self.open(support.TESTFN, "w", errors="replace") as f:
        self.assertEqual(f.errors, "replace")
@support.no_tracing
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threads_write(self):
    # Issue6750: concurrent writes could duplicate data
    event = threading.Event()
    with self.open(support.TESTFN, "w", buffering=1) as f:
        def run(n):
            text = "Thread%03d\n" % n
            event.wait()  # start all threads together for max contention
            f.write(text)
        threads = [threading.Thread(target=run, args=(x,))
                   for x in range(20)]
        with support.start_threads(threads, event.set):
            time.sleep(0.02)
    with self.open(support.TESTFN) as f:
        content = f.read()
        for n in range(20):
            # Each thread's line must appear exactly once.
            self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
    # Test that text file is closed despite failed flush
    # and that flush() is called before file closed.
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    closed = []
    def bad_flush():
        # Record closed-ness at flush time, then fail.
        closed[:] = [txt.closed, txt.buffer.closed]
        raise OSError()
    txt.flush = bad_flush
    self.assertRaises(OSError, txt.close) # exception not swallowed
    self.assertTrue(txt.closed)
    self.assertTrue(txt.buffer.closed)
    self.assertTrue(closed)      # flush() called
    self.assertFalse(closed[0])  # flush() called before file closed
    self.assertFalse(closed[1])
    txt.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
    # When both flush() and close() fail, the close() error wins and
    # the flush() error is chained as its __context__.
    buffer = self.BytesIO(self.testdata)
    def bad_flush():
        raise OSError('flush')
    def bad_close():
        raise OSError('close')
    buffer.close = bad_close
    txt = self.TextIOWrapper(buffer, encoding="ascii")
    txt.flush = bad_flush
    with self.assertRaises(OSError) as err: # exception not swallowed
        txt.close()
    self.assertEqual(err.exception.args, ('close',))
    self.assertIsInstance(err.exception.__context__, OSError)
    self.assertEqual(err.exception.__context__.args, ('flush',))
    self.assertFalse(txt.closed)
def test_nonnormalized_close_error_on_close(self):
    # Issue #21677: exception chaining must also work when flush() and
    # close() raise non-normalized exceptions (here, NameErrors).
    buffer = self.BytesIO(self.testdata)
    def bad_flush():
        raise non_existing_flush
    def bad_close():
        raise non_existing_close
    buffer.close = bad_close
    txt = self.TextIOWrapper(buffer, encoding="ascii")
    txt.flush = bad_flush
    with self.assertRaises(NameError) as err: # exception not swallowed
        txt.close()
    self.assertIn('non_existing_close', str(err.exception))
    self.assertIsInstance(err.exception.__context__, NameError)
    self.assertIn('non_existing_flush', str(err.exception.__context__))
    self.assertFalse(txt.closed)
def test_multi_close(self):
    """Repeated close() calls are no-ops; flush() afterwards raises."""
    txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    for _ in range(3):
        txt.close()
    self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
    """tell() and seek() raise UnsupportedOperation on unseekable IO."""
    wrapper = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
    for op, args in ((wrapper.tell, ()), (wrapper.seek, (0,))):
        self.assertRaises(self.UnsupportedOperation, op, *args)
def test_readonly_attributes(self):
    """The buffer attribute cannot be rebound after construction."""
    wrapper = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
    other = self.BytesIO(self.testdata)
    with self.assertRaises(AttributeError):
        wrapper.buffer = other
def test_rawio(self):
    # Issue #12591: TextIOWrapper must work with raw I/O objects, so
    # that subprocess.Popen() can have the required unbuffered
    # semantics with universal_newlines=True.
    raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
    txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
    # Reads
    self.assertEqual(txt.read(4), 'abcd')
    self.assertEqual(txt.readline(), 'efghi\n')
    self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
    # Issue #12591: with write_through=True, writes don't need a flush
    raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
    txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
                             write_through=True)
    txt.write('1')
    txt.write('23\n4')
    txt.write('5')
    # Everything reached the raw stream without an explicit flush().
    self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
    # Issue #21396: write_through=True doesn't force a flush()
    # on the underlying binary buffered object.
    flush_called, write_called = [], []
    class BufferedWriter(self.BufferedWriter):
        def flush(self, *args, **kwargs):
            flush_called.append(True)
            return super().flush(*args, **kwargs)
        def write(self, *args, **kwargs):
            write_called.append(True)
            return super().write(*args, **kwargs)

    rawio = self.BytesIO()
    data = b"a"
    bufio = BufferedWriter(rawio, len(data)*2)
    textio = self.TextIOWrapper(bufio, encoding='ascii',
                                write_through=True)
    # write to the buffered io but don't overflow the buffer
    text = data.decode('ascii')
    textio.write(text)

    # buffer.flush is not called with write_through=True
    self.assertFalse(flush_called)
    # buffer.write *is* called with write_through=True
    self.assertTrue(write_called)
    self.assertEqual(rawio.getvalue(), b"") # no flush

    write_called = [] # reset
    textio.write(text * 10) # total content is larger than bufio buffer
    self.assertTrue(write_called)
    self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_read_nonbytes(self):
    # Issue #17106: a TypeError, not a crash, when the underlying
    # read() returns str instead of bytes.
    for method, args in (('read', (1,)), ('readline', ()), ('read', ())):
        wrapper = self.TextIOWrapper(self.StringIO('a'))
        self.assertRaises(TypeError, getattr(wrapper, method), *args)
def test_illegal_encoder(self):
    # Issue 31271: Calling write() while the return value of encoder's
    # encode() is invalid shouldn't cause an assertion failure.
    # "rot13" is str-to-str; temporarily mark it as a text encoding so
    # the constructor accepts it.
    rot13 = codecs.lookup("rot13")
    with support.swap_attr(rot13, '_is_text_encoding', True):
        t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13")
    self.assertRaises(TypeError, t.write, 'bar')
def test_illegal_decoder(self):
    # Issue #17106
    # Bypass the early encoding check added in issue 20404
    def _make_illegal_wrapper():
        # Temporarily mark "quopri" (a bytes-to-bytes codec) as a text
        # encoding so TextIOWrapper will accept it; always restore.
        quopri = codecs.lookup("quopri")
        quopri._is_text_encoding = True
        try:
            t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
                                   newline='\n', encoding="quopri")
        finally:
            quopri._is_text_encoding = False
        return t
    # Crash when decoder returns non-string
    t = _make_illegal_wrapper()
    self.assertRaises(TypeError, t.read, 1)
    t = _make_illegal_wrapper()
    self.assertRaises(TypeError, t.readline)
    t = _make_illegal_wrapper()
    self.assertRaises(TypeError, t.read)

    # Issue 31243: calling read() while the return value of decoder's
    # getstate() is invalid should neither crash the interpreter nor
    # raise a SystemError.
    def _make_very_illegal_wrapper(getstate_ret_val):
        class BadDecoder:
            def getstate(self):
                return getstate_ret_val
        def _get_bad_decoder(dummy):
            return BadDecoder()
        quopri = codecs.lookup("quopri")
        with support.swap_attr(quopri, 'incrementaldecoder',
                               _get_bad_decoder):
            return _make_illegal_wrapper()
    t = _make_very_illegal_wrapper(42)
    self.assertRaises(TypeError, t.read, 42)
    t = _make_very_illegal_wrapper(())
    self.assertRaises(TypeError, t.read, 42)
    t = _make_very_illegal_wrapper((1, 2))
    self.assertRaises(TypeError, t.read, 42)
@support.impl_detail("PyPy does not call __del__ at shutdown", pypy=False)
def _check_create_at_shutdown(self, **kwargs):
    # Issue #20037: creating a TextIOWrapper at shutdown
    # shouldn't crash the interpreter.
    iomod = self.io.__name__
    code = """if 1:
        import codecs
        import {iomod} as io

        # Avoid looking up codecs at shutdown
        codecs.lookup('utf-8')

        class C:
            def __init__(self):
                self.buf = io.BytesIO()
            def __del__(self):
                io.TextIOWrapper(self.buf, **{kwargs})
                print("ok")
        c = C()
        """.format(iomod=iomod, kwargs=kwargs)
    return assert_python_ok("-c", code)
@support.requires_type_collecting
def test_create_at_shutdown_without_encoding(self):
    # Without an explicit encoding, a codec lookup may happen during
    # interpreter shutdown and is allowed to fail gracefully.
    rc, out, err = self._check_create_at_shutdown()
    if err:
        # Can error out with a RuntimeError if the module state
        # isn't found.
        self.assertIn(self.shutdown_error, err.decode())
    else:
        self.assertEqual("ok", out.decode().strip())
@support.requires_type_collecting
def test_create_at_shutdown_with_encoding(self):
    # With an explicit encoding and error handler, construction at
    # shutdown must succeed cleanly.
    rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
                                                  errors='strict')
    self.assertFalse(err)
    self.assertEqual("ok", out.decode().strip())
def test_read_byteslike(self):
    # The wrapper must accept buffers whose read() yields memoryviews.
    r = MemviewBytesIO(b'Just some random string\n')
    t = self.TextIOWrapper(r, 'utf-8')

    # TextIOwrapper will not read the full string, because
    # we truncate it to a multiple of the native int size
    # so that we can construct a more complex memoryview.
    bytes_val = _to_memoryview(r.getvalue()).tobytes()

    self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
    # Construction over an object lacking tell() must fail without
    # leaking; once tell() is added, construction must succeed.
    class F(object):
        def readable(self): return True
        def writable(self): return True
        def seekable(self): return True

    for i in range(10):
        try:
            self.TextIOWrapper(F(), encoding='utf-8')
        except Exception:
            pass

    F.tell = lambda x: 0
    t = self.TextIOWrapper(F(), encoding='utf-8')
def test_issue25862(self):
    # Assertion failures occurred in tell() after read() and write().
    t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
    t.read(1)
    t.read()
    t.tell()
    t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
    t.read(1)
    t.write('x')
    t.tell()
class MemviewBytesIO(io.BytesIO):
    '''A BytesIO object whose read method returns memoryviews
    rather than bytes'''

    def read1(self, len_):
        # Wrap the underlying bytes in a memoryview of native ints.
        return _to_memoryview(super().read1(len_))

    def read(self, len_):
        return _to_memoryview(super().read(len_))
def _to_memoryview(buf):
'''Convert bytes-object *buf* to a non-trivial memoryview'''
arr = array.array('i')
idx = len(buf) - len(buf) % arr.itemsize
arr.frombytes(buf[:idx])
return memoryview(arr)
class CTextIOWrapperTest(TextIOWrapperTest):
    # Run TextIOWrapperTest against the C implementation.
    io = io
    shutdown_error = "RuntimeError: could not find io module state"

    def test_initialization(self):
        # A failed re-__init__ must leave the wrapper unusable.
        r = self.BytesIO(b"\xc3\xa9\n\n")
        b = self.BufferedReader(r, 1000)
        t = self.TextIOWrapper(b)
        self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
        self.assertRaises(ValueError, t.read)

        t = self.TextIOWrapper.__new__(self.TextIOWrapper)
        self.assertRaises(Exception, repr, t)

    def test_garbage_collection(self):
        # C TextIOWrapper objects are collected, and collecting them flushes
        # all data to disk.
        # The Python version has __del__, so it ends in gc.garbage instead.
        with support.check_warnings(('', ResourceWarning)):
            rawio = io.FileIO(support.TESTFN, "wb")
            b = self.BufferedWriter(rawio)
            t = self.TextIOWrapper(b, encoding="ascii")
            t.write("456def")
            t.x = t  # self-reference so only the GC can reclaim it
            wr = weakref.ref(t)
            del t
            support.gc_collect()
        self.assertIsNone(wr(), wr)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"456def")

    def test_rwpair_cleared_before_textio(self):
        # Issue 13070: TextIOWrapper's finalization would crash when called
        # after the reference to the underlying BufferedRWPair's writer got
        # cleared by the GC.
        for i in range(1000):
            b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
            t1 = self.TextIOWrapper(b1, encoding="ascii")
            b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
            t2 = self.TextIOWrapper(b2, encoding="ascii")
            # circular references
            t1.buddy = t2
            t2.buddy = t1
        support.gc_collect()
class PyTextIOWrapperTest(TextIOWrapperTest):
    # Run TextIOWrapperTest against the pure-Python implementation.
    io = pyio
    shutdown_error = "LookupError: unknown encoding: ascii"
class IncrementalNewlineDecoderTest(unittest.TestCase):
    # Tests run against self.IncrementalNewlineDecoder, which the C and
    # Python subclasses below bind to their respective implementations.

    def check_newline_decoding_utf8(self, decoder):
        # UTF-8 specific tests for a newline decoder
        def _check_decode(b, s, **kwargs):
            # We exercise getstate() / setstate() as well as decode()
            state = decoder.getstate()
            self.assertEqual(decoder.decode(b, **kwargs), s)
            decoder.setstate(state)
            self.assertEqual(decoder.decode(b, **kwargs), s)

        # A three-byte sequence, fed whole and then byte by byte.
        _check_decode(b'\xe8\xa2\x88', "\u8888")

        _check_decode(b'\xe8', "")
        _check_decode(b'\xa2', "")
        _check_decode(b'\x88', "\u8888")

        _check_decode(b'\xe8', "")
        _check_decode(b'\xa2', "")
        _check_decode(b'\x88', "\u8888")

        # A trailing incomplete sequence is an error at end of input.
        _check_decode(b'\xe8', "")
        self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)

        decoder.reset()
        # '\r' is held back until the next byte shows whether it starts
        # a '\r\n' pair; final=True forces it out.
        _check_decode(b'\n', "\n")
        _check_decode(b'\r', "")
        _check_decode(b'', "\n", final=True)
        _check_decode(b'\r', "\n", final=True)

        _check_decode(b'\r', "")
        _check_decode(b'a', "\na")

        _check_decode(b'\r\r\n', "\n\n")
        _check_decode(b'\r', "")
        _check_decode(b'\r', "\n")
        _check_decode(b'\na', "\na")

        _check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
        _check_decode(b'\xe8\xa2\x88', "\u8888")
        _check_decode(b'\n', "\n")
        _check_decode(b'\xe8\xa2\x88\r', "\u8888")
        _check_decode(b'\n', "\n")

    def check_newline_decoding(self, decoder, encoding):
        result = []
        if encoding is not None:
            encoder = codecs.getincrementalencoder(encoding)()
            def _decode_bytewise(s):
                # Decode one byte at a time
                for b in encoder.encode(s):
                    result.append(decoder.decode(bytes([b])))
        else:
            encoder = None
            def _decode_bytewise(s):
                # Decode one char at a time
                for c in s:
                    result.append(decoder.decode(c))
        # The newlines attribute accumulates the kinds of line endings
        # seen so far.
        self.assertEqual(decoder.newlines, None)
        _decode_bytewise("abc\n\r")
        self.assertEqual(decoder.newlines, '\n')
        _decode_bytewise("\nabc")
        self.assertEqual(decoder.newlines, ('\n', '\r\n'))
        _decode_bytewise("abc\r")
        self.assertEqual(decoder.newlines, ('\n', '\r\n'))
        _decode_bytewise("abc")
        self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
        _decode_bytewise("abc\r")
        self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
        decoder.reset()
        input = "abc"
        if encoder is not None:
            encoder.reset()
            input = encoder.encode(input)
        self.assertEqual(decoder.decode(input), "abc")
        self.assertEqual(decoder.newlines, None)

    def test_newline_decoder(self):
        encodings = (
            # None meaning the IncrementalNewlineDecoder takes unicode input
            # rather than bytes input
            None, 'utf-8', 'latin-1',
            'utf-16', 'utf-16-le', 'utf-16-be',
            'utf-32', 'utf-32-le', 'utf-32-be',
        )
        for enc in encodings:
            decoder = enc and codecs.getincrementaldecoder(enc)()
            decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
            self.check_newline_decoding(decoder, enc)
        decoder = codecs.getincrementaldecoder("utf-8")()
        decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
        self.check_newline_decoding_utf8(decoder)
        self.assertRaises(TypeError, decoder.setstate, 42)

    def test_newline_bytes(self):
        # Issue 5433: Excessive optimization in IncrementalNewlineDecoder
        # U+0D00 and U+0A00 are not newlines even though their low bytes
        # match '\r' and '\n'.
        def _check(dec):
            self.assertEqual(dec.newlines, None)
            self.assertEqual(dec.decode("\u0D00"), "\u0D00")
            self.assertEqual(dec.newlines, None)
            self.assertEqual(dec.decode("\u0A00"), "\u0A00")
            self.assertEqual(dec.newlines, None)
        dec = self.IncrementalNewlineDecoder(None, translate=False)
        _check(dec)
        dec = self.IncrementalNewlineDecoder(None, translate=True)
        _check(dec)

    def test_translate(self):
        # issue 35062: any truthy translate value enables translation;
        # a falsy one disables it.
        for translate in (-2, -1, 1, 2):
            decoder = codecs.getincrementaldecoder("utf-8")()
            decoder = self.IncrementalNewlineDecoder(decoder, translate)
            self.check_newline_decoding_utf8(decoder)
        decoder = codecs.getincrementaldecoder("utf-8")()
        decoder = self.IncrementalNewlineDecoder(decoder, translate=0)
        self.assertEqual(decoder.decode(b"\r\r\n"), "\r\r\n")
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
    # Shared tests against the C implementation; the io namespace is
    # injected onto the class by load_tests().
    pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
    # Shared tests against the pure-Python implementation (_pyio);
    # namespace injected by load_tests().
    pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
    """Assorted io-module tests that don't fit the other test classes.

    The namespace under test (C ``io`` or pure-Python ``pyio``) is injected
    onto the concrete subclasses (``self.io``, ``self.open``, ...) by
    ``load_tests``.
    """

    def tearDown(self):
        # Remove the scratch file a test may have created.
        support.unlink(support.TESTFN)

    def test___all__(self):
        # Every exported name must resolve and be either an exception,
        # a SEEK_* constant, or an IOBase subclass ("open" is the one
        # plain callable).
        for name in self.io.__all__:
            obj = getattr(self.io, name, None)
            self.assertIsNotNone(obj, name)
            if name == "open":
                continue
            elif "error" in name.lower() or name == "UnsupportedOperation":
                self.assertTrue(issubclass(obj, Exception), name)
            elif not name.startswith("SEEK_"):
                self.assertTrue(issubclass(obj, self.IOBase))

    def test_attributes(self):
        # mode/name attributes are reflected consistently through the
        # text / buffered / raw layers of the stream stack.
        f = self.open(support.TESTFN, "wb", buffering=0)
        self.assertEqual(f.mode, "wb")
        f.close()
        with support.check_warnings(('', DeprecationWarning)):
            f = self.open(support.TESTFN, "U")
        self.assertEqual(f.name, support.TESTFN)
        self.assertEqual(f.buffer.name, support.TESTFN)
        self.assertEqual(f.buffer.raw.name, support.TESTFN)
        self.assertEqual(f.mode, "U")
        self.assertEqual(f.buffer.mode, "rb")
        self.assertEqual(f.buffer.raw.mode, "rb")
        f.close()
        f = self.open(support.TESTFN, "w+")
        self.assertEqual(f.mode, "w+")
        self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
        self.assertEqual(f.buffer.raw.mode, "rb+")
        g = self.open(f.fileno(), "wb", closefd=False)
        self.assertEqual(g.mode, "wb")
        self.assertEqual(g.raw.mode, "wb")
        self.assertEqual(g.name, f.fileno())
        self.assertEqual(g.raw.name, f.fileno())
        f.close()
        g.close()

    def test_io_after_close(self):
        # Every operation on a closed stream must raise ValueError, for
        # all combinations of mode and buffering.
        for kwargs in [
                {"mode": "w"},
                {"mode": "wb"},
                {"mode": "w", "buffering": 1},
                {"mode": "w", "buffering": 2},
                {"mode": "wb", "buffering": 0},
                {"mode": "r"},
                {"mode": "rb"},
                {"mode": "r", "buffering": 1},
                {"mode": "r", "buffering": 2},
                {"mode": "rb", "buffering": 0},
                {"mode": "w+"},
                {"mode": "w+b"},
                {"mode": "w+", "buffering": 1},
                {"mode": "w+", "buffering": 2},
                {"mode": "w+b", "buffering": 0},
            ]:
            f = self.open(support.TESTFN, **kwargs)
            f.close()
            self.assertRaises(ValueError, f.flush)
            self.assertRaises(ValueError, f.fileno)
            self.assertRaises(ValueError, f.isatty)
            self.assertRaises(ValueError, f.__iter__)
            if hasattr(f, "peek"):
                self.assertRaises(ValueError, f.peek, 1)
            self.assertRaises(ValueError, f.read)
            if hasattr(f, "read1"):
                self.assertRaises(ValueError, f.read1, 1024)
            if hasattr(f, "readall"):
                self.assertRaises(ValueError, f.readall)
            if hasattr(f, "readinto"):
                self.assertRaises(ValueError, f.readinto, bytearray(1024))
            if hasattr(f, "readinto1"):
                self.assertRaises(ValueError, f.readinto1, bytearray(1024))
            self.assertRaises(ValueError, f.readline)
            self.assertRaises(ValueError, f.readlines)
            self.assertRaises(ValueError, f.readlines, 1)
            self.assertRaises(ValueError, f.seek, 0)
            self.assertRaises(ValueError, f.tell)
            self.assertRaises(ValueError, f.truncate)
            self.assertRaises(ValueError, f.write,
                              b"" if "b" in kwargs['mode'] else "")
            self.assertRaises(ValueError, f.writelines, [])
            self.assertRaises(ValueError, next, f)

    def test_blockingioerror(self):
        # Various BlockingIOError issues: a reference cycle through the
        # exception must still be garbage-collectable.
        class C(str):
            pass
        c = C("")
        b = self.BlockingIOError(1, c)
        c.b = b
        b.c = c
        wr = weakref.ref(c)
        del c, b
        support.gc_collect()
        self.assertIsNone(wr(), wr)

    def test_abcs(self):
        # Test the visible base classes are ABCs.
        self.assertIsInstance(self.IOBase, abc.ABCMeta)
        self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
        self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
        self.assertIsInstance(self.TextIOBase, abc.ABCMeta)

    def _check_abc_inheritance(self, abcmodule):
        # Raw, buffered and text streams must each register against
        # exactly one of the Raw/Buffered/Text ABCs (plus IOBase).
        with self.open(support.TESTFN, "wb", buffering=0) as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertIsInstance(f, abcmodule.RawIOBase)
            self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
            self.assertNotIsInstance(f, abcmodule.TextIOBase)
        with self.open(support.TESTFN, "wb") as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertNotIsInstance(f, abcmodule.RawIOBase)
            self.assertIsInstance(f, abcmodule.BufferedIOBase)
            self.assertNotIsInstance(f, abcmodule.TextIOBase)
        with self.open(support.TESTFN, "w") as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertNotIsInstance(f, abcmodule.RawIOBase)
            self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
            self.assertIsInstance(f, abcmodule.TextIOBase)

    def test_abc_inheritance(self):
        # Test implementations inherit from their respective ABCs
        self._check_abc_inheritance(self)

    def test_abc_inheritance_official(self):
        # Test implementations inherit from the official ABCs of the
        # baseline "io" module.
        self._check_abc_inheritance(io)

    def _check_warn_on_dealloc(self, *args, **kwargs):
        # Deallocating a still-open file must emit a ResourceWarning
        # whose message names the file.
        f = open(*args, **kwargs)
        r = repr(f)
        with self.assertWarns(ResourceWarning) as cm:
            f = None
            support.gc_collect()
        self.assertIn(r, str(cm.warning.args[0]))

    def test_warn_on_dealloc(self):
        self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
        self._check_warn_on_dealloc(support.TESTFN, "wb")
        self._check_warn_on_dealloc(support.TESTFN, "w")

    def _check_warn_on_dealloc_fd(self, *args, **kwargs):
        fds = []
        def cleanup_fds():
            # Close any leftover pipe fds; EBADF means the stream
            # already closed it for us.
            for fd in fds:
                try:
                    os.close(fd)
                except OSError as e:
                    if e.errno != errno.EBADF:
                        raise
        self.addCleanup(cleanup_fds)
        r, w = os.pipe()
        fds += r, w
        self._check_warn_on_dealloc(r, *args, **kwargs)
        # When using closefd=False, there's no warning
        r, w = os.pipe()
        fds += r, w
        with support.check_no_resource_warning(self):
            open(r, *args, closefd=False, **kwargs)

    def test_warn_on_dealloc_fd(self):
        self._check_warn_on_dealloc_fd("rb", buffering=0)
        self._check_warn_on_dealloc_fd("rb")
        self._check_warn_on_dealloc_fd("r")

    def test_pickling(self):
        # Pickling file objects is forbidden
        for kwargs in [
                {"mode": "w"},
                {"mode": "wb"},
                {"mode": "wb", "buffering": 0},
                {"mode": "r"},
                {"mode": "rb"},
                {"mode": "rb", "buffering": 0},
                {"mode": "w+"},
                {"mode": "w+b"},
                {"mode": "w+b", "buffering": 0},
            ]:
            for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
                with self.open(support.TESTFN, **kwargs) as f:
                    self.assertRaises(TypeError, pickle.dumps, f, protocol)

    def test_nonblock_pipe_write_bigbuf(self):
        self._test_nonblock_pipe_write(16*1024)

    def test_nonblock_pipe_write_smallbuf(self):
        self._test_nonblock_pipe_write(1024)

    @unittest.skipUnless(hasattr(os, 'set_blocking'),
                         'os.set_blocking() required for this test')
    def _test_nonblock_pipe_write(self, bufsize):
        sent = []
        received = []
        r, w = os.pipe()
        os.set_blocking(r, False)
        os.set_blocking(w, False)

        # To exercise all code paths in the C implementation we need
        # to play with buffer sizes.  For instance, if we choose a
        # buffer size less than or equal to _PIPE_BUF (4096 on Linux)
        # then we will never get a partial write of the buffer.
        rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
        wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)

        with rf, wf:
            for N in 9999, 73, 7574:
                try:
                    i = 0
                    while True:
                        msg = bytes([i % 26 + 97]) * N
                        sent.append(msg)
                        wf.write(msg)
                        i += 1
                except self.BlockingIOError as e:
                    self.assertEqual(e.args[0], errno.EAGAIN)
                    self.assertEqual(e.args[2], e.characters_written)
                    sent[-1] = sent[-1][:e.characters_written]
                    received.append(rf.read())
                    msg = b'BLOCKED'
                    wf.write(msg)
                    sent.append(msg)

            while True:
                try:
                    wf.flush()
                    break
                except self.BlockingIOError as e:
                    self.assertEqual(e.args[0], errno.EAGAIN)
                    self.assertEqual(e.args[2], e.characters_written)
                    self.assertEqual(e.characters_written, 0)
                    received.append(rf.read())

            received += iter(rf.read, None)

        sent, received = b''.join(sent), b''.join(received)
        self.assertEqual(sent, received)
        self.assertTrue(wf.closed)
        self.assertTrue(rf.closed)

    def test_create_fail(self):
        # 'x' mode fails if file is existing
        with self.open(support.TESTFN, 'w'):
            pass
        self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')

    def test_create_writes(self):
        # 'x' mode opens for writing
        with self.open(support.TESTFN, 'xb') as f:
            f.write(b"spam")
        with self.open(support.TESTFN, 'rb') as f:
            self.assertEqual(b"spam", f.read())

    def test_open_allargs(self):
        # there used to be a buffer overflow in the parser for rawmode
        self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')
class CMiscIOTest(MiscIOTest):
    """MiscIOTest against the C implementation, plus C-only checks."""
    io = io

    def test_readinto_buffer_overflow(self):
        # Issue #18025: a read() returning more bytes than the target
        # buffer can hold must raise instead of overflowing it.
        class BadReader(self.io.BufferedIOBase):
            def read(self, n=-1):
                return b'x' * 10**6
        bufio = BadReader()
        b = bytearray(2)
        self.assertRaises(ValueError, bufio.readinto, b)

    @unittest.skipUnless(threading, 'Threading required for this test.')
    def check_daemon_threads_shutdown_deadlock(self, stream_name):
        # Issue #23309: deadlocks at shutdown should be avoided when a
        # daemon thread and the main thread both write to a file.
        code = """if 1:
            import sys
            import time
            import threading
            from test.support import SuppressCrashReport

            file = sys.{stream_name}

            def run():
                while True:
                    file.write('.')
                    file.flush()

            crash = SuppressCrashReport()
            crash.__enter__()
            # don't call __exit__(): the crash occurs at Python shutdown

            thread = threading.Thread(target=run)
            thread.daemon = True
            thread.start()

            time.sleep(0.5)
            file.write('!')
            file.flush()
            """.format_map(locals())
        res, _ = run_python_until_end("-c", code)
        err = res.err.decode()
        if res.rc != 0:
            # Failure: should be a fatal error
            self.assertIn("Fatal Python error: could not acquire lock "
                          "for <_io.BufferedWriter name='<{stream_name}>'> "
                          "at interpreter shutdown, possibly due to "
                          "daemon threads".format_map(locals()),
                          err)
        else:
            self.assertFalse(err.strip('.!'))

    def test_daemon_threads_shutdown_stdout_deadlock(self):
        self.check_daemon_threads_shutdown_deadlock('stdout')

    def test_daemon_threads_shutdown_stderr_deadlock(self):
        self.check_daemon_threads_shutdown_deadlock('stderr')
class PyMiscIOTest(MiscIOTest):
    # Same suite against the pure-Python implementation.
    io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
    """Interaction of buffered I/O with POSIX signals: partial writes and
    reads interrupted by SIGALRM, EINTR retry, and reentrant writes."""

    def setUp(self):
        self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)

    def tearDown(self):
        signal.signal(signal.SIGALRM, self.oldalrm)

    def alarm_interrupt(self, sig, frame):
        # Raise ZeroDivisionError from inside the signal handler.
        1/0

    @unittest.skipUnless(threading, 'Threading required for this test.')
    def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
        """Check that a partial write, when it gets interrupted, properly
        invokes the signal handler, and bubbles up the exception raised
        in the latter."""
        # XXX This test has three flaws that appear when objects are
        # XXX not reference counted.

        # - if wio.write() happens to trigger a garbage collection,
        #   the signal exception may be raised when some __del__
        #   method is running; it will not reach the assertRaises()
        #   call.

        # - more subtle, if the wio object is not destroyed at once
        #   and survives this function, the next opened file is likely
        #   to have the same fileno (since the file descriptor was
        #   actively closed).  When wio.__del__ is finally called, it
        #   will close the other's test file...  To trigger this with
        #   CPython, try adding "global wio" in this function.

        # - This happens only for streams created by the _pyio module,
        #   because a wio.close() that fails still consider that the
        #   file needs to be closed again.  You can try adding an
        #   "assert wio.closed" at the end of the function.

        # Fortunately, a little gc.gollect() seems to be enough to
        # work around all these issues.
        support.gc_collect()

        read_results = []
        def _read():
            # Block SIGALRM in the reader thread so only the main thread
            # (the writer) receives the interrupt.
            if hasattr(signal, 'pthread_sigmask'):
                signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
            s = os.read(r, 1)
            read_results.append(s)
        t = threading.Thread(target=_read)
        t.daemon = True
        r, w = os.pipe()
        fdopen_kwargs["closefd"] = False
        # Large enough to guarantee the pipe buffer fills and blocks.
        large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
        try:
            wio = self.io.open(w, **fdopen_kwargs)
            t.start()
            # Fill the pipe enough that the write will be blocking.
            # It will be interrupted by the timer armed above.  Since the
            # other thread has read one byte, the low-level write will
            # return with a successful (partial) result rather than an EINTR.
            # The buffered IO layer must check for pending signal
            # handlers, which in this case will invoke alarm_interrupt().
            signal.alarm(1)
            try:
                self.assertRaises(ZeroDivisionError, wio.write, large_data)
            finally:
                signal.alarm(0)
                t.join()
            # We got one byte, get another one and check that it isn't a
            # repeat of the first one.
            read_results.append(os.read(r, 1))
            self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
        finally:
            os.close(w)
            os.close(r)
            # This is deliberate. If we didn't close the file descriptor
            # before closing wio, wio would try to flush its internal
            # buffer, and block again.
            try:
                wio.close()
            except OSError as e:
                if e.errno != errno.EBADF:
                    raise

    def test_interrupted_write_unbuffered(self):
        self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)

    def test_interrupted_write_buffered(self):
        self.check_interrupted_write(b"xy", b"xy", mode="wb")

    # Issue #22331: The test hangs on FreeBSD 7.2
    @support.requires_freebsd_version(8)
    def test_interrupted_write_text(self):
        self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")

    @support.no_tracing
    def check_reentrant_write(self, data, **fdopen_kwargs):
        def on_alarm(*args):
            # Will be called reentrantly from the same thread
            wio.write(data)
            1/0
        signal.signal(signal.SIGALRM, on_alarm)
        r, w = os.pipe()
        wio = self.io.open(w, **fdopen_kwargs)
        try:
            signal.alarm(1)
            # Either the reentrant call to wio.write() fails with RuntimeError,
            # or the signal handler raises ZeroDivisionError.
            with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
                while 1:
                    for i in range(100):
                        wio.write(data)
                        wio.flush()
                    # Make sure the buffer doesn't fill up and block further writes
                    os.read(r, len(data) * 100)
            exc = cm.exception
            if isinstance(exc, RuntimeError):
                self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
        finally:
            signal.alarm(0)
            wio.close()
            os.close(r)

    def test_reentrant_write_buffered(self):
        self.check_reentrant_write(b"xy", mode="wb")

    def test_reentrant_write_text(self):
        self.check_reentrant_write("xy", mode="w", encoding="ascii")

    def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
        """Check that a buffered read, when it gets interrupted (either
        returning a partial result or EINTR), properly invokes the signal
        handler and retries if the latter returned successfully."""
        r, w = os.pipe()
        fdopen_kwargs["closefd"] = False
        def alarm_handler(sig, frame):
            os.write(w, b"bar")
        signal.signal(signal.SIGALRM, alarm_handler)
        try:
            rio = self.io.open(r, **fdopen_kwargs)
            os.write(w, b"foo")
            signal.alarm(1)
            # Expected behaviour:
            # - first raw read() returns partial b"foo"
            # - second raw read() returns EINTR
            # - third raw read() returns b"bar"
            self.assertEqual(decode(rio.read(6)), "foobar")
        finally:
            signal.alarm(0)
            rio.close()
            os.close(w)
            os.close(r)

    def test_interrupted_read_retry_buffered(self):
        self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
                                          mode="rb")

    def test_interrupted_read_retry_text(self):
        self.check_interrupted_read_retry(lambda x: x,
                                          mode="r")

    @unittest.skipUnless(threading, 'Threading required for this test.')
    def check_interrupted_write_retry(self, item, **fdopen_kwargs):
        """Check that a buffered write, when it gets interrupted (either
        returning a partial result or EINTR), properly invokes the signal
        handler and retries if the latter returned successfully."""
        select = support.import_module("select")

        # A quantity that exceeds the buffer size of an anonymous pipe's
        # write end.
        N = support.PIPE_MAX_SIZE
        r, w = os.pipe()
        fdopen_kwargs["closefd"] = False

        # We need a separate thread to read from the pipe and allow the
        # write() to finish.  This thread is started after the SIGALRM is
        # received (forcing a first EINTR in write()).
        read_results = []
        write_finished = False
        error = None
        def _read():
            try:
                while not write_finished:
                    while r in select.select([r], [], [], 1.0)[0]:
                        s = os.read(r, 1024)
                        read_results.append(s)
            except BaseException as exc:
                nonlocal error
                error = exc
        t = threading.Thread(target=_read)
        t.daemon = True
        def alarm1(sig, frame):
            signal.signal(signal.SIGALRM, alarm2)
            signal.alarm(1)
        def alarm2(sig, frame):
            t.start()

        large_data = item * N
        signal.signal(signal.SIGALRM, alarm1)
        try:
            wio = self.io.open(w, **fdopen_kwargs)
            signal.alarm(1)
            # Expected behaviour:
            # - first raw write() is partial (because of the limited pipe buffer
            #   and the first alarm)
            # - second raw write() returns EINTR (because of the second alarm)
            # - subsequent write()s are successful (either partial or complete)
            written = wio.write(large_data)
            self.assertEqual(N, written)

            wio.flush()
            write_finished = True
            t.join()

            self.assertIsNone(error)
            self.assertEqual(N, sum(len(x) for x in read_results))
        finally:
            signal.alarm(0)
            write_finished = True
            os.close(w)
            os.close(r)
            # This is deliberate. If we didn't close the file descriptor
            # before closing wio, wio would try to flush its internal
            # buffer, and could block (in case of failure).
            try:
                wio.close()
            except OSError as e:
                if e.errno != errno.EBADF:
                    raise

    def test_interrupted_write_retry_buffered(self):
        self.check_interrupted_write_retry(b"x", mode="wb")

    def test_interrupted_write_retry_text(self):
        self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
    # Signal tests against the C implementation.
    io = io
class PySignalsTest(SignalsTest):
    # Signal tests against the pure-Python implementation.
    io = pyio

    # Handling reentrancy issues would slow down _pyio even more, so the
    # tests are disabled.
    test_reentrant_write_buffered = None
    test_reentrant_write_text = None
def load_tests(*args):
    """Build the suite, injecting the C or pure-Python io namespace (and
    matching mock classes) onto each test class before collection."""
    tests = (CIOTest, PyIOTest, APIMismatchTest,
             CBufferedReaderTest, PyBufferedReaderTest,
             CBufferedWriterTest, PyBufferedWriterTest,
             CBufferedRWPairTest, PyBufferedRWPairTest,
             CBufferedRandomTest, PyBufferedRandomTest,
             StatefulIncrementalDecoderTest,
             CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
             CTextIOWrapperTest, PyTextIOWrapperTest,
             CMiscIOTest, PyMiscIOTest,
             CSignalsTest, PySignalsTest,
             )

    # Put the namespaces of the IO module we are testing and some useful mock
    # classes in the __dict__ of each test.
    mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
             MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead)
    all_members = io.__all__ + ["IncrementalNewlineDecoder"]
    c_io_ns = {name : getattr(io, name) for name in all_members}
    py_io_ns = {name : getattr(pyio, name) for name in all_members}
    globs = globals()
    # Mock classes come in "C"- and "Py"-prefixed flavours at module level.
    c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
    py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
    # Avoid turning open into a bound method.
    py_io_ns["open"] = pyio.OpenWrapper
    for test in tests:
        if test.__name__.startswith("C"):
            for name, obj in c_io_ns.items():
                setattr(test, name, obj)
        elif test.__name__.startswith("Py"):
            for name, obj in py_io_ns.items():
                setattr(test, name, obj)

    suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
    return suite
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
filestatemanager.py | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' filestatemanager.py '''
import os
import threading
from collections import defaultdict
from heron.statemgrs.src.python.statemanager import StateManager
from heron.proto.execution_state_pb2 import ExecutionState
from heron.proto.packing_plan_pb2 import PackingPlan
from heron.proto.physical_plan_pb2 import PhysicalPlan
from heron.proto.scheduler_pb2 import SchedulerLocation
from heron.proto.tmanager_pb2 import TManagerLocation
from heron.proto.topology_pb2 import Topology
# pylint: disable=too-many-instance-attributes
class FileStateManager(StateManager):
    """
    State manager which reads states from local file system.
    This is not a production level state manager. The watches
    are based on polling the file system at regular intervals.
    """

    def __init__(self, name, rootpath):
        self.name = name
        self.rootpath = rootpath

        # This is the cache of the state directories.
        # ``topologies_directory`` holds the last-seen list of topology
        # names; the ``*_directory`` dicts map topology name -> last-seen
        # raw proto bytes, so a change can be detected by comparison.
        self.topologies_directory = {}
        # Bug fix: topology proto bytes need their own cache.  Previously
        # ``topologies_directory`` was reused as the cache for the Topology
        # watcher, but the polling loop also reassigns it to a *list* of
        # names, so ``directory[topology]`` raised TypeError once a
        # topology watcher fired after the first poll.
        self.topology_proto_directory = {}
        self.execution_state_directory = {}
        self.packing_plan_directory = {}
        self.pplan_directory = {}
        self.tmanager_directory = {}
        self.scheduler_location_directory = {}

        # The watches are triggered when there
        # is a corresponding change.
        # The list contains the callbacks to be called
        # when topologies change.
        self.topologies_watchers = []

        # The dictionary is from the topology name
        # to the callback.
        self.topology_watchers = defaultdict(lambda: [])
        self.execution_state_watchers = defaultdict(lambda: [])
        self.packing_plan_watchers = defaultdict(lambda: [])
        self.pplan_watchers = defaultdict(lambda: [])
        self.tmanager_watchers = defaultdict(lambda: [])
        self.scheduler_location_watchers = defaultdict(lambda: [])

        # Instantiate the monitoring thread.
        self.monitoring_thread = threading.Thread(target=self.monitor)
        # cancellable sleep
        self.event = threading.Event()

    # pylint: disable=attribute-defined-outside-init
    def start(self):
        """ start monitoring thread """
        self.monitoring_thread_stop_signal = False
        self.monitoring_thread.start()

    def stop(self):
        """ stop monitoring thread """
        self.monitoring_thread_stop_signal = True
        # Wake the monitor loop immediately instead of waiting out the
        # remainder of the poll interval.
        self.event.set()

    def monitor(self):
        """
        Monitor the rootpath and call the callback
        corresponding to the change.

        This monitoring happens periodically. This function
        is called in a separate thread from the main thread,
        because it sleeps for the intervals between each poll.
        """
        def trigger_watches_based_on_files(watchers, path, directory, ProtoClass):
            """
            For all the topologies in the watchers, check if the data
            in directory has changed. Trigger the callback if it has.
            """
            for topology, callbacks in list(watchers.items()):
                file_path = os.path.join(path, topology)
                # Missing file -> empty bytes, which parses to an empty proto.
                data = b""
                if os.path.exists(file_path):
                    with open(file_path, "rb") as f:
                        data = f.read()
                if topology not in directory or data != directory[topology]:
                    proto_object = ProtoClass()
                    proto_object.ParseFromString(data)
                    for callback in callbacks:
                        callback(proto_object)
                    directory[topology] = data

        while not self.monitoring_thread_stop_signal:
            topologies_path = self.get_topologies_path()
            topologies = []
            if os.path.isdir(topologies_path):
                topologies = [f for f in os.listdir(topologies_path)
                              if os.path.isfile(os.path.join(topologies_path, f))]
            if set(topologies) != set(self.topologies_directory):
                for callback in self.topologies_watchers:
                    callback(topologies)
                self.topologies_directory = topologies
            trigger_watches_based_on_files(
                self.topology_watchers, topologies_path,
                self.topology_proto_directory, Topology)
            # Get the directory name for execution state
            execution_state_path = os.path.dirname(self.get_execution_state_path(""))
            trigger_watches_based_on_files(
                self.execution_state_watchers, execution_state_path,
                self.execution_state_directory, ExecutionState)
            # Get the directory name for packing_plan
            packing_plan_path = os.path.dirname(self.get_packing_plan_path(""))
            trigger_watches_based_on_files(
                self.packing_plan_watchers, packing_plan_path,
                self.packing_plan_directory, PackingPlan)
            # Get the directory name for pplan
            pplan_path = os.path.dirname(self.get_pplan_path(""))
            trigger_watches_based_on_files(
                self.pplan_watchers, pplan_path,
                self.pplan_directory, PhysicalPlan)
            # Get the directory name for tmanager
            tmanager_path = os.path.dirname(self.get_tmanager_path(""))
            trigger_watches_based_on_files(
                self.tmanager_watchers, tmanager_path,
                self.tmanager_directory, TManagerLocation)
            # Get the directory name for scheduler location
            scheduler_location_path = os.path.dirname(self.get_scheduler_location_path(""))
            trigger_watches_based_on_files(
                self.scheduler_location_watchers, scheduler_location_path,
                self.scheduler_location_directory, SchedulerLocation)
            # Sleep for some time (interruptible via stop()).
            self.event.wait(timeout=5)

    def get_topologies(self, callback=None):
        """Register a topologies watch, or return the current topology names."""
        if callback:
            self.topologies_watchers.append(callback)
        else:
            topologies_path = self.get_topologies_path()
            return [f for f in os.listdir(topologies_path)
                    if os.path.isfile(os.path.join(topologies_path, f))]
        return None

    def get_topology(self, topologyName, callback=None):
        """Register a topology watch, or return the parsed Topology proto."""
        if callback:
            self.topology_watchers[topologyName].append(callback)
        else:
            topology_path = self.get_topology_path(topologyName)
            with open(topology_path, "rb") as f:
                data = f.read()
            topology = Topology()
            topology.ParseFromString(data)
            return topology
        return None

    def create_topology(self, topologyName, topology):
        """
        Create path is currently not supported in file based state manager.
        """

    def delete_topology(self, topologyName):
        """
        Delete path is currently not supported in file based state manager.
        """

    def get_packing_plan(self, topologyName, callback=None):
        """Register a packing-plan watch, or return the parsed PackingPlan proto."""
        if callback:
            self.packing_plan_watchers[topologyName].append(callback)
        else:
            packing_plan_path = self.get_packing_plan_path(topologyName)
            with open(packing_plan_path, "rb") as f:
                data = f.read()
            packing_plan = PackingPlan()
            packing_plan.ParseFromString(data)
            # Bug fix: the parsed plan was previously parsed but never
            # returned, unlike every sibling getter.
            return packing_plan
        return None

    def get_pplan(self, topologyName, callback=None):
        """
        Get physical plan of a topology
        """
        if callback:
            self.pplan_watchers[topologyName].append(callback)
        else:
            pplan_path = self.get_pplan_path(topologyName)
            with open(pplan_path, "rb") as f:
                data = f.read()
            pplan = PhysicalPlan()
            pplan.ParseFromString(data)
            return pplan
        return None

    def create_pplan(self, topologyName, pplan):
        """
        Create path is currently not supported in file based state manager.
        """

    def delete_pplan(self, topologyName):
        """
        Delete path is currently not supported in file based state manager.
        """

    def get_execution_state(self, topologyName, callback=None):
        """
        Get execution state
        """
        if callback:
            self.execution_state_watchers[topologyName].append(callback)
        else:
            execution_state_path = self.get_execution_state_path(topologyName)
            with open(execution_state_path, "rb") as f:
                data = f.read()
            executionState = ExecutionState()
            executionState.ParseFromString(data)
            return executionState
        return None

    def create_execution_state(self, topologyName, executionState):
        """
        Create path is currently not supported in file based state manager.
        """

    def delete_execution_state(self, topologyName):
        """
        Delete path is currently not supported in file based state manager.
        """

    def get_tmanager(self, topologyName, callback=None):
        """
        Get tmanager
        """
        if callback:
            self.tmanager_watchers[topologyName].append(callback)
        else:
            tmanager_path = self.get_tmanager_path(topologyName)
            with open(tmanager_path, "rb") as f:
                data = f.read()
            tmanager = TManagerLocation()
            tmanager.ParseFromString(data)
            return tmanager
        return None

    def get_scheduler_location(self, topologyName, callback=None):
        """
        Get scheduler location
        """
        if callback:
            self.scheduler_location_watchers[topologyName].append(callback)
        else:
            scheduler_location_path = self.get_scheduler_location_path(topologyName)
            with open(scheduler_location_path, "rb") as f:
                data = f.read()
            scheduler_location = SchedulerLocation()
            scheduler_location.ParseFromString(data)
            return scheduler_location
        return None
|
client.py | import json
import base64
import requests
import threading
from uuid import UUID
from os import urandom
from time import timezone, sleep
from typing import BinaryIO
from binascii import hexlify
from time import time as timestamp
from locale import getdefaultlocale as locale
from .lib.util import exceptions, headers, device, objects, helpers
from .socket import Callbacks, SocketHandler
# Module-level device info shared by all clients.
# NOTE(review): this rebinds the name `device`, shadowing the imported
# `device` module above; later references deliberately use the generator
# instance -- confirm before renaming.
device = device.DeviceGenerator()
class Client(Callbacks, SocketHandler):
def __init__(self, deviceId: str = None, proxies: dict = None, certificatePath = None, socket_trace = False, socketDebugging = False):
    """Create an (unauthenticated) API client.

    deviceId        -- optional fixed device id; defaults to the generated one
    proxies         -- requests-style proxy mapping used on every HTTP call
    certificatePath -- passed to requests as `verify`
    socket_trace    -- enable websocket trace output
    socketDebugging -- enable SocketHandler debug output
    """
    self.api = "https://service.narvii.com/api/v1"
    self.authenticated = False
    self.configured = False
    self.user_agent = device.user_agent
    # Prefer the caller-supplied device id over the generated default.
    if deviceId is not None: self.device_id = deviceId
    else: self.device_id = device.device_id
    self.device_id_sig = device.device_id_sig
    SocketHandler.__init__(self, self, socket_trace=socket_trace, debug=socketDebugging)
    Callbacks.__init__(self, self)
    self.proxies = proxies
    self.certificatePath = certificatePath
    # Session state; populated by login()/login_sid().
    self.json = None
    self.sid = None
    self.userId = None
    self.account: objects.UserProfile = objects.UserProfile(None)
    self.profile: objects.UserProfile = objects.UserProfile(None)
    self.check_device(self.device_id)
def parse_headers(self, data = None):
    """Build the request headers for an API call.

    When a request body is supplied it must be passed to Headers so the
    body signature can be computed over it.  Bug fix: the original
    condition was inverted -- it passed ``data`` only when it was None
    and dropped it whenever a body actually existed.
    """
    if data:
        return headers.Headers(data=data, deviceId=self.device_id).headers
    return headers.Headers(deviceId=self.device_id).headers
def join_voice_chat(self, comId: str, chatId: str, joinType: int = 1):
    """
    Join a Voice Chat over the websocket.

    **Parameters**
        - **comId** : ID of the Community
        - **chatId** : ID of the Chat
    """
    # Made by Light, Ley and Phoenix
    op = {
        "ndcId": int(comId),
        "threadId": chatId,
        "joinRole": joinType,
        "id": "2154531"  # Need to change?
    }
    self.send(json.dumps({"o": op, "t": 112}))
def join_video_chat(self, comId: str, chatId: str, joinType: int = 1):
    """
    Join a Video Chat over the websocket.

    **Parameters**
        - **comId** : ID of the Community
        - **chatId** : ID of the Chat
    """
    # Made by Light, Ley and Phoenix
    op = {
        "ndcId": int(comId),
        "threadId": chatId,
        "joinRole": joinType,
        "channelType": 5,
        "id": "2154531"  # Need to change?
    }
    self.send(json.dumps({"o": op, "t": 108}))
def join_video_chat_as_viewer(self, comId: str, chatId: str):
    """Join a video chat as a spectator (joinRole 2)."""
    op = {
        "ndcId": int(comId),
        "threadId": chatId,
        "joinRole": 2,
        "id": "72446"
    }
    self.send(json.dumps({"o": op, "t": 112}))
def run_vc(self, comId: str, chatId: str, joinType: str):
    # Keep-alive loop: re-send the voice-chat join frame (t=112) once a
    # second for as long as self.active is set (cleared by end_vc()).
    while self.active:
        data = {
            "o": {
                "ndcId": comId,
                "threadId": chatId,
                "joinRole": joinType,
                "id": "2154531"  # Need to change?
            },
            "t": 112
        }
        data = json.dumps(data)
        self.send(data)
        sleep(1)
def start_vc(self, comId: str, chatId: str, joinType: int = 1):
    """Start a voice chat in `chatId` and launch the keep-alive loop.

    Sends the join frame (t=112) followed by the channel-open frame
    (t=108), then runs run_vc() on a background thread until end_vc()
    clears ``self.active``.
    """
    data = {
        "o": {
            "ndcId": comId,
            "threadId": chatId,
            "joinRole": joinType,
            "id": "2154531"  # Need to change?
        },
        "t": 112
    }
    self.send(json.dumps(data))
    data = {
        "o": {
            "ndcId": comId,
            "threadId": chatId,
            "channelType": 1,
            "id": "2154531"  # Need to change?
        },
        "t": 108
    }
    self.send(json.dumps(data))
    self.active = True
    # Bug fix: the Thread was previously constructed but never started,
    # so the keep-alive loop in run_vc() never ran.
    threading.Thread(target=self.run_vc, args=[comId, chatId, joinType]).start()
def end_vc(self, comId: str, chatId: str, joinType: int = 2):
    """Leave the voice chat and stop the keep-alive loop."""
    # Clearing the flag first terminates run_vc()'s loop.
    self.active = False
    op = {
        "ndcId": comId,
        "threadId": chatId,
        "joinRole": joinType,
        "id": "2154531"  # Need to change?
    }
    self.send(json.dumps({"o": op, "t": 112}))
def login_sid(self, SID: str):
    """
    Login into an account with an SID

    **Parameters**
        - **SID** : SID of the account
    """
    # The user id is encoded inside the session token.
    uId = helpers.sid_to_uid(SID)
    self.authenticated = True
    self.sid = SID
    self.userId = uId
    self.account: objects.UserProfile = self.get_user_info(uId)
    self.profile: objects.UserProfile = self.get_user_info(uId)
    # Share the session with the headers module so later requests are
    # authenticated, then bring up the websocket.
    headers.sid = self.sid
    self.start()
    self.run_socket()
def login(self, email: str, password: str):
    """
    Login into an account.

    **Parameters**
        - **email** : Email of the account.
        - **password** : Password of the account.

    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
    """
    data = json.dumps({
        "email": email,
        "v": 2,
        "secret": f"0 {password}",
        "deviceID": self.device_id,
        "clientType": 100,
        "action": "normal",
        "timestamp": int(timestamp() * 1000)
    })
    response = requests.post(f"{self.api}/g/s/auth/login", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
    # NOTE(review): the socket is started before the login response is
    # checked -- confirm this ordering is intended.
    self.run_socket()
    if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
    else:
        # Cache the session (sid, user id, profiles) for later requests.
        self.authenticated = True
        self.json = json.loads(response.text)
        self.sid = self.json["sid"]
        self.userId = self.json["account"]["uid"]
        self.account: objects.UserProfile = objects.UserProfile(self.json["account"]).UserProfile
        self.profile: objects.UserProfile = objects.UserProfile(self.json["userProfile"]).UserProfile
        headers.sid = self.sid
        self.start()
        return response.status_code
def register(self, nickname: str, email: str, password: str, verificationCode: str, deviceId: str = device.device_id):
"""
Register an account.
**Parameters**
- **nickname** : Nickname of the account.
- **email** : Email of the account.
- **password** : Password of the account.
- **verificationCode** : Verification code.
- **deviceId** : The device id being registered to.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"secret": f"0 {password}",
"deviceID": deviceId,
"email": email,
"clientType": 100,
"nickname": nickname,
"latitude": 0,
"longitude": 0,
"address": None,
"clientCallbackURL": "narviiapp://relogin",
"validationContext": {
"data": {
"code": verificationCode
},
"type": 1,
"identity": email
},
"type": 1,
"identity": email,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/auth/register", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def restore(self, email: str, password: str):
"""
Restore a deleted account.
**Parameters**
- **email** : Email of the account.
- **password** : Password of the account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"secret": f"0 {password}",
"deviceID": device.device_id,
"email": email,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/account/delete-request/cancel", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def logout(self):
"""
Logout from an account.
**Parameters**
- No parameters required.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"deviceID": self.device_id,
"clientType": 100,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/auth/logout", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else:
self.authenticated = False
self.json = None
self.sid = None
self.userId = None
self.account: None
self.profile: None
headers.sid = None
self.close()
return response.status_code
def configure(self, age: int, gender: str):
"""
Configure the settings of an account.
**Parameters**
- **age** : Age of the account. Minimum is 13.
- **gender** : Gender of the account.
- ``Male``, ``Female`` or ``Non-Binary``
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if gender.lower() == "male": gender = 1
elif gender.lower() == "female": gender = 2
elif gender.lower() == "non-binary": gender = 255
else: raise exceptions.SpecifyType()
if age <= 12: raise exceptions.AgeTooLow()
data = json.dumps({
"age": age,
"gender": gender,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/persona/profile/basic", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def verify(self, email: str, code: str):
"""
Verify an account.
**Parameters**
- **email** : Email of the account.
- **code** : Verification code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"validationContext": {
"type": 1,
"identity": email,
"data": {"code": code}},
"deviceID": device.device_id,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/auth/check-security-validation", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def request_verify_code(self, email: str, resetPassword: bool = False):
"""
Request an verification code to the targeted email.
**Parameters**
- **email** : Email of the account.
- **resetPassword** : If the code should be for Password Reset.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"identity": email,
"type": 1,
"deviceID": device.device_id
}
if resetPassword is True:
data["level"] = 2
data["purpose"] = "reset-password"
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/auth/request-security-validation", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def activate_account(self, email: str, code: str):
"""
Activate an account.
**Parameters**
- **email** : Email of the account.
- **code** : Verification code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"type": 1,
"identity": email,
"data": {"code": code},
"deviceID": device.device_id
})
response = requests.post(f"{self.api}/g/s/auth/activate-email", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
# Provided by "𝑰 𝑵 𝑻 𝑬 𝑹 𝑳 𝑼 𝑫 𝑬#4082"
def delete_account(self, password: str):
"""
Delete an account.
**Parameters**
- **password** : Password of the account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"deviceID": device.device_id,
"secret": f"0 {password}"
})
response = requests.post(f"{self.api}/g/s/account/delete-request", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def change_password(self, email: str, password: str, code: str):
"""
Change password of an account.
**Parameters**
- **email** : Email of the account.
- **password** : Password of the account.
- **code** : Verification code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"updateSecret": f"0 {password}",
"emailValidationContext": {
"data": {
"code": code
},
"type": 1,
"identity": email,
"level": 2,
"deviceID": device.device_id
},
"phoneNumberValidationContext": None,
"deviceID": device.device_id
})
response = requests.post(f"{self.api}/g/s/auth/reset-password", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def check_device(self, deviceId: str):
"""
Check if the Device ID is valid.
**Parameters**
- **deviceId** : ID of the Device.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"deviceID": deviceId,
"bundleID": "com.narvii.amino.master",
"clientType": 100,
"timezone": -timezone // 1000,
"systemPushEnabled": True,
"locale": locale()[0],
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/device", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: self.configured = True; return response.status_code
def get_account_info(self):
response = requests.get(f"{self.api}/g/s/account", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfile(json.loads(response.text)["account"]).UserProfile
def upload_media(self, file: BinaryIO, fileType: str):
"""
Upload file to the amino servers.
**Parameters**
- **file** : File to be uploaded.
**Returns**
- **Success** : Url of the file uploaded to the server.
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if fileType == "audio":
t = "audio/aac"
elif fileType == "image":
t = "image/jpg"
else: raise exceptions.SpecifyType(fileType)
data = file.read()
response = requests.post(f"{self.api}/g/s/media/upload", data=data, headers=headers.Headers(type=t, data=data, deviceId=self.device_id).headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["mediaValue"]
    def handle_socket_message(self, data):
        # Entry point invoked by the socket layer for every inbound frame;
        # delegates to self.resolve (defined elsewhere in the class hierarchy —
        # presumably the callbacks mixin; confirm against the socket handler).
        return self.resolve(data)
def get_eventlog(self):
response = requests.get(f"{self.api}/g/s/eventlog/profile?language=en", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)
def sub_clients(self, start: int = 0, size: int = 25):
"""
List of Communities the account is in.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if not self.authenticated: raise exceptions.NotLoggedIn()
response = requests.get(f"{self.api}/g/s/community/joined?v=1&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommunityList(json.loads(response.text)["communityList"]).CommunityList
def sub_clients_profile(self, start: int = 0, size: int = 25):
if not self.authenticated: raise exceptions.NotLoggedIn()
response = requests.get(f"{self.api}/g/s/community/joined?v=1&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["userInfoInCommunities"]
def get_user_info(self, userId: str):
"""
Information of an User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : :meth:`User Object <amino.lib.util.objects.UserProfile>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/user-profile/{userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfile(json.loads(response.text)["userProfile"]).UserProfile
def get_chat_threads(self, start: int = 0, size: int = 25):
"""
List of Chats the account is in.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Chat List <amino.lib.util.objects.ThreadList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/chat/thread?type=joined-me&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.ThreadList(json.loads(response.text)["threadList"]).ThreadList
def get_chat_thread(self, chatId: str):
"""
Get the Chat Object from an Chat ID.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : :meth:`Chat Object <amino.lib.util.objects.Thread>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/chat/thread/{chatId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.Thread(json.loads(response.text)["thread"]).Thread
def get_chat_users(self, chatId: str, start: int = 0, size: int = 25):
response = requests.get(f"{self.api}/g/s/chat/thread/{chatId}/member?start={start}&size={size}&type=default&cv=1.2", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["memberList"]).UserProfileList
def join_chat(self, chatId: str):
"""
Join an Chat.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def leave_chat(self, chatId: str):
"""
Leave an Chat.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.delete(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def start_chat(self, userId: [str, list], message: str, title: str = None, content: str = None, isGlobal: bool = False, publishToGlobal: bool = False):
"""
Start an Chat with an User or List of Users.
**Parameters**
- **userId** : ID of the User or List of User IDs.
- **message** : Starting Message.
- **title** : Title of Group Chat.
- **content** : Content of Group Chat.
- **isGlobal** : If Group Chat is Global.
- **publishToGlobal** : If Group Chat should show in Global.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if isinstance(userId, str): userIds = [userId]
elif isinstance(userId, list): userIds = userId
else: raise exceptions.WrongType()
data = {
"title": title,
"inviteeUids": userIds,
"initialMessageContent": message,
"content": content,
"timestamp": int(timestamp() * 1000)
}
if isGlobal is True: data["type"] = 2; data["eventSource"] = "GlobalComposeMenu"
else: data["type"] = 0
if publishToGlobal is True: data["publishToGlobal"] = 1
else: data["publishToGlobal"] = 0
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/chat/thread", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def invite_to_chat(self, userId: [str, list], chatId: str):
"""
Invite a User or List of Users to a Chat.
**Parameters**
- **userId** : ID of the User or List of User IDs.
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if isinstance(userId, str): userIds = [userId]
elif isinstance(userId, list): userIds = userId
else: raise exceptions.WrongType
data = json.dumps({
"uids": userIds,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/member/invite", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def kick(self, userId: str, chatId: str, allowRejoin: bool = True):
if allowRejoin: allowRejoin = 1
if not allowRejoin: allowRejoin = 0
response = requests.delete(f"{self.api}/g/s/chat/thread/{chatId}/member/{userId}?allowRejoin={allowRejoin}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def get_chat_messages(self, chatId: str, size: int = 25, pageToken: str = None):
"""
List of Messages from an Chat.
**Parameters**
- **chatId** : ID of the Chat.
- *size* : Size of the list.
- *size* : Size of the list.
- *pageToken* : Next Page Token.
**Returns**
- **Success** : :meth:`Message List <amino.lib.util.objects.MessageList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if pageToken is not None: url = f"{self.api}/g/s/chat/thread/{chatId}/message?v=2&pagingType=t&pageToken={pageToken}&size={size}"
else: url = f"{self.api}/g/s/chat/thread/{chatId}/message?v=2&pagingType=t&size={size}"
response = requests.get(url, headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.GetMessages(json.loads(response.text)).GetMessages
def get_message_info(self, chatId: str, messageId: str):
"""
Information of an Message from an Chat.
**Parameters**
- **chatId** : ID of the Chat.
- **messageId** : ID of the Message.
**Returns**
- **Success** : :meth:`Message Object <amino.lib.util.objects.Message>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/chat/thread/{chatId}/message/{messageId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.Message(json.loads(response.text)["message"]).Message
def get_community_info(self, comId: str):
"""
Information of an Community.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : :meth:`Community Object <amino.lib.util.objects.Community>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s-x{comId}/community/info?withInfluencerList=1&withTopicList=true&influencerListOrderStrategy=fansCount", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.Community(json.loads(response.text)["community"]).Community
def search_community(self, aminoId: str):
"""
Search a Community byt its Amino ID.
**Parameters**
- **aminoId** : Amino ID of the Community.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/search/amino-id-and-link?q={aminoId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else:
response = json.loads(response.text)["resultList"]
if len(response) == 0: raise exceptions.CommunityNotFound(aminoId)
else: return objects.CommunityList([com["refObject"] for com in response]).CommunityList
def get_user_following(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that the User is Following.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`User List <amino.lib.util.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/user-profile/{userId}/joined?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["userProfileList"]).UserProfileList
def get_user_followers(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that are Following the User.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`User List <amino.lib.util.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/user-profile/{userId}/member?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["userProfileList"]).UserProfileList
def get_user_visitors(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that Visited the User.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Visitors List <amino.lib.util.objects.VisitorsList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/user-profile/{userId}/visitors?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.VisitorsList(json.loads(response.text)).VisitorsList
def get_blocked_users(self, start: int = 0, size: int = 25):
"""
List of Users that the User Blocked.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Users List <amino.lib.util.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/block?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["userProfileList"]).UserProfileList
def get_blog_info(self, blogId: str = None, wikiId: str = None, quizId: str = None, fileId: str = None):
if blogId or quizId:
if quizId is not None: blogId = quizId
response = requests.get(f"{self.api}/g/s/blog/{blogId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.GetBlogInfo(json.loads(response.text)).GetBlogInfo
elif wikiId:
response = requests.get(f"{self.api}/g/s/item/{wikiId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.GetWikiInfo(json.loads(response.text)).GetWikiInfo
elif fileId:
response = requests.get(f"{self.api}/g/s/shared-folder/files/{fileId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.SharedFolderFile(json.loads(response.text)["file"]).SharedFolderFile
else: raise exceptions.SpecifyType()
def get_blog_comments(self, blogId: str = None, wikiId: str = None, quizId: str = None, fileId: str = None, sorting: str = "newest", start: int = 0, size: int = 25):
if sorting == "newest": sorting = "newest"
elif sorting == "oldest": sorting = "oldest"
elif sorting == "top": sorting = "vote"
else: raise exceptions.WrongType(sorting)
if blogId or quizId:
if quizId is not None: blogId = quizId
response = requests.get(f"{self.api}/g/s/blog/{blogId}/comment?sort={sorting}&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif wikiId: response = requests.get(f"{self.api}/g/s/item/{wikiId}/comment?sort={sorting}&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif fileId: response = requests.get(f"{self.api}/g/s/shared-folder/files/{fileId}/comment?sort={sorting}&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType()
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommentList(json.loads(response.text)["commentList"]).CommentList
def get_blocker_users(self, start: int = 0, size: int = 25):
"""
List of Users that are Blocking the User.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`List of User IDs <None>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/block/full-list?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["blockerUidList"]
def get_wall_comments(self, userId: str, sorting: str, start: int = 0, size: int = 25):
"""
List of Wall Comments of an User.
**Parameters**
- **userId** : ID of the User.
- **sorting** : Order of the Comments.
- ``newest``, ``oldest``, ``top``
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Comments List <amino.lib.util.objects.CommentList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if sorting.lower() == "newest": sorting = "newest"
elif sorting.lower() == "oldest": sorting = "oldest"
elif sorting.lower() == "top": sorting = "vote"
else: raise exceptions.WrongType(sorting)
response = requests.get(f"{self.api}/g/s/user-profile/{userId}/g-comment?sort={sorting}&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommentList(json.loads(response.text)["commentList"]).CommentList
def flag(self, reason: str, flagType: int, userId: str = None, blogId: str = None, wikiId: str = None, asGuest: bool = False):
"""
Flag a User, Blog or Wiki.
**Parameters**
- **reason** : Reason of the Flag.
- **flagType** : Type of the Flag.
- **userId** : ID of the User.
- **blogId** : ID of the Blog.
- **wikiId** : ID of the Wiki.
- *asGuest* : Execute as a Guest.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if reason is None: raise exceptions.ReasonNeeded
if flagType is None: raise exceptions.FlagTypeNeeded
data = {
"flagType": flagType,
"message": reason,
"timestamp": int(timestamp() * 1000)
}
if userId:
data["objectId"] = userId
data["objectType"] = 0
elif blogId:
data["objectId"] = blogId
data["objectType"] = 1
elif wikiId:
data["objectId"] = wikiId
data["objectType"] = 2
else: raise exceptions.SpecifyType
if asGuest: flg = "g-flag"
else: flg = "flag"
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/{flg}", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
    def send_message(self, chatId: str, message: str = None, messageType: int = 0, file: BinaryIO = None, fileType: str = None, replyTo: str = None, mentionUserIds: list = None, stickerId: str = None, embedId: str = None, embedType: int = None, embedLink: str = None, embedTitle: str = None, embedContent: str = None, embedImage: BinaryIO = None):
        """
        Send a Message to a Chat.

        **Parameters**
            - **message** : Message to be sent
            - **chatId** : ID of the Chat.
            - **file** : File to be sent.
            - **fileType** : Type of the file.
                - ``audio``, ``image``, ``gif``
            - **messageType** : Type of the Message.
            - **mentionUserIds** : List of User IDS to mention. '@' needed in the Message.
            - **replyTo** : Message ID to reply to.
            - **stickerId** : Sticker ID to be sent.
            - **embedTitle** : Title of the Embed.
            - **embedContent** : Content of the Embed.
            - **embedLink** : Link of the Embed.
            - **embedImage** : Image of the Embed.
            - **embedId** : ID of the Embed.

        **Returns**
            - **Success** : 200 (int)
            - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`

        Note: a sticker or file overrides the text content; files are sent
        inline as base64, not via upload_media.
        """
        # Strip the <$...$> mention markers from plain text messages.
        if message is not None and file is None:
            message = message.replace("<$", "").replace("$>", "")
        mentions = []
        if mentionUserIds:
            for mention_uid in mentionUserIds:
                mentions.append({"uid": mention_uid})
        # An embed image is uploaded first and referenced as a media list
        # entry; 100 appears to be the image media-type code used elsewhere
        # in this payload (see the file branch below).
        if embedImage:
            embedImage = [[100, self.upload_media(embedImage, "image"), None]]
        data = {
            "type": messageType,
            "content": message,
            # clientRefId: per-message client-side reference derived from the
            # current time.
            "clientRefId": int(timestamp() / 10 % 1000000000),
            "attachedObject": {
                "objectId": embedId,
                "objectType": embedType,
                "link": embedLink,
                "title": embedTitle,
                "content": embedContent,
                "mediaList": embedImage
            },
            "extensions": {"mentionedArray": mentions},
            "timestamp": int(timestamp() * 1000)
        }
        if replyTo: data["replyMessageId"] = replyTo
        # A sticker replaces the text content and forces message type 3.
        if stickerId:
            data["content"] = None
            data["stickerId"] = stickerId
            data["type"] = 3
        # A file replaces the text content; mediaType 110 = audio, 100 = image.
        if file:
            data["content"] = None
            if fileType == "audio":
                data["type"] = 2
                data["mediaType"] = 110
            elif fileType == "image":
                data["mediaType"] = 100
                data["mediaUploadValueContentType"] = "image/jpg"
                data["mediaUhqEnabled"] = True
            elif fileType == "gif":
                data["mediaType"] = 100
                data["mediaUploadValueContentType"] = "image/gif"
                data["mediaUhqEnabled"] = True
            else: raise exceptions.SpecifyType
            # File bytes are embedded directly in the JSON payload as base64.
            data["mediaUploadValue"] = base64.b64encode(file.read()).decode()
        data = json.dumps(data)
        response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/message", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
        if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
        else: return response.status_code
def delete_message(self, chatId: str, messageId: str, asStaff: bool = False, reason: str = None):
"""
Delete a Message from a Chat.
**Parameters**
- **messageId** : ID of the Message.
- **chatId** : ID of the Chat.
- **asStaff** : If execute as a Staff member (Leader or Curator).
- **reason** : Reason of the action to show on the Moderation History.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"adminOpName": 102,
"adminOpNote": {"content": reason},
"timestamp": int(timestamp() * 1000)
}
data = json.dumps(data)
if not asStaff: response = requests.delete(f"{self.api}/g/s/chat/thread/{chatId}/message/{messageId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
else: response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/message/{messageId}/admin", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def mark_as_read(self, chatId: str, messageId: str):
"""
Mark a Message from a Chat as Read.
**Parameters**
- **messageId** : ID of the Message.
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"messageId": messageId,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/mark-as-read", headers=self.parse_headers(), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def edit_chat(self, chatId: str, doNotDisturb: bool = None, pinChat: bool = None, title: str = None, icon: str = None, backgroundImage: str = None, content: str = None, announcement: str = None, coHosts: list = None, keywords: list = None, pinAnnouncement: bool = None, publishToGlobal: bool = None, canTip: bool = None, viewOnly: bool = None, canInvite: bool = None, fansOnly: bool = None):
"""
Send a Message to a Chat.
**Parameters**
- **chatId** : ID of the Chat.
- **title** : Title of the Chat.
- **content** : Content of the Chat.
- **icon** : Icon of the Chat.
- **backgroundImage** : Url of the Background Image of the Chat.
- **announcement** : Announcement of the Chat.
- **pinAnnouncement** : If the Chat Announcement should Pinned or not.
- **coHosts** : List of User IDS to be Co-Host.
- **keywords** : List of Keywords of the Chat.
- **viewOnly** : If the Chat should be on View Only or not.
- **canTip** : If the Chat should be Tippable or not.
- **canInvite** : If the Chat should be Invitable or not.
- **fansOnly** : If the Chat should be Fans Only or not.
- **publishToGlobal** : If the Chat should show on Public Chats or not.
- **doNotDisturb** : If the Chat should Do Not Disturb or not.
- **pinChat** : If the Chat should Pinned or not.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {"timestamp": int(timestamp() * 1000)}
if title: data["title"] = title
if content: data["content"] = content
if icon: data["icon"] = icon
if keywords: data["keywords"] = keywords
if announcement: data["extensions"] = {"announcement": announcement}
if pinAnnouncement: data["extensions"] = {"pinAnnouncement": pinAnnouncement}
if fansOnly: data["extensions"] = {"fansOnly": fansOnly}
if publishToGlobal: data["publishToGlobal"] = 0
if not publishToGlobal: data["publishToGlobal"] = 1
res = []
if doNotDisturb is not None:
if doNotDisturb:
data = json.dumps({"alertOption": 2, "timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}/alert", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if not doNotDisturb:
data = json.dumps({"alertOption": 1, "timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}/alert", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if pinChat is not None:
if pinChat:
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/pin", data=data, headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if not pinChat:
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/unpin", data=data, headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if backgroundImage is not None:
data = json.dumps({"media": [100, backgroundImage, None], "timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}/background", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if coHosts is not None:
data = json.dumps({"uidList": coHosts, "timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/co-host", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if viewOnly is not None:
if viewOnly:
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/view-only/enable", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if not viewOnly:
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/view-only/disable", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if canInvite is not None:
if canInvite:
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/members-can-invite/enable", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if not canInvite:
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/members-can-invite/disable", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if canTip is not None:
if canTip:
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/tipping-perm-status/enable", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if not canTip:
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/tipping-perm-status/disable", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
return res
def visit(self, userId: str):
"""
Visit an User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/user-profile/{userId}?action=visit", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def send_coins(self, coins: int, blogId: str = None, chatId: str = None, objectId: str = None, transactionId: str = None):
url = None
if transactionId is None: transactionId = str(UUID(hexlify(urandom(16)).decode('ascii')))
data = {
"coins": coins,
"tippingContext": {"transactionId": transactionId},
"timestamp": int(timestamp() * 1000)
}
if blogId is not None: url = f"{self.api}/g/s/blog/{blogId}/tipping"
if chatId is not None: url = f"{self.api}/g/s/chat/thread/{chatId}/tipping"
if objectId is not None:
data["objectId"] = objectId
data["objectType"] = 2
url = f"{self.api}/g/s/tipping"
if url is None: raise exceptions.SpecifyType()
data = json.dumps(data)
response = requests.post(url, headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def follow(self, userId: [str, list]):
"""
Follow an User or Multiple Users.
**Parameters**
- **userId** : ID of the User or List of IDs of the Users.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if isinstance(userId, str):
response = requests.post(f"{self.api}/g/s/user-profile/{userId}/member", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif isinstance(userId, list):
data = json.dumps({"targetUidList": userId, "timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.api}/g/s/user-profile/{self.userId}/joined", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.WrongType
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def unfollow(self, userId: str):
"""
Unfollow an User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.delete(f"{self.api}/g/s/user-profile/{userId}/member/{self.userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def block(self, userId: str):
"""
Block an User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.post(f"{self.api}/g/s/block/{userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def unblock(self, userId: str):
"""
Unblock an User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.delete(f"{self.api}/g/s/block/{userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def join_community(self, comId: str, invitationId: str = None):
"""
Join a Community.
**Parameters**
- **comId** : ID of the Community.
- **invitationId** : ID of the Invitation Code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {"timestamp": int(timestamp() * 1000)}
if invitationId: data["invitationId"] = invitationId
data = json.dumps(data)
response = requests.post(f"{self.api}/x{comId}/s/community/join", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def request_join_community(self, comId: str, message: str = None):
"""
Request to join a Community.
**Parameters**
- **comId** : ID of the Community.
- **message** : Message to be sent.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({"message": message, "timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.api}/x{comId}/s/community/membership-request", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def leave_community(self, comId: str):
"""
Leave a Community.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.post(f"{self.api}/x{comId}/s/community/leave", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def flag_community(self, comId: str, reason: str, flagType: int, isGuest: bool = False):
"""
Flag a Community.
**Parameters**
- **comId** : ID of the Community.
- **reason** : Reason of the Flag.
- **flagType** : Type of Flag.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if reason is None: raise exceptions.ReasonNeeded
if flagType is None: raise exceptions.FlagTypeNeeded
data = json.dumps({
"objectId": comId,
"objectType": 16,
"flagType": flagType,
"message": reason,
"timestamp": int(timestamp() * 1000)
})
if isGuest: flg = "g-flag"
else: flg = "flag"
response = requests.post(f"{self.api}/x{comId}/s/{flg}", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def edit_profile(self, nickname: str = None, content: str = None, icon: BinaryIO = None, backgroundColor: str = None, backgroundImage: str = None, defaultBubbleId: str = None):
"""
Edit account's Profile.
**Parameters**
- **nickname** : Nickname of the Profile.
- **content** : Biography of the Profile.
- **icon** : Icon of the Profile.
- **backgroundImage** : Url of the Background Picture of the Profile.
- **backgroundColor** : Hexadecimal Background Color of the Profile.
- **defaultBubbleId** : Chat bubble ID.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"address": None,
"latitude": 0,
"longitude": 0,
"mediaList": None,
"eventSource": "UserProfileView",
"timestamp": int(timestamp() * 1000)
}
if nickname: data["nickname"] = nickname
if icon: data["icon"] = self.upload_media(icon, "image")
if content: data["content"] = content
if backgroundColor: data["extensions"] = {"style": {"backgroundColor": backgroundColor}}
if backgroundImage: data["extensions"] = {"style": {"backgroundMediaList": [[100, backgroundImage, None, None, None]]}}
if defaultBubbleId: data["extensions"] = {"defaultBubbleId": defaultBubbleId}
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/user-profile/{self.userId}", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def set_privacy_status(self, isAnonymous: bool = False, getNotifications: bool = False):
"""
Edit account's Privacy Status.
**Parameters**
- **isAnonymous** : If visibility should be Anonymous or not.
- **getNotifications** : If account should get new Visitors Notifications.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {"timestamp": int(timestamp() * 1000)}
if not isAnonymous: data["privacyMode"] = 1
if isAnonymous: data["privacyMode"] = 2
if not getNotifications: data["notificationStatus"] = 2
if getNotifications: data["privacyMode"] = 1
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/account/visit-settings", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def set_amino_id(self, aminoId: str):
"""
Edit account's Amino ID.
**Parameters**
- **aminoId** : Amino ID of the Account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({"aminoId": aminoId, "timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.api}/g/s/account/change-amino-id", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def get_linked_communities(self, userId: str):
"""
Get a List of Linked Communities of an User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/user-profile/{userId}/linked-community", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommunityList(json.loads(response.text)["linkedCommunityList"]).CommunityList
def get_unlinked_communities(self, userId: str):
"""
Get a List of Unlinked Communities of an User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/user-profile/{userId}/linked-community", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommunityList(json.loads(response.text)["unlinkedCommunityList"]).CommunityList
def reorder_linked_communities(self, comIds: list):
"""
Reorder List of Linked Communities.
**Parameters**
- **comIds** : IDS of the Communities.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({"ndcIds": comIds, "timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.api}/g/s/user-profile/{self.userId}/linked-community/reorder", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def add_linked_community(self, comId: str):
"""
Add a Linked Community on your profile.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.post(f"{self.api}/g/s/user-profile/{self.userId}/linked-community/{comId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def remove_linked_community(self, comId: str):
"""
Remove a Linked Community on your profile.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.delete(f"{self.api}/g/s/user-profile/{self.userId}/linked-community/{comId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def comment(self, message: str, userId: str = None, blogId: str = None, wikiId: str = None, replyTo: str = None):
"""
Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **message** : Message to be sent.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
- **replyTo** : ID of the Comment to Reply to.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if message is None: raise exceptions.MessageNeeded
data = {
"content": message,
"stickerId": None,
"type": 0,
"timestamp": int(timestamp() * 1000)
}
if replyTo: data["respondTo"] = replyTo
if userId:
data["eventSource"] = "UserProfileView"
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/user-profile/{userId}/g-comment", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
elif blogId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/blog/{blogId}/g-comment", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
elif wikiId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/item/{wikiId}/g-comment", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def delete_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
"""
Delete a Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **commentId** : ID of the Comment.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if userId: response = requests.delete(f"{self.api}/g/s/user-profile/{userId}/g-comment/{commentId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif blogId: response = requests.delete(f"{self.api}/g/s/blog/{blogId}/g-comment/{commentId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif wikiId: response = requests.delete(f"{self.api}/g/s/item/{wikiId}/g-comment/{commentId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def like_blog(self, blogId: [str, list] = None, wikiId: str = None):
"""
Like a Blog, Multiple Blogs or a Wiki.
**Parameters**
- **blogId** : ID of the Blog or List of IDs of the Blogs. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"value": 4,
"timestamp": int(timestamp() * 1000)
}
if blogId:
if isinstance(blogId, str):
data["eventSource"] = "UserProfileView"
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/blog/{blogId}/g-vote?cv=1.2", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
elif isinstance(blogId, list):
data["targetIdList"] = blogId
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/feed/g-vote", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.WrongType(type(blogId))
elif wikiId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/item/{wikiId}/g-vote?cv=1.2", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType()
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def unlike_blog(self, blogId: str = None, wikiId: str = None):
"""
Remove a like from a Blog or Wiki.
**Parameters**
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if blogId: response = requests.delete(f"{self.api}/g/s/blog/{blogId}/g-vote?eventSource=UserProfileView", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif wikiId: response = requests.delete(f"{self.api}/g/s/item/{wikiId}/g-vote?eventSource=PostDetailView", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def like_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
"""
Like a Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **commentId** : ID of the Comment.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"value": 4,
"timestamp": int(timestamp() * 1000)
}
if userId:
data["eventSource"] = "UserProfileView"
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/user-profile/{userId}/comment/{commentId}/g-vote?cv=1.2&value=1", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
elif blogId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/blog/{blogId}/comment/{commentId}/g-vote?cv=1.2&value=1", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
elif wikiId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/item/{wikiId}/comment/{commentId}/g-vote?cv=1.2&value=1", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def unlike_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
"""
Remove a like from a Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **commentId** : ID of the Comment.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if userId: response = requests.delete(f"{self.api}/g/s/user-profile/{userId}/comment/{commentId}/g-vote?eventSource=UserProfileView", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif blogId: response = requests.delete(f"{self.api}/g/s/blog/{blogId}/comment/{commentId}/g-vote?eventSource=PostDetailView", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif wikiId: response = requests.delete(f"{self.api}/g/s/item/{wikiId}/comment/{commentId}/g-vote?eventSource=PostDetailView", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def get_membership_info(self):
"""
Get Information about your Amino+ Membership.
**Parameters**
- No parameters required.
**Returns**
- **Success** : :meth:`Membership Object <amino.lib.util.objects.Membership>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/membership?force=true", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.Membership(json.loads(response.text)).Membership
def get_ta_announcements(self, language: str = "en", start: int = 0, size: int = 25):
"""
Get the list of Team Amino's Announcement Blogs.
**Parameters**
- **language** : Language of the Blogs.
- ``en``, ``es``, ``pt``, ``ar``, ``ru``, ``fr``, ``de``
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Blogs List <amino.lib.util.objects.BlogList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if language not in self.get_supported_languages(): raise exceptions.UnsupportedLanguage(language)
response = requests.get(f"{self.api}/g/s/announcement?language={language}&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.BlogList(json.loads(response.text)["blogList"]).BlogList
def get_wallet_info(self):
"""
Get Information about the account's Wallet.
**Parameters**
- No parameters required.
**Returns**
- **Success** : :meth:`Wallet Object <amino.lib.util.objects.WalletInfo>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/wallet", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.WalletInfo(json.loads(response.text)["wallet"]).WalletInfo
def get_wallet_history(self, start: int = 0, size: int = 25):
"""
Get the Wallet's History Information.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Wallet Object <amino.lib.util.objects.WalletInfo>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/wallet/coin/history?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.WalletHistory(json.loads(response.text)["coinHistoryList"]).WalletHistory
def get_from_deviceid(self, deviceId: str):
"""
Get the User ID from an Device ID.
**Parameters**
- **deviceID** : ID of the Device.
**Returns**
- **Success** : :meth:`User ID <amino.lib.util.objects.UserProfile.userId>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/auid?deviceId={deviceId}")
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["auid"]
def get_from_code(self, code: str):
"""
Get the Object Information from the Amino URL Code.
**Parameters**
- **code** : Code from the Amino URL.
- ``http://aminoapps.com/p/EXAMPLE``, the ``code`` is 'EXAMPLE'.
**Returns**
- **Success** : :meth:`From Code Object <amino.lib.util.objects.FromCode>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/link-resolution?q={code}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.FromCode(json.loads(response.text)["linkInfoV2"]).FromCode
def get_from_id(self, objectId: str, objectType: int, comId: str = None):
"""
Get the Object Information from the Object ID and Type.
**Parameters**
- **objectID** : ID of the Object. User ID, Blog ID, etc.
- **objectType** : Type of the Object.
- *comId* : ID of the Community. Use if the Object is in a Community.
**Returns**
- **Success** : :meth:`From Code Object <amino.lib.util.objects.FromCode>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"objectId": objectId,
"targetCode": 1,
"objectType": objectType,
"timestamp": int(timestamp() * 1000)
})
if comId: response = requests.post(f"{self.api}/g/s-x{comId}/link-resolution", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
else: response = requests.post(f"{self.api}/g/s/link-resolution", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.FromCode(json.loads(response.text)["linkInfoV2"]).FromCode
def get_supported_languages(self):
    """
    List the languages supported by Amino.

    **Returns**
        - **Success** : :meth:`List of Supported Languages <List>`
        - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
    """
    response = requests.get(
        f"{self.api}/g/s/community-collection/supported-languages?start=0&size=100",
        headers=self.parse_headers(),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    body = json.loads(response.text)
    if response.status_code == 200:
        return body["supportedLanguages"]
    return exceptions.CheckException(body)
def claim_new_user_coupon(self):
    """
    Claim the coupon granted to newly created accounts.

    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
    """
    response = requests.post(
        f"{self.api}/g/s/coupon/new-user-coupon/claim",
        headers=self.parse_headers(),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return response.status_code
    return exceptions.CheckException(json.loads(response.text))
def get_subscriptions(self, start: int = 0, size: int = 25):
    """
    Get information about the account's subscriptions.

    **Parameters**
        - *start* : Where to start the list.
        - *size* : Size of the list.

    **Returns**
        - **Success** : :meth:`List <List>`
        - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
    """
    response = requests.get(
        f"{self.api}/g/s/store/subscription?objectType=122&start={start}&size={size}",
        headers=self.parse_headers(),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    body = json.loads(response.text)
    if response.status_code == 200:
        return body["storeSubscriptionItemList"]
    return exceptions.CheckException(body)
def get_all_users(self, start: int = 0, size: int = 25):
    """
    Get the list of recently active Amino users.

    **Parameters**
        - *start* : Where to start the list.
        - *size* : Size of the list.

    **Returns**
        - **Success** : :meth:`User Profile Count List Object <amino.lib.util.objects.UserProfileCountList>`
        - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
    """
    response = requests.get(
        f"{self.api}/g/s/user-profile?type=recent&start={start}&size={size}",
        headers=self.parse_headers(),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    body = json.loads(response.text)
    if response.status_code == 200:
        return objects.UserProfileCountList(body).UserProfileCountList
    return exceptions.CheckException(body)
def accept_host(self, chatId: str, requestId: str):
    """
    Accept a pending chat organizer (host) transfer request.

    **Parameters**
        - **chatId** : ID of the Chat.
        - **requestId** : ID of the transfer request.

    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
    """
    # The endpoint expects an (empty) JSON body
    data = json.dumps({})
    response = requests.post(
        f"{self.api}/g/s/chat/thread/{chatId}/transfer-organizer/{requestId}/accept",
        headers=self.parse_headers(data=data),
        data=data,
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return response.status_code
    return exceptions.CheckException(json.loads(response.text))
def accept_organizer(self, chatId: str, requestId: str):
    """
    Accept a chat organizer transfer request (alias of :meth:`accept_host`).

    **Parameters**
        - **chatId** : ID of the Chat.
        - **requestId** : ID of the transfer request.

    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
    """
    # Bug fix: the result of accept_host was previously discarded, so this
    # method always returned None even when the request failed.
    return self.accept_host(chatId, requestId)
# Contributed by 'https://github.com/LynxN1'
def link_identify(self, code: str):
    """
    Identify a community from an invite code.

    Contributed by 'https://github.com/LynxN1'.

    **Parameters**
        - **code** : The invite code from an aminoapps.com/invite URL.

    **Returns**
        - The parsed JSON response (no status-code check is performed).
    """
    url = f"{self.api}/g/s/community/link-identify?q=http%3A%2F%2Faminoapps.com%2Finvite%2F{code}"
    response = requests.get(
        url,
        headers=self.parse_headers(),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    return json.loads(response.text)
def invite_to_vc(self, chatId: str, userId: str):
    """
    Invite a user to a voice chat.

    **Parameters**
        - **chatId** : ID of the Chat.
        - **userId** : ID of the User.

    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
    """
    data = json.dumps({"uid": userId})
    response = requests.post(
        f"{self.api}/g/s/chat/thread/{chatId}/vvchat-presenter/invite",
        headers=self.parse_headers(data=data),
        data=data,
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return response.status_code
    return exceptions.CheckException(json.loads(response.text))
def wallet_config(self, level: int):
    """
    Change the wallet ads configuration.

    **Parameters**
        - **level** : Level of the ads (``1`` or ``2``).

    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
    """
    data = json.dumps({
        "adsLevel": level,
        "timestamp": int(timestamp() * 1000)
    })
    response = requests.post(
        f"{self.api}/g/s/wallet/ads/config",
        headers=self.parse_headers(data=data),
        data=data,
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return response.status_code
    return exceptions.CheckException(json.loads(response.text))
def get_avatar_frames(self, start: int = 0, size: int = 25):
    """
    List available avatar frames.

    **Parameters**
        - *start* : Where to start the list.
        - *size* : Size of the list.

    **Returns**
        - **Success** : :meth:`Avatar Frame List <amino.lib.util.objects.AvatarFrameList>`
        - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
    """
    response = requests.get(
        f"{self.api}/g/s/avatar-frame?start={start}&size={size}",
        headers=self.parse_headers(),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    body = json.loads(response.text)
    if response.status_code == 200:
        return objects.AvatarFrameList(body["avatarFrameList"]).AvatarFrameList
    return exceptions.CheckException(body)
|
server.py | """
Utilities for creating bokeh Server instances.
"""
import datetime as dt
import html
import inspect
import logging
import os
import pathlib
import signal
import sys
import traceback
import threading
import uuid
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial, wraps
from types import FunctionType, MethodType
from urllib.parse import urljoin, urlparse
import param
import bokeh
import bokeh.command.util
# Bokeh imports
from bokeh.application import Application as BkApplication
from bokeh.application.handlers.code import CodeHandler, _monkeypatch_io, patch_curdoc
from bokeh.application.handlers.function import FunctionHandler
from bokeh.command.util import build_single_handler_application
from bokeh.core.templates import AUTOLOAD_JS
from bokeh.document.events import ModelChangedEvent
from bokeh.embed.bundle import Script
from bokeh.embed.elements import html_page_for_render_items, script_for_render_items
from bokeh.embed.util import RenderItem
from bokeh.io import curdoc
from bokeh.server.server import Server
from bokeh.server.urls import per_app_patterns
from bokeh.server.views.autoload_js_handler import AutoloadJsHandler as BkAutoloadJsHandler
from bokeh.server.views.doc_handler import DocHandler as BkDocHandler
from bokeh.server.views.static_handler import StaticHandler
# Tornado imports
from tornado.ioloop import IOLoop
from tornado.websocket import WebSocketHandler
from tornado.web import RequestHandler, StaticFileHandler, authenticated
from tornado.wsgi import WSGIContainer
# Internal imports
from ..util import edit_readonly
from .logging import LOG_SESSION_CREATED, LOG_SESSION_DESTROYED, LOG_SESSION_LAUNCHING
from .profile import profile_ctx
from .reload import autoreload_watcher
from .resources import BASE_TEMPLATE, Resources, bundle_resources
from .state import set_curdoc, state
logger = logging.getLogger(__name__)

#---------------------------------------------------------------------
# Private API
#---------------------------------------------------------------------

# Default index template served at the server root when no app matches.
INDEX_HTML = os.path.join(os.path.dirname(__file__), '..', '_templates', "index.html")
def _origin_url(url):
if url.startswith("http"):
url = url.split("//")[1]
return url
def _server_url(url, port):
if url.startswith("http"):
return '%s:%d%s' % (url.rsplit(':', 1)[0], port, "/")
else:
return 'http://%s:%d%s' % (url.split(':')[0], port, "/")
def _eval_panel(panel, server_id, title, location, doc):
    """Evaluate a panel object, template or factory function and attach
    the result to the bokeh Document *doc*."""
    from ..template import BaseTemplate
    from ..pane import panel as as_panel

    with set_curdoc(doc):
        # Factories are called to obtain the actual object to serve
        if isinstance(panel, (FunctionType, MethodType)):
            panel = panel()
        if not isinstance(panel, BaseTemplate):
            panel = as_panel(panel)
        return panel._modify_doc(server_id, title, doc, location)
def async_execute(func):
    """
    Wrap async event loop scheduling to ensure that with_lock flag
    is propagated from function to partial wrapping it.
    """
    if not state.curdoc or not state.curdoc.session_context:
        # Not running inside a server session: schedule on the running
        # loop, or drive the coroutine to completion ourselves.
        ioloop = IOLoop.current()
        event_loop = ioloop.asyncio_loop
        if event_loop.is_running():
            ioloop.add_callback(func)
        else:
            event_loop.run_until_complete(func())
        return

    # Determine whether the callback may run without the Document lock,
    # looking through functools.partial wrappers for the `lock` flag.
    if isinstance(func, partial) and hasattr(func.func, 'lock'):
        unlock = not func.func.lock
    else:
        unlock = not getattr(func, 'lock', False)
    curdoc = state.curdoc
    @wraps(func)
    async def wrapper(*args, **kw):
        # Re-establish the launching document as current inside the callback
        with set_curdoc(curdoc):
            return await func(*args, **kw)
    if unlock:
        wrapper.nolock = True
    state.curdoc.add_next_tick_callback(wrapper)
param.parameterized.async_executor = async_execute
def _initialize_session_info(session_context):
    """Record launch metadata for a newly created session in
    state.session_info, evicting old entries beyond the history limit."""
    from ..config import config
    session_id = session_context.id
    sessions = state.session_info['sessions']
    # The admin panel keeps unbounded history (-1)
    history = -1 if config._admin else config.session_history
    if not config._admin and (history == 0 or session_id in sessions):
        # History disabled, or this session is already tracked
        return

    state.session_info['total'] += 1
    if history > 0 and len(sessions) >= history:
        # Evict oldest entries, leaving room for the new session
        old_history = list(sessions.items())
        sessions = OrderedDict(old_history[-(history-1):])
        state.session_info['sessions'] = sessions
    sessions[session_id] = {
        'launched': dt.datetime.now().timestamp(),
        'started': None,
        'rendered': None,
        'ended': None,
        'user_agent': session_context.request.headers.get('User-Agent')
    }
    state.param.trigger('session_info')
state.on_session_created(_initialize_session_info)
#---------------------------------------------------------------------
# Bokeh patches
#---------------------------------------------------------------------
def server_html_page_for_session(session, resources, title, template=BASE_TEMPLATE,
                                 template_variables=None):
    """Render the full HTML page for a live server *session*, bundling the
    document's resources into the given *template*."""
    if template_variables is None:
        template_variables = {}
    item = RenderItem(
        token=session.token,
        roots=session.document.roots,
        use_for_title=False,
    )
    bundle = bundle_resources(session.document.roots, resources)
    return html_page_for_render_items(
        bundle, {}, [item], title,
        template=template, template_variables=template_variables,
    )
def autoload_js_script(doc, resources, token, element_id, app_path, absolute_url):
    """Generate the autoload.js chunk used to embed *doc* into an
    external page under the DOM element *element_id*."""
    resources = Resources.from_bokeh(resources)
    bundle = bundle_resources(doc.roots, resources)
    items = [RenderItem(token=token, elementid=element_id, use_for_title=False)]
    script = script_for_render_items(
        {}, items, app_path=app_path, absolute_url=absolute_url
    )
    bundle.add(Script(script))
    return AUTOLOAD_JS.render(bundle=bundle, elementid=element_id)
# Patch Application to handle session callbacks
class Application(BkApplication):
    """Bokeh Application subclass which runs panel's session-created
    callbacks and renders any template registered for a document."""

    async def on_session_created(self, session_context):
        # Run user callbacks registered via state.on_session_created
        # before bokeh's own session setup.
        for cb in state._on_session_created:
            cb(session_context)
        await super().on_session_created(session_context)

    def initialize_document(self, doc):
        super().initialize_document(doc)
        # If a panel template was registered for this document, render it
        # into the document after bokeh's initialization.
        if doc in state._templates:
            template = state._templates[doc]
            template.server_doc(title=template.title, location=True, doc=doc)
bokeh.command.util.Application = Application
class SessionPrefixHandler:
    """Mixin which configures state.base_url and state.rel_path for the
    duration of a request, based on the app's URL prefix."""

    @contextmanager
    def _session_prefix(self):
        # Portion of the request URI preceding the app's own URL
        prefix = self.request.uri.replace(self.application_context._url, '')
        if not prefix.endswith('/'):
            prefix += '/'
        base_url = urljoin('/', prefix)
        # Relative path from the app back to the server root
        rel_path = '/'.join(['..'] * self.application_context._url.strip('/').count('/'))
        old_url, old_rel = state.base_url, state.rel_path

        # Handle autoload.js absolute paths
        abs_url = self.get_argument('bokeh-absolute-url', default=None)
        if abs_url is not None:
            app_path = self.get_argument('bokeh-app-path', default='')
            rel_path = abs_url.replace(app_path, '')

        with edit_readonly(state):
            state.base_url = base_url
            state.rel_path = rel_path
        try:
            yield
        finally:
            # Always restore the previous values, even on error
            with edit_readonly(state):
                state.base_url = old_url
                state.rel_path = old_rel
# Patch Bokeh DocHandler URL
class DocHandler(BkDocHandler, SessionPrefixHandler):
    """Patched bokeh DocHandler which renders sessions with the
    document's own template and panel's resource bundling."""

    @authenticated
    async def get(self, *args, **kwargs):
        with self._session_prefix():
            session = await self.get_session()
            # Expose the session document globally while rendering
            state.curdoc = session.document
            logger.info(LOG_SESSION_CREATED, id(session.document))
            try:
                resources = Resources.from_bokeh(self.application.resources())
                page = server_html_page_for_session(
                    session, resources=resources, title=session.document.title,
                    template=session.document.template,
                    template_variables=session.document.template_variables
                )
            finally:
                # Clear the global document even if rendering failed
                state.curdoc = None
        self.set_header("Content-Type", 'text/html')
        self.write(page)
per_app_patterns[0] = (r'/?', DocHandler)
# Patch Bokeh Autoload handler
class AutoloadJsHandler(BkAutoloadJsHandler, SessionPrefixHandler):
    ''' Implements a custom Tornado handler for the autoload JS chunk

    '''

    async def get(self, *args, **kwargs):
        element_id = self.get_argument("bokeh-autoload-element", default=None)
        if not element_id:
            self.send_error(status_code=400, reason='No bokeh-autoload-element query parameter')
            return

        app_path = self.get_argument("bokeh-app-path", default="/")
        absolute_url = self.get_argument("bokeh-absolute-url", default=None)

        if absolute_url:
            # Derive scheme://host from the absolute embed URL
            server_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(absolute_url))
        else:
            server_url = None

        with self._session_prefix():
            session = await self.get_session()
            # Expose the session document globally while rendering
            state.curdoc = session.document
            try:
                resources = Resources.from_bokeh(self.application.resources(server_url))
                js = autoload_js_script(
                    session.document, resources, session.token, element_id,
                    app_path, absolute_url
                )
            finally:
                state.curdoc = None

        self.set_header("Content-Type", 'application/javascript')
        self.write(js)
per_app_patterns[3] = (r'/autoload.js', AutoloadJsHandler)
def modify_document(self, doc):
    """Patched CodeHandler.modify_document which runs the app script
    against *doc*, adding autoreload, profiling and error rendering."""
    from bokeh.io.doc import set_curdoc as bk_set_curdoc
    from ..config import config

    logger.info(LOG_SESSION_LAUNCHING, id(doc))

    if config.autoreload:
        # Re-create the handler so edited source is picked up on reload
        path = self._runner.path
        argv = self._runner._argv
        handler = type(self)(filename=path, argv=argv)
        self._runner = handler._runner

    module = self._runner.new_module()

    # If no module was returned it means the code runner has some permanent
    # unfixable problem, e.g. the configured source code has a syntax error
    if module is None:
        return

    # One reason modules are stored is to prevent the module
    # from being gc'd before the document is. A symptom of a
    # gc'd module is that its globals become None. Additionally
    # stored modules are used to provide correct paths to
    # custom models resolver.
    sys.modules[module.__name__] = module
    doc.modules._modules.append(module)

    old_doc = curdoc()
    bk_set_curdoc(doc)
    if config.autoreload:
        # NOTE(review): set_curdoc here is panel's context manager, called
        # without `with` — this looks like a no-op; confirm intent.
        set_curdoc(doc)
        state.onload(autoreload_watcher)

    sessions = []

    try:
        def post_check():
            newdoc = curdoc()
            # Do not let curdoc track modules when autoreload is enabled
            # otherwise it will erroneously complain that there is
            # a memory leak
            if config.autoreload:
                newdoc.modules._modules = []

            # script is supposed to edit the doc not replace it
            if newdoc is not doc:
                raise RuntimeError("%s at '%s' replaced the output document" % (self._origin, self._runner.path))

        def handle_exception(handler, e):
            from bokeh.application.handlers.handler import handle_exception
            from ..pane import HTML

            # Clean up
            del sys.modules[module.__name__]
            if hasattr(doc, 'modules'):
                doc.modules._modules.remove(module)
            else:
                doc._modules.remove(module)
            bokeh.application.handlers.code_runner.handle_exception = handle_exception
            tb = html.escape(traceback.format_exc())

            # Serve error
            HTML(
                f'<b>{type(e).__name__}</b>: {e}</br><pre style="overflow-y: scroll">{tb}</pre>',
                css_classes=['alert', 'alert-danger'], sizing_mode='stretch_width'
            ).servable()

        if config.autoreload:
            # Render script errors into the page instead of crashing
            bokeh.application.handlers.code_runner.handle_exception = handle_exception

        state._launching.append(doc)
        with _monkeypatch_io(self._loggers):
            with patch_curdoc(doc):
                with profile_ctx(config.profiler) as sessions:
                    self._runner.run(module, post_check)

        def _log_session_destroyed(session_context):
            logger.info(LOG_SESSION_DESTROYED, id(doc))

        doc.on_session_destroyed(_log_session_destroyed)
    finally:
        state._launching.remove(doc)
        if config.profiler:
            try:
                # Accumulate profiling sessions for the admin dashboard
                path = doc.session_context.request.path
                state._profiles[(path, config.profiler)] += sessions
                state.param.trigger('_profiles')
            except Exception:
                pass
        # Restore whichever document was current before this launch
        bk_set_curdoc(old_doc)
CodeHandler.modify_document = modify_document
# Copied from bokeh 2.4.0, to fix directly in bokeh at some point.
def create_static_handler(prefix, key, app):
    """Build the tornado route for an app's static assets.

    Copied from bokeh 2.4.0; the root key is remapped so the generic
    '/static/(.*)' route is never generated for the root app."""
    # patch
    if key == '/':
        key = '/__patchedroot'

    suffix = "/static/(.*)" if key == "/" else key + "/static/(.*)"
    route = prefix + suffix
    if app.static_path is None:
        return (route, StaticHandler, {})
    return (route, StaticFileHandler, {"path": app.static_path})
bokeh.server.tornado.create_static_handler = create_static_handler
#---------------------------------------------------------------------
# Public API
#---------------------------------------------------------------------
def init_doc(doc):
    """Initialize a bokeh Document for panel: record the serving thread,
    stamp the session start time and hook the document-ready event."""
    doc = doc or curdoc()
    if not doc.session_context:
        # Not running inside a server session; nothing to initialize
        return doc

    thread = threading.current_thread()
    if thread:
        with set_curdoc(doc):
            state._thread_id = thread.ident

    session_id = doc.session_context.id
    sessions = state.session_info['sessions']
    if session_id not in sessions:
        # Session was never registered (e.g. history disabled)
        return doc

    sessions[session_id].update({
        'started': dt.datetime.now().timestamp()
    })
    doc.on_event('document_ready', state._init_session)
    return doc
def with_lock(func):
    """
    Wrap a callback function to execute with a lock allowing the
    function to modify bokeh models directly.

    Arguments
    ---------
    func: callable
        The callable to wrap

    Returns
    -------
    wrapper: callable
        Wrapped callable marked (via its ``lock`` attribute) to run
        while holding the Document lock.
    """
    if inspect.iscoroutinefunction(func):
        async def wrapper(*args, **kwargs):
            return await func(*args, **kwargs)
    else:
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
    wrapper = wraps(func)(wrapper)
    wrapper.lock = True
    return wrapper
def _dispatch_events(doc, events):
"""
Handles dispatch of events which could not be processed in
unlocked decorator.
"""
for event in events:
doc.callbacks.trigger_on_change(event)
@contextmanager
def unlocked():
    """
    Context manager which unlocks a Document and dispatches
    ModelChangedEvents triggered in the context body to all sockets
    on current sessions.
    """
    curdoc = state.curdoc
    if curdoc is None or curdoc.session_context is None or curdoc.session_context.session is None:
        # No live session to dispatch to; run the body unchanged
        yield
        return

    connections = curdoc.session_context.session._subscribed_connections
    hold = curdoc.callbacks.hold_value
    if hold:
        # Document already held by the caller; remember pre-existing events
        old_events = list(curdoc.callbacks._held_events)
    else:
        old_events = []
        curdoc.hold()
    try:
        yield

        # If any websocket is currently write-locked we cannot safely
        # write to it directly; fall back to holding all events
        locked = False
        for conn in connections:
            socket = conn._socket
            if hasattr(socket, 'write_lock') and socket.write_lock._block._value == 0:
                locked = True
                break

        events = []
        for event in curdoc.callbacks._held_events:
            # Only new ModelChangedEvents are dispatched directly; anything
            # else is handed back to the document's normal hold machinery
            if not isinstance(event, ModelChangedEvent) or event in old_events or locked:
                events.append(event)
                continue

            for conn in connections:
                socket = conn._socket
                ws_conn = getattr(socket, 'ws_connection', False)
                if (not hasattr(socket, 'write_message') or
                    ws_conn is None or (ws_conn and ws_conn.is_closing())):
                    # Socket gone or closing; skip this connection
                    continue
                msg = conn.protocol.create('PATCH-DOC', [event])
                WebSocketHandler.write_message(socket, msg.header_json)
                WebSocketHandler.write_message(socket, msg.metadata_json)
                WebSocketHandler.write_message(socket, msg.content_json)
                for header, payload in msg._buffers:
                    WebSocketHandler.write_message(socket, header)
                    WebSocketHandler.write_message(socket, payload, binary=True)

        curdoc.callbacks._held_events = events
    finally:
        if hold:
            # The outer holder is responsible for unholding
            return
        try:
            curdoc.unhold()
        except RuntimeError:
            # Document lock unavailable; dispatch remaining events next tick
            curdoc.add_next_tick_callback(partial(_dispatch_events, curdoc, events))
def serve(panels, port=0, address=None, websocket_origin=None, loop=None,
          show=True, start=True, title=None, verbose=True, location=True,
          threaded=False, **kwargs):
    """
    Allows serving one or more panel objects on a single server.

    The panels argument should be either a Panel object or a function
    returning a Panel object or a dictionary of these two. If a
    dictionary is supplied the keys represent the slugs at which
    each app is served, e.g. `serve({'app': panel1, 'app2': panel2})`
    will serve apps at /app and /app2 on the server.

    Arguments
    ---------
    panels: Viewable, function or {str: Viewable or function}
        A Panel object, a function returning a Panel object or a
        dictionary mapping from the URL slug to either.
    port: int (optional, default=0)
        Allows specifying a specific port
    address : str
        The address the server should listen on for HTTP requests.
    websocket_origin: str or list(str) (optional)
        A list of hosts that can connect to the websocket.
        This is typically required when embedding a server app in
        an external web site.
        If None, "localhost" is used.
    loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())
        The tornado IOLoop to run the Server on
    show : boolean (optional, default=True)
        Whether to open the server in a new browser tab on start
    start : boolean(optional, default=True)
        Whether to start the Server
    title: str or {str: str} (optional, default=None)
        An HTML title for the application or a dictionary mapping
        from the URL slug to a customized title
    verbose: boolean (optional, default=True)
        Whether to print the address and port
    location : boolean or panel.io.location.Location
        Whether to create a Location component to observe and
        set the URL location.
    threaded: boolean (default=False)
        Whether to start the server on a new Thread
    kwargs: dict
        Additional keyword arguments to pass to Server instance

    Returns
    -------
    The `bokeh.server.server.Server` (or, when threaded, the
    `StoppableThread` hosting it).
    """
    # Fold the explicit options into kwargs so they can be forwarded
    # as one bundle to get_server (possibly on another thread).
    kwargs = dict(kwargs, **dict(
        port=port, address=address, websocket_origin=websocket_origin,
        loop=loop, show=show, start=start, title=title, verbose=verbose,
        location=location
    ))
    if threaded:
        from tornado.ioloop import IOLoop
        # Each threaded server needs its own IOLoop
        kwargs['loop'] = loop = IOLoop() if loop is None else loop
        server = StoppableThread(
            target=get_server, io_loop=loop, args=(panels,), kwargs=kwargs
        )
        server_id = kwargs.get('server_id', uuid.uuid4().hex)
        state._threads[server_id] = server
        server.start()
    else:
        server = get_server(panels, **kwargs)
    return server
class ProxyFallbackHandler(RequestHandler):
    """A `RequestHandler` that wraps another HTTP server callback and
    proxies the subpath.
    """

    def initialize(self, fallback, proxy=None):
        # fallback: the wrapped HTTP callable invoked with the request
        # proxy:    URL prefix stripped from the path before dispatch
        self.fallback = fallback
        self.proxy = proxy

    def prepare(self):
        if self.proxy:
            self.request.path = self.request.path.replace(self.proxy, '')
        self.fallback(self.request)
        # Mark the request finished so tornado does not respond again
        self._finished = True
        self.on_finish()
def get_static_routes(static_dirs):
    """
    Returns a list of tornado routes of StaticFileHandlers given a
    dictionary of slugs and file paths to serve.
    """
    patterns = []
    for slug, path in static_dirs.items():
        slug = slug if slug.startswith('/') else '/' + slug
        if slug == '/static':
            # /static is used internally by bokeh/panel resources
            raise ValueError("Static file route may not use /static "
                             "this is reserved for internal use.")
        path = os.path.abspath(path)
        if not os.path.isdir(path):
            raise ValueError("Cannot serve non-existent path %s" % path)
        patterns.append(
            (r"%s/(.*)" % slug, StaticFileHandler, {"path": path})
        )
    return patterns
def get_server(panel, port=0, address=None, websocket_origin=None,
               loop=None, show=False, start=False, title=None,
               verbose=False, location=True, static_dirs={},
               oauth_provider=None, oauth_key=None, oauth_secret=None,
               oauth_extra_params={}, cookie_secret=None,
               oauth_encryption_key=None, session_history=None, **kwargs):
    """
    Returns a Server instance with this panel attached as the root
    app.

    Arguments
    ---------
    panel: Viewable, function or {str: Viewable}
        A Panel object, a function returning a Panel object or a
        dictionary mapping from the URL slug to either.
    port: int (optional, default=0)
        Allows specifying a specific port
    address : str
        The address the server should listen on for HTTP requests.
    websocket_origin: str or list(str) (optional)
        A list of hosts that can connect to the websocket.
        This is typically required when embedding a server app in
        an external web site.
        If None, "localhost" is used.
    loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())
        The tornado IOLoop to run the Server on.
    show : boolean (optional, default=False)
        Whether to open the server in a new browser tab on start.
    start : boolean(optional, default=False)
        Whether to start the Server.
    title : str or {str: str} (optional, default=None)
        An HTML title for the application or a dictionary mapping
        from the URL slug to a customized title.
    verbose: boolean (optional, default=False)
        Whether to report the address and port.
    location : boolean or panel.io.location.Location
        Whether to create a Location component to observe and
        set the URL location.
    static_dirs: dict (optional, default={})
        A dictionary of routes and local paths to serve as static file
        directories on those routes.
    oauth_provider: str
        One of the available OAuth providers
    oauth_key: str (optional, default=None)
        The public OAuth identifier
    oauth_secret: str (optional, default=None)
        The client secret for the OAuth provider
    oauth_extra_params: dict (optional, default={})
        Additional information for the OAuth provider
    cookie_secret: str (optional, default=None)
        A random secret string to sign cookies (required for OAuth)
    oauth_encryption_key: str (optional, default=False)
        A random encryption key used for encrypting OAuth user
        information and access tokens.
    session_history: int (optional, default=None)
        The amount of session history to accumulate. If set to non-zero
        and non-None value will launch a REST endpoint at
        /rest/session_info, which returns information about the session
        history.
    kwargs: dict
        Additional keyword arguments to pass to Server instance.

    Returns
    -------
    server : bokeh.server.server.Server
        Bokeh Server instance running this panel
    """
    # NOTE(review): oauth_secret and oauth_encryption_key are accepted
    # but never applied in this function body — confirm whether they
    # should be written to config like the other oauth options.
    from ..config import config
    from .rest import REST_PROVIDERS

    server_id = kwargs.pop('server_id', uuid.uuid4().hex)
    kwargs['extra_patterns'] = extra_patterns = kwargs.get('extra_patterns', [])
    if isinstance(panel, dict):
        apps = {}
        for slug, app in panel.items():
            if isinstance(title, dict):
                try:
                    title_ = title[slug]
                except KeyError:
                    raise KeyError(
                        "Keys of the title dictionnary and of the apps "
                        f"dictionary must match. No {slug} key found in the "
                        "title dictionary.")
            else:
                title_ = title
            slug = slug if slug.startswith('/') else '/'+slug
            if 'flask' in sys.modules:
                # Flask apps are proxied through a WSGI container
                from flask import Flask
                if isinstance(app, Flask):
                    wsgi = WSGIContainer(app)
                    if slug == '/':
                        raise ValueError('Flask apps must be served on a subpath.')
                    if not slug.endswith('/'):
                        slug += '/'
                    extra_patterns.append(('^'+slug+'.*', ProxyFallbackHandler,
                                           dict(fallback=wsgi, proxy=slug)))
                    continue
            if isinstance(app, pathlib.Path):
                app = str(app) # enables serving apps from Paths
            if (isinstance(app, str) and (app.endswith(".py") or app.endswith(".ipynb"))
                and os.path.isfile(app)):
                apps[slug] = build_single_handler_application(app)
            else:
                handler = FunctionHandler(partial(_eval_panel, app, server_id, title_, location))
                apps[slug] = Application(handler)
    else:
        handler = FunctionHandler(partial(_eval_panel, panel, server_id, title, location))
        apps = {'/': Application(handler)}

    extra_patterns += get_static_routes(static_dirs)

    if session_history is not None:
        config.session_history = session_history
    if config.session_history != 0:
        # Expose session metadata on a REST endpoint
        pattern = REST_PROVIDERS['param']([], 'rest')
        extra_patterns.extend(pattern)
        state.publish('session_info', state, ['session_info'])

    opts = dict(kwargs)
    if loop:
        loop.make_current()
        opts['io_loop'] = loop
    elif opts.get('num_procs', 1) == 1:
        opts['io_loop'] = IOLoop.current()

    if 'index' not in opts:
        opts['index'] = INDEX_HTML

    if address is not None:
        opts['address'] = address

    if websocket_origin:
        if not isinstance(websocket_origin, list):
            websocket_origin = [websocket_origin]
        opts['allow_websocket_origin'] = websocket_origin

    # Configure OAuth
    from ..config import config
    if config.oauth_provider:
        from ..auth import OAuthProvider
        opts['auth_provider'] = OAuthProvider()
    if oauth_provider:
        config.oauth_provider = oauth_provider
    if oauth_key:
        config.oauth_key = oauth_key
    if oauth_extra_params:
        config.oauth_extra_params = oauth_extra_params
    if cookie_secret:
        config.cookie_secret = cookie_secret
    opts['cookie_secret'] = config.cookie_secret

    server = Server(apps, port=port, **opts)
    if verbose:
        address = server.address or 'localhost'
        url = f"http://{address}:{server.port}{server.prefix}"
        print(f"Launching server at {url}")

    state._servers[server_id] = (server, panel, [])

    if show:
        def show_callback():
            server.show('/login' if config.oauth_provider else '/')
        server.io_loop.add_callback(show_callback)

    def sig_exit(*args, **kwargs):
        # Stop the IOLoop from a signal handler in a loop-safe way
        server.io_loop.add_callback_from_signal(do_stop)

    def do_stop(*args, **kwargs):
        server.io_loop.stop()

    try:
        signal.signal(signal.SIGINT, sig_exit)
    except ValueError:
        pass # Can't use signal on a thread

    if start:
        server.start()
        try:
            server.io_loop.start()
        except RuntimeError:
            pass
    return server
class StoppableThread(threading.Thread):
    """Thread class with a stop() method."""

    def __init__(self, io_loop=None, **kwargs):
        super().__init__(**kwargs)
        # The IOLoop running inside the thread; used by stop()
        self.io_loop = io_loop

    def run(self):
        # Resolve the thread target across Python versions (the private
        # attribute name changed between versions)
        if hasattr(self, '_target'):
            target, args, kwargs = self._target, self._args, self._kwargs
        else:
            target, args, kwargs = self._Thread__target, self._Thread__args, self._Thread__kwargs
        if not target:
            return
        bokeh_server = None
        try:
            bokeh_server = target(*args, **kwargs)
        finally:
            # Ensure the bokeh Server is stopped even if the target raised
            if isinstance(bokeh_server, Server):
                try:
                    bokeh_server.stop()
                except Exception:
                    pass
            # Drop target references so they are not leaked by the thread
            if hasattr(self, '_target'):
                del self._target, self._args, self._kwargs
            else:
                del self._Thread__target, self._Thread__args, self._Thread__kwargs

    def stop(self):
        # Schedule the IOLoop shutdown from within the loop's own thread
        self.io_loop.add_callback(self.io_loop.stop)
|
docker.py | import argparse
import docker
import io
import os
import random
import requests.exceptions
import sys
import tempfile
import threading
# Gym Retro is an optional dependency: fall back to a stub which only
# raises when the data directory is actually requested.
try:
    from retro import data_path
except ImportError:
    def data_path():
        raise RuntimeError('Could not find Gym Retro data directory')
class LogThread:
    """Streams a docker container's stdout/stderr to the console on a
    background thread until exhausted or exit() is called."""

    def __init__(self, container):
        # stream=True yields log chunks lazily as the container produces them
        self._log = container.logs(stdout=True, stderr=True, stream=True)
        self._thread = threading.Thread(target=self._run)
        self._active = False

    def start(self):
        if self._active:
            return
        self._active = True
        self._thread.start()

    def exit(self):
        # Signal the worker loop to stop before its next iteration
        self._active = False

    def _run(self):
        while self._active:
            try:
                chunk = next(self._log)
            except StopIteration:
                break
            print(chunk.decode('utf-8'), end='')
def convert_path(path):
    """
    Convert a Windows drive path like ``C:\\dir`` to the docker-mountable
    form ``/c/dir``. On non-Windows platforms, or for paths without a
    drive letter, the path is returned unchanged.
    """
    # Bug fix: guard len(path) > 1 — on Windows a one-character path
    # (e.g. '.') previously raised IndexError on path[1].
    if sys.platform.startswith('win') and len(path) > 1 and path[1] == ':':
        path = '/%s%s' % (path[0].lower(), path[2:].replace('\\', '/'))
    return path
def run(game, state=None, entry=None, **kwargs):
    """Run a remote-env container and an agent container against each other.

    game/state: Gym Retro game (and optional initial state) for the remote.
    entry: optional command the agent container should run.
    Recognized kwargs: remote_env, agent, entry_args, wallclock_limit,
    timestep_limit, discrete_actions, resultsdir, agentdir, agent_shm,
    use_host_data, runtime, quiet.

    Returns {'remote'|'agent': (exit_code, stdout_bytes, stderr_bytes)}.
    Containers and the shared tmpfs volume are removed on all exit paths.
    """
    client = docker.from_env()
    remote_command = ['retro-contest-remote', 'run', game, *([state] if state else []), '-b', 'results/bk2', '-m', 'results']
    remote_name = kwargs.get('remote_env', 'openai/retro-env')
    agent_command = []
    agent_name = kwargs.get('agent', 'agent')
    datamount = {}
    if kwargs.get('wallclock_limit') is not None:
        remote_command.extend(['-W', str(kwargs['wallclock_limit'])])
    if kwargs.get('timestep_limit') is not None:
        remote_command.extend(['-T', str(kwargs['timestep_limit'])])
    if kwargs.get('discrete_actions'):
        remote_command.extend(['-D'])
    if entry:
        agent_command.append(entry)
        if kwargs.get('entry_args'):
            agent_command.extend(kwargs['entry_args'])
    # Random suffix so concurrent runs don't collide on the volume name.
    rand = ''.join(random.sample('abcdefghijklmnopqrstuvwxyz0123456789', 8))
    volname = 'retro-contest-tmp%s' % rand
    # NOTE(review): datamount was already initialized above; this rebinding
    # is redundant but harmless.
    datamount = {}
    agentmount = {}
    if kwargs.get('resultsdir'):
        results = os.path.realpath(kwargs['resultsdir'])
        datamount[convert_path(results)] = {'bind': '/root/compo/results'}
        os.makedirs(results, exist_ok=True)
    else:
        results = None
    if kwargs.get('agentdir'):
        agentdir = os.path.realpath(kwargs['agentdir'])
        agentmount[convert_path(agentdir)] = {'bind': '/root/compo/out'}
        os.makedirs(agentdir, exist_ok=True)
    container_kwargs = {'detach': True, 'network_disabled': True}
    remote_kwargs = dict(container_kwargs)
    agent_kwargs = dict(container_kwargs)
    if kwargs.get('agent_shm'):
        agent_kwargs['shm_size'] = kwargs['agent_shm']
    # tmpfs volume shared between the two containers (their only "bridge").
    bridge = client.volumes.create(volname, driver='local', driver_opts={'type': 'tmpfs', 'device': 'tmpfs'})
    if kwargs.get('use_host_data'):
        remote_command = [remote_command[0], '--data-dir', '/root/data', *remote_command[1:]]
        datamount[convert_path(data_path())] = {'bind': '/root/data', 'mode': 'ro'}
    try:
        remote = client.containers.run(remote_name, remote_command,
                                       volumes={volname: {'bind': '/root/compo/tmp'},
                                                **datamount},
                                       **remote_kwargs)
    except:
        bridge.remove()
        raise
    try:
        agent = client.containers.run(agent_name, agent_command,
                                      volumes={volname: {'bind': '/root/compo/tmp'},
                                               **agentmount},
                                      runtime=kwargs.get('runtime', 'nvidia'),
                                      **agent_kwargs)
    except:
        # Agent failed to start: tear down the already-running remote too.
        remote.kill()
        remote.remove()
        bridge.remove()
        raise
    a_exit = None
    r_exit = None
    if not kwargs.get('quiet'):
        log_thread = LogThread(agent)
        log_thread.start()
    try:
        # Poll both containers until either exits; wait() raises a
        # RequestException on timeout while the container is still running.
        while True:
            try:
                a_exit = agent.wait(timeout=5)
                break
            except requests.exceptions.RequestException:
                pass
            try:
                r_exit = remote.wait(timeout=5)
                break
            except requests.exceptions.RequestException:
                pass
        # One side exited; give the other 10 seconds to follow, then kill it.
        if a_exit is None:
            try:
                a_exit = agent.wait(timeout=10)
            except requests.exceptions.RequestException:
                agent.kill()
        if r_exit is None:
            try:
                r_exit = remote.wait(timeout=10)
            except requests.exceptions.RequestException:
                remote.kill()
    except:
        # Interrupted (e.g. KeyboardInterrupt): best-effort teardown of both.
        if a_exit is None:
            try:
                a_exit = agent.wait(timeout=1)
            except:
                try:
                    agent.kill()
                except docker.errors.APIError:
                    pass
        if r_exit is None:
            try:
                r_exit = remote.wait(timeout=1)
            except:
                try:
                    remote.kill()
                except docker.errors.APIError:
                    pass
        raise
    finally:
        # wait() may return a dict ({'StatusCode': ...}) or a bare code
        # depending on the docker-py version; normalize to the code.
        if isinstance(a_exit, dict):
            a_exit = a_exit.get('StatusCode')
        if isinstance(r_exit, dict):
            r_exit = r_exit.get('StatusCode')
        if not kwargs.get('quiet'):
            log_thread.exit()
        logs = {
            'remote': (r_exit, remote.logs(stdout=True, stderr=False), remote.logs(stdout=False, stderr=True)),
            'agent': (a_exit, agent.logs(stdout=True, stderr=False), agent.logs(stdout=False, stderr=True))
        }
        if results:
            with open(os.path.join(results, 'remote-stdout.txt'), 'w') as f:
                f.write(logs['remote'][1].decode('utf-8'))
            with open(os.path.join(results, 'remote-stderr.txt'), 'w') as f:
                f.write(logs['remote'][2].decode('utf-8'))
            with open(os.path.join(results, 'agent-stdout.txt'), 'w') as f:
                f.write(logs['agent'][1].decode('utf-8'))
            with open(os.path.join(results, 'agent-stderr.txt'), 'w') as f:
                f.write(logs['agent'][2].decode('utf-8'))
        remote.remove()
        agent.remove()
        bridge.remove()
    return logs
def run_args(args):
    """CLI adapter: translate parsed arguments into a run() call.

    Returns True when both containers exited cleanly, False otherwise.
    """
    options = dict(entry_args=args.args,
                   wallclock_limit=args.wallclock_limit,
                   timestep_limit=args.timestep_limit,
                   discrete_actions=args.discrete_actions,
                   resultsdir=args.results_dir,
                   agentdir=args.agent_dir,
                   quiet=args.quiet,
                   use_host_data=args.use_host_data,
                   agent_shm=args.agent_shm)
    if args.no_nv:
        options['runtime'] = None
    # Only forward image overrides that were actually supplied.
    for name in ('agent', 'remote_env'):
        value = getattr(args, name)
        if value:
            options[name] = value
    outcome = run(args.game, args.state, args.entry, **options)
    remote_code = outcome['remote'][0]
    agent_code = outcome['agent'][0]
    if not (remote_code or agent_code):
        return True
    if remote_code:
        print('Remote exited uncleanly:', remote_code)
    if agent_code:
        print('Agent exited uncleanly', agent_code)
    return False
def build(path, tag, install=None, pass_env=False):
    """Build an agent Docker image from a file or package directory.

    path: file or directory copied into the image as the build context.
    tag: Docker tag for the built image.
    install: optional module name or entry point; when given, the context is
        pip-installed as a package instead of copied as agent.py.
    pass_env: when True, run `install` as an entry point via
        retro-contest-agent (it must parse as a valid entry point).

    Raises ValueError when `install` contains characters that would break
    the generated Dockerfile CMD, or is not a valid entry point.
    """
    from pkg_resources import EntryPoint
    import tarfile
    if install:
        destination = 'module'
    else:
        destination = 'agent.py'
    docker_file = ['FROM openai/retro-agent',
                   'COPY context %s' % destination]
    if not install:
        docker_file.append('CMD ["python", "-u", "/root/compo/agent.py"]')
    else:
        docker_file.append('RUN . ~/venv/bin/activate && pip install -e module')
        # Reject characters that would escape the quoted JSON CMD below.
        valid = not any(c in install for c in ' "\\')
        if pass_env:
            try:
                EntryPoint.parse('entry=' + install)
            except ValueError:
                valid = False
            if not valid:
                raise ValueError('Invalid entry point')
            docker_file.append('CMD ["retro-contest-agent", "%s"]' % install)
        else:
            if not valid:
                raise ValueError('Invalid module name')
            docker_file.append('CMD ["python", "-u", "-m", "%s"]' % install)
    print('Creating Docker image...')
    docker_file_full = io.BytesIO('\n'.join(docker_file).encode('utf-8'))
    client = docker.from_env()
    with tempfile.NamedTemporaryFile() as f:
        tf = tarfile.open(mode='w:gz', fileobj=f)
        docker_file_info = tarfile.TarInfo('Dockerfile')
        docker_file_info.size = len(docker_file_full.getvalue())
        tf.addfile(docker_file_info, docker_file_full)
        # BUG FIX: TarFile.add(exclude=...) was removed in Python 3.7; use
        # filter= instead (returning None skips the member).
        tf.add(path, arcname='context',
               filter=lambda info: None if info.name.endswith('/.git') else info)
        tf.close()
        f.seek(0)
        client.images.build(fileobj=f, custom_context=True, tag=tag, gzip=True)
    print('Done!')
def build_args(args):
    """CLI adapter for build(); prints the Docker build log on failure."""
    try:
        build(args.path, args.tag, install=args.install, pass_env=args.pass_env)
    except docker.errors.BuildError as be:
        # Surface the build output before propagating the failure.
        print(*[log['stream'] for log in be.build_log if 'stream' in log])
        raise
    return True
def init_parser(subparsers):
    """Register the `run` and `build` subcommands on an argparse subparsers object."""
    # --- run: launch remote + agent containers locally ---
    run_p = subparsers.add_parser('run', description='Run Docker containers locally')
    run_p.set_defaults(func=run_args)
    run_p.add_argument('game', type=str, help='Name of the game to run')
    run_p.add_argument('state', type=str, default=None, nargs='?', help='Name of initial state')
    run_p.add_argument('--entry', '-e', type=str, help='Name of agent entry point')
    run_p.add_argument('--args', '-A', type=str, nargs='+', help='Extra agent entry arguments')
    run_p.add_argument('--agent', '-a', type=str, help='Extra agent Docker image')
    run_p.add_argument('--wallclock-limit', '-W', type=float, default=None, help='Maximum time to run in seconds')
    run_p.add_argument('--timestep-limit', '-T', type=int, default=None, help='Maximum time to run in timesteps')
    run_p.add_argument('--no-nv', '-N', action='store_true', help='Disable Nvidia runtime')
    run_p.add_argument('--remote-env', '-R', type=str, help='Remote Docker image')
    run_p.add_argument('--results-dir', '-r', type=str, help='Path to output results')
    run_p.add_argument('--agent-dir', '-o', type=str, help='Path to mount into agent (mounted at /root/compo/out)')
    run_p.add_argument('--discrete-actions', '-D', action='store_true', help='Use a discrete action space')
    run_p.add_argument('--use-host-data', '-d', action='store_true', help='Use the host Gym Retro data directory')
    run_p.add_argument('--quiet', '-q', action='store_true', help='Disable printing agent logs')
    run_p.add_argument('--agent-shm', type=str, help='Agent /dev/shm size')
    # --- build: create an agent image from a file or package ---
    build_p = subparsers.add_parser('build', description='Build agent Docker containers')
    build_p.set_defaults(func=build_args)
    build_p.add_argument('path', type=str, help='Path to a file or package')
    build_p.add_argument('--tag', '-t', required=True, type=str, help='Tag name for the built image')
    build_p.add_argument('--install', '-i', type=str, help='Install as a package and run specified module or entry point (if -e is specified)')
    build_p.add_argument('--pass-env', '-e', action='store_true', help='Pass preconfigured environment to entry point specified by -i')
def main(argv=sys.argv[1:]):
    """Entry point: parse argv and dispatch to the selected subcommand."""
    arg_parser = argparse.ArgumentParser(description='Run OpenAI Retro Contest support code')
    # With no subcommand selected, show usage; the None return then exits 1.
    arg_parser.set_defaults(func=lambda args: arg_parser.print_help())
    init_parser(arg_parser.add_subparsers())
    parsed = arg_parser.parse_args(argv)
    ok = parsed.func(parsed)
    if not ok:
        sys.exit(1)
if __name__ == '__main__':
    main()
|
boom.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import tkinter as tk
import random
import threading
import time
def boom():
    """Open a single 200x50 prank window at a random on-screen position."""
    # Step 1: create the Tk window.
    window = tk.Tk()
    width = window.winfo_screenwidth()
    height = window.winfo_screenheight()
    # Random screen coordinates for this window.
    a = random.randrange(0, width)
    b = random.randrange(0, height)
    # Step 2: window title.
    window.title("你是一个傻狍子")
    # Step 3: size the window AND place it at the random position.
    # BUG FIX: the random coordinates were previously passed as the Label's
    # width/height (tkinter measures those in characters/lines, producing an
    # absurdly oversized label); they are screen coordinates and belong in
    # the "WxH+X+Y" geometry string instead.
    window.geometry('200x50+%d+%d' % (a, b))
    # Step 4: the label widget.
    label = tk.Label(window,
                     text="你是一个傻狍子",
                     bg='green',
                     font=('宋体', 17))
    # Step 5: place the label.
    label.pack()
    # Step 6: enter the Tk main loop.
    window.mainloop()
if __name__ == '__main__':
    # Launch five prank windows, staggered by 0.3s, then wait for all of them.
    # NOTE(review): tkinter is generally not thread-safe; creating a Tk()
    # instance per thread works on some platforms but is unsupported --
    # confirm before relying on this pattern.
    threads = []
    for i in range(5):
        t = threading.Thread(target=boom)
        threads.append(t)
        t.start()
        time.sleep(0.3)
    for t in threads:
        t.join()
|
web_datasets.py | from __future__ import absolute_import, print_function, division
import logging
import numpy as np
import os
import re
import time
from base64 import b64encode
from matplotlib.figure import Figure
from matplotlib.ticker import NullLocator
from matplotlib.backends.backend_agg import FigureCanvasAgg
from threading import Thread
from tornado.escape import xhtml_escape
from six import BytesIO, string_types, ensure_str
from superman.dataset import (
VectorDataset, TrajDataset, NumericMetadata, BooleanMetadata, DateMetadata,
PrimaryKeyMetadata, LookupMetadata, CompositionMetadata, TagMetadata)
from six.moves import map
__all__ = [
'WebTrajDataset', 'WebVectorDataset', 'WebLIBSDataset',
'UploadedSpectrumDataset'
]
# Global structure for all loaded datasets.
# Maps spectrum kind -> {dataset name -> dataset object}; populated by
# _ReloadableMixin.reload() after each successful load.
DATASETS = dict(
    Raman={}, LIBS={}, FTIR={}, NIR={}, XAS={}, XRD={}, Mossbauer={}, XRF={}
)
# Ordering for filters of various metadata types.
# Lower numbers sort first in the filter UI.
FILTER_ORDER = {
    PrimaryKeyMetadata: 0,
    LookupMetadata: 1,
    BooleanMetadata: 2,
    TagMetadata: 3,
    DateMetadata: 4,
    NumericMetadata: 5,
    CompositionMetadata: 999  # should always come last
}
class _ReloadableMixin(object):
  """Mixin giving web datasets on-demand (re)loading and filter-UI helpers.

  Subclasses call init_load() with a loader function; loading happens on a
  background thread, and the dataset registers itself in the global DATASETS
  structure once the loader succeeds.
  """
  def init_load(self, loader_fn, loader_args):
    # Machinery for on-demand data refresh
    self.load_time = -1
    self.loader_fn = loader_fn
    self.loader_args = loader_args
    Thread(target=self.reload, name='%s loader thread' % self).start()
  def reload(self):
    """(Re)run the loader if any source file changed since the last load."""
    if self.loader_args:
      mtime = max(map(_try_get_mtime, self.loader_args))
    else:
      mtime = 0
    # Nothing changed since the last successful load: keep existing data.
    if mtime < self.load_time:
      return
    # release any existing data first
    self.clear_data()
    if not self.loader_fn(self, *self.loader_args):
      # loader failed, remove ourselves from the registry
      DATASETS[self.kind].pop(self.name, None)
      return
    # mtime == 0 means non-file loader args; fall back to wall-clock time.
    self.load_time = mtime if mtime > 0 else time.time()
    # register with the global dataset manager
    DATASETS[self.kind][self.name] = self
    logging.info('Successfully registered %s', self)
  def x_axis_units(self):
    """Human-readable x-axis label for this dataset's spectrum kind."""
    if self.kind in ('LIBS', 'NIR'):
      return 'Wavelength (nm)'
    if self.kind in ('Raman', 'FTIR'):
      return 'Wavenumber (1/cm)'
    if self.kind in ('XAS', 'XRF'):
      return 'Energy (eV)'
    if self.kind == 'XRD':
      return '2 Theta'
    if self.kind == 'Mossbauer':
      return 'Velocity (mm/s)'
    return 'Unknown units'
  def filter_ui(self):
    """Build (filter_htmls, init_js, collect_js) fragments for all metadata."""
    # get a unique string for this dataset
    ds_key = 'ds%d' % hash(str(self))
    # Get HTML+JS for filters
    metas = sorted(self.metadata.items(),
                   key=lambda t: (FILTER_ORDER[type(t[1])], t[0]))
    # Add the primary key, unless it's massive.
    if self.pkey is not None and self.pkey.size() < 10000:
      metas.insert(0, ('pkey', self.pkey))
    # Collect all the fragments
    init_js, collect_js, filter_htmls = [], [], []
    for key, m in metas:
      full_key = ds_key + '_' + key
      ijs, cjs = _get_filter_js(m, full_key)
      init_js.append(ijs)
      collect_js.append((key, cjs))
      # Composition metadata expands into one filter per component.
      if isinstance(m, CompositionMetadata):
        filter_htmls.extend(
            _get_composition_filter_html(m, key, full_key))
      else:
        filter_htmls.append(_get_filter_html(m, key, full_key))
    return filter_htmls, init_js, collect_js
  def metadata_names(self, allowed_baseclasses=(object,)):
    """Yield (key, display name) pairs, flattening composition metadata."""
    for key, m in self.metadata.items():
      if not isinstance(m, allowed_baseclasses):
        continue
      if isinstance(m, CompositionMetadata):
        # Components are namespaced under "<key>$<component>".
        ck = key + '$'
        cn = m.display_name(key) + ': '
        for k, mm in m.comps.items():
          yield ck + k, cn + mm.display_name(k)
      else:
        yield key, m.display_name(key)
class WebTrajDataset(TrajDataset, _ReloadableMixin):
  """Trajectory-format dataset loaded asynchronously via a loader function."""
  def __init__(self, name, spec_kind, loader_fn, *loader_args):
    TrajDataset.__init__(self, name, spec_kind)
    # Display metadata; overridden by callers after construction if desired.
    self.description = 'No description provided.'
    self.urls = []
    self.is_public = True
    self.user_added = False
    self.init_load(loader_fn, loader_args)
class WebVectorDataset(VectorDataset, _ReloadableMixin):
  """Vector-format (shared x-axis) dataset loaded via a loader function."""
  def __init__(self, name, spec_kind, loader_fn, *loader_args):
    VectorDataset.__init__(self, name, spec_kind)
    # Display metadata; overridden by callers after construction if desired.
    self.description = 'No description provided.'
    self.urls = []
    self.is_public = True
    self.user_added = False
    self.init_load(loader_fn, loader_args)
class WebLIBSDataset(WebVectorDataset):
  """LIBS vector dataset that auto-computes a Si-ratio metadata column."""
  def __init__(self, name, *args, **kwargs):
    WebVectorDataset.__init__(self, name, 'LIBS', *args, **kwargs)
  def set_data(self, bands, spectra, pkey=None, **metadata):
    if 'si' not in metadata:
      # Compute the Si ratio as a proxy for temperature
      # Denominator window: 288-288.5 nm; numerator window: 633-635.5 nm.
      chan_ranges = (288., 288.5, 633., 635.5)
      den_lo, den_hi, num_lo, num_hi = np.searchsorted(
          bands, chan_ranges)
      si_ratio = np.asarray(spectra[:, num_lo:num_hi].max(axis=1) /
                            spectra[:, den_lo:den_hi].max(axis=1))
      # Clamp negative ratios to zero, in place.
      np.maximum(si_ratio, 0, out=si_ratio)
      metadata['si'] = NumericMetadata(si_ratio, display_name='Si Ratio')
    # Set data as usual, with the Si ratio added
    VectorDataset.set_data(self, bands, spectra, pkey=pkey, **metadata)
  def view(self, **kwargs):
    if 'nan_gap' not in kwargs:
      # default to inserting NaNs for LIBS data
      kwargs['nan_gap'] = 1
    return VectorDataset.view(self, **kwargs)
class UploadedSpectrumDataset(TrajDataset):
  """A single user-uploaded spectrum wrapped as a one-entry trajectory dataset."""
  def __init__(self, name, traj):
    TrajDataset.__init__(self, name, '<unknown>')
    # do some cleanup on the spectrum
    # Ensure x values ascend: flip the trajectory if the first x > second x.
    if traj[0, 0] > traj[1, 0]:
      traj = traj[::-1]
    data = {name: traj.astype(np.float32, order='C')}
    self.set_data([name], data)
def _try_get_mtime(filepath):
  """Best-effort mtime lookup: -1 for non-string inputs or unreadable paths."""
  if isinstance(filepath, string_types):
    try:
      return os.path.getmtime(filepath)
    except OSError:
      pass
  return -1
def _generate_histogram(m):
  """Render a NumericMetadata histogram as a base64-encoded PNG string,
  used as the background image for the filter slider."""
  # Make a 350px by 32px image for a slider background
  fig = Figure(figsize=(3.5, 0.32), dpi=100, tight_layout=False)
  # This is required, even though we don't explicitly use the canvas.
  canvas = FigureCanvasAgg(fig)
  ax = fig.add_subplot(1, 1, 1)
  vrange = float(m.bounds[1] - m.bounds[0])
  # May be NaN if m.step is 0 or NaN; handled below.
  num_bins = np.ceil(vrange / m.step) + 1
  arr = m.arr
  if m.has_nan:
    arr = arr[np.isfinite(arr)]
  if np.isnan(num_bins):
    # Fall back to matplotlib's default binning.
    ax.hist(arr)
  elif num_bins > 300:
    # Cap the bin count to keep the strip legible.
    ax.hist(arr, 300, range=m.bounds)
  else:
    ax.hist(arr, int(num_bins), range=m.bounds)
  ax.axis('off')
  # Strip all margins/ticks so the histogram fills the strip exactly.
  fig.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
  ax.margins(0, 0)
  ax.xaxis.set_major_locator(NullLocator())
  ax.yaxis.set_major_locator(NullLocator())
  # save it as a base64 encoded string
  img_data = BytesIO()
  fig.savefig(img_data, format='png', bbox_inches='tight', pad_inches=0)
  return ensure_str(b64encode(img_data.getvalue()))
def _get_filter_js(m, full_key):
  """Return (init_js, collect_js) snippets for one metadata filter widget.

  init_js runs on page load to set up the widget; collect_js is a JS
  expression that reads the widget's current value when filters are applied.
  """
  if isinstance(m, BooleanMetadata):
    return '', '$("#%s").val()' % full_key
  if isinstance(m, NumericMetadata):
    lb, ub = m.bounds
    # note that %.17g is a no-go here, because scientific formatted floats
    # break the jqueryui slider lib we're using.
    init_js = ('slider_init("%s", %.17f, %.17f, %.17f, %d);' % (
        full_key, lb, ub, m.step, m.has_nan))
    collect_js = 'slider_values("#%s_label")' % full_key
    return init_js, collect_js
  if isinstance(m, DateMetadata):
    return '', '[$("#%s_lb").val(),$("#%s_ub").val()]' % (
        full_key, full_key)
  if isinstance(m, CompositionMetadata):
    # Recurse into each component, namespacing keys with "-".
    init_parts, collect_parts = [], []
    for k, mm in m.comps.items():
      ijs, cjs = _get_filter_js(mm, full_key + '-' + k)
      init_parts.append(ijs)
      collect_parts.append('"%s": %s' % (k, cjs))
    collect_js = '{' + ','.join(collect_parts) + '}'
    return '\n'.join(init_parts), collect_js
  # only fancy selects remain (Lookup/PrimaryKey/Tag)
  # initialize the dropdown, adding some width for the scrollbar
  init_js = '$("#%s_chooser").css("width", "+=20").select2();' % full_key
  collect_js = 'multi_val($("#%s_chooser option:selected"))' % full_key
  if isinstance(m, (LookupMetadata, PrimaryKeyMetadata)):
    # These also carry a free-text search box alongside the dropdown.
    search_js = '$("#%s_search").val()' % full_key
    collect_js = '{select: %s, search: %s}' % (collect_js, search_js)
  return init_js, collect_js
def _numeric_filter_html(display_name, full_key, lb, ub, hist_image):
# TODO: convert this to a Jinja template.
html = '''
<div>{display_name}:
<span id="{full_key}_label">
<span>{lb}</span> to <span>{ub}</span>
</span>
</div>
<div class="slider" id="{full_key}"
style="background-image: url(data:img/png;base64,{hist_image});">
</div>
'''.format(**locals())
return re.sub(r'\s+', ' ', html)
def _get_filter_html(m, key, full_key):
  """Render the HTML filter widget for a single metadata object."""
  disp = m.display_name(key)
  if isinstance(m, BooleanMetadata):
    # Simple tri-state dropdown: Both / Yes / No.
    return ('%s: <select id="%s"><option value=both>Both</option>'
            '<option value=yes>Yes</option><option value=no>No</option>'
            '</select>') % (disp, full_key)
  if isinstance(m, NumericMetadata):
    lb, ub = m.bounds
    # lazy load histogram
    if not hasattr(m, 'hist_image'):
      m.hist_image = _generate_histogram(m)
    return _numeric_filter_html(disp, full_key, lb, ub, m.hist_image)
  if isinstance(m, DateMetadata):
    # Bounds rendered as ISO dates for the <input type=date> widgets.
    lb, ub = list(map(str, np.array(m.bounds, dtype='datetime64[D]')))
    lb_input = '<input type="date" id="%s_lb" value="%s">' % (full_key, lb)
    ub_input = '<input type="date" id="%s_ub" value="%s">' % (full_key, ub)
    return '%s:<div>%s to %s</div>' % (disp, lb_input, ub_input)
  # only fancy selects remain (Lookup/PrimaryKey/Tag)
  html = u'%s:<select id="%s_chooser" data-placeholder="All" multiple>' % (
      disp, full_key)
  if isinstance(m, PrimaryKeyMetadata):
    uniques = sorted(m.keys)
  elif isinstance(m, TagMetadata):
    uniques = sorted(m.tags)
  else:
    uniques = m.uniques
  # Escape the display text but not the value attribute (matches existing use).
  lines = (u'\n<option value="%s">%s</option>' % (x, xhtml_escape(x))
           for x in uniques)
  html += u''.join(lines) + u'\n</select>'
  if isinstance(m, (LookupMetadata, PrimaryKeyMetadata)):
    html += '\n<input type="text" placeholder="search" id="%s_search">' % (
        full_key)
  return html
def _get_composition_filter_html(m, key, full_key):
  """Render one filter HTML fragment per component of a CompositionMetadata.

  Fix: the loop previously rebound the parameter name ``m`` as its loop
  variable, shadowing the composition object mid-iteration; the component
  metadata now gets its own name.
  """
  html_parts = []
  for comp_key, comp_meta in m.comps.items():
    html_parts.append(_get_filter_html(
        comp_meta, key + '-' + comp_key, full_key + '-' + comp_key))
  return html_parts
|
classify_tpu_standlone.py |
# Copyright (c) HP-NTU Digital Manufacturing Corporate Lab, Nanyang Technological University, Singapore.
#
# This source code is licensed under the Apache-2.0 license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from edgetpu.classification.engine import ClassificationEngine
from PIL import Image
import os
import time
from collections import OrderedDict
import numpy as np
import accuracy
import json
import sys
import logging
from power import serialUtil
from multiprocessing import Process
import threading
# Root logger writing append-mode timing records to latency_summary.txt.
logger = logging.getLogger()
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
logging.basicConfig(filename="latency_summary.txt", filemode="a", format="%(asctime)s--%(levelname)s--%(message)s", datefmt=DATE_FORMAT)
logger.setLevel(logging.INFO)
# Flag polled by the power-sampling thread; main() clears it to stop sampling.
alive = True
def cifarnet_preprocessing():
    # Intentional no-op placeholder.
    # NOTE(review): main() invokes the selected preprocessing function as
    # fn(image, height, width); this stub takes no arguments and would raise
    # TypeError if the 'cifarnet' model were ever selected -- confirm intent.
    return
def lenet_preprocessing():
    # Intentional no-op placeholder; same signature caveat as the stub above.
    return
def get_preprocessing(name):
    """Look up the preprocessing callable for a model name.

    Raises KeyError for unknown model names (same as the original mapping).
    """
    # Families of models grouped by the preprocessing they share.
    inception_family = (
        'inception', 'inception_v1', 'inception_v2', 'inception_v3',
        'inception_v4', 'inception_resnet_v2', 'mobilenet_v1',
        'mobilenet_v2', 'mobilenet_v2_035', 'mobilenet_v3_small',
        'mobilenet_v3_large', 'mobilenet_v3_small_minimalistic',
        'mobilenet_v3_large_minimalistic', 'mobilenet_edgetpu',
        'mobilenet_edgetpu_075', 'mobilenet_v2_140', 'nasnet_mobile',
        'nasnet_large', 'pnasnet_mobile', 'pnasnet_large', 'mnasnet_b1')
    vgg_family = (
        'resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152', 'resnet_v1_200',
        'resnet_v2_50', 'resnet_v2_101', 'resnet_v2_152', 'resnet_v2_200',
        'vgg', 'vgg_a', 'vgg_16', 'vgg_19')
    table = {model: inception_preprocessing for model in inception_family}
    table.update({model: vgg_preprocessing for model in vgg_family})
    table['cifarnet'] = cifarnet_preprocessing
    table['lenet'] = lenet_preprocessing
    return table[name]
def central_crop(image: Image, central_fraction: float):
    """Crop the central `central_fraction` of a PIL image along both axes."""
    img_w, img_h = image.size
    # Margin removed from each side of each axis.
    top = int((1.0 * img_h - img_h * central_fraction) / 2)
    left = int((1.0 * img_w - img_w * central_fraction) / 2)
    # Right/bottom edges mirror the left/top margins.
    return image.crop((left, top, img_w - left, img_h - top))
def inception_preprocessing(image: Image, height: int, width: int, central_fraction=0.875):
    """Central-crop then bilinear-resize to (width, height)."""
    cropped = central_crop(image, central_fraction)
    if not (height and width):
        # No target size given: return the crop as-is.
        return cropped
    return cropped.resize((width, height), Image.BILINEAR)
def vgg_preprocessing(image: Image, height: int, width: int, resize_side=256):
    """Resize so the shorter side equals `resize_side`, then center-crop to
    (width, height) -- the standard VGG/ResNet evaluation preprocessing.

    Fix: the crop offsets were computed with true division, producing float
    box coordinates; integer (floor) division is used now so the crop box
    is exact and deterministic.
    """
    img_h = image.size[1]
    img_w = image.size[0]
    # Scale factor that maps the shorter side to resize_side.
    if img_h > img_w:
        scale = 1.0 * resize_side / img_w
    else:
        scale = 1.0 * resize_side / img_h
    new_height = int(img_h * scale)
    new_width = int(img_w * scale)
    image = image.resize((new_width, new_height), Image.BILINEAR)
    # Integer center-crop offsets (was float `/ 2`).
    offset_height = (new_height - height) // 2
    offset_width = (new_width - width) // 2
    image = image.crop((offset_width, offset_height, offset_width + width, offset_height + height))
    return image
def power():
    """Sample power readings over Bluetooth serial until `alive` is cleared.

    Runs on a background thread started by main(); each reading (one per
    line -- possibly the string "None" for a failed read) is written to
    power_results.txt, which main() averages after inference finishes.
    """
    # Initialize the serial port
    su = serialUtil.SerialBlueTooth("/dev/rfcomm0")
    su.connect()
    # Read the data
    with open("power_results.txt", 'w') as wf:
        while alive:
            wf.write(str(su.read())+'\n')
def main():
    """Benchmark an EdgeTPU classification model over an image folder.

    Loads the tflite model, classifies every image in --data for --number
    rounds while a background thread samples power draw, then reports
    latency, average power, and top-1/top-5 accuracy.
    """
    global alive
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', help='Dataset Path', type=str, required=True)
    parser.add_argument('--model', help='File path of Tflite model.', type=str, required=True)
    parser.add_argument('--number', help='Running number to test.', type=int, required=True)
    parser.add_argument('--label', type=str, help="real label path", required=True)
    parser.add_argument('--modelname', type=str, help="model name", required=True)
    args = parser.parse_args()
    # Initialize engine
    lm_start = time.time()
    engine = ClassificationEngine(args.model)
    lm_end = time.time()
    input_shape = engine.get_input_tensor_shape()
    image_files = {}
    i = 0
    # Force the order of images read as the number order in original dataset (ImageNet)
    for filett in os.listdir(args.data):
        image_files[i] = filett
        i += 1
    print("Total {0} images are tested".format(len(image_files)))
    ori_total_infer = 0
    total_save_time = 0
    logger.info("Running " + args.model + " for " + str(args.number) + " begins")
    # Start power sampling in the background; stopped via `alive` below.
    p = threading.Thread(target=power)
    p.start()
    total_start = time.time()
    # Run inference.
    with open("temp_result", 'w') as wf:
        with open("temp_result_5", 'w') as wf1:
            for i in range(args.number):
                for key in image_files:
                    image_t = Image.open(args.data + '/' + image_files[key])
                    if image_t.mode == 'L':
                        # Grayscale images must be expanded to RGB.
                        image_t = image_t.convert("RGB")
                    # To resize the image
                    preprocess_t = get_preprocessing(args.modelname)
                    image_t = preprocess_t(image_t, input_shape[1], input_shape[2])
                    # Execute the engine
                    results = engine.classify_with_image(image_t, top_k=1, threshold=1e-10)
                    # Get the inference time
                    origin_inf = engine.get_inference_time()
                    # logger.info("Iteration " + str(i) + " runs " + str(origin_inf) + " ms")
                    ori_total_infer = ori_total_infer + origin_inf
                    # Time spent writing predictions is tracked separately so
                    # it can be excluded from inference latency.
                    save_begin = time.time()
                    for result in results:
                        wf.write(image_files[key] + ' ' + str(result[0]) + '\n')
                    results = engine.classify_with_image(image_t, top_k=5, threshold=1e-10)
                    for result in results:
                        wf1.write(image_files[key] + ' ' + str(result[0]) + '\n')
                    save_end = time.time()
                    total_save_time = total_save_time + save_end - save_begin
    end_time = time.time()
    # Stop the power-sampling thread and wait for it to flush its file.
    alive = False
    p.join()
    print("Total time taken {0} seconds".format(end_time - total_start))
    print("Loading model time taken {0} seconds".format(lm_end - lm_start))
    print("Total inference time {0} seconds".format(ori_total_infer/1000))
    logger.info("Per image inference runs {0} ms".format(ori_total_infer/args.number))
    logger.info("Running " + args.model + " finishes")
    # Average the power samples; "None" lines mark failed reads and are skipped.
    # NOTE(review): if every sample is "None", `count` stays 0 and the average
    # below divides by zero -- confirm whether that can happen in practice.
    with open("power_results.txt", 'r') as rf:
        line = rf.readline()
        count = 0
        temp = 0.0
        while line:
            line = line.strip()
            if line == "None":
                line = rf.readline()
                continue
            else:
                count += 1
                temp += float(line)
                line = rf.readline()
        print("Average power is {}".format(temp / count))
    # print("Average power is 1.0")
    # print("Total save time {0} seconds".format(total_save_time))
    print("Top-1 accuracy:", end='')
    accuracy.accuracy(args.label, "temp_result", len(image_files))
    print("Top-5 accuracy:", end='')
    accuracy.accuracy(args.label, "temp_result_5", len(image_files))
if __name__ == '__main__':
    main()
|
monitors.py | from __future__ import absolute_import
import logging
import threading
from .sleeper import Sleeper
log = logging.getLogger(__name__)
DEFAULT_MONITOR_THREAD_JOIN_TIMEOUT = 5
class Monitors(object):
    """Mixin providing a stoppable background monitor thread.

    Host classes call _init_monitor_thread() and get:
      * self.monitor_thread  - daemon thread running the monitor function
      * self.sleeper         - interruptible sleeper used by _monitor_sleep()
      * self.monitor_running - loop flag monitor implementations should check
    """
    def _init_monitor_thread(self, name, target_name=None, target=None, start=False, config=None):
        """Create (and optionally start) the monitor thread.

        name: thread name.
        target_name: attribute name of the monitor method (default
            "monitor"); mutually exclusive with `target`.
        target: explicit callable to run instead of a named method.
        start: when True, start the thread immediately.
        config: optional object carrying monitor_thread_join_timeout.
        """
        self.monitor_join_sleep = getattr(config, "monitor_thread_join_timeout", DEFAULT_MONITOR_THREAD_JOIN_TIMEOUT)
        # A non-positive timeout disables joining in shutdown_monitor().
        self.monitor_join = self.monitor_join_sleep > 0
        self.monitor_sleeper = Sleeper()
        self.monitor_running = True
        if target is not None:
            assert target_name is None
            monitor_func = target
        else:
            target_name = target_name or "monitor"
            monitor_func = getattr(self, target_name)
        self.sleeper = Sleeper()
        self.monitor_thread = threading.Thread(name=name, target=monitor_func)
        # Fix: Thread.setDaemon() is deprecated (since Python 3.10); assign
        # the daemon attribute directly instead.
        self.monitor_thread.daemon = True
        if start:
            self.monitor_thread.start()
    def stop_monitoring(self):
        """Signal the monitor loop to exit at its next flag check."""
        self.monitor_running = False
    def _monitor_sleep(self, sleep_amount):
        """Interruptible sleep; shutdown_monitor() can wake it early."""
        self.sleeper.sleep(sleep_amount)
    def shutdown_monitor(self):
        """Stop the monitor, wake any pending sleep, and optionally join."""
        self.stop_monitoring()
        self.sleeper.wake()
        if self.monitor_join:
            log.debug("Joining monitor thread")
            self.monitor_thread.join(self.monitor_join_sleep)
|
multiprocessing_class.py | from multiprocessing import Process
import os
def info(title):
    """Print a heading followed by module-name, parent-PID and own-PID lines."""
    print(title)
    for label, value in (('module name:', __name__),
                         ('parent process:', os.getppid()),
                         ('process id:', os.getpid())):
        print(label, value)
def f(name):
    """Demo worker run in the child process: report PIDs, then greet `name`."""
    info('function f')
    greeting = ('hello', name)
    print(*greeting)
if __name__ == '__main__':
    # Spawn a single child process running f('bob') and wait for it to exit.
    p = Process(target=f, args=('bob',))
    p.start()
    p.join()
test_weakref.py | import gc
import sys
import unittest
import collections
import weakref
import operator
import contextlib
import copy
import threading
import time
import random
from test import support
from test.support import script_helper
# Used in ReferencesTestCase.test_ref_created_during_del() .
ref_from_del = None
# Used by FinalizeTestCase as a global that may be replaced by None
# when the interpreter shuts down.
_global_var = 'foobar'
class C:
    # Minimal weak-referenceable fixture class used throughout these tests.
    def method(self):
        pass
class Callable:
    # Callable fixture; records the last argument it was called with.
    bar = None
    def __call__(self, x):
        self.bar = x
def create_function():
    # Returns a fresh function object (functions support weak references).
    def f(): pass
    return f
def create_bound_method():
    # Returns a fresh bound-method object (bound methods support weakrefs).
    return C().method
class Object:
    """Hashable, orderable value object used as a dict/set element in tests."""
    def __init__(self, arg):
        self.arg = arg
    def __repr__(self):
        return "<Object %r>" % self.arg
    def __eq__(self, other):
        # Compare by wrapped value; defer to the other operand otherwise.
        if isinstance(other, Object):
            return self.arg == other.arg
        return NotImplemented
    def __lt__(self, other):
        if isinstance(other, Object):
            return self.arg < other.arg
        return NotImplemented
    def __hash__(self):
        # Hash consistently with __eq__ (same arg -> same hash).
        return hash(self.arg)
    def some_method(self):
        return 4
    def other_method(self):
        return 5
class RefCycle:
    # Object that participates in a reference cycle with itself.
    def __init__(self):
        self.cycle = self
class TestBase(unittest.TestCase):
    # Base class counting weakref-callback invocations in self.cbcalled.
    def setUp(self):
        self.cbcalled = 0
    def callback(self, ref):
        # Standard weakref callback signature: receives the dead reference.
        self.cbcalled += 1
@contextlib.contextmanager
def collect_in_thread(period=0.0001):
    """
    Ensure GC collections happen in a different thread, at a high frequency.
    """
    please_stop = False
    def collect():
        # Sleep briefly, then force a collection, until told to stop.
        while not please_stop:
            time.sleep(period)
            gc.collect()
    with support.disable_gc():
        t = threading.Thread(target=collect)
        t.start()
        try:
            yield
        finally:
            # Rebinding the outer local is visible to collect() because it
            # only ever reads the name from the enclosing scope.
            please_stop = True
            t.join()
class ReferencesTestCase(TestBase):
    def test_basic_ref(self):
        # Plain weakrefs to a class instance, a function and a bound method.
        self.check_basic_ref(C)
        self.check_basic_ref(create_function)
        self.check_basic_ref(create_bound_method)
        # Just make sure the tp_repr handler doesn't raise an exception.
        # Live reference:
        o = C()
        wr = weakref.ref(o)
        repr(wr)
        # Dead reference:
        del o
        repr(wr)
    def test_basic_callback(self):
        # Callbacks must fire exactly once when the referent dies.
        self.check_basic_callback(C)
        self.check_basic_callback(create_function)
        self.check_basic_callback(create_bound_method)
    @support.cpython_only
    def test_cfunction(self):
        # Built-in (C) function objects support weak references too.
        import _testcapi
        create_cfunction = _testcapi.create_cfunction
        f = create_cfunction()
        wr = weakref.ref(f)
        self.assertIs(wr(), f)
        del f
        self.assertIsNone(wr())
        self.check_basic_ref(create_cfunction)
        self.check_basic_callback(create_cfunction)
    def test_multiple_callbacks(self):
        # Two refs with callbacks on one object: both callbacks must run.
        o = C()
        ref1 = weakref.ref(o, self.callback)
        ref2 = weakref.ref(o, self.callback)
        del o
        self.assertIsNone(ref1(), "expected reference to be invalidated")
        self.assertIsNone(ref2(), "expected reference to be invalidated")
        self.assertEqual(self.cbcalled, 2,
                         "callback not called the right number of times")
    def test_multiple_selfref_callbacks(self):
        # Make sure all references are invalidated before callbacks are called
        #
        # What's important here is that we're using the first
        # reference in the callback invoked on the second reference
        # (the most recently created ref is cleaned up first).  This
        # tests that all references to the object are invalidated
        # before any of the callbacks are invoked, so that we only
        # have one invocation of _weakref.c:cleanup_helper() active
        # for a particular object at a time.
        #
        def callback(object, self=self):
            self.ref()
        c = C()
        self.ref = weakref.ref(c, callback)
        ref1 = weakref.ref(c, callback)
        del c
    def test_constructor_kwargs(self):
        # weakref.ref() rejects keyword arguments.
        c = C()
        self.assertRaises(TypeError, weakref.ref, c, callback=None)
    def test_proxy_ref(self):
        # A dead proxy raises ReferenceError on any attribute access or bool().
        o = C()
        o.bar = 1
        ref1 = weakref.proxy(o, self.callback)
        ref2 = weakref.proxy(o, self.callback)
        del o
        def check(proxy):
            proxy.bar
        self.assertRaises(ReferenceError, check, ref1)
        self.assertRaises(ReferenceError, check, ref2)
        self.assertRaises(ReferenceError, bool, weakref.proxy(C()))
        self.assertEqual(self.cbcalled, 2)
    def check_basic_ref(self, factory):
        # Helper: a weakref to a live object must dereference to that object.
        o = factory()
        ref = weakref.ref(o)
        self.assertIsNotNone(ref(),
                             "weak reference to live object should be live")
        o2 = ref()
        self.assertIs(o, o2,
                      "<ref>() should return original object if live")
    def check_basic_callback(self, factory):
        # Helper: deleting the referent fires the callback exactly once.
        self.cbcalled = 0
        o = factory()
        ref = weakref.ref(o, self.callback)
        del o
        self.assertEqual(self.cbcalled, 1,
                         "callback did not properly set 'cbcalled'")
        self.assertIsNone(ref(),
                          "ref2 should be dead after deleting object reference")
def test_ref_reuse(self):
o = C()
ref1 = weakref.ref(o)
# create a proxy to make sure that there's an intervening creation
# between these two; it should make no difference
proxy = weakref.proxy(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
o = C()
proxy = weakref.proxy(o)
ref1 = weakref.ref(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
self.assertEqual(weakref.getweakrefcount(o), 2,
"wrong weak ref count for object")
del proxy
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong weak ref count for object after deleting proxy")
    def test_proxy_reuse(self):
        """weakref.proxy without a callback returns the same shared proxy
        object for repeated calls on the same referent."""
        o = C()
        proxy1 = weakref.proxy(o)
        ref = weakref.ref(o)
        proxy2 = weakref.proxy(o)
        self.assertIs(proxy1, proxy2,
                     "proxy object w/out callback should have been re-used")
    def test_basic_proxy(self):
        """Proxies must forward attribute access, truth value, len(),
        item get/set, __contains__, equality and slicing to the referent."""
        o = C()
        self.check_proxy(o, weakref.proxy(o))
        L = collections.UserList()
        p = weakref.proxy(L)
        self.assertFalse(p, "proxy for empty UserList should be false")
        p.append(12)
        self.assertEqual(len(L), 1)
        self.assertTrue(p, "proxy for non-empty UserList should be true")
        p[:] = [2, 3]
        self.assertEqual(len(L), 2)
        self.assertEqual(len(p), 2)
        self.assertIn(3, p, "proxy didn't support __contains__() properly")
        p[1] = 5
        self.assertEqual(L[1], 5)
        self.assertEqual(p[1], 5)
        L2 = collections.UserList(L)
        p2 = weakref.proxy(L2)
        self.assertEqual(p, p2)
        ## self.assertEqual(repr(L2), repr(p2))
        L3 = collections.UserList(range(10))
        p3 = weakref.proxy(L3)
        self.assertEqual(L3[:], p3[:])
        self.assertEqual(L3[5:], p3[5:])
        self.assertEqual(L3[:5], p3[:5])
        self.assertEqual(L3[2:5], p3[2:5])
def test_proxy_unicode(self):
# See bug 5037
class C(object):
def __str__(self):
return "string"
def __bytes__(self):
return b"bytes"
instance = C()
self.assertIn("__bytes__", dir(weakref.proxy(instance)))
self.assertEqual(bytes(weakref.proxy(instance)), b"bytes")
def test_proxy_index(self):
class C:
def __index__(self):
return 10
o = C()
p = weakref.proxy(o)
self.assertEqual(operator.index(p), 10)
def test_proxy_div(self):
class C:
def __floordiv__(self, other):
return 42
def __ifloordiv__(self, other):
return 21
o = C()
p = weakref.proxy(o)
self.assertEqual(p // 5, 42)
p //= 5
self.assertEqual(p, 21)
def test_proxy_matmul(self):
class C:
def __matmul__(self, other):
return 1729
def __rmatmul__(self, other):
return -163
def __imatmul__(self, other):
return 561
o = C()
p = weakref.proxy(o)
self.assertEqual(p @ 5, 1729)
self.assertEqual(5 @ p, -163)
p @= 5
self.assertEqual(p, 561)
# The PyWeakref_* C API is documented as allowing either NULL or
# None as the value for the callback, where either means "no
# callback". The "no callback" ref and proxy objects are supposed
# to be shared so long as they exist by all callers so long as
# they are active. In Python 2.3.3 and earlier, this guarantee
# was not honored, and was broken in different ways for
# PyWeakref_NewRef() and PyWeakref_NewProxy(). (Two tests.)
    def test_shared_ref_without_callback(self):
        """NULL and None callbacks yield the same shared weakref.ref."""
        self.check_shared_without_callback(weakref.ref)
    def test_shared_proxy_without_callback(self):
        """NULL and None callbacks yield the same shared weakref.proxy."""
        self.check_shared_without_callback(weakref.proxy)
    def check_shared_without_callback(self, makeref):
        """Helper: makeref(o) and makeref(o, None) must return the same
        shared object, in every NULL/None combination (the shared ref is
        dropped between combinations via `del p1, p2`)."""
        o = Object(1)
        p1 = makeref(o, None)
        p2 = makeref(o, None)
        self.assertIs(p1, p2, "both callbacks were None in the C API")
        del p1, p2
        p1 = makeref(o)
        p2 = makeref(o, None)
        self.assertIs(p1, p2, "callbacks were NULL, None in the C API")
        del p1, p2
        p1 = makeref(o)
        p2 = makeref(o)
        self.assertIs(p1, p2, "both callbacks were NULL in the C API")
        del p1, p2
        p1 = makeref(o, None)
        p2 = makeref(o)
        self.assertIs(p1, p2, "callbacks were None, NULL in the C API")
    def test_callable_proxy(self):
        """A proxy to a callable is a CallableProxyType and forwards
        positional and keyword calls (including argument errors)."""
        o = Callable()
        ref1 = weakref.proxy(o)
        self.check_proxy(o, ref1)
        self.assertIs(type(ref1), weakref.CallableProxyType,
                     "proxy is not of callable type")
        ref1('twinkies!')
        self.assertEqual(o.bar, 'twinkies!',
                     "call through proxy not passed through to original")
        ref1(x='Splat.')
        self.assertEqual(o.bar, 'Splat.',
                     "call through proxy not passed through to original")
        # expect due to too few args
        self.assertRaises(TypeError, ref1)
        # expect due to too many args
        self.assertRaises(TypeError, ref1, 1, 2, 3)
    def check_proxy(self, o, proxy):
        """Helper: attribute set/get/delete must be mirrored in both
        directions between *o* and *proxy*."""
        o.foo = 1
        self.assertEqual(proxy.foo, 1,
                         "proxy does not reflect attribute addition")
        o.foo = 2
        self.assertEqual(proxy.foo, 2,
                         "proxy does not reflect attribute modification")
        del o.foo
        self.assertFalse(hasattr(proxy, 'foo'),
                     "proxy does not reflect attribute removal")
        proxy.foo = 1
        self.assertEqual(o.foo, 1,
                         "object does not reflect attribute addition via proxy")
        proxy.foo = 2
        self.assertEqual(o.foo, 2,
                         "object does not reflect attribute modification via proxy")
        del proxy.foo
        self.assertFalse(hasattr(o, 'foo'),
                     "object does not reflect attribute removal via proxy")
def test_proxy_deletion(self):
# Test clearing of SF bug #762891
class Foo:
result = None
def __delitem__(self, accessor):
self.result = accessor
g = Foo()
f = weakref.proxy(g)
del f[0]
self.assertEqual(f.result, 0)
def test_proxy_bool(self):
# Test clearing of SF bug #1170766
class List(list): pass
lyst = List()
self.assertEqual(bool(weakref.proxy(lyst)), bool(lyst))
    def test_proxy_iter(self):
        """The proxy must keep the referent alive for the duration of a
        forwarded __iter__ call, even if the call drops the last strong
        reference mid-flight."""
        # Test fails with a debug build of the interpreter
        # (see bpo-38395).
        obj = None
        class MyObj:
            def __iter__(self):
                nonlocal obj
                del obj
                return NotImplemented
        obj = MyObj()
        p = weakref.proxy(obj)
        with self.assertRaises(TypeError):
            # "blech" in p calls MyObj.__iter__ through the proxy,
            # without keeping a reference to the real object, so it
            # can be killed in the middle of the call
            "blech" in p
    def test_getweakrefcount(self):
        """getweakrefcount counts refs plus proxies, drops to 0 when they
        are deleted, and is 0 for non-weakrefable values like ints."""
        o = C()
        ref1 = weakref.ref(o)
        ref2 = weakref.ref(o, self.callback)
        self.assertEqual(weakref.getweakrefcount(o), 2,
                     "got wrong number of weak reference objects")
        proxy1 = weakref.proxy(o)
        proxy2 = weakref.proxy(o, self.callback)
        self.assertEqual(weakref.getweakrefcount(o), 4,
                     "got wrong number of weak reference objects")
        del ref1, ref2, proxy1, proxy2
        self.assertEqual(weakref.getweakrefcount(o), 0,
                     "weak reference objects not unlinked from"
                     " referent when discarded.")
        # assumes ints do not support weakrefs
        self.assertEqual(weakref.getweakrefcount(1), 0,
                     "got wrong number of weak reference objects for int")
    def test_getweakrefs(self):
        """getweakrefs returns only the still-alive refs (in either
        deletion order) and an empty list for non-weakrefable values."""
        o = C()
        ref1 = weakref.ref(o, self.callback)
        ref2 = weakref.ref(o, self.callback)
        del ref1
        self.assertEqual(weakref.getweakrefs(o), [ref2],
                     "list of refs does not match")
        o = C()
        ref1 = weakref.ref(o, self.callback)
        ref2 = weakref.ref(o, self.callback)
        del ref2
        self.assertEqual(weakref.getweakrefs(o), [ref1],
                     "list of refs does not match")
        del ref1
        self.assertEqual(weakref.getweakrefs(o), [],
                     "list of refs not cleared")
        # assumes ints do not support weakrefs
        self.assertEqual(weakref.getweakrefs(1), [],
                     "list of refs does not match for int")
def test_newstyle_number_ops(self):
class F(float):
pass
f = F(2.0)
p = weakref.proxy(f)
self.assertEqual(p + 1.0, 3.0)
self.assertEqual(1.0 + p, 3.0) # this used to SEGV
    def test_callbacks_protected(self):
        """A weakref callback firing while an exception is being raised
        must not clobber that exception (regression for SF bug #478534)."""
        class BogusError(Exception):
            pass
        data = {}
        def remove(k):
            del data[k]
        def encapsulate():
            # The lambda dies when encapsulate() unwinds, firing `remove`
            # while BogusError is propagating.
            f = lambda : ()
            data[weakref.ref(f, remove)] = None
            raise BogusError
        try:
            encapsulate()
        except BogusError:
            pass
        else:
            self.fail("exception not properly restored")
        # Run the identical check a second time.
        try:
            encapsulate()
        except BogusError:
            pass
        else:
            self.fail("exception not properly restored")
    def test_sf_bug_840829(self):
        """Regression: weakref callbacks triggering gc during dealloc must
        not cause double-deallocation (SF bug #840829)."""
        # "weakref callbacks and gc corrupt memory"
        # subtype_dealloc erroneously exposed a new-style instance
        # already in the process of getting deallocated to gc,
        # causing double-deallocation if the instance had a weakref
        # callback that triggered gc.
        # If the bug exists, there probably won't be an obvious symptom
        # in a release build.  In a debug build, a segfault will occur
        # when the second attempt to remove the instance from the "list
        # of all objects" occurs.
        import gc
        class C(object):
            pass
        c = C()
        wr = weakref.ref(c, lambda ignore: gc.collect())
        del c
        # There endeth the first part.  It gets worse.
        del wr
        c1 = C()
        c1.i = C()
        wr = weakref.ref(c1.i, lambda ignore: gc.collect())
        c2 = C()
        c2.c1 = c1
        del c1  # still alive because c2 points to it
        # Now when subtype_dealloc gets called on c2, it's not enough just
        # that c2 is immune from gc while the weakref callbacks associated
        # with c2 execute (there are none in this 2nd half of the test, btw).
        # subtype_dealloc goes on to call the base classes' deallocs too,
        # so any gc triggered by weakref callbacks associated with anything
        # torn down by a base class dealloc can also trigger double
        # deallocation of c2.
        del c2
    def test_callback_in_cycle_1(self):
        """Regression: a weakref callback living in cyclic trash must not
        run against a class already tp_clear'ed by gc (used to segfault)."""
        import gc
        class J(object):
            pass
        class II(object):
            def acallback(self, ignore):
                self.J
        I = II()
        I.J = J
        I.wr = weakref.ref(J, I.acallback)
        # Now J and II are each in a self-cycle (as all new-style class
        # objects are, since their __mro__ points back to them).  I holds
        # both a weak reference (I.wr) and a strong reference (I.J) to class
        # J.  I is also in a cycle (I.wr points to a weakref that references
        # I.acallback).  When we del these three, they all become trash, but
        # the cycles prevent any of them from getting cleaned up immediately.
        # Instead they have to wait for cyclic gc to deduce that they're
        # trash.
        #
        # gc used to call tp_clear on all of them, and the order in which
        # it does that is pretty accidental.  The exact order in which we
        # built up these things manages to provoke gc into running tp_clear
        # in just the right order (I last).  Calling tp_clear on II leaves
        # behind an insane class object (its __mro__ becomes NULL).  Calling
        # tp_clear on J breaks its self-cycle, but J doesn't get deleted
        # just then because of the strong reference from I.J.  Calling
        # tp_clear on I starts to clear I's __dict__, and just happens to
        # clear I.J first -- I.wr is still intact.  That removes the last
        # reference to J, which triggers the weakref callback.  The callback
        # tries to do "self.J", and instances of new-style classes look up
        # attributes ("J") in the class dict first.  The class (II) wants to
        # search II.__mro__, but that's NULL.  The result was a segfault in
        # a release build, and an assert failure in a debug build.
        del I, J, II
        gc.collect()
    def test_callback_in_cycle_2(self):
        """Variant of test_callback_in_cycle_1 with an old-style-like
        holder class; historically raised AttributeError instead of
        segfaulting."""
        import gc
        # This is just like test_callback_in_cycle_1, except that II is an
        # old-style class.  The symptom is different then:  an instance of an
        # old-style class looks in its own __dict__ first.  'J' happens to
        # get cleared from I.__dict__ before 'wr', and 'J' was never in II's
        # __dict__, so the attribute isn't found.  The difference is that
        # the old-style II doesn't have a NULL __mro__ (it doesn't have any
        # __mro__), so no segfault occurs.  Instead it got:
        #    test_callback_in_cycle_2 (__main__.ReferencesTestCase) ...
        #    Exception exceptions.AttributeError:
        #   "II instance has no attribute 'J'" in <bound method II.acallback
        #       of <?.II instance at 0x00B9B4B8>> ignored
        class J(object):
            pass
        class II:
            def acallback(self, ignore):
                self.J
        I = II()
        I.J = J
        I.wr = weakref.ref(J, I.acallback)
        del I, J, II
        gc.collect()
    def test_callback_in_cycle_3(self):
        """Regression: objects reachable only from the callback (not from
        the dying object) must survive until the callback runs."""
        import gc
        # This one broke the first patch that fixed the last two.  In this
        # case, the objects reachable from the callback aren't also reachable
        # from the object (c1) *triggering* the callback:  you can get to
        # c1 from c2, but not vice-versa.  The result was that c2's __dict__
        # got tp_clear'ed by the time the c2.cb callback got invoked.
        class C:
            def cb(self, ignore):
                self.me
                self.c1
                self.wr
        c1, c2 = C(), C()
        c2.me = c2
        c2.c1 = c1
        c2.wr = weakref.ref(c1, c2.cb)
        del c1, c2
        gc.collect()
    def test_callback_in_cycle_4(self):
        """Regression: the callback owner's *class* must also be protected
        from tp_clear while the callback can still run (used to segfault)."""
        import gc
        # Like test_callback_in_cycle_3, except c2 and c1 have different
        # classes.  c2's class (C) isn't reachable from c1 then, so protecting
        # objects reachable from the dying object (c1) isn't enough to stop
        # c2's class (C) from getting tp_clear'ed before c2.cb is invoked.
        # The result was a segfault (C.__mro__ was NULL when the callback
        # tried to look up self.me).
        class C(object):
            def cb(self, ignore):
                self.me
                self.c1
                self.wr
        class D:
            pass
        c1, c2 = D(), C()
        c2.me = c2
        c2.c1 = c1
        c2.wr = weakref.ref(c1, c2.cb)
        del c1, c2, C, D
        gc.collect()
    @support.requires_type_collecting
    def test_callback_in_cycle_resurrection(self):
        """Callbacks that are themselves cyclic trash must be suppressed
        by gc, so a callback cannot resurrect objects from a dead cycle;
        a non-trash callback (on C) must still fire."""
        import gc
        # Do something nasty in a weakref callback:  resurrect objects
        # from dead cycles.  For this to be attempted, the weakref and
        # its callback must also be part of the cyclic trash (else the
        # objects reachable via the callback couldn't be in cyclic trash
        # to begin with -- the callback would act like an external root).
        # But gc clears trash weakrefs with callbacks early now, which
        # disables the callbacks, so the callbacks shouldn't get called
        # at all (and so nothing actually gets resurrected).
        alist = []
        class C(object):
            def __init__(self, value):
                self.attribute = value
            def acallback(self, ignore):
                alist.append(self.c)
        c1, c2 = C(1), C(2)
        c1.c = c2
        c2.c = c1
        c1.wr = weakref.ref(c2, c1.acallback)
        c2.wr = weakref.ref(c1, c2.acallback)
        def C_went_away(ignore):
            alist.append("C went away")
        wr = weakref.ref(C, C_went_away)
        del c1, c2, C   # make them all trash
        self.assertEqual(alist, [])  # del isn't enough to reclaim anything
        gc.collect()
        # c1.wr and c2.wr were part of the cyclic trash, so should have
        # been cleared without their callbacks executing.  OTOH, the weakref
        # to C is bound to a function local (wr), and wasn't trash, so that
        # callback should have been invoked when C went away.
        self.assertEqual(alist, ["C went away"])
        # The remaining weakref should be dead now (its callback ran).
        self.assertEqual(wr(), None)
        del alist[:]
        gc.collect()
        self.assertEqual(alist, [])
    def test_callbacks_on_callback(self):
        """Weakref callbacks attached to other weakref callbacks: cyclic
        ones are suppressed, but an external one still fires."""
        import gc
        # Set up weakref callbacks *on* weakref callbacks.
        alist = []
        def safe_callback(ignore):
            alist.append("safe_callback called")
        class C(object):
            def cb(self, ignore):
                alist.append("cb called")
        c, d = C(), C()
        c.other = d
        d.other = c
        callback = c.cb
        c.wr = weakref.ref(d, callback)     # this won't trigger
        d.wr = weakref.ref(callback, d.cb)  # ditto
        external_wr = weakref.ref(callback, safe_callback)  # but this will
        self.assertIs(external_wr(), callback)
        # The weakrefs attached to c and d should get cleared, so that
        # C.cb is never called.  But external_wr isn't part of the cyclic
        # trash, and no cyclic trash is reachable from it, so safe_callback
        # should get invoked when the bound method object callback (c.cb)
        # -- which is itself a callback, and also part of the cyclic trash --
        # gets reclaimed at the end of gc.
        del callback, c, d, C
        self.assertEqual(alist, [])  # del isn't enough to clean up cycles
        gc.collect()
        self.assertEqual(alist, ["safe_callback called"])
        self.assertEqual(external_wr(), None)
        del alist[:]
        gc.collect()
        self.assertEqual(alist, [])
    def test_gc_during_ref_creation(self):
        """gc triggered while creating a weakref.ref must be safe."""
        self.check_gc_during_creation(weakref.ref)
    def test_gc_during_proxy_creation(self):
        """gc triggered while creating a weakref.proxy must be safe."""
        self.check_gc_during_creation(weakref.proxy)
    def check_gc_during_creation(self, makeref):
        """Helper: force very aggressive gc thresholds so collection can
        occur during weakref creation; original thresholds are restored."""
        thresholds = gc.get_threshold()
        gc.set_threshold(1, 1, 1)
        gc.collect()
        class A:
            pass
        def callback(*args):
            pass
        referenced = A()
        a = A()
        a.a = a
        a.wr = makeref(referenced)
        try:
            # now make sure the object and the ref get labeled as
            # cyclic trash:
            a = A()
            weakref.ref(referenced, callback)
        finally:
            gc.set_threshold(*thresholds)
    def test_ref_created_during_del(self):
        """Creating a weakref to self inside __del__ must not crash
        (Bug #1377858)."""
        # A weakref created in an object's __del__() would crash the
        # interpreter when the weakref was cleaned up since it would refer to
        # non-existent memory.  This test should not segfault the interpreter.
        class Target(object):
            def __del__(self):
                global ref_from_del
                ref_from_del = weakref.ref(self)
        w = Target()
    def test_init(self):
        """ref.__init__ with wrong arguments must raise TypeError without
        corrupting state (Issue 3634)."""
        # <weakref to class>.__init__() doesn't check errors correctly
        r = weakref.ref(Exception)
        self.assertRaises(TypeError, r.__init__, 0, 0, 0, 0, 0)
        # No exception should be raised here
        gc.collect()
    def test_classes(self):
        """Classes are weakrefable; the callback fires when a class is
        collected."""
        class A(object):
            pass
        l = []
        weakref.ref(int)
        a = weakref.ref(A, l.append)
        A = None
        gc.collect()
        self.assertEqual(a(), None)
        self.assertEqual(l, [a])
    def test_equality(self):
        """Alive weakrefs compare by referent equality; dead weakrefs
        compare by identity."""
        x = Object(1)
        y = Object(1)
        z = Object(2)
        a = weakref.ref(x)
        b = weakref.ref(y)
        c = weakref.ref(z)
        d = weakref.ref(x)
        # Note how we directly test the operators here, to stress both
        # __eq__ and __ne__.
        self.assertTrue(a == b)
        self.assertFalse(a != b)
        self.assertFalse(a == c)
        self.assertTrue(a != c)
        self.assertTrue(a == d)
        self.assertFalse(a != d)
        del x, y, z
        gc.collect()
        for r in a, b, c:
            # Sanity check
            self.assertIs(r(), None)
        # Dead weakrefs compare by identity: whether `a` and `d` are the
        # same weakref object is an implementation detail, since they pointed
        # to the same original object and didn't have a callback.
        # (see issue #16453).
        self.assertFalse(a == b)
        self.assertTrue(a != b)
        self.assertFalse(a == c)
        self.assertTrue(a != c)
        self.assertEqual(a == d, a is d)
        self.assertEqual(a != d, a is not d)
    def test_ordering(self):
        """weakrefs cannot be ordered (alive or dead), even if the
        underlying objects can."""
        ops = [operator.lt, operator.gt, operator.le, operator.ge]
        x = Object(1)
        y = Object(1)
        a = weakref.ref(x)
        b = weakref.ref(y)
        for op in ops:
            self.assertRaises(TypeError, op, a, b)
        # Same when dead.
        del x, y
        gc.collect()
        for op in ops:
            self.assertRaises(TypeError, op, a, b)
    def test_hashing(self):
        """Alive weakrefs hash like their referent; dead weakrefs keep a
        previously-computed hash, and are otherwise unhashable."""
        x = Object(42)
        y = Object(42)
        a = weakref.ref(x)
        b = weakref.ref(y)
        self.assertEqual(hash(a), hash(42))
        del x, y
        gc.collect()
        # Dead weakrefs:
        # - retain their hash if they were hashed when alive;
        # - otherwise, cannot be hashed.
        self.assertEqual(hash(a), hash(42))
        self.assertRaises(TypeError, hash, b)
    def test_trashcan_16602(self):
        """Regression: the trashcan mechanism must not expose a dead
        weakref target during long deallocation chains (Issue #16602)."""
        # Issue #16602: when a weakref's target was part of a long
        # deallocation chain, the trashcan mechanism could delay clearing
        # of the weakref and make the target object visible from outside
        # code even though its refcount had dropped to 0.  A crash ensued.
        class C:
            def __init__(self, parent):
                if not parent:
                    return
                wself = weakref.ref(self)
                def cb(wparent):
                    o = wself()
                self.wparent = weakref.ref(parent, cb)
        d = weakref.WeakKeyDictionary()
        root = c = C(None)
        # Build a 100-deep chain so deallocation goes through the trashcan.
        for n in range(100):
            d[c] = c = C(c)
        del root
        gc.collect()
def test_callback_attribute(self):
x = Object(1)
callback = lambda ref: None
ref1 = weakref.ref(x, callback)
self.assertIs(ref1.__callback__, callback)
ref2 = weakref.ref(x)
self.assertIsNone(ref2.__callback__)
    def test_callback_attribute_after_deletion(self):
        """__callback__ becomes None once the referent is collected."""
        x = Object(1)
        ref = weakref.ref(x, self.callback)
        self.assertIsNotNone(ref.__callback__)
        del x
        support.gc_collect()
        self.assertIsNone(ref.__callback__)
def test_set_callback_attribute(self):
x = Object(1)
callback = lambda ref: None
ref1 = weakref.ref(x, callback)
with self.assertRaises(AttributeError):
ref1.__callback__ = lambda ref: None
    def test_callback_gcs(self):
        """A callback that triggers gc while the referent (which has a
        __del__) dies must be safe."""
        class ObjectWithDel(Object):
            def __del__(self): pass
        x = ObjectWithDel(1)
        ref1 = weakref.ref(x, lambda ref: support.gc_collect())
        del x
        support.gc_collect()
class SubclassableWeakrefTestCase(TestBase):
    """Tests for subclassing weakref.ref."""
    def test_subclass_refs(self):
        """A ref subclass can carry extra state and override __call__."""
        class MyRef(weakref.ref):
            def __init__(self, ob, callback=None, value=42):
                self.value = value
                super().__init__(ob, callback)
            def __call__(self):
                self.called = True
                return super().__call__()
        o = Object("foo")
        mr = MyRef(o, value=24)
        self.assertIs(mr(), o)
        self.assertTrue(mr.called)
        self.assertEqual(mr.value, 24)
        del o
        self.assertIsNone(mr())
        self.assertTrue(mr.called)
    def test_subclass_refs_dont_replace_standard_refs(self):
        """Subclass refs are never shared with plain refs to the same
        object; getweakrefs lists all of them."""
        class MyRef(weakref.ref):
            pass
        o = Object(42)
        r1 = MyRef(o)
        r2 = weakref.ref(o)
        self.assertIsNot(r1, r2)
        self.assertEqual(weakref.getweakrefs(o), [r2, r1])
        self.assertEqual(weakref.getweakrefcount(o), 2)
        r3 = MyRef(o)
        self.assertEqual(weakref.getweakrefcount(o), 3)
        refs = weakref.getweakrefs(o)
        self.assertEqual(len(refs), 3)
        # The plain (no-callback) ref comes first in the list.
        self.assertIs(r2, refs[0])
        self.assertIn(r1, refs[1:])
        self.assertIn(r3, refs[1:])
    def test_subclass_refs_dont_conflate_callbacks(self):
        """Two subclass refs with different callbacks stay distinct."""
        class MyRef(weakref.ref):
            pass
        o = Object(42)
        r1 = MyRef(o, id)
        r2 = MyRef(o, str)
        self.assertIsNot(r1, r2)
        refs = weakref.getweakrefs(o)
        self.assertIn(r1, refs)
        self.assertIn(r2, refs)
    def test_subclass_refs_with_slots(self):
        """A ref subclass may define __slots__ (no instance __dict__)."""
        class MyRef(weakref.ref):
            __slots__ = "slot1", "slot2"
            def __new__(type, ob, callback, slot1, slot2):
                return weakref.ref.__new__(type, ob, callback)
            def __init__(self, ob, callback, slot1, slot2):
                self.slot1 = slot1
                self.slot2 = slot2
            def meth(self):
                return self.slot1 + self.slot2
        o = Object(42)
        r = MyRef(o, None, "abc", "def")
        self.assertEqual(r.slot1, "abc")
        self.assertEqual(r.slot2, "def")
        self.assertEqual(r.meth(), "abcdef")
        self.assertFalse(hasattr(r, "__dict__"))
    def test_subclass_refs_with_cycle(self):
        """Confirm https://bugs.python.org/issue3100 is fixed."""
        # An instance of a weakref subclass can have attributes.
        # If such a weakref holds the only strong reference to the object,
        # deleting the weakref will delete the object. In this case,
        # the callback must not be called, because the ref object is
        # being deleted.
        class MyRef(weakref.ref):
            pass
        # Use a local callback, for "regrtest -R::"
        # to detect refcounting problems
        def callback(w):
            self.cbcalled += 1
        o = C()
        r1 = MyRef(o, callback)
        r1.o = o
        del o
        del r1 # Used to crash here
        self.assertEqual(self.cbcalled, 0)
        # Same test, with two weakrefs to the same object
        # (since code paths are different)
        o = C()
        r1 = MyRef(o, callback)
        r2 = MyRef(o, callback)
        r1.r = r2
        r2.o = o
        del o
        del r2
        del r1 # Used to crash here
        self.assertEqual(self.cbcalled, 0)
class WeakMethodTestCase(unittest.TestCase):
    """Tests for weakref.WeakMethod (weak references to bound methods)."""
    def _subclass(self):
        """Return an Object subclass overriding `some_method`."""
        class C(Object):
            def some_method(self):
                return 6
        return C
    def test_alive(self):
        """A WeakMethod on a live object rebuilds an equivalent bound
        method on each call."""
        o = Object(1)
        r = weakref.WeakMethod(o.some_method)
        self.assertIsInstance(r, weakref.ReferenceType)
        self.assertIsInstance(r(), type(o.some_method))
        self.assertIs(r().__self__, o)
        self.assertIs(r().__func__, o.some_method.__func__)
        self.assertEqual(r()(), 4)
    def test_object_dead(self):
        """The WeakMethod dies when the instance is collected."""
        o = Object(1)
        r = weakref.WeakMethod(o.some_method)
        del o
        gc.collect()
        self.assertIs(r(), None)
    def test_method_dead(self):
        """The WeakMethod dies when the underlying function is removed
        from the class."""
        C = self._subclass()
        o = C(1)
        r = weakref.WeakMethod(o.some_method)
        del C.some_method
        gc.collect()
        self.assertIs(r(), None)
    def test_callback_when_object_dead(self):
        # Test callback behaviour when object dies first.
        C = self._subclass()
        calls = []
        def cb(arg):
            calls.append(arg)
        o = C(1)
        r = weakref.WeakMethod(o.some_method, cb)
        del o
        gc.collect()
        self.assertEqual(calls, [r])
        # Callback is only called once.
        C.some_method = Object.some_method
        gc.collect()
        self.assertEqual(calls, [r])
    def test_callback_when_method_dead(self):
        # Test callback behaviour when method dies first.
        C = self._subclass()
        calls = []
        def cb(arg):
            calls.append(arg)
        o = C(1)
        r = weakref.WeakMethod(o.some_method, cb)
        del C.some_method
        gc.collect()
        self.assertEqual(calls, [r])
        # Callback is only called once.
        del o
        gc.collect()
        self.assertEqual(calls, [r])
    @support.cpython_only
    def test_no_cycles(self):
        # A WeakMethod doesn't create any reference cycle to itself.
        o = Object(1)
        def cb(_):
            pass
        r = weakref.WeakMethod(o.some_method, cb)
        wr = weakref.ref(r)
        del r
        self.assertIs(wr(), None)
    def test_equality(self):
        """Alive WeakMethods compare by (object, method); dead ones by
        identity."""
        def _eq(a, b):
            self.assertTrue(a == b)
            self.assertFalse(a != b)
        def _ne(a, b):
            self.assertTrue(a != b)
            self.assertFalse(a == b)
        x = Object(1)
        y = Object(1)
        a = weakref.WeakMethod(x.some_method)
        b = weakref.WeakMethod(y.some_method)
        c = weakref.WeakMethod(x.other_method)
        d = weakref.WeakMethod(y.other_method)
        # Objects equal, same method
        _eq(a, b)
        _eq(c, d)
        # Objects equal, different method
        _ne(a, c)
        _ne(a, d)
        _ne(b, c)
        _ne(b, d)
        # Objects unequal, same or different method
        z = Object(2)
        e = weakref.WeakMethod(z.some_method)
        f = weakref.WeakMethod(z.other_method)
        _ne(a, e)
        _ne(a, f)
        _ne(b, e)
        _ne(b, f)
        del x, y, z
        gc.collect()
        # Dead WeakMethods compare by identity
        refs = a, b, c, d, e, f
        for q in refs:
            for r in refs:
                self.assertEqual(q == r, q is r)
                self.assertEqual(q != r, q is not r)
    def test_hashing(self):
        # Alive WeakMethods are hashable if the underlying object is
        # hashable.
        x = Object(1)
        y = Object(1)
        a = weakref.WeakMethod(x.some_method)
        b = weakref.WeakMethod(y.some_method)
        c = weakref.WeakMethod(y.other_method)
        # Since WeakMethod objects are equal, the hashes should be equal.
        self.assertEqual(hash(a), hash(b))
        ha = hash(a)
        # Dead WeakMethods retain their old hash value
        del x, y
        gc.collect()
        self.assertEqual(hash(a), ha)
        self.assertEqual(hash(b), ha)
        # If it wasn't hashed when alive, a dead WeakMethod cannot be hashed.
        self.assertRaises(TypeError, hash, c)
class MappingTestCase(TestBase):
COUNT = 10
    def check_len_cycles(self, dict_type, cons):
        """Helper: len() of a weak dict must stay sane while a live
        iterator defers removal of collected (cyclic) entries."""
        N = 20
        items = [RefCycle() for i in range(N)]
        dct = dict_type(cons(o) for o in items)
        # Keep an iterator alive
        it = dct.items()
        try:
            next(it)
        except StopIteration:
            pass
        del items
        gc.collect()
        n1 = len(dct)
        del it
        gc.collect()
        n2 = len(dct)
        # one item may be kept alive inside the iterator
        self.assertIn(n1, (0, 1))
        self.assertEqual(n2, 0)
    def test_weak_keyed_len_cycles(self):
        """len() consistency for WeakKeyDictionary with cyclic keys."""
        self.check_len_cycles(weakref.WeakKeyDictionary, lambda k: (k, 1))
    def test_weak_valued_len_cycles(self):
        """len() consistency for WeakValueDictionary with cyclic values."""
        self.check_len_cycles(weakref.WeakValueDictionary, lambda k: (1, k))
    def check_len_race(self, dict_type, cons):
        """Helper: len() must stay within sane bounds when gc can collect
        entries at (almost) any point; sweeps gc thresholds 1..99."""
        # Extended sanity checks for len() in the face of cyclic collection
        self.addCleanup(gc.set_threshold, *gc.get_threshold())
        for th in range(1, 100):
            N = 20
            gc.collect(0)
            gc.set_threshold(th, th, th)
            items = [RefCycle() for i in range(N)]
            dct = dict_type(cons(o) for o in items)
            del items
            # All items will be collected at next garbage collection pass
            it = dct.items()
            try:
                next(it)
            except StopIteration:
                pass
            n1 = len(dct)
            del it
            n2 = len(dct)
            self.assertGreaterEqual(n1, 0)
            self.assertLessEqual(n1, N)
            self.assertGreaterEqual(n2, 0)
            self.assertLessEqual(n2, n1)
    def test_weak_keyed_len_race(self):
        """len() race checks for WeakKeyDictionary."""
        self.check_len_race(weakref.WeakKeyDictionary, lambda k: (k, 1))
    def test_weak_valued_len_race(self):
        """len() race checks for WeakValueDictionary."""
        self.check_len_race(weakref.WeakValueDictionary, lambda k: (1, k))
    def test_weak_values(self):
        """WeakValueDictionary basics: lookup, copy, shrinking as values
        die, and KeyError after a value is collected."""
        #
        #  This exercises d.copy(), d.items(), d[], del d[], len(d).
        #
        dict, objects = self.make_weak_valued_dict()
        for o in objects:
            self.assertEqual(weakref.getweakrefcount(o), 1)
            self.assertIs(o, dict[o.arg],
                     "wrong object returned by weak dict!")
        items1 = list(dict.items())
        items2 = list(dict.copy().items())
        items1.sort()
        items2.sort()
        self.assertEqual(items1, items2,
                     "cloning of weak-valued dictionary did not work!")
        del items1, items2
        self.assertEqual(len(dict), self.COUNT)
        del objects[0]
        self.assertEqual(len(dict), self.COUNT - 1,
                     "deleting object did not cause dictionary update")
        del objects, o
        self.assertEqual(len(dict), 0,
                     "deleting the values did not clear the dictionary")
        # regression on SF bug #447152:
        dict = weakref.WeakValueDictionary()
        self.assertRaises(KeyError, dict.__getitem__, 1)
        dict[2] = C()
        # The C() above is unreferenced, so entry 2 is gone immediately.
        self.assertRaises(KeyError, dict.__getitem__, 2)
    def test_weak_keys(self):
        """WeakKeyDictionary basics: lookup, copy, shrinking as keys die,
        and membership tests."""
        #
        #  This exercises d.copy(), d.items(), d[] = v, d[], del d[],
        #  len(d), k in d.
        #
        dict, objects = self.make_weak_keyed_dict()
        for o in objects:
            self.assertEqual(weakref.getweakrefcount(o), 1,
                         "wrong number of weak references to %r!" % o)
            self.assertIs(o.arg, dict[o],
                     "wrong object returned by weak dict!")
        items1 = dict.items()
        items2 = dict.copy().items()
        self.assertEqual(set(items1), set(items2),
                     "cloning of weak-keyed dictionary did not work!")
        del items1, items2
        self.assertEqual(len(dict), self.COUNT)
        del objects[0]
        self.assertEqual(len(dict), (self.COUNT - 1),
                     "deleting object did not cause dictionary update")
        del objects, o
        self.assertEqual(len(dict), 0,
                     "deleting the keys did not clear the dictionary")
        o = Object(42)
        dict[o] = "What is the meaning of the universe?"
        self.assertIn(o, dict)
        self.assertNotIn(34, dict)
    def test_weak_keyed_iters(self):
        """keyrefs() must cover every key exactly once; two passes check
        both a materialized list and a fresh iteration."""
        dict, objects = self.make_weak_keyed_dict()
        self.check_iters(dict)
        # Test keyrefs()
        refs = dict.keyrefs()
        self.assertEqual(len(refs), len(objects))
        objects2 = list(objects)
        for wr in refs:
            ob = wr()
            self.assertIn(ob, dict)
            self.assertIn(ob, dict)
            self.assertEqual(ob.arg, dict[ob])
            objects2.remove(ob)
        self.assertEqual(len(objects2), 0)
        # Test iterkeyrefs()
        objects2 = list(objects)
        self.assertEqual(len(list(dict.keyrefs())), len(objects))
        for wr in dict.keyrefs():
            ob = wr()
            self.assertIn(ob, dict)
            self.assertIn(ob, dict)
            self.assertEqual(ob.arg, dict[ob])
            objects2.remove(ob)
        self.assertEqual(len(objects2), 0)
    def test_weak_valued_iters(self):
        """valuerefs() and itervaluerefs() must cover every value exactly
        once."""
        dict, objects = self.make_weak_valued_dict()
        self.check_iters(dict)
        # Test valuerefs()
        refs = dict.valuerefs()
        self.assertEqual(len(refs), len(objects))
        objects2 = list(objects)
        for wr in refs:
            ob = wr()
            self.assertEqual(ob, dict[ob.arg])
            self.assertEqual(ob.arg, dict[ob.arg].arg)
            objects2.remove(ob)
        self.assertEqual(len(objects2), 0)
        # Test itervaluerefs()
        objects2 = list(objects)
        self.assertEqual(len(list(dict.itervaluerefs())), len(objects))
        for wr in dict.itervaluerefs():
            ob = wr()
            self.assertEqual(ob, dict[ob.arg])
            self.assertEqual(ob.arg, dict[ob.arg].arg)
            objects2.remove(ob)
        self.assertEqual(len(objects2), 0)
    def check_iters(self, dict):
        """Helper: items()/keys()/values() and plain iteration must each
        visit every entry exactly once."""
        # item iterator:
        items = list(dict.items())
        for item in dict.items():
            items.remove(item)
        self.assertFalse(items, "items() did not touch all items")
        # key iterator, via __iter__():
        keys = list(dict.keys())
        for k in dict:
            keys.remove(k)
        self.assertFalse(keys, "__iter__() did not touch all keys")
        # key iterator, via iterkeys():
        keys = list(dict.keys())
        for k in dict.keys():
            keys.remove(k)
        self.assertFalse(keys, "iterkeys() did not touch all keys")
        # value iterator:
        values = list(dict.values())
        for v in dict.values():
            values.remove(v)
        self.assertFalse(values,
                     "itervalues() did not touch all values")
    def check_weak_destroy_while_iterating(self, dict, objects, iter_name):
        """Helper: collecting a referent while an iterator is alive must
        not crash; the removal is committed once the iterator dies."""
        n = len(dict)
        it = iter(getattr(dict, iter_name)())
        next(it)             # Trigger internal iteration
        # Destroy an object
        del objects[-1]
        gc.collect()         # just in case
        # We have removed either the first consumed object, or another one
        self.assertIn(len(list(it)), [len(objects), len(objects) - 1])
        del it
        # The removal has been committed
        self.assertEqual(len(dict), n - 1)
    def check_weak_destroy_and_mutate_while_iterating(self, dict, testcontext):
        """Helper: explicit mutation (del, pop, setitem, update, clear)
        must work while removals are deferred by a live iterator."""
        # Check that we can explicitly mutate the weak dict without
        # interfering with delayed removal.
        # `testcontext` should create an iterator, destroy one of the
        # weakref'ed objects and then return a new key/value pair corresponding
        # to the destroyed object.
        with testcontext() as (k, v):
            self.assertNotIn(k, dict)
        with testcontext() as (k, v):
            self.assertRaises(KeyError, dict.__delitem__, k)
        self.assertNotIn(k, dict)
        with testcontext() as (k, v):
            self.assertRaises(KeyError, dict.pop, k)
        self.assertNotIn(k, dict)
        with testcontext() as (k, v):
            dict[k] = v
        self.assertEqual(dict[k], v)
        ddict = copy.copy(dict)
        with testcontext() as (k, v):
            dict.update(ddict)
        self.assertEqual(dict, ddict)
        with testcontext() as (k, v):
            dict.clear()
        self.assertEqual(len(dict), 0)
    def check_weak_del_and_len_while_iterating(self, dict, testcontext):
        """Helper: len() must track explicit removals (pop, del, popitem,
        clear) correctly while implicit removals are deferred."""
        # Check that len() works when both iterating and removing keys
        # explicitly through various means (.pop(), .clear()...), while
        # implicit mutation is deferred because an iterator is alive.
        # (each call to testcontext() should schedule one item for removal
        #  for this test to work properly)
        o = Object(123456)
        with testcontext():
            n = len(dict)
            # Since underlying dict is ordered, first item is popped
            dict.pop(next(dict.keys()))
            self.assertEqual(len(dict), n - 1)
            dict[o] = o
            self.assertEqual(len(dict), n)
        # last item in objects is removed from dict in context shutdown
        with testcontext():
            self.assertEqual(len(dict), n - 1)
            # Then, (o, o) is popped
            dict.popitem()
            self.assertEqual(len(dict), n - 2)
        with testcontext():
            self.assertEqual(len(dict), n - 3)
            del dict[next(dict.keys())]
            self.assertEqual(len(dict), n - 4)
        with testcontext():
            self.assertEqual(len(dict), n - 5)
            dict.popitem()
            self.assertEqual(len(dict), n - 6)
        with testcontext():
            dict.clear()
            self.assertEqual(len(dict), 0)
        self.assertEqual(len(dict), 0)
    def test_weak_keys_destroy_while_iterating(self):
        """Iterators over a WeakKeyDictionary must survive implicit key
        removal (Issue #7105) and mixed explicit/implicit removal
        (Issue #21173)."""
        # Issue #7105: iterators shouldn't crash when a key is implicitly removed
        dict, objects = self.make_weak_keyed_dict()
        self.check_weak_destroy_while_iterating(dict, objects, 'keys')
        self.check_weak_destroy_while_iterating(dict, objects, 'items')
        self.check_weak_destroy_while_iterating(dict, objects, 'values')
        self.check_weak_destroy_while_iterating(dict, objects, 'keyrefs')
        dict, objects = self.make_weak_keyed_dict()
        @contextlib.contextmanager
        def testcontext():
            try:
                it = iter(dict.items())
                next(it)
                # Schedule a key/value for removal and recreate it
                v = objects.pop().arg
                gc.collect()      # just in case
                yield Object(v), v
            finally:
                it = None           # should commit all removals
                gc.collect()
        self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
        # Issue #21173: len() fragile when keys are both implicitly and
        # explicitly removed.
        dict, objects = self.make_weak_keyed_dict()
        self.check_weak_del_and_len_while_iterating(dict, testcontext)
def test_weak_values_destroy_while_iterating(self):
    # Issue #7105: iterators shouldn't crash when a key is implicitly removed
    dict, objects = self.make_weak_valued_dict()
    self.check_weak_destroy_while_iterating(dict, objects, 'keys')
    self.check_weak_destroy_while_iterating(dict, objects, 'items')
    self.check_weak_destroy_while_iterating(dict, objects, 'values')
    self.check_weak_destroy_while_iterating(dict, objects, 'itervaluerefs')
    self.check_weak_destroy_while_iterating(dict, objects, 'valuerefs')
    dict, objects = self.make_weak_valued_dict()
    # Each use of this context drops the last strong reference to one
    # value while an items() iterator is alive, then yields a replacement
    # value (new Object with the same arg) keyed under the same key.
    @contextlib.contextmanager
    def testcontext():
        try:
            it = iter(dict.items())
            next(it)
            # Schedule a key/value for removal and recreate it
            k = objects.pop().arg
            gc.collect()      # just in case
            yield k, Object(k)
        finally:
            it = None           # should commit all removals
            gc.collect()
    self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
    dict, objects = self.make_weak_valued_dict()
    self.check_weak_del_and_len_while_iterating(dict, testcontext)
def test_make_weak_keyed_dict_from_dict(self):
    """A plain dict passed to the constructor seeds the weak-keyed dict."""
    key = Object(3)
    wkd = weakref.WeakKeyDictionary({key: 364})
    self.assertEqual(wkd[key], 364)
def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
    """Constructing from another WeakKeyDictionary preserves its entries."""
    key = Object(3)
    original = weakref.WeakKeyDictionary({key: 364})
    clone = weakref.WeakKeyDictionary(original)  # must not disturb `original`
    self.assertEqual(original[key], 364)
def make_weak_keyed_dict(self):
    """Build a WeakKeyDictionary mapping COUNT Objects to their args.

    Returns the dict plus the list of objects keeping the keys alive.
    """
    objects = [Object(i) for i in range(self.COUNT)]
    weak = weakref.WeakKeyDictionary()
    for obj in objects:
        weak[obj] = obj.arg
    return weak, objects
def test_make_weak_valued_dict_from_dict(self):
    """A plain dict passed to the constructor seeds the weak-valued dict."""
    value = Object(3)
    wvd = weakref.WeakValueDictionary({364: value})
    self.assertEqual(wvd[364], value)
def test_make_weak_valued_dict_from_weak_valued_dict(self):
    """Constructing from another WeakValueDictionary preserves its entries."""
    value = Object(3)
    original = weakref.WeakValueDictionary({364: value})
    clone = weakref.WeakValueDictionary(original)  # must not disturb `original`
    self.assertEqual(original[364], value)
def test_make_weak_valued_dict_misc(self):
    """Constructor error handling and keyword-argument corner cases."""
    # Wrong argument counts/types must raise TypeError.
    self.assertRaises(TypeError, weakref.WeakValueDictionary.__init__)
    self.assertRaises(TypeError, weakref.WeakValueDictionary, {}, {})
    self.assertRaises(TypeError, weakref.WeakValueDictionary, (), ())
    # Keyword names that could shadow the constructor's own parameters
    # must still be treated as ordinary dictionary keys.
    obj = Object(3)
    for keyword in ('self', 'dict', 'other', 'iterable'):
        wvd = weakref.WeakValueDictionary(**{keyword: obj})
        self.assertEqual(list(wvd.keys()), [keyword])
        self.assertEqual(wvd[keyword], obj)
def make_weak_valued_dict(self):
    """Build a WeakValueDictionary mapping each Object's arg to the Object.

    Returns the dict plus the list of objects keeping the values alive.
    """
    objects = [Object(i) for i in range(self.COUNT)]
    weak = weakref.WeakValueDictionary()
    for obj in objects:
        weak[obj.arg] = obj
    return weak, objects
def check_popitem(self, klass, key1, value1, key2, value2):
    """Exercise popitem() on a two-entry weak dict of the given class.

    popitem() order is unspecified, so either pairing is accepted as
    long as each popped key comes back with its own value.
    """
    weakdict = klass()
    weakdict[key1] = value1
    weakdict[key2] = value2
    self.assertEqual(len(weakdict), 2)
    for remaining in (1, 0):
        key, value = weakdict.popitem()
        self.assertEqual(len(weakdict), remaining)
        self.assertIs(value, value1 if key is key1 else value2)
def test_weak_valued_dict_popitem(self):
    """popitem() on a WeakValueDictionary with string keys."""
    self.check_popitem(weakref.WeakValueDictionary, "key1", C(), "key2", C())
def test_weak_keyed_dict_popitem(self):
    """popitem() on a WeakKeyDictionary with string values."""
    self.check_popitem(weakref.WeakKeyDictionary, C(), "value 1", C(), "value 2")
def check_setdefault(self, klass, key, value1, value2):
    """setdefault() must insert on a missing key and be a no-op after."""
    self.assertIsNot(value1, value2,
                     "invalid test"
                     " -- value parameters must be distinct objects")
    weakdict = klass()
    # First call: key absent, so value1 is stored and returned.
    returned = weakdict.setdefault(key, value1)
    self.assertIs(returned, value1)
    self.assertIn(key, weakdict)
    self.assertIs(weakdict.get(key), value1)
    self.assertIs(weakdict[key], value1)
    # Second call: key present, so value2 is ignored and value1 returned.
    returned = weakdict.setdefault(key, value2)
    self.assertIs(returned, value1)
    self.assertIn(key, weakdict)
    self.assertIs(weakdict.get(key), value1)
    self.assertIs(weakdict[key], value1)
def test_weak_valued_dict_setdefault(self):
    """setdefault() semantics for WeakValueDictionary."""
    self.check_setdefault(weakref.WeakValueDictionary, "key", C(), C())
def test_weak_keyed_dict_setdefault(self):
    """setdefault() semantics for WeakKeyDictionary."""
    self.check_setdefault(weakref.WeakKeyDictionary, C(), "value 1", "value 2")
def check_update(self, klass, dict):
    #
    # This exercises d.update(), len(d), d.keys(), k in d,
    # d.get(), d[].
    #
    weakdict = klass()
    weakdict.update(dict)
    self.assertEqual(len(weakdict), len(dict))
    # Every key in the weak dict must have come from the source mapping,
    # with an identical value...
    for key in weakdict.keys():
        self.assertIn(key, dict, "mysterious new key appeared in weak dict")
        value = dict.get(key)
        self.assertIs(value, weakdict[key])
        self.assertIs(value, weakdict.get(key))
    # ...and every source key must have survived with the same value.
    for key in dict.keys():
        self.assertIn(key, weakdict, "original key disappeared in weak dict")
        value = dict[key]
        self.assertIs(value, weakdict[key])
        self.assertIs(value, weakdict.get(key))
def test_weak_valued_dict_update(self):
    """update() on WeakValueDictionary, plus error and keyword cases."""
    self.check_update(weakref.WeakValueDictionary,
                      {1: C(), 'a': C(), C(): C()})
    # Wrong argument counts/types must raise TypeError.
    self.assertRaises(TypeError, weakref.WeakValueDictionary.update)
    d = weakref.WeakValueDictionary()
    self.assertRaises(TypeError, d.update, {}, {})
    self.assertRaises(TypeError, d.update, (), ())
    self.assertEqual(list(d.keys()), [])
    # Keyword names that could shadow update()'s own parameters must
    # still be treated as ordinary dictionary keys.
    obj = Object(3)
    for keyword in ('self', 'dict', 'other', 'iterable'):
        d = weakref.WeakValueDictionary()
        d.update(**{keyword: obj})
        self.assertEqual(list(d.keys()), [keyword])
        self.assertEqual(d[keyword], obj)
def test_weak_keyed_dict_update(self):
    """update() on a WeakKeyDictionary seeded from a plain dict."""
    self.check_update(weakref.WeakKeyDictionary, {C(): 1, C(): 2, C(): 3})
def test_weak_keyed_delitem(self):
    """del removes exactly the named key from a WeakKeyDictionary."""
    d = weakref.WeakKeyDictionary()
    first, second = Object('1'), Object('2')
    d[first] = 'something'
    d[second] = 'something'
    self.assertEqual(len(d), 2)
    del d[first]
    self.assertEqual(len(d), 1)
    self.assertEqual(list(d.keys()), [second])
def test_weak_valued_delitem(self):
    """del removes exactly the named key from a WeakValueDictionary."""
    d = weakref.WeakValueDictionary()
    first, second = Object('1'), Object('2')
    d['something'] = first
    d['something else'] = second
    self.assertEqual(len(d), 2)
    del d['something']
    self.assertEqual(len(d), 1)
    self.assertEqual(list(d.items()), [('something else', second)])
def test_weak_keyed_bad_delitem(self):
    """Bad deletions raise KeyError/TypeError instead of passing silently."""
    d = weakref.WeakKeyDictionary()
    missing = Object('1')
    # An attempt to delete an object that isn't there should raise
    # KeyError.  It didn't before 2.3.
    self.assertRaises(KeyError, d.__delitem__, missing)
    self.assertRaises(KeyError, d.__getitem__, missing)
    # If a key isn't of a weakly referencable type, __getitem__ and
    # __setitem__ raise TypeError.  __delitem__ should too.
    self.assertRaises(TypeError, d.__delitem__, 13)
    self.assertRaises(TypeError, d.__getitem__, 13)
    self.assertRaises(TypeError, d.__setitem__, 13, 13)
def test_weak_keyed_cascading_deletes(self):
    # SF bug 742860.  For some reason, before 2.3 __delitem__ iterated
    # over the keys via self.data.iterkeys().  If things vanished from
    # the dict during this (or got added), that caused a RuntimeError.
    d = weakref.WeakKeyDictionary()
    mutate = False

    class C(object):
        def __init__(self, i):
            self.value = i
        def __hash__(self):
            return hash(self.value)
        def __eq__(self, other):
            # When `mutate` is on, comparing keys has the side effect of
            # dropping a strong reference, so a weak dict entry can die
            # in the middle of a lookup.
            if mutate:
                # Side effect that mutates the dict, by removing the
                # last strong reference to a key.
                del objs[-1]
            return self.value == other.value

    objs = [C(i) for i in range(4)]
    for o in objs:
        d[o] = o.value
    del o   # now the only strong references to keys are in objs
    # Find the order in which iterkeys sees the keys.
    objs = list(d.keys())
    # Reverse it, so that the iteration implementation of __delitem__
    # has to keep looping to find the first object we delete.
    objs.reverse()

    # Turn on mutation in C.__eq__.  The first time through the loop,
    # under the iterkeys() business the first comparison will delete
    # the last item iterkeys() would see, and that causes a
    #     RuntimeError: dictionary changed size during iteration
    # when the iterkeys() loop goes around to try comparing the next
    # key.  After this was fixed, it just deletes the last object *our*
    # "for o in obj" loop would have gotten to.
    mutate = True
    count = 0
    for o in objs:
        count += 1
        del d[o]

    self.assertEqual(len(d), 0)
    self.assertEqual(count, 2)
def test_make_weak_valued_dict_repr(self):
dict = weakref.WeakValueDictionary()
self.assertRegex(repr(dict), '<WeakValueDictionary at 0x.*>')
def test_make_weak_keyed_dict_repr(self):
dict = weakref.WeakKeyDictionary()
self.assertRegex(repr(dict), '<WeakKeyDictionary at 0x.*>')
def test_threaded_weak_valued_setdefault(self):
    # Race setdefault() against a background collecting thread: even if
    # the previous entry dies concurrently, the value returned must be a
    # live RefCycle, never None (each RefCycle is only kept alive by x).
    d = weakref.WeakValueDictionary()
    with collect_in_thread():
        for i in range(100000):
            x = d.setdefault(10, RefCycle())
            self.assertIsNot(x, None)  # we never put None in there!
            del x
def test_threaded_weak_valued_pop(self):
    # Race pop() against a background collecting thread: pop() must
    # return either the live value or the supplied default (10),
    # never None.
    d = weakref.WeakValueDictionary()
    with collect_in_thread():
        for i in range(100000):
            d[10] = RefCycle()
            x = d.pop(10, 10)
            self.assertIsNot(x, None)  # we never put None in there!
def test_threaded_weak_valued_consistency(self):
    # Issue #28427: old keys should not remove new values from
    # WeakValueDictionary when collecting from another thread.
    d = weakref.WeakValueDictionary()
    with collect_in_thread():
        for i in range(200000):
            o = RefCycle()
            d[10] = o
            # o is still alive, so the dict can't be empty
            self.assertEqual(len(d), 1)
            o = None  # lose ref
def check_threaded_weak_dict_copy(self, type_, deepcopy):
    # Copy a large weak dict in one thread while another thread drops
    # the strong references keeping its entries alive; the copy must
    # not raise (issue #35615).
    # `type_` should be either WeakKeyDictionary or WeakValueDictionary.
    # `deepcopy` should be either True or False.
    exc = []

    class DummyKey:
        def __init__(self, ctr):
            self.ctr = ctr

    class DummyValue:
        def __init__(self, ctr):
            self.ctr = ctr

    def dict_copy(d, exc):
        # Runs in a worker thread; any exception is reported via `exc`.
        try:
            if deepcopy is True:
                _ = copy.deepcopy(d)
            else:
                _ = d.copy()
        except Exception as ex:
            exc.append(ex)

    def pop_and_collect(lst):
        # Drop strong references in random order, collecting periodically
        # so weakrefs die while the copy is in progress.
        gc_ctr = 0
        while lst:
            i = random.randint(0, len(lst) - 1)
            gc_ctr += 1
            lst.pop(i)
            if gc_ctr % 10000 == 0:
                gc.collect()  # just in case

    self.assertIn(type_, (weakref.WeakKeyDictionary, weakref.WeakValueDictionary))

    d = type_()
    keys = []
    values = []
    # Initialize d with many entries
    for i in range(70000):
        k, v = DummyKey(i), DummyValue(i)
        keys.append(k)
        values.append(v)
        d[k] = v
        del k
        del v

    t_copy = threading.Thread(target=dict_copy, args=(d, exc,))
    # Drop whichever side is weakly referenced by this dict type.
    if type_ is weakref.WeakKeyDictionary:
        t_collect = threading.Thread(target=pop_and_collect, args=(keys,))
    else:  # weakref.WeakValueDictionary
        t_collect = threading.Thread(target=pop_and_collect, args=(values,))

    t_copy.start()
    t_collect.start()

    t_copy.join()
    t_collect.join()

    # Test exceptions
    if exc:
        raise exc[0]
def test_threaded_weak_key_dict_copy(self):
    """Issue #35615: keys GC'ed during a shallow copy must not crash."""
    self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, False)
def test_threaded_weak_key_dict_deepcopy(self):
    """Issue #35615: keys GC'ed during a deep copy must not crash."""
    self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, True)
def test_threaded_weak_value_dict_copy(self):
    """Issue #35615: values GC'ed during a shallow copy must not crash."""
    self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, False)
def test_threaded_weak_value_dict_deepcopy(self):
    """Issue #35615: values GC'ed during a deep copy must not crash."""
    self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, True)
@support.cpython_only
def test_remove_closure(self):
    """The _remove callback must not close over the dict (CPython detail)."""
    wvd = weakref.WeakValueDictionary()
    self.assertIsNone(wvd._remove.__closure__)
from test import mapping_tests
class WeakValueDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
    """Check that WeakValueDictionary conforms to the mapping protocol"""
    __ref = {"key1": Object(1), "key2": Object(2), "key3": Object(3)}
    type2test = weakref.WeakValueDictionary

    def _reference(self):
        # Hand out a fresh copy so the protocol tests may mutate it freely.
        return dict(self.__ref)
class WeakKeyDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
    """Check that WeakKeyDictionary conforms to the mapping protocol"""
    __ref = {Object("key1"): 1, Object("key2"): 2, Object("key3"): 3}
    type2test = weakref.WeakKeyDictionary

    def _reference(self):
        # Hand out a fresh copy so the protocol tests may mutate it freely.
        return dict(self.__ref)
class FinalizeTestCase(unittest.TestCase):
    """Tests for weakref.finalize (one-shot callbacks tied to an object's
    lifetime, with atexit integration)."""

    class A:
        # Minimal weakly-referencable target for finalizers.
        pass

    def _collect_if_necessary(self):
        # we create no ref-cycles so in CPython no gc should be needed
        if sys.implementation.name != 'cpython':
            support.gc_collect()

    def test_finalize(self):
        # A finalizer fires at most once, whether called explicitly or
        # triggered by the referent's death; peek()/detach() return None
        # once it is dead.
        def add(x,y,z):
            res.append(x + y + z)
            return x + y + z

        a = self.A()

        # Explicit call: runs the callback once, then becomes dead.
        res = []
        f = weakref.finalize(a, add, 67, 43, z=89)
        self.assertEqual(f.alive, True)
        self.assertEqual(f.peek(), (a, add, (67,43), {'z':89}))
        self.assertEqual(f(), 199)
        self.assertEqual(f(), None)
        self.assertEqual(f(), None)
        self.assertEqual(f.peek(), None)
        self.assertEqual(f.detach(), None)
        self.assertEqual(f.alive, False)
        self.assertEqual(res, [199])

        # detach(): unregisters without running the callback.
        res = []
        f = weakref.finalize(a, add, 67, 43, 89)
        self.assertEqual(f.peek(), (a, add, (67,43,89), {}))
        self.assertEqual(f.detach(), (a, add, (67,43,89), {}))
        self.assertEqual(f(), None)
        self.assertEqual(f(), None)
        self.assertEqual(f.peek(), None)
        self.assertEqual(f.detach(), None)
        self.assertEqual(f.alive, False)
        self.assertEqual(res, [])

        # Implicit trigger: deleting the referent runs the callback.
        res = []
        f = weakref.finalize(a, add, x=67, y=43, z=89)
        del a
        self._collect_if_necessary()
        self.assertEqual(f(), None)
        self.assertEqual(f(), None)
        self.assertEqual(f.peek(), None)
        self.assertEqual(f.detach(), None)
        self.assertEqual(f.alive, False)
        self.assertEqual(res, [199])

    def test_arg_errors(self):
        # 'func' and 'obj' passed positionally are forwarded to the
        # callback; passing them as keywords is deprecated.
        def fin(*args, **kwargs):
            res.append((args, kwargs))

        a = self.A()

        res = []
        f = weakref.finalize(a, fin, 1, 2, func=3, obj=4)
        self.assertEqual(f.peek(), (a, fin, (1, 2), {'func': 3, 'obj': 4}))
        f()
        self.assertEqual(res, [((1, 2), {'func': 3, 'obj': 4})])

        res = []
        with self.assertWarns(DeprecationWarning):
            f = weakref.finalize(a, func=fin, arg=1)
        self.assertEqual(f.peek(), (a, fin, (), {'arg': 1}))
        f()
        self.assertEqual(res, [((), {'arg': 1})])

        res = []
        with self.assertWarns(DeprecationWarning):
            f = weakref.finalize(obj=a, func=fin, arg=1)
        self.assertEqual(f.peek(), (a, fin, (), {'arg': 1}))
        f()
        self.assertEqual(res, [((), {'arg': 1})])

        self.assertRaises(TypeError, weakref.finalize, a)
        self.assertRaises(TypeError, weakref.finalize)

    def test_order(self):
        # Finalizers for the same object run in reverse registration
        # order when the object dies; detached or already-run finalizers
        # are skipped.
        a = self.A()
        res = []

        f1 = weakref.finalize(a, res.append, 'f1')
        f2 = weakref.finalize(a, res.append, 'f2')
        f3 = weakref.finalize(a, res.append, 'f3')
        f4 = weakref.finalize(a, res.append, 'f4')
        f5 = weakref.finalize(a, res.append, 'f5')

        # make sure finalizers can keep themselves alive
        del f1, f4

        self.assertTrue(f2.alive)
        self.assertTrue(f3.alive)
        self.assertTrue(f5.alive)

        self.assertTrue(f5.detach())
        self.assertFalse(f5.alive)

        f5()                       # nothing because previously unregistered
        res.append('A')
        f3()                       # => res.append('f3')
        self.assertFalse(f3.alive)
        res.append('B')
        f3()                       # nothing because previously called
        res.append('C')

        del a
        self._collect_if_necessary()
        # => res.append('f4')
        # => res.append('f2')
        # => res.append('f1')
        self.assertFalse(f2.alive)
        res.append('D')
        f2()                       # nothing because previously called by gc

        expected = ['A', 'f3', 'B', 'C', 'f4', 'f2', 'f1', 'D']
        self.assertEqual(res, expected)

    def test_all_freed(self):
        # A dead finalizer must not keep its callback (or itself) alive.
        # we want a weakrefable subclass of weakref.finalize
        class MyFinalizer(weakref.finalize):
            pass

        a = self.A()
        res = []
        def callback():
            res.append(123)
        f = MyFinalizer(a, callback)

        wr_callback = weakref.ref(callback)
        wr_f = weakref.ref(f)
        del callback, f

        self.assertIsNotNone(wr_callback())
        self.assertIsNotNone(wr_f())

        del a
        self._collect_if_necessary()

        self.assertIsNone(wr_callback())
        self.assertIsNone(wr_f())
        self.assertEqual(res, [123])

    @classmethod
    def run_in_child(cls):
        # Executed in a subprocess by test_atexit: registers a mix of
        # atexit and non-atexit finalizers, one of which raises and
        # registers a further finalizer from inside the shutdown hook.
        def error():
            # Create an atexit finalizer from inside a finalizer called
            # at exit.  This should be the next to be run.
            g1 = weakref.finalize(cls, print, 'g1')
            print('f3 error')
            1/0

        # cls should stay alive till atexit callbacks run
        f1 = weakref.finalize(cls, print, 'f1', _global_var)
        f2 = weakref.finalize(cls, print, 'f2', _global_var)
        f3 = weakref.finalize(cls, error)
        f4 = weakref.finalize(cls, print, 'f4', _global_var)

        assert f1.atexit == True
        f2.atexit = False
        assert f3.atexit == True
        assert f4.atexit == True

    def test_atexit(self):
        # Spawn a child and check finalizer output order at interpreter
        # shutdown; f2 is excluded (atexit=False) and the ZeroDivisionError
        # from f3 is reported on stderr.
        prog = ('from test.test_weakref import FinalizeTestCase;'+
                'FinalizeTestCase.run_in_child()')
        rc, out, err = script_helper.assert_python_ok('-c', prog)
        out = out.decode('ascii').splitlines()
        self.assertEqual(out, ['f4 foobar', 'f3 error', 'g1', 'f1 foobar'])
        self.assertTrue(b'ZeroDivisionError' in err)
libreftest = """ Doctest for examples in the library reference: weakref.rst
>>> import weakref
>>> class Dict(dict):
... pass
...
>>> obj = Dict(red=1, green=2, blue=3) # this object is weak referencable
>>> r = weakref.ref(obj)
>>> print(r() is obj)
True
>>> import weakref
>>> class Object:
... pass
...
>>> o = Object()
>>> r = weakref.ref(o)
>>> o2 = r()
>>> o is o2
True
>>> del o, o2
>>> print(r())
None
>>> import weakref
>>> class ExtendedRef(weakref.ref):
... def __init__(self, ob, callback=None, **annotations):
... super().__init__(ob, callback)
... self.__counter = 0
... for k, v in annotations.items():
... setattr(self, k, v)
... def __call__(self):
... '''Return a pair containing the referent and the number of
... times the reference has been called.
... '''
... ob = super().__call__()
... if ob is not None:
... self.__counter += 1
... ob = (ob, self.__counter)
... return ob
...
>>> class A: # not in docs from here, just testing the ExtendedRef
... pass
...
>>> a = A()
>>> r = ExtendedRef(a, foo=1, bar="baz")
>>> r.foo
1
>>> r.bar
'baz'
>>> r()[1]
1
>>> r()[1]
2
>>> r()[0] is a
True
>>> import weakref
>>> _id2obj_dict = weakref.WeakValueDictionary()
>>> def remember(obj):
... oid = id(obj)
... _id2obj_dict[oid] = obj
... return oid
...
>>> def id2obj(oid):
... return _id2obj_dict[oid]
...
>>> a = A() # from here, just testing
>>> a_id = remember(a)
>>> id2obj(a_id) is a
True
>>> del a
>>> try:
... id2obj(a_id)
... except KeyError:
... print('OK')
... else:
... print('WeakValueDictionary error')
OK
"""
__test__ = {'libreftest' : libreftest}
def test_main():
    # Run all the unittest suites, then the module's doctests (the
    # libreftest examples registered via __test__).
    support.run_unittest(
        ReferencesTestCase,
        WeakMethodTestCase,
        MappingTestCase,
        WeakValueDictionaryTestCase,
        WeakKeyDictionaryTestCase,
        SubclassableWeakrefTestCase,
        FinalizeTestCase,
        )
    support.run_doctest(sys.modules[__name__])

if __name__ == "__main__":
    test_main()
|
serial_comms.py | #!/usr/bin/env python
""" Low-level serial communications handling """
import sys, threading, logging
import re
import serial # pyserial: http://pyserial.sourceforge.net
from .exceptions import TimeoutException
from . import compat # For Python 2.6 compatibility
class SerialComms(object):
    """ Wraps all low-level serial communications (actual read/write operations)

    A background thread reads the port byte-by-byte and splits the stream
    into lines.  Lines are either appended to the response buffer of a
    pending write() (another thread is blocked on _responseEvent) or, when
    nothing is waiting, collected as an unsolicited notification and handed
    to notifyCallback.
    """

    log = logging.getLogger('gsmmodem.serial_comms.SerialComms')

    # End-of-line read terminator
    RX_EOL_SEQ = '\r\n'
    # End-of-response terminator
    RESPONSE_TERM = re.compile(r'^OK|ERROR|(\+CM[ES] ERROR: \d+)|(COMMAND NOT SUPPORT)$')
    # Default timeout for serial port reads (in seconds)
    timeout = 1

    def __init__(self, port, baudrate=115200, notifyCallbackFunc=None, fatalErrorCallbackFunc=None, *args, **kwargs):
        """ Constructor

        @param port: serial port identifier passed through to pyserial
        @param baudrate: serial speed in bits per second
        @param notifyCallbackFunc: function to call with each complete unsolicited notification (a list of lines)
        @param fatalErrorCallbackFunc: function to call if a fatal error occurs in the serial device reading thread
        @type fatalErrorCallbackFunc: func
        """
        self.alive = False
        self.port = port
        self.baudrate = baudrate

        self._responseEvent = None # threading.Event()
        self._expectResponseTermSeq = None # expected response terminator sequence
        self._response = None # Buffer containing response to a written command
        self._notification = [] # Buffer containing lines from an unsolicited notification from the modem

        # Reentrant lock for managing concurrent write access to the underlying serial port
        self._txLock = threading.RLock()

        self.notifyCallback = notifyCallbackFunc or self._placeholderCallback
        self.fatalErrorCallback = fatalErrorCallbackFunc or self._placeholderCallback

    def connect(self):
        """ Connects to the device and starts the read thread """
        self.serial = serial.Serial(port=self.port, baudrate=self.baudrate, timeout=self.timeout)
        # Start read thread
        self.alive = True
        self.rxThread = threading.Thread(target=self._readLoop)
        self.rxThread.daemon = True
        self.rxThread.start()

    def close(self):
        """ Stops the read thread, waits for it to exit cleanly, then closes the underlying serial port """
        self.alive = False
        self.rxThread.join()
        self.serial.close()

    def _handleLineRead(self, line, checkForResponseTerm=True):
        # Route one complete received line: either into the pending
        # command response, or into the notification buffer.
        #print 'sc.hlineread:',line
        if self._responseEvent and not self._responseEvent.is_set():
            # A response event has been set up (another thread is waiting for this response)
            self._response.append(line)
            if not checkForResponseTerm or self.RESPONSE_TERM.match(line):
                # End of response reached; notify waiting thread
                #print 'response:', self._response
                self.log.debug('response: %s', self._response)
                self._responseEvent.set()
        else:
            # Nothing was waiting for this - treat it as a notification
            self._notification.append(line)
            if self.serial.inWaiting() == 0:
                # No more chars on the way for this notification - notify higher-level callback
                #print 'notification:', self._notification
                self.log.debug('notification: %s', self._notification)
                self.notifyCallback(self._notification)
                self._notification = []

    def _placeholderCallback(self, *args, **kwargs):
        """ Placeholder callback function (does nothing) """

    def _readLoop(self):
        """ Read thread main loop

        Reads lines from the connected device
        """
        # NOTE(review): bytes are compared against str constants below
        # (data != '', RX_EOL_SEQ); this matches the Python 2 serial API
        # this module targets (see the `compat` import) - confirm before
        # running under Python 3, where serial.read() returns bytes.
        try:
            readTermSeq = list(self.RX_EOL_SEQ)
            readTermLen = len(readTermSeq)
            rxBuffer = []
            while self.alive:
                data = self.serial.read(1)
                if data != '': # check for timeout
                    #print >> sys.stderr, ' RX:', data,'({0})'.format(ord(data))
                    rxBuffer.append(data)
                    if rxBuffer[-readTermLen:] == readTermSeq:
                        # A line (or other logical segment) has been read
                        line = ''.join(rxBuffer[:-readTermLen])
                        rxBuffer = []
                        if len(line) > 0:
                            #print 'calling handler'
                            self._handleLineRead(line)
                    elif self._expectResponseTermSeq:
                        # A write() asked for a custom terminator (e.g. a
                        # prompt that is not EOL-terminated).
                        if rxBuffer[-len(self._expectResponseTermSeq):] == self._expectResponseTermSeq:
                            line = ''.join(rxBuffer)
                            rxBuffer = []
                            self._handleLineRead(line, checkForResponseTerm=False)
            #else:
            #' <RX timeout>'
        except serial.SerialException as e:
            self.alive = False
            try:
                self.serial.close()
            except Exception: #pragma: no cover
                pass
            # Notify the fatal error handler
            self.fatalErrorCallback(e)

    def write(self, data, waitForResponse=True, timeout=5, expectedResponseTermSeq=None):
        """ Write data to the port; optionally block (up to `timeout` seconds)
        for the response lines collected by the read thread.

        Returns the list of response lines, or raises TimeoutException.
        """
        with self._txLock:
            if waitForResponse:
                if expectedResponseTermSeq:
                    self._expectResponseTermSeq = list(expectedResponseTermSeq)
                self._response = []
                self._responseEvent = threading.Event()
                self.serial.write(data)
                if self._responseEvent.wait(timeout):
                    self._responseEvent = None
                    self._expectResponseTermSeq = False
                    return self._response
                else: # Response timed out
                    self._responseEvent = None
                    self._expectResponseTermSeq = False
                    raise TimeoutException()
            else:
                self.serial.write(data)
|
ontology_annotatorServer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from ontology_annotator.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
# Environment variable names used to locate the deployment configuration,
# and the config key holding the auth service URL.
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'

# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
    """Return the deployment config file path from the environment, or None."""
    return environ.get(DEPLOY)
def get_service_name():
    """Return the service name from the environment, or None."""
    return environ.get(SERVICE)
def get_config():
    """Parse the deployment config file into a plain dict of options.

    Returns None when no config file is configured via the environment.
    The section read is the service name, defaulting to 'ontology_annotator'.
    """
    config_path = get_config_file()
    if not config_path:
        return None
    parser = ConfigParser()
    parser.read(config_path)
    section = get_service_name() or 'ontology_annotator'
    return {name: value for name, value in parser.items(section)}
# Load the deployment configuration once at import time and instantiate the
# service implementation with it (the import is deferred until after config
# loading, matching the KBase SDK server layout).
config = get_config()

from ontology_annotator.ontology_annotatorImpl import ontology_annotator  # noqa @IgnorePep8
impl_ontology_annotator = ontology_annotator(config)
class JSONObjectEncoder(json.JSONEncoder):
    """JSON encoder that additionally serializes sets/frozensets as lists
    and delegates to a toJSONable() method when an object provides one."""

    def default(self, obj):
        if isinstance(obj, (set, frozenset)):
            return list(obj)
        if hasattr(obj, 'toJSONable'):
            return obj.toJSONable()
        # Anything else: let the base class raise TypeError.
        return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
    """JSONRPCService variant that threads a per-request context (ctx)
    through to every registered method and serializes results with
    JSONObjectEncoder."""

    def call(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.

        Arguments:
        jsondata -- remote method call in jsonrpc format
        """
        result = self.call_py(ctx, jsondata)
        if result is not None:
            return json.dumps(result, cls=JSONObjectEncoder)

        return None

    def _call_method(self, ctx, request):
        """Calls given method with given params and returns it value."""
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # Argument-count checks are offset by 1 because ctx is
                # always passed as the first argument.
                # Does it have enough arguments?
                if len(params) < self._man_args(method) - 1:
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if (not self._vargs(method) and len(params) >
                        self._max_args(method) - 1):
                    raise InvalidParamsError('too many arguments')

                result = method(ctx, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.
                if request['jsonrpc'] < 11:
                    raise KeywordError

                result = method(ctx, **params)
            else:  # No params
                result = method(ctx)
        except JSONRPCError:
            raise
        except Exception as e:
            # log.exception('method %s threw an exception' % request['method'])
            # Exception was raised inside the method: wrap it in a server
            # error carrying the traceback for the client.
            newerr = JSONServerError()
            newerr.trace = traceback.format_exc()
            if len(e.args) == 1:
                newerr.data = repr(e.args[0])
            else:
                newerr.data = repr(e.args)
            raise newerr
        return result

    def call_py(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in python
        object format or None if there is none.

        This method is same as call() except the return value is a python
        object instead of JSON string. This method is mainly only useful for
        debugging purposes.
        """
        rdata = jsondata
        # we already deserialize the json string earlier in the server code, no
        # need to do it again
        # try:
        #    rdata = json.loads(jsondata)
        # except ValueError:
        #    raise ParseError

        # set some default values for error handling
        request = self._get_default_vals()

        if isinstance(rdata, dict) and rdata:
            # It's a single request.
            self._fill_request(request, rdata)
            respond = self._handle_request(ctx, request)

            # Don't respond to notifications
            if respond is None:
                return None
            return respond
        elif isinstance(rdata, list) and rdata:
            # It's a batch.
            requests = []
            responds = []
            for rdata_ in rdata:
                # set some default values for error handling
                request_ = self._get_default_vals()
                self._fill_request(request_, rdata_)
                requests.append(request_)

            for request_ in requests:
                respond = self._handle_request(ctx, request_)
                # Don't respond to notifications
                if respond is not None:
                    responds.append(respond)

            if responds:
                return responds

            # Nothing to respond.
            return None
        else:
            # empty dict, list or wrong type
            raise InvalidRequestError

    def _handle_request(self, ctx, request):
        """Handles given request and returns its response."""
        if 'types' in self.method_data[request['method']]:
            self._validate_params_types(request['method'], request['params'])

        result = self._call_method(ctx, request)

        # Do not respond to notifications.
        if request['id'] is None:
            return None

        respond = {}
        self._fill_ver(request['jsonrpc'], respond)
        respond['result'] = result
        respond['id'] = request['id']

        return respond
class MethodContext(dict):
    """Per-request call context: a dict of request metadata (client IP,
    user, module/method, call id, provenance, ...) plus logging helpers."""

    def __init__(self, logger):
        self['client_ip'] = None
        self['user_id'] = None
        self['authenticated'] = None
        self['token'] = None
        self['module'] = None
        self['method'] = None
        self['call_id'] = None
        self['rpc_context'] = None
        self['provenance'] = None
        # Log levels accepted verbatim by log_debug (numeric 7-9 or the
        # corresponding symbolic names).
        self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
        self._logger = logger

    def log_err(self, message):
        self._log(log.ERR, message)

    def log_info(self, message):
        self._log(log.INFO, message)

    def log_debug(self, message, level=1):
        # Accept either a recognized debug level directly, or a small
        # integer 1-3 that is mapped onto levels 7-9.
        if level in self._debug_levels:
            pass
        else:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            level = level + 6
        self._log(level, message)

    def set_log_level(self, level):
        self._logger.set_log_level(level)

    def get_log_level(self):
        return self._logger.get_log_level()

    def clear_log_level(self):
        self._logger.clear_user_log_level()

    def _log(self, level, message):
        # Tag every message with the request's identifying fields.
        self._logger.log_message(level, message, self['client_ip'],
                                 self['user_id'], self['module'],
                                 self['method'], self['call_id'])

    def provenance(self):
        """Return provenance for this call, preferring the callback server
        (when SDK_CALLBACK_URL is set) over the locally stored value."""
        callbackURL = os.environ.get('SDK_CALLBACK_URL')
        if callbackURL:
            # OK, there's a callback server from which we can get provenance
            arg_hash = {'method': 'CallbackServer.get_provenance',
                        'params': [],
                        'version': '1.1',
                        'id': str(_random.random())[2:]
                        }
            body = json.dumps(arg_hash)
            response = _requests.post(callbackURL, data=body,
                                      timeout=60)
            response.encoding = 'utf-8'
            if response.status_code == 500:
                if ('content-type' in response.headers and
                        response.headers['content-type'] ==
                        'application/json'):
                    err = response.json()
                    if 'error' in err:
                        raise ServerError(**err['error'])
                    else:
                        raise ServerError('Unknown', 0, response.text)
                else:
                    raise ServerError('Unknown', 0, response.text)
            if not response.ok:
                response.raise_for_status()
            resp = response.json()
            if 'result' not in resp:
                raise ServerError('Unknown', 0,
                                  'An unknown server error occurred')
            return resp['result'][0]
        else:
            return self.get('provenance')
class ServerError(Exception):
    '''
    The call returned an error. Fields:
    name - the name of the error.
    code - the error code.
    message - a human readable error message.
    data - the server side stacktrace.
    '''

    def __init__(self, name, code, message, data=None, error=None):
        super(Exception, self).__init__(message)
        self.name = name
        self.code = code
        self.message = message or ''
        # data = JSON RPC 2.0, error = 1.1
        self.data = data or error or ''

    def __str__(self):
        return '{}: {}. {}\n{}'.format(
            self.name, self.code, self.message, self.data)
def getIPAddress(environ):
    """Best-effort client IP: honour X-Forwarded-For / X-Real-IP unless the
    config explicitly sets dont_trust_x_ip_headers, else REMOTE_ADDR."""
    forwarded_for = environ.get('HTTP_X_FORWARDED_FOR')
    real_ip = environ.get('HTTP_X_REAL_IP')
    trust_x_headers = config is None or \
        config.get('dont_trust_x_ip_headers') != 'true'

    if trust_x_headers:
        if forwarded_for:
            # First entry in the comma-separated chain is the client.
            return forwarded_for.split(',')[0].strip()
        if real_ip:
            return real_ip.strip()
    return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
    # Keep the server log writing to the same file as the user log
    # whenever the user log's destination changes.
    self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
    # Emit a server-log message tagged with the per-request context fields.
    self.serverlog.log_message(level, message, context['client_ip'],
                               context['user_id'], context['module'],
                               context['method'], context['call_id'])
def __init__(self):
    """Set up user/server logging, register the service's RPC methods and
    their authentication requirements, and create the auth client."""
    submod = get_service_name() or 'ontology_annotator'
    self.userlog = log.log(
        submod, ip_address=True, authuser=True, module=True, method=True,
        call_id=True, changecallback=self.logcallback,
        config=get_config_file())
    self.serverlog = log.log(
        submod, ip_address=True, authuser=True, module=True, method=True,
        call_id=True, logfile=self.userlog.get_log_file())
    self.serverlog.set_log_level(6)
    self.rpc_service = JSONRPCServiceCustom()
    self.method_authentication = dict()
    # BUG FIX: `impl_ontology_annotator.run_ontology-annotator` was parsed
    # by Python as a subtraction expression (attribute names cannot contain
    # '-'), so server startup raised NameError.  The implementation method
    # must be the underscored `run_ontology_annotator`; the registered RPC
    # name strings are left unchanged so the wire protocol is unaffected.
    self.rpc_service.add(impl_ontology_annotator.run_ontology_annotator,
                         name='ontology_annotator.run_ontology-annotator',
                         types=[dict])
    self.method_authentication['ontology_annotator.run_ontology-annotator'] = 'required'  # noqa
    self.rpc_service.add(impl_ontology_annotator.status,
                         name='ontology_annotator.status',
                         types=[dict])
    authurl = config.get(AUTH) if config else None
    self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'ontology_annotator ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
# Module-level singleton used by uwsgi, the builtin HTTP server below, and
# the async CLI mode.
application = Application()

# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
    import uwsgi
    # Before we do anything with the application, see if the
    # configs specify patching all std routines to be asynch
    # *ONLY* use this if you are going to wrap the service in
    # a wsgi container that has enabled gevent, such as
    # uwsgi with the --gevent option
    if config is not None and config.get('gevent_monkeypatch_all', False):
        print("Monkeypatching std libraries for async")
        from gevent import monkey
        monkey.patch_all()
    uwsgi.applications = {'': application}
except ImportError:
    # Not available outside of wsgi, ignore
    pass
# Handle of the background server process when started with newprocess=True.
_proc = None


def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True. This
    will also allow returning of the port number.'''
    global _proc
    if _proc:
        raise RuntimeError('server is already running')
    httpd = make_server(host, port, application)
    # port 0 asks the OS for a free port; read back the one actually bound
    port = httpd.server_address[1]
    print("Listening on port %s" % port)
    if newprocess:
        _proc = Process(target=httpd.serve_forever)
        # daemonize so the server process dies with the parent
        _proc.daemon = True
        _proc.start()
    else:
        httpd.serve_forever()
    return port
def stop_server():
    """Terminate a server started via start_server(newprocess=True).

    Raises RuntimeError when no background server process is running; the
    original dereferenced None and raised a confusing AttributeError instead.
    """
    global _proc
    if _proc is None:
        raise RuntimeError('server is not running')
    _proc.terminate()
    _proc = None
def process_async_cli(input_file_path, output_file_path, token):
    """Run a single JSON-RPC call in async-job CLI mode.

    Reads the request from *input_file_path*, executes it against the global
    application, writes the JSON response to *output_file_path*, and returns
    a process exit code: 0 on success, 500 if the call produced an error.
    """
    exit_code = 0
    with open(input_file_path) as data_file:
        req = json.load(data_file)
    # default missing JSON-RPC envelope fields
    if 'version' not in req:
        req['version'] = '1.1'
    if 'id' not in req:
        req['id'] = str(_random.random())[2:]
    ctx = MethodContext(application.userlog)
    if token:
        user = application.auth_client.get_user(token)
        ctx['user_id'] = user
        ctx['authenticated'] = 1
        ctx['token'] = token
    if 'context' in req:
        ctx['rpc_context'] = req['context']
    ctx['CLI'] = 1
    ctx['module'], ctx['method'] = req['method'].split('.')
    prov_action = {'service': ctx['module'], 'method': ctx['method'],
                   'method_params': req['params']}
    ctx['provenance'] = [prov_action]
    resp = None
    try:
        resp = application.rpc_service.call_py(ctx, req)
    except JSONRPCError as jre:
        trace = jre.trace if hasattr(jre, 'trace') else None
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': jre.code,
                          'name': jre.message,
                          'message': jre.data,
                          'error': trace}
                }
    except Exception:
        # catch-all so the CLI always produces an output file
        trace = traceback.format_exc()
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': 0,
                          'name': 'Unexpected Server Error',
                          'message': 'An unexpected server error occurred',
                          'error': trace}
                }
    if 'error' in resp:
        exit_code = 500
    with open(output_file_path, "w") as f:
        f.write(json.dumps(resp, cls=JSONObjectEncoder))
    return exit_code
if __name__ == "__main__":
    # Async-job CLI mode: <input-json-file> <output-file> [token|token-file]
    if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
            os.path.isfile(sys.argv[1])):
        token = None
        if len(sys.argv) == 4:
            # third argument is either a token file or a literal token
            if os.path.isfile(sys.argv[3]):
                with open(sys.argv[3]) as token_file:
                    token = token_file.read()
            else:
                token = sys.argv[3]
        sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
    # Otherwise run the single-threaded development HTTP server.
    try:
        opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
    except GetoptError as err:
        # print help information and exit:
        print(str(err))  # will print something like "option -a not recognized"
        sys.exit(2)
    port = 9999
    host = 'localhost'
    for o, a in opts:
        if o == '--port':
            port = int(a)
        elif o == '--host':
            host = a
            print("Host set to %s" % host)
        else:
            assert False, "unhandled option"
    start_server(host=host, port=port)
    # print("Listening on port %s" % port)
    # httpd = make_server( host, port, application)
    #
    # httpd.serve_forever()
|
cache_race_repro.py | import time
import hashlib
import argparse
import os
import sys
import random
import logging
from cachetools import cached, TTLCache
from threading import Lock, Thread
# Worker thread count; override via the NUM_THREADS environment variable.
NUM_THREADS = 4
if os.getenv("NUM_THREADS"):
    NUM_THREADS = int(os.getenv("NUM_THREADS"))

# Cache lookups performed by each worker; override via NUM_RUNS.
NUM_RUNS = 100
if os.getenv("NUM_RUNS"):
    # BUG FIX: this override previously assigned NUM_THREADS (copy-paste
    # error), so NUM_RUNS could never be configured and NUM_THREADS was
    # silently clobbered.
    NUM_RUNS = int(os.getenv("NUM_RUNS"))

# Optional lock for the TTLCache; leaving it None reproduces the race.
LOCK = None
if os.getenv("LOCK_CACHE"):
    LOCK = Lock()

# Key space deliberately larger (8) than the cache (maxsize=4) to force
# evictions.
KEYS = ["key%s" % str(i) for i in range(8)]
@cached(cache=TTLCache(maxsize=4, ttl=1), lock=LOCK)
def generate(key):
    """Return an md5 object over key+now, cached for 1s in a 4-entry TTLCache.

    When LOCK is None (LOCK_CACHE env var unset) the cache is unlocked, which
    is the concurrency race this repro script is designed to exercise.
    """
    return hashlib.md5((str(key) + str(time.time())).encode())
def run(worker_id):
    """Worker loop: perform NUM_RUNS cache lookups against random keys,
    logging each result, with a short pause between lookups."""
    for attempt in range(NUM_RUNS):
        key = KEYS[random.randint(0, 7)]
        value = generate(key)
        logging.info("Worker %s run %s (%s, %s)" % (worker_id, attempt, key, value))
        time.sleep(0.1)
def main():
    """Spawn NUM_THREADS workers, start them all, then wait for completion."""
    workers = []
    for idx in range(NUM_THREADS):
        worker = Thread(target=run, args=(idx,))
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
if __name__ == "__main__":
    # Route INFO-level logging from all worker threads to stdout so the
    # interleaved cache hits/misses are visible.
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.INFO)
    formatter = logging.Formatter("[%(levelname)s %(asctime)s] %(message)s")
    handler.setFormatter(formatter)
    root.addHandler(handler)
    main()
|
util.py | # -*- coding: utf-8 eval: (yapf-mode 1) -*-
#
# January 21 2019, Christian E. Hopps <chopps@labn.net>
#
# Copyright (c) 2019, LabN Consulting, L.L.C.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, division, unicode_literals, print_function, nested_scopes
import time
import logging
import threading
logger = logging.getLogger(__file__)
# Nanoseconds per second, for float-seconds -> integer-nanoseconds conversion.
SEC_NANOSECS = 1000000000

try:
    clock_gettime_ns = time.clock_gettime_ns
except AttributeError:
    # Python < 3.7 has no time.clock_gettime_ns; emulate it from the float
    # time.clock_gettime (precision limited by float rounding).
    def clock_gettime_ns(clock):
        fp = time.clock_gettime(clock)
        ns = int(fp * SEC_NANOSECS)
        return ns
def monotonic_ns():
    """Return the monotonic clock reading as integer nanoseconds."""
    nanos = clock_gettime_ns(time.CLOCK_MONOTONIC)
    return nanos
def monotonic():
    """Return the monotonic clock reading as float seconds."""
    seconds = time.clock_gettime(time.CLOCK_MONOTONIC)
    return seconds
class Timestamp:
    """A way to track the lifetime left of an object"""

    def __init__(self):
        # monotonic() so the measurement is immune to wall-clock jumps
        self.timestamp = monotonic()

    def reset(self):
        """Restart the lifetime clock from now."""
        self.timestamp = monotonic()

    def elapsed(self):
        """Return seconds since construction or the last reset()."""
        return monotonic() - self.timestamp
class RunningAverage:
    """Running average over a fixed-length window of the most recent samples.

    The window starts filled with *defval*, so the average only reflects real
    samples once a full run of *runlen* values has been added.
    """

    def __init__(self, runlen, defval=int(0), avgf=None):
        """
        :param runlen: number of samples in the averaging window
        :param defval: initial value the window is pre-filled with
        :param avgf: optional averaging function over the sample list;
            defaults to the arithmetic mean
        """
        self.runlen = runlen
        self.values = [defval for _ in range(runlen)]
        self.index = 0
        self.ticks = 0
        if avgf is not None:
            self.avgf = avgf
        else:
            self.avgf = lambda samples: sum(samples) / len(samples)
        self.average = self.avgf(self.values)

    def add_value(self, value):
        """add_value adds a new value to the running average.

        Returns True if a full run has occurred.
        """
        self.values[self.index] = value
        self.index += 1
        completed_run = self.index == self.runlen
        if completed_run:
            # window wrapped: count the completed run and start overwriting
            self.ticks += 1
            self.index = 0
        self.average = self.avgf(self.values)
        return completed_run
class Limit:
    """Byte-rate limiter over a sliding window of the last *count* packets.

    limit() accounts one packet and answers whether it should be dropped to
    keep the observed rate at or below the configured one.
    """

    def __init__(self, rate: int, overhead: int, count: int):
        """
        :param rate: target rate in bits/sec (stored internally as bytes/sec)
        :param overhead: per-packet byte overhead subtracted before accounting
        :param count: window length in packets
        """
        self.rate = rate / 8
        self.overhead = overhead
        self.count = count
        self.pkttimes = [(0, 0) for _ in range(count)]
        self.totb = 0
        self.pktidx = 0
        self.dropcnt = 0

    def limit(self, n: int):
        """Account an *n*-byte packet; return True if it must be dropped."""
        n -= self.overhead
        oldsize, oldtime = self.pkttimes[self.pktidx]
        # window total if this packet replaced the oldest slot
        newtotal = self.totb + n - oldsize
        now = time.perf_counter()
        if oldtime:
            window_rate = newtotal / (now - oldtime)
        else:
            # slot never used yet: window not full, cannot exceed the rate
            window_rate = 0
        if window_rate > self.rate:
            self.dropcnt += 1
            return True
        # accept: commit the packet into the ring
        self.totb = newtotal
        self.pkttimes[self.pktidx] = (n, now)
        self.pktidx = (self.pktidx + 1) % self.count
        return False
class Periodic:
    """Fixed-interval pacer: wait() sleeps until the next interval boundary.

    If a call arrives after the boundary has already passed, the pacer logs
    the overrun and resynchronizes to the current time instead of sleeping.
    """

    def __init__(self, rate: float):
        # self.timestamp = time.time_ns()
        self.timestamp = time.time()
        self.ival = rate

    def wait(self):
        """Block until the next interval boundary; always returns True."""
        now = time.time()
        remaining = self.ival - (now - self.timestamp)
        if remaining < 0:
            # We are late: resync to now rather than sleeping.
            self.timestamp = now
            logging.info("Overran periodic timer by %f seconds", -remaining)
        else:
            time.sleep(remaining)
            self.timestamp = time.time()
        return True
class PeriodicPPS:
    """Pacer like Periodic, but expressed as packets-per-second and safe to
    retune from another thread via change_rate()."""

    def __init__(self, pps: int):
        # self.timestamp = time.time_ns()
        # ival_lock guards pps/ival against concurrent change_rate() calls
        self.ival_lock = threading.Lock()
        self.timestamp = time.time()
        self.pps = pps
        self.ival = 1.0 / pps

    def change_rate(self, pps: int):
        """Atomically change the pacing rate; return True if it changed."""
        with self.ival_lock:
            if pps != self.pps:
                self.pps = pps
                self.ival = 1.0 / pps
                return True
            return False

    def wait(self):
        """Sleep until the next interval boundary; always returns True."""
        with self.ival_lock:
            ival = self.ival
        now = time.time()
        delta = now - self.timestamp
        waittime = ival - delta
        if waittime < 0:
            # late: resync rather than sleeping
            self.timestamp = now
            logging.info("Overran periodic timer by %f seconds", -waittime)
        else:
            # logging.debug("Waiting: %s", str(self.ival - delta))
            time.sleep(waittime)
            # logging.debug("Waking up!")
            self.timestamp = time.time()
        return True

    def waitspin(self):
        """Busy-wait (spin) until the next interval boundary; returns True.

        BUG FIX: the original spun with ``while time.sleep(0):`` --
        time.sleep() returns None, so the loop body never executed and
        waitspin() returned immediately without ever waiting. We now
        genuinely spin (sleep(0) yields the GIL) until the deadline passes.
        """
        with self.ival_lock:
            expire = self.timestamp + self.ival
            now = time.time()
            if now > expire:
                logging.info("Overran periodic timer by %f seconds", now - expire)
            else:
                while now <= expire:
                    time.sleep(0)
                    now = time.time()
            self.timestamp = now
            return True
class PeriodicSignal:
    """Broadcast on a condition variable at a fixed period from a daemon
    thread. Consumers wait on ``self.cv`` and are woken once per period."""

    def __init__(self, name: str, rate: int):
        """
        :param name: name for the background signalling thread
        :param rate: period in seconds (passed through to Periodic)
        """
        self.cv = threading.Condition()
        self._periodic = Periodic(rate)
        self._thread = threading.Thread(name=name, target=self._periodic_signal)
        # daemonize so the signaller never blocks interpreter shutdown
        self._thread.daemon = True

    def start(self):
        """Start the background signalling thread."""
        self._thread.start()

    def _periodic_signal(self):
        # Wake all waiters once per period. notify_all() replaces the
        # notifyAll() alias, which is deprecated since Python 3.10.
        while self._periodic.wait():
            with self.cv:
                self.cv.notify_all()
__author__ = 'Christian E. Hopps'
__date__ = 'January 21 2019'
__version__ = '1.0'
__docformat__ = "restructuredtext en"
|
lambda_executors.py | import os
import re
import json
import time
import logging
import threading
import subprocess
# from datetime import datetime
from multiprocessing import Process, Queue
try:
from shlex import quote as cmd_quote
except ImportError:
# for Python 2.7
from pipes import quote as cmd_quote
from localstack import config
from localstack.utils.common import run, TMP_FILES, short_uid, save_file, to_str, cp_r
from localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR
# constants
# Executor jar + class used to invoke Java lambdas out of process.
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
LAMBDA_EXECUTOR_CLASS = 'cloud.localstack.LambdaExecutor'
# Glob pattern for temp files holding Java lambda event payloads.
EVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER

# Supported Lambda runtime identifiers (match AWS runtime names).
LAMBDA_RUNTIME_PYTHON27 = 'python2.7'
LAMBDA_RUNTIME_PYTHON36 = 'python3.6'
LAMBDA_RUNTIME_NODEJS = 'nodejs'
LAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'
LAMBDA_RUNTIME_NODEJS810 = 'nodejs8.10'
LAMBDA_RUNTIME_JAVA8 = 'java8'
LAMBDA_RUNTIME_DOTNETCORE2 = 'dotnetcore2.0'
LAMBDA_RUNTIME_DOTNETCORE21 = 'dotnetcore2.1'
LAMBDA_RUNTIME_GOLANG = 'go1.x'
LAMBDA_RUNTIME_RUBY = 'ruby'
LAMBDA_RUNTIME_RUBY25 = 'ruby2.5'

# Name of the event payload file copied into lambda containers.
LAMBDA_EVENT_FILE = 'event_file.json'

# logger
LOG = logging.getLogger(__name__)

# maximum time a pre-allocated container can sit idle before getting killed
MAX_CONTAINER_IDLE_TIME = 600
class LambdaExecutor(object):
    """ Base class for Lambda executors. Subclasses must overwrite the execute method """

    def __init__(self):
        pass

    def execute(self, func_arn, func_details, event, context=None, version=None, asynchronous=False):
        """Invoke the lambda; must be overridden by subclasses."""
        # NotImplementedError is a subclass of Exception, so existing callers
        # catching Exception are unaffected.
        raise NotImplementedError('Not implemented.')

    def startup(self):
        """Hook called once when the executor is selected; no-op by default."""
        pass

    def cleanup(self, arn=None):
        """Hook to release resources (optionally for a single ARN); no-op by default."""
        pass

    def run_lambda_executor(self, cmd, env_vars=None, asynchronous=False):
        """Run *cmd* in a subprocess with *env_vars* and return (result, log).

        When *asynchronous* is set, the process is left running and a stub
        result is returned; otherwise stdout is the lambda result and stderr
        the log output. Raises on a non-zero exit code.
        """
        # NOTE: the default was previously a mutable `env_vars={}`; normalize
        # a None default instead to avoid the shared-mutable-default pitfall.
        env_vars = env_vars or {}
        process = run(cmd, asynchronous=True, stderr=subprocess.PIPE, outfile=subprocess.PIPE, env_vars=env_vars)
        if asynchronous:
            result = '{"asynchronous": "%s"}' % asynchronous
            log_output = 'Lambda executed asynchronously'
        else:
            result, log_output = process.communicate()
            result = to_str(result)
            log_output = to_str(log_output)
            return_code = process.returncode
            if return_code != 0:
                raise Exception('Lambda process returned error status code: %s. Output:\n%s' %
                                (return_code, log_output))
        return result, log_output
# holds information about an existing container.
class ContainerInfo:
    """
    Contains basic information about a docker container.
    """
    def __init__(self, name, entry_point):
        # name: docker container name; entry_point: image default entrypoint
        self.name = name
        self.entry_point = entry_point
class LambdaExecutorContainers(LambdaExecutor):
    """ Abstract executor class for executing Lambda functions in Docker containers """

    def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
        # Subclasses return the full shell command line to run the lambda.
        raise Exception('Not implemented')

    def execute(self, func_arn, func_details, event, context=None, version=None, asynchronous=False):
        """Build environment + command for the container and run the lambda."""
        lambda_cwd = func_details.cwd
        runtime = func_details.runtime
        handler = func_details.handler
        environment = func_details.envvars.copy()
        # configure USE_SSL in environment
        if config.USE_SSL:
            environment['USE_SSL'] = '1'
        # prepare event body
        if not event:
            LOG.warning('Empty event body specified for invocation of Lambda "%s"' % func_arn)
            event = {}
        event_body = json.dumps(event)
        # escape single quotes since the body is interpolated into a shell command
        event_body_escaped = event_body.replace("'", "\\'")
        docker_host = config.DOCKER_HOST_FROM_CONTAINER
        # amend the environment variables for execution
        environment['AWS_LAMBDA_EVENT_BODY'] = event_body_escaped
        environment['HOSTNAME'] = docker_host
        environment['LOCALSTACK_HOSTNAME'] = docker_host
        if context:
            environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name
            environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version
            environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn
        # custom command to execute in the container
        command = ''
        # if running a Java Lambda, set up classpath arguments
        if runtime == LAMBDA_RUNTIME_JAVA8:
            # copy executor jar into temp directory
            cp_r(LAMBDA_EXECUTOR_JAR, lambda_cwd)
            # TODO cleanup once we have custom Java Docker image
            taskdir = '/var/task'
            save_file(os.path.join(lambda_cwd, LAMBDA_EVENT_FILE), event_body)
            command = ("bash -c 'cd %s; java -cp .:`ls *.jar | tr \"\\n\" \":\"` \"%s\" \"%s\" \"%s\"'" %
                       (taskdir, LAMBDA_EXECUTOR_CLASS, handler, LAMBDA_EVENT_FILE))
        # determine the command to be executed (implemented by subclasses)
        cmd = self.prepare_execution(func_arn, environment, runtime, command, handler, lambda_cwd)
        # lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!
        LOG.debug('Running lambda cmd: %s' % cmd)
        result, log_output = self.run_lambda_executor(cmd, environment, asynchronous)
        LOG.debug('Lambda result / log output:\n%s\n>%s' % (result.strip(), log_output.strip().replace('\n', '\n> ')))
        return result, log_output
class LambdaExecutorReuseContainers(LambdaExecutorContainers):
    """ Executor class for executing Lambda functions in re-usable Docker containers """

    def __init__(self):
        super(LambdaExecutorReuseContainers, self).__init__()
        # keeps track of each function arn and the last time it was invoked
        self.function_invoke_times = {}
        # locking thread for creation/destruction of docker containers.
        self.docker_container_lock = threading.RLock()

    def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
        """Ensure a warm container exists for *func_arn* and build the
        ``docker exec`` command line to invoke the handler inside it."""
        # check whether the Lambda has been invoked before
        has_been_invoked_before = func_arn in self.function_invoke_times
        # set the invocation time
        self.function_invoke_times[func_arn] = time.time()
        # create/verify the docker container is running.
        LOG.debug('Priming docker container with runtime "%s" and arn "%s".', runtime, func_arn)
        container_info = self.prime_docker_container(runtime, func_arn, env_vars.items(), lambda_cwd)
        # Note: currently "docker exec" does not support --env-file, i.e., environment variables can only be
        # passed directly on the command line, using "-e" below. TODO: Update this code once --env-file is
        # available for docker exec, to better support very large Lambda events (very long environment values)
        exec_env_vars = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
        if not command:
            command = '%s %s' % (container_info.entry_point, handler)
        # determine files to be copied into the container
        copy_command = ''
        event_file = os.path.join(lambda_cwd, LAMBDA_EVENT_FILE)
        if not has_been_invoked_before:
            # if this is the first invocation: copy the entire folder into the container
            copy_command = 'docker cp "%s/." "%s:/var/task"; ' % (lambda_cwd, container_info.name)
        elif os.path.exists(event_file):
            # otherwise, copy only the event file if it exists
            copy_command = 'docker cp "%s" "%s:/var/task"; ' % (event_file, container_info.name)
        cmd = (
            '%s'  # copy files command
            'docker exec'
            ' %s'  # env variables
            ' %s'  # container name
            ' %s'  # run cmd
        ) % (copy_command, exec_env_vars, container_info.name, command)
        return cmd

    def startup(self):
        self.cleanup()
        # start a process to remove idle containers
        self.start_idle_container_destroyer_interval()

    def cleanup(self, arn=None):
        """Drop invocation bookkeeping and destroy container(s); when *arn*
        is given only that function's container is removed."""
        if arn:
            self.function_invoke_times.pop(arn, None)
            return self.destroy_docker_container(arn)
        self.function_invoke_times = {}
        return self.destroy_existing_docker_containers()

    def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd):
        """
        Prepares a persistent docker container for a specific function.
        :param runtime: Lamda runtime environment. python2.7, nodejs6.10, etc.
        :param func_arn: The ARN of the lambda function.
        :param env_vars: The environment variables for the lambda.
        :param lambda_cwd: The local directory containing the code for the lambda function.
        :return: ContainerInfo class containing the container name and default entry point.
        """
        with self.docker_container_lock:
            # Get the container name and id.
            container_name = self.get_container_name(func_arn)
            status = self.get_docker_container_status(func_arn)
            LOG.debug('Priming docker container (status "%s"): %s' % (status, container_name))
            # Container is not running or doesn't exist.
            if status < 1:
                # Make sure the container does not exist in any form/state.
                self.destroy_docker_container(func_arn)
                env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars])
                network = config.LAMBDA_DOCKER_NETWORK
                network_str = ' --network="%s" ' % network if network else ''
                # Create and start the container
                LOG.debug('Creating container: %s' % container_name)
                cmd = (
                    'docker create'
                    ' --rm'
                    ' --name "%s"'
                    ' --entrypoint /bin/bash'  # Load bash when it starts.
                    ' --interactive'  # Keeps the container running bash.
                    ' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
                    ' -e HOSTNAME="$HOSTNAME"'
                    ' -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME"'
                    '  %s'  # env_vars
                    ' %s'  # network
                    ' lambci/lambda:%s'
                ) % (container_name, env_vars_str, network_str, runtime)
                LOG.debug(cmd)
                run(cmd)
                LOG.debug('Copying files to container "%s" from "%s".' % (container_name, lambda_cwd))
                cmd = (
                    'docker cp'
                    ' "%s/." "%s:/var/task"'
                ) % (lambda_cwd, container_name)
                LOG.debug(cmd)
                run(cmd)
                LOG.debug('Starting container: %s' % container_name)
                cmd = 'docker start %s' % (container_name)
                LOG.debug(cmd)
                run(cmd)
                # give the container some time to start up
                time.sleep(1)
            # Get the entry point for the image.
            LOG.debug('Getting the entrypoint for image: lambci/lambda:%s' % runtime)
            cmd = (
                'docker image inspect'
                ' --format="{{ .ContainerConfig.Entrypoint }}"'
                ' lambci/lambda:%s'
            ) % (runtime)
            LOG.debug(cmd)
            run_result = run(cmd)
            entry_point = run_result.strip('[]\n\r ')
            container_network = self.get_docker_container_network(func_arn)
            LOG.debug('Using entrypoint "%s" for container "%s" on network "%s".'
                      % (entry_point, container_name, container_network))
            return ContainerInfo(container_name, entry_point)

    def destroy_docker_container(self, func_arn):
        """
        Stops and/or removes a docker container for a specific lambda function ARN.
        :param func_arn: The ARN of the lambda function.
        :return: None
        """
        with self.docker_container_lock:
            status = self.get_docker_container_status(func_arn)
            # Get the container name and id.
            container_name = self.get_container_name(func_arn)
            if status == 1:
                LOG.debug('Stopping container: %s' % container_name)
                cmd = (
                    'docker stop -t0 %s'
                ) % (container_name)
                LOG.debug(cmd)
                run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
                status = self.get_docker_container_status(func_arn)
            if status == -1:
                LOG.debug('Removing container: %s' % container_name)
                cmd = (
                    'docker rm %s'
                ) % (container_name)
                LOG.debug(cmd)
                run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)

    def get_all_container_names(self):
        """
        Returns a list of container names for lambda containers.
        :return: A String[] localstack docker container names for each function.
        """
        with self.docker_container_lock:
            LOG.debug('Getting all lambda containers names.')
            cmd = 'docker ps -a --filter="name=localstack_lambda_*" --format "{{.Names}}"'
            LOG.debug(cmd)
            cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE).strip()
            if len(cmd_result) > 0:
                container_names = cmd_result.split('\n')
            else:
                container_names = []
            return container_names

    def destroy_existing_docker_containers(self):
        """
        Stops and/or removes all lambda docker containers for localstack.
        :return: None
        """
        with self.docker_container_lock:
            container_names = self.get_all_container_names()
            LOG.debug('Removing %d containers.' % len(container_names))
            for container_name in container_names:
                cmd = 'docker rm -f %s' % container_name
                LOG.debug(cmd)
                run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)

    def get_docker_container_status(self, func_arn):
        """
        Determine the status of a docker container.
        :param func_arn: The ARN of the lambda function.
        :return: 1 If the container is running,
        -1 if the container exists but is not running
        0 if the container does not exist.
        """
        with self.docker_container_lock:
            # Get the container name and id.
            container_name = self.get_container_name(func_arn)
            # Check if the container is already running
            # Note: filtering by *exact* name using regex filter '^...$' seems unstable on some
            # systems. Therefore, we use a combination of filter and grep to get the results.
            cmd = ('docker ps -a --filter name=\'%s\' '
                   '--format "{{ .Status }} - {{ .Names }}" '
                   '| grep -w "%s" | cat') % (container_name, container_name)
            LOG.debug('Getting status for container "%s": %s' % (container_name, cmd))
            cmd_result = run(cmd)
            # If the container doesn't exist. Create and start it.
            container_status = cmd_result.strip()
            if len(container_status) == 0:
                return 0
            if container_status.lower().startswith('up '):
                return 1
            return -1

    def get_docker_container_network(self, func_arn):
        """
        Determine the network of a docker container.
        :param func_arn: The ARN of the lambda function.
        :return: name of the container network
        """
        with self.docker_container_lock:
            status = self.get_docker_container_status(func_arn)
            # container does not exist
            if status == 0:
                return ''
            # Get the container name.
            container_name = self.get_container_name(func_arn)
            # Get the container network
            LOG.debug('Getting container network: %s' % container_name)
            cmd = (
                'docker inspect %s'
                ' --format "{{ .HostConfig.NetworkMode }}"'
            ) % (container_name)
            LOG.debug(cmd)
            cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
            container_network = cmd_result.strip()
            return container_network

    def idle_container_destroyer(self):
        """
        Iterates though all the lambda containers and destroys any container that has
        been inactive for longer than MAX_CONTAINER_IDLE_TIME.
        :return: None
        """
        LOG.info('Checking if there are idle containers.')
        current_time = time.time()
        # BUG FIX: snapshot the items -- cleanup() (called from other threads)
        # replaces/pops entries in function_invoke_times, and mutating a dict
        # while iterating it raises RuntimeError in Python 3.
        for func_arn, last_run_time in list(self.function_invoke_times.items()):
            duration = current_time - last_run_time
            # not enough idle time has passed
            if duration < MAX_CONTAINER_IDLE_TIME:
                continue
            # container has been idle, destroy it.
            self.destroy_docker_container(func_arn)

    def start_idle_container_destroyer_interval(self):
        """
        Starts a repeating timer that triggers start_idle_container_destroyer_interval every 60 seconds.
        Thus checking for idle containers and destroying them.
        :return: None
        """
        self.idle_container_destroyer()
        threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()

    def get_container_name(self, func_arn):
        """
        Given a function ARN, returns a valid docker container name.
        :param func_arn: The ARN of the lambda function.
        :return: A docker compatible name for the arn.
        """
        return 'localstack_lambda_' + re.sub(r'[^a-zA-Z0-9_.-]', '_', func_arn)
class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
    """Executor that spins up one throwaway lambci container per invocation."""

    def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
        """Build the docker command line to run the lambda in a fresh container."""
        entrypoint = ''
        if command:
            # a custom command (e.g. Java invocation) overrides the entrypoint
            entrypoint = ' --entrypoint ""'
        else:
            command = '"%s"' % handler
        env_vars_string = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
        network = config.LAMBDA_DOCKER_NETWORK
        network_str = ' --network="%s" ' % network if network else ''
        if config.LAMBDA_REMOTE_DOCKER:
            # remote docker daemon: code must be copied in via `docker cp`
            cmd = (
                'CONTAINER_ID="$(docker create'
                ' %s'
                ' %s'
                ' %s'  # network
                ' "lambci/lambda:%s" %s'
                ')";'
                'docker cp "%s/." "$CONTAINER_ID:/var/task";'
                'docker start -a "$CONTAINER_ID";'
            ) % (entrypoint, env_vars_string, network_str, runtime, command, lambda_cwd)
        else:
            # local docker daemon: bind-mount the code directory directly
            lambda_cwd_on_host = self.get_host_path_for_path_in_docker(lambda_cwd)
            cmd = (
                'docker run'
                '%s -v "%s":/var/task'
                ' %s'
                ' %s'  # network
                ' --rm'
                ' "lambci/lambda:%s" %s'
            ) % (entrypoint, lambda_cwd_on_host, env_vars_string, network_str, runtime, command)
        return cmd

    def get_host_path_for_path_in_docker(self, path):
        # Translate a path inside the localstack container into the
        # corresponding path on the docker host (TMP vs HOST_TMP folders).
        return re.sub(r'^%s/(.*)$' % config.TMP_FOLDER,
                      r'%s/\1' % config.HOST_TMP_FOLDER, path)
class LambdaExecutorLocal(LambdaExecutor):
    """Executes Lambda functions in a forked subprocess of the current
    Python interpreter (no Docker involved)."""

    def execute(self, func_arn, func_details, event, context=None, version=None, asynchronous=False):
        """Run the lambda callable in a child process and return (result, '')."""
        lambda_cwd = func_details.cwd
        environment = func_details.envvars.copy()
        # execute the Lambda function in a forked sub-process, sync result via queue
        queue = Queue()
        lambda_function = func_details.function(version)

        def do_execute():
            # now we're executing in the child process, safe to change CWD and ENV
            if lambda_cwd:
                os.chdir(lambda_cwd)
            if environment:
                os.environ.update(environment)
            result = lambda_function(event, context)
            queue.put(result)

        process = Process(target=do_execute)
        # BUG FIX: the original called process.run(), which executes the
        # target in the *current* process -- leaking the os.chdir() and
        # os.environ mutations into the parent and contradicting the fork
        # isolation this class promises. start() actually forks the child.
        # (The closure target relies on the 'fork' start method, the default
        # on Linux.)
        process.start()
        # get() blocks until the child has produced the result; join() then
        # reaps the child. Doing get() first avoids a join/queue deadlock
        # for large results.
        result = queue.get()
        process.join()
        # TODO capture log output during local execution?
        log_output = ''
        return result, log_output

    def execute_java_lambda(self, event, context, handler, main_file):
        """Invoke a Java lambda via the localstack executor jar."""
        event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
        save_file(event_file, json.dumps(event))
        TMP_FILES.append(event_file)
        class_name = handler.split('::')[0]
        classpath = '%s:%s' % (LAMBDA_EXECUTOR_JAR, main_file)
        cmd = 'java -cp %s %s %s %s' % (classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
        asynchronous = False
        # flip asynchronous flag depending on origin
        if 'Records' in event:
            # TODO: add more event supporting asynchronous lambda execution
            if 'Sns' in event['Records'][0]:
                asynchronous = True
            if 'dynamodb' in event['Records'][0]:
                asynchronous = True
        result, log_output = self.run_lambda_executor(cmd, asynchronous=asynchronous)
        LOG.debug('Lambda result / log output:\n%s\n> %s' % (result.strip(), log_output.strip().replace('\n', '\n> ')))
        return result, log_output
# --------------
# GLOBAL STATE
# --------------
# Singleton executor instances; one of them is chosen at startup.
EXECUTOR_LOCAL = LambdaExecutorLocal()
EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
DEFAULT_EXECUTOR = EXECUTOR_LOCAL
# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
AVAILABLE_EXECUTORS = {
    'local': EXECUTOR_LOCAL,
    'docker': EXECUTOR_CONTAINERS_SEPARATE,
    'docker-reuse': EXECUTOR_CONTAINERS_REUSE
}
|
test_search_by_id.py | import pdb
import copy
import struct
import pytest
import threading
import datetime
import logging
from time import sleep
from multiprocessing import Process
import numpy
from milvus import IndexType, MetricType
from utils import *
# vector dimensionality used by every test in this module
dim = 128
table_id = "test_search"
# seconds to wait for the server's periodic auto-flush to make data visible
add_interval_time = 2
# shared pool of float vectors, generated once at import time and reused
vectors = gen_vectors(6000, dim)
# vectors /= numpy.linalg.norm(vectors)
# vectors = vectors.tolist()
nprobe = 1
epsilon = 0.001  # distance tolerance when the query vector matches itself
tag = "overallpaper"
# id assumed absent from the table (meaningful with explicit-id inserts)
non_exist_id = 9527
small_size = 2500
# shared pool of binary vectors plus their raw (pre-packing) form
raw_vectors, binary_vectors = gen_binary_vectors(6000, dim)
class TestSearchById:
def init_data(self, connect, table, nb=6000):
'''
Generate vectors and add it in table, before search vectors
'''
global vectors
if nb == 6000:
add_vectors = vectors
else:
add_vectors = gen_vectors(nb, dim)
status, ids = connect.add_vectors(table, add_vectors)
sleep(add_interval_time)
return add_vectors, ids
def init_data_no_flush(self, connect, table, nb=6000):
global vectors
if nb == 6000:
add_vectors = vectors
else:
add_vectors = gen_vectors(nb, dim)
status, ids = connect.add_vectors(table, add_vectors)
# sleep(add_interval_time)
return add_vectors, ids
def init_data_no_flush_ids(self, connect, table, nb=6000):
global vectors
if nb == 6000:
add_vectors = vectors
else:
add_vectors = gen_vectors(nb, dim)
my_ids = [i for i in range(nb)]
status, ids = connect.add_vectors(table, add_vectors, my_ids)
# sleep(add_interval_time)
return add_vectors, ids
def init_data_ids(self, connect, table, nb=6000):
global vectors
if nb == 6000:
add_vectors = vectors
else:
add_vectors = gen_vectors(nb, dim)
my_ids = [i for i in range(nb)]
status, ids = connect.add_vectors(table, add_vectors, my_ids)
sleep(add_interval_time)
return add_vectors, ids
def add_data(self, connect, table, vectors):
'''
Add specified vectors to table
'''
status, ids = connect.add_vectors(table, vectors)
# sleep(add_interval_time)
sleep(10)
return vectors, ids
def add_data_ids(self, connect, table, vectors):
my_ids = [i for i in range(len(vectors))]
status, ids = connect.add_vectors(table, vectors, my_ids)
sleep(add_interval_time)
return vectors, ids
def add_data_and_flush(self, connect, table, vectors):
status, ids = connect.add_vectors(table, vectors)
connect.flush([table])
return vectors, ids
def add_data_and_flush_ids(self, connect, table, vectors):
my_ids = [i for i in range(len(vectors))]
status, ids = connect.add_vectors(table, vectors, my_ids)
connect.flush([table])
return vectors, ids
def add_data_no_flush(self, connect, table, vectors):
'''
Add specified vectors to table
'''
status, ids = connect.add_vectors(table, vectors)
return vectors, ids
def add_data_no_flush_ids(self, connect, table, vectors):
my_ids = [i for i in range(len(vectors))]
status, ids = connect.add_vectors(table, vectors, my_ids)
return vectors, ids
# delete data and auto flush - timeout due to the flush interval in config file
def delete_data(self, connect, table, ids):
'''
delete vectors by id
'''
status = connect.delete_by_id(table, ids)
sleep(add_interval_time)
return status
# delete data and auto flush - timeout due to the flush interval in config file
def delete_data_no_flush(self, connect, table, ids):
'''
delete vectors by id
'''
status = connect.delete_by_id(table, ids)
return status
# delete data and manual flush
def delete_data_and_flush(self, connect, table, ids):
'''
delete vectors by id
'''
status = connect.delete_by_id(table, ids)
connect.flush([table])
return status
def check_no_result(self, results):
if len(results) == 0:
return True
flag = True
for r in results:
flag = flag and (r.id == -1)
if not flag:
return False
return flag
def init_data_partition(self, connect, table, partition_tag, nb=6000):
'''
Generate vectors and add it in table, before search vectors
'''
global vectors
if nb == 6000:
add_vectors = vectors
else:
add_vectors = gen_vectors(nb, dim)
# add_vectors /= numpy.linalg.norm(add_vectors)
# add_vectors = add_vectors.tolist()
status, ids = connect.add_vectors(table, add_vectors, partition_tag=partition_tag)
sleep(add_interval_time)
return add_vectors, ids
def init_data_and_flush(self, connect, table, nb=6000):
'''
Generate vectors and add it in table, before search vectors
'''
global vectors
if nb == 6000:
add_vectors = vectors
else:
add_vectors = gen_vectors(nb, dim)
# add_vectors /= numpy.linalg.norm(add_vectors)
# add_vectors = add_vectors.tolist()
status, ids = connect.add_vectors(table, add_vectors)
connect.flush([table])
return add_vectors, ids
def init_data_and_flush_ids(self, connect, table, nb=6000):
global vectors
if nb == 6000:
add_vectors = vectors
else:
add_vectors = gen_vectors(nb, dim)
my_ids = [i for i in range(nb)]
status, ids = connect.add_vectors(table, add_vectors, my_ids)
connect.flush([table])
return add_vectors, ids
def init_data_partition_and_flush(self, connect, table, partition_tag, nb=6000):
'''
Generate vectors and add it in table, before search vectors
'''
global vectors
if nb == 6000:
add_vectors = vectors
else:
add_vectors = gen_vectors(nb, dim)
# add_vectors /= numpy.linalg.norm(add_vectors)
# add_vectors = add_vectors.tolist()
status, ids = connect.add_vectors(table, add_vectors, partition_tag=partition_tag)
connect.flush([table])
return add_vectors, ids
def init_binary_data(self, connect, table, nb=6000, insert=True):
'''
Generate vectors and add it in table, before search vectors
'''
ids = []
global binary_vectors
global raw_vectors
if nb == 6000:
add_vectors = binary_vectors
add_raw_vectors = raw_vectors
else:
add_raw_vectors, add_vectors = gen_binary_vectors(nb, dim)
# add_vectors /= numpy.linalg.norm(add_vectors)
# add_vectors = add_vectors.tolist()
if insert is True:
status, ids = connect.add_vectors(table, add_vectors)
sleep(add_interval_time)
return add_raw_vectors, add_vectors, ids
"""
generate valid create_index params
"""
    @pytest.fixture(
        scope="function",
        params=gen_index_params()
    )
    def get_index_params(self, request, connect):
        """Return each index-param combination, skipping ones the connected server can't build."""
        if request.param["index_type"] == IndexType.IVF_PQ:
            pytest.skip("Skip PQ Temporary")
        if str(connect._cmd("mode")[1]) == "CPU":
            if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not support in open source")
        if str(connect._cmd("mode")[1]) == "GPU":
            # NOTE(review): eval() on the server's config string — acceptable for a
            # trusted test server, but unsafe on untrusted input.
            config = eval(connect._cmd("get_config *")[1])
            if config['gpu_resource_config']['enable'] == 'true':
                pytest.skip("GPU search resource enable not support yet")
            if config['gpu_resource_config']['enable'] == 'false':
                if request.param["index_type"] == IndexType.IVF_SQ8H:
                    pytest.skip("sq8h not support yet")
        return request.param
    @pytest.fixture(
        scope="function",
        params=gen_simple_index_params()
    )
    def get_simple_index_params(self, request, connect):
        """Return each simple index-param combination, skipping ones the connected server can't build."""
        if request.param["index_type"] == IndexType.IVF_PQ:
            pytest.skip("Skip PQ Temporary")
        if str(connect._cmd("mode")[1]) == "CPU":
            if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not support in open source")
        if str(connect._cmd("mode")[1]) == "GPU":
            # NOTE(review): eval() on the server's config string — acceptable for a
            # trusted test server, but unsafe on untrusted input.
            config = eval(connect._cmd("get_config *")[1])
            if config['gpu_resource_config']['enable'] == 'true':
                pytest.skip("GPU search resource enable not support yet")
            if config['gpu_resource_config']['enable'] == 'false':
                if request.param["index_type"] == IndexType.IVF_SQ8H:
                    pytest.skip("sq8h not support yet")
        return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index_params()
)
def get_jaccard_index_params(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT:
return request.param
else:
pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=gen_simple_index_params()
)
def get_hamming_index_params(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT:
return request.param
else:
pytest.skip("Skip index Temporary")
"""
generate top-k params
"""
    @pytest.fixture(
        scope="function",
        params=[1, 99, 1024, 2048, 2049]  # spans the valid range plus the over-limit case
    )
    def get_top_k(self, request):
        # Parametrized top-k values; 2049 exercises the "top_k too large" error path.
        yield request.param
# auto flush
    def test_search_top_k_flat_index_1(self, connect, table, get_top_k):
        '''
        target: test basic search function, all the search params are correct, change top-k value
        method: search with the given vector id (auto flush), check the result
        expected: search status ok, and the length of the result is top_k
        '''
        vectors, ids = self.init_data(connect, table, nb=small_size)
        query_id = ids[0]
        # logging.getLogger().info(ids[:10])
        top_k = get_top_k
        nprobe = 1
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        # 2048 is the accepted top_k upper bound here; larger values must fail
        if top_k <= 2048:
            assert status.OK()
            assert len(result[0]) == min(len(vectors), top_k)
            # the query vector itself should come back first, at ~zero distance
            assert result[0][0].distance <= epsilon
            assert check_result(result[0], ids[0])
        else:
            assert not status.OK()
# manual flush
    def test_search_top_k_flat_index_2(self, connect, table, get_top_k):
        '''
        target: test basic search function, all the search params are correct, change top-k value
        method: search with the given vector id (manual flush), check the result
        expected: search status ok, and the length of the result is top_k
        '''
        vectors, ids = self.init_data_and_flush(connect, table, nb=small_size)
        query_id = ids[0]
        top_k = get_top_k
        nprobe = 1
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        # 2048 is the accepted top_k upper bound here; larger values must fail
        if top_k <= 2048:
            assert status.OK()
            assert len(result[0]) == min(len(vectors), top_k)
            # the query vector itself should come back first, at ~zero distance
            assert result[0][0].distance <= epsilon
            assert check_result(result[0], ids[0])
        else:
            assert not status.OK()
# manual flush
    def test_search_top_k_flat_index_4(self, connect, table, get_top_k):
        '''
        target: test basic search function, all the search params are correct, change top-k value
        method: search with a non-existing vector id (manual flush), check the result
        expected: search status ok, and the length of the result is top_k
        '''
        vectors, ids = self.init_data_and_flush(connect, table, nb=small_size)
        # auto-generated ids are server-assigned, so 9527 is only presumed absent
        query_id = non_exist_id
        top_k = get_top_k
        nprobe = 1
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        if top_k <= 2048:
            assert status.OK()
            assert len(result[0]) == min(len(vectors), top_k)
            # assert len(result[0]) == 0
            # assert result[0][0].distance <= epsilon
            # assert check_result(result[0], ids[0])
        else:
            assert not status.OK()
# auto flush
    def test_search_top_k_flat_index_3(self, connect, table, get_top_k):
        '''
        target: test basic search function, all the search params are correct, change top-k value
        method: search with a non-existing vector id (auto flush), check the result
        expected: search status ok, and the length of the result is top_k
        '''
        vectors, ids = self.init_data(connect, table, nb=small_size)
        # auto-generated ids are server-assigned, so 9527 is only presumed absent
        query_id = non_exist_id
        # logging.getLogger().info(ids[:10])
        top_k = get_top_k
        nprobe = 1
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        if top_k <= 2048:
            assert status.OK()
            assert len(result[0]) == min(len(vectors), top_k)
            # assert len(result[0]) == 0
            # assert result[0][0].distance <= epsilon
            # assert check_result(result[0], ids[0])
        else:
            assert not status.OK()
# auto flush
    def test_search_top_k_flat_index_id_1(self, connect, table, get_top_k):
        # Explicit ids + auto flush; searching for an existing id must return it first.
        vectors, ids = self.init_data_ids(connect, table, nb=small_size)
        query_id = ids[0]
        logging.getLogger().info(ids[:10])
        top_k = get_top_k
        nprobe = 1
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        # 2048 is the accepted top_k upper bound here; larger values must fail
        if top_k <= 2048:
            assert status.OK()
            assert len(result[0]) == min(len(vectors), top_k)
            assert result[0][0].distance <= epsilon
            assert check_result(result[0], ids[0])
        else:
            assert not status.OK()
# manual flush
    def test_search_top_k_flat_index_id_2(self, connect, table, get_top_k):
        # Explicit ids + manual flush; searching for an existing id must return it first.
        vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size)
        query_id = ids[0]
        top_k = get_top_k
        nprobe = 1
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        # 2048 is the accepted top_k upper bound here; larger values must fail
        if top_k <= 2048:
            assert status.OK()
            assert len(result[0]) == min(len(vectors), top_k)
            assert result[0][0].distance <= epsilon
            assert check_result(result[0], ids[0])
        else:
            assert not status.OK()
# auto flush
    def test_search_top_k_flat_index_id_3(self, connect, table, get_top_k):
        # Explicit ids + auto flush; a non-existing query id must yield no result.
        vectors, ids = self.init_data_ids(connect, table, nb=small_size)
        query_id = non_exist_id
        top_k = get_top_k
        nprobe = 1
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        if top_k <= 2048:
            assert status.OK()
            # assert len(result[0]) == min(len(vectors), top_k)
            # assert len(result[0]) == 0
            # assert result[0][0].distance <= epsilon
            assert self.check_no_result(result[0])
        else:
            assert not status.OK()
# manual flush
    def test_search_top_k_flat_index_id_4(self, connect, table, get_top_k):
        # Explicit ids + manual flush; a non-existing query id must yield no result.
        vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size)
        query_id = non_exist_id
        top_k = get_top_k
        nprobe = 1
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        if top_k <= 2048:
            assert status.OK()
            assert len(result[0]) == min(len(vectors), top_k)
            # assert len(result[0]) == 0
            # assert result[0][0].distance <= epsilon
            assert self.check_no_result(result[0])
        else:
            assert not status.OK()
# ------------------------------------------------------------- l2, add manual flush, delete, search ------------------------------------------------------------- #
# ids, manual flush, search table, exist
    def test_search_m_l2_index_params_id_1(self, connect, table, get_simple_index_params):
        '''
        target: test basic search function, all the search params are correct, test all index params, and build
        method: explicit ids + manual flush, build index, search an existing id
        expected: search status ok, and the length of the result is top_k
        '''
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size)
        status = connect.create_index(table, index_params)
        query_id = ids[0]
        top_k = 10
        nprobe = 1
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        # logging.getLogger().info(result)
        # top_k is fixed at 10, so the <= 1024 branch always runs here
        if top_k <= 1024:
            assert status.OK()
            assert len(result[0]) == min(len(vectors), top_k)
            assert result[0][0].distance <= epsilon
            assert check_result(result[0], ids[0])
        else:
            assert not status.OK()
# ids, manual flush, search table, non exist
    def test_search_m_l2_index_params_id_2(self, connect, table, get_simple_index_params):
        '''
        target: test basic search function, all the search params are correct, test all index params, and build
        method: explicit ids + manual flush, build index, search a non-existing id
        expected: search status ok, and the length of the result is top_k
        '''
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size)
        status = connect.create_index(table, index_params)
        query_id = non_exist_id
        top_k = 10
        nprobe = 1
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        # logging.getLogger().info(result)
        if top_k <= 1024:
            assert status.OK()
            assert len(result[0]) == min(len(vectors), top_k)
            # assert len(result[0]) == 0
            # assert result[0][0].distance <= epsilon
            # assert check_result(result[0], ids[0])
        else:
            assert not status.OK()
# ids, manual flush, delete, manual flush, search table, exist
    def test_search_m_l2_index_params_id_3(self, connect, table, get_simple_index_params):
        # Explicit ids + manual flush; delete + manual flush; searching the
        # deleted id must return no result.
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size)
        status = connect.create_index(table, index_params)
        query_id = ids[0]
        top_k = 10
        nprobe = 1
        status = self.delete_data_and_flush(connect, table, [query_id])
        assert status.OK()
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        if top_k <= 1024:
            assert status.OK()
            assert self.check_no_result(result[0])
        else:
            assert not status.OK()
# ids, manual flush, delete, manual flush, search table, non exist
    def test_search_m_l2_index_params_id_4(self, connect, table, get_simple_index_params):
        # Explicit ids + manual flush; delete + manual flush; searching a
        # non-existing id must return no result.
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size)
        status = connect.create_index(table, index_params)
        query_id = ids[0]
        top_k = 10
        nprobe = 1
        status = self.delete_data_and_flush(connect, table, [query_id])
        assert status.OK()
        query_id = non_exist_id
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        if top_k <= 1024:
            assert status.OK()
            assert self.check_no_result(result[0])
        else:
            assert not status.OK()
# ids, manual flush, delete, no flush, search table, exist
    def test_search_m_l2_index_params_id_5(self, connect, table, get_simple_index_params):
        # Explicit ids + manual flush; delete WITHOUT flush — the deleted id is
        # expected to still be visible to search until a flush happens.
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size)
        status = connect.create_index(table, index_params)
        query_id = ids[0]
        top_k = 10
        nprobe = 1
        status = self.delete_data_no_flush(connect, table, [query_id])
        assert status.OK()
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        if top_k <= 1024:
            assert status.OK()
            # assert self.check_no_result(result[0])
            assert check_result(result[0], query_id)
        else:
            assert not status.OK()
# ids, manual flush, delete, no flush, search table, non exist
    def test_search_m_l2_index_params_id_6(self, connect, table, get_simple_index_params):
        # Explicit ids + manual flush; delete WITHOUT flush; searching a
        # non-existing id must still return no result.
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size)
        status = connect.create_index(table, index_params)
        query_id = ids[0]
        top_k = 10
        nprobe = 1
        status = self.delete_data_no_flush(connect, table, [query_id])
        assert status.OK()
        query_id = non_exist_id
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        if top_k <= 1024:
            assert status.OK()
            assert self.check_no_result(result[0])
        else:
            assert not status.OK()
# # ids, manual flush, delete, no flush, add again no id, manual flush, search table, exist
# def test_search_m_l2_index_params_id_7(self, connect, table, get_simple_index_params):
# index_params = get_simple_index_params
# logging.getLogger().info(index_params)
# vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size)
# status = connect.create_index(table, index_params)
# query_id = ids[0]
# top_k = 10
# nprobe = 1
# status = self.delete_data_no_flush(connect, table, [query_id])
# assert status.OK()
# vectors, new_ids = self.add_data_and_flush(connect, table, vectors)
# status = connect.create_index(table, index_params)
# status, result = connect.search_by_id(table, top_k, nprobe, query_id)
# if top_k <= 1024:
# assert status.OK()
# assert len(result[0]) == min(len(vectors), top_k)
# assert result[0][0].distance <= epsilon
# assert check_result(result[0], ids[0])
# else:
# assert not status.OK()
# # ids, manual flush, delete, no flush, add again no id, manual flush, search table, exist
# def test_search_m_l2_index_params_id_8(self, connect, table, get_simple_index_params):
# index_params = get_simple_index_params
# logging.getLogger().info(index_params)
# vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size)
# status = connect.create_index(table, index_params)
# query_id = ids[0]
# top_k = 10
# nprobe = 1
# status = self.delete_data_no_flush(connect, table, [query_id])
# assert status.OK()
# vectors, new_ids = self.add_data_no_flush(connect, table, vectors)
# status = connect.create_index(table, index_params)
# status, result = connect.search_by_id(table, top_k, nprobe, query_id)
# if top_k <= 1024:
# assert status.OK()
# assert self.check_no_result(result[0])
# else:
# assert not status.OK()
# # add manual flush, delete no flush, add again manual flush
# # TODO: https://github.com/milvus-io/milvus/issues/1170
# def test_search_m_l2_index_params_id_9(self, connect, table, get_simple_index_params):
# index_params = get_simple_index_params
# logging.getLogger().info(index_params)
# vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size)
# status = connect.create_index(table, index_params)
# query_id = ids[0]
# top_k = 10
# nprobe = 1
# status = self.delete_data_no_flush(connect, table, [query_id])
# assert status.OK()
# vectors, new_ids = self.add_data_and_flush(connect, table, vectors)
# status = connect.create_index(table, index_params)
# query_id = non_exist_id + len(ids)
# status, result = connect.search_by_id(table, top_k, nprobe, query_id)
# if top_k <= 1024:
# assert status.OK()
# assert self.check_no_result(result[0])
# else:
# assert not status.OK()
# # add manual flush, delete no flush, add again no flush
# # TODO: https://github.com/milvus-io/milvus/issues/1170
# def test_search_m_l2_index_params_id_10(self, connect, table, get_simple_index_params):
# index_params = get_simple_index_params
# logging.getLogger().info(index_params)
# vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size)
# status = connect.create_index(table, index_params)
# query_id = ids[0]
# top_k = 10
# nprobe = 1
# status = self.delete_data_no_flush(connect, table, [query_id])
# assert status.OK()
# vectors, new_ids = self.add_data_no_flush(connect, table, vectors)
# status = connect.create_index(table, index_params)
# query_id = non_exist_id + len(ids)
# status, result = connect.search_by_id(table, top_k, nprobe, query_id)
# if top_k <= 1024:
# assert status.OK()
# assert self.check_no_result(result[0])
# else:
# assert not status.OK()
# # add manual flush, delete no flush, add again manual flush, search new id, exist
# # TODO: https://github.com/milvus-io/milvus/issues/1170
# def test_search_m_l2_index_params_id_11(self, connect, table, get_simple_index_params):
# index_params = get_simple_index_params
# logging.getLogger().info(index_params)
# vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size)
# status = connect.create_index(table, index_params)
# query_id = ids[0]
# top_k = 10
# nprobe = 1
# status = self.delete_data_no_flush(connect, table, [query_id])
# assert status.OK()
# vectors, new_ids = self.add_data_and_flush(connect, table, vectors)
# status = connect.create_index(table, index_params)
# query_id = new_ids[0]
# status, result = connect.search_by_id(table, top_k, nprobe, query_id)
# if top_k <= 1024:
# assert status.OK()
# assert len(result[0]) == min(len(vectors), top_k)
# assert result[0][0].distance <= epsilon
# assert check_result(result[0], ids[0])
# else:
# assert not status.OK()
# # add manual flush, delete no flush, add again no flush
# # TODO: https://github.com/milvus-io/milvus/issues/1170
# def test_search_m_l2_index_params_id_12(self, connect, table, get_simple_index_params):
# index_params = get_simple_index_params
# logging.getLogger().info(index_params)
# vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size)
# status = connect.create_index(table, index_params)
# query_id = ids[0]
# top_k = 10
# nprobe = 1
# status = self.delete_data_no_flush(connect, table, [query_id])
# assert status.OK()
# vectors, new_ids = self.add_data_no_flush(connect, table, vectors)
# status = connect.create_index(table, index_params)
# query_id = new_ids[0]
# status, result = connect.search_by_id(table, top_k, nprobe, query_id)
# if top_k <= 1024:
# assert status.OK()
# # assert len(result[0]) == min(len(vectors), top_k)
# # assert result[0][0].distance <= epsilon
# assert self.check_no_result(result[0])
# else:
# assert not status.OK()
# add manual flush, delete no flush, add again manual flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
# updateddd #
    def test_search_m_l2_index_params_id_13(self, connect, table, get_simple_index_params):
        # Delete (no flush) then re-insert the same vectors with the same
        # explicit ids and flush; the original query id must be findable again.
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size)
        status = connect.create_index(table, index_params)
        query_id = ids[0]
        top_k = 10
        nprobe = 1
        status = self.delete_data_no_flush(connect, table, [query_id])
        assert status.OK()
        vectors, new_ids = self.add_data_and_flush_ids(connect, table, vectors)
        status = connect.create_index(table, index_params)
        # query_id = new_ids[0]
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        if top_k <= 1024:
            assert status.OK()
            assert len(result[0]) == min(len(vectors), top_k)
            assert result[0][0].distance <= epsilon
            assert check_result(result[0], query_id)
            # assert result[0][0].id != query_id
        else:
            assert not status.OK()
# add manual flush, delete no flush, add again no flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
    def test_search_m_l2_index_params_id_14(self, connect, table, get_simple_index_params):
        # Delete (no flush) then re-insert the same vectors with the same
        # explicit ids WITHOUT flush; the original query id must still be findable.
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size)
        status = connect.create_index(table, index_params)
        query_id = ids[0]
        top_k = 10
        nprobe = 1
        status = self.delete_data_no_flush(connect, table, [query_id])
        assert status.OK()
        vectors, new_ids = self.add_data_no_flush_ids(connect, table, vectors)
        status = connect.create_index(table, index_params)
        # query_id = new_ids[0]
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        if top_k <= 1024:
            assert status.OK()
            assert len(result[0]) == min(len(vectors), top_k)
            assert result[0][0].distance <= epsilon
            assert check_result(result[0], query_id)
        else:
            assert not status.OK()
# add manual flush, delete no flush, add again manual flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
    def test_search_m_l2_index_params_id_15(self, connect, table, get_simple_index_params):
        # Delete (no flush), re-insert with explicit ids and flush, then search
        # an id guaranteed outside the inserted range: must return no result.
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size)
        status = connect.create_index(table, index_params)
        query_id = ids[0]
        top_k = 10
        nprobe = 1
        status = self.delete_data_no_flush(connect, table, [query_id])
        assert status.OK()
        vectors, new_ids = self.add_data_and_flush_ids(connect, table, vectors)
        status = connect.create_index(table, index_params)
        query_id = non_exist_id + len(ids)
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        if top_k <= 1024:
            assert status.OK()
            # assert len(result[0]) == min(len(vectors), top_k)
            # assert result[0][0].distance <= epsilon
            # assert check_result(result[0], query_id)
            assert self.check_no_result(result[0])
        else:
            assert not status.OK()
# add manual flush, delete no flush, add again no flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
    def test_search_m_l2_index_params_id_16(self, connect, table, get_simple_index_params):
        # Delete (no flush), re-insert with explicit ids WITHOUT flush, then
        # search an id outside the inserted range: must return no result.
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size)
        status = connect.create_index(table, index_params)
        query_id = ids[0]
        top_k = 10
        nprobe = 1
        status = self.delete_data_no_flush(connect, table, [query_id])
        assert status.OK()
        vectors, new_ids = self.add_data_no_flush_ids(connect, table, vectors)
        status = connect.create_index(table, index_params)
        query_id = non_exist_id + len(ids)
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        if top_k <= 1024:
            assert status.OK()
            # assert len(result[0]) == min(len(vectors), top_k)
            # assert result[0][0].distance <= epsilon
            # assert check_result(result[0], query_id)
            assert self.check_no_result(result[0])
        else:
            assert not status.OK()
# ------------------------------------------------------------- l2, add manual flush, delete, search ------------------------------------------------------------- #
# ------------------------------------------------------------- l2, add auto flush, delete, search ------------------------------------------------------------- #
# ids, auto flush, search table, exist
    def test_search_l2_index_params_id_1(self, connect, table, get_simple_index_params):
        '''
        target: test basic search function, all the search params are correct, test all index params, and build
        method: explicit ids + auto flush, build index, search an existing id
        expected: search status ok, and the length of the result is top_k
        '''
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        vectors, ids = self.init_data_ids(connect, table, nb=small_size)
        status = connect.create_index(table, index_params)
        query_id = ids[0]
        top_k = 10
        nprobe = 1
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        # logging.getLogger().info(result)
        # top_k is fixed at 10, so the <= 1024 branch always runs here
        if top_k <= 1024:
            assert status.OK()
            assert len(result[0]) == min(len(vectors), top_k)
            assert result[0][0].distance <= epsilon
            assert check_result(result[0], ids[0])
        else:
            assert not status.OK()
# ids, auto flush, search table, non exist
    def test_search_l2_index_params_id_2(self, connect, table, get_simple_index_params):
        '''
        target: test basic search function, all the search params are correct, test all index params, and build
        method: explicit ids + auto flush, build index, search a non-existing id
        expected: search status ok, and no matching result
        '''
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        vectors, ids = self.init_data_ids(connect, table, nb=small_size)
        status = connect.create_index(table, index_params)
        query_id = non_exist_id
        top_k = 10
        nprobe = 1
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        # logging.getLogger().info(result)
        if top_k <= 1024:
            assert status.OK()
            assert len(result[0]) == min(len(vectors), top_k)
            # assert len(result[0]) == 0
            # assert result[0][0].distance <= epsilon
            # assert check_result(result[0], ids[0])
            assert self.check_no_result(result[0])
        else:
            assert not status.OK()
# ids, no flush, search table, exist
    def test_search_l2_index_params_id_17(self, connect, table, get_simple_index_params):
        # Explicit ids, NO flush; create_index is expected to make the data
        # searchable, so an existing id must be found.
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        vectors, ids = self.init_data_no_flush_ids(connect, table, nb=small_size)
        status = connect.create_index(table, index_params)
        query_id = ids[0]
        top_k = 10
        nprobe = 1
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        # logging.getLogger().info(result)
        if top_k <= 1024:
            assert status.OK()
            assert len(result[0]) == min(len(vectors), top_k)
            # assert result[0][0].distance <= epsilon
            # assert self.check_no_result(result[0])
            assert check_result(result[0], ids[0])
        else:
            assert not status.OK()
# ids, no flush, search table, non exist
    def test_search_l2_index_params_id_18(self, connect, table, get_simple_index_params):
        # Explicit ids, NO flush; searching an id guaranteed outside the
        # inserted range must return no matching result.
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        vectors, ids = self.init_data_no_flush_ids(connect, table, nb=small_size)
        status = connect.create_index(table, index_params)
        query_id = non_exist_id + len(ids)
        top_k = 10
        nprobe = 1
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        # logging.getLogger().info(result)
        if top_k <= 1024:
            assert status.OK()
            assert len(result[0]) == min(len(vectors), top_k)
            # assert result[0][0].distance <= epsilon
            assert self.check_no_result(result[0])
        else:
            assert not status.OK()
# ids, auto flush, delete, auto flush, search table, exist
    def test_search_l2_index_params_id_3(self, connect, table, get_simple_index_params):
        # Explicit ids + auto flush; delete + auto flush; searching the deleted
        # id must return no result.
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        vectors, ids = self.init_data_ids(connect, table, nb=small_size)
        status = connect.create_index(table, index_params)
        query_id = ids[0]
        top_k = 10
        nprobe = 1
        status = self.delete_data(connect, table, [query_id])
        assert status.OK()
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        if top_k <= 1024:
            assert status.OK()
            assert self.check_no_result(result[0])
        else:
            assert not status.OK()
# ids, auto flush, delete, auto flush, search table, non exist
    def test_search_l2_index_params_id_4(self, connect, table, get_simple_index_params):
        # Explicit ids + auto flush; delete + auto flush; searching a
        # non-existing id must return no result.
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        vectors, ids = self.init_data_ids(connect, table, nb=small_size)
        status = connect.create_index(table, index_params)
        query_id = ids[0]
        top_k = 10
        nprobe = 1
        status = self.delete_data(connect, table, [query_id])
        assert status.OK()
        query_id = non_exist_id
        status, result = connect.search_by_id(table, top_k, nprobe, query_id)
        if top_k <= 1024:
            assert status.OK()
            assert self.check_no_result(result[0])
        else:
            assert not status.OK()
# ids, auto flush, delete, no flush, search table, exist
def test_search_l2_index_params_id_5(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data_ids(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status = self.delete_data_no_flush(connect, table, [query_id])
assert status.OK()
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
# assert self.check_no_result(result[0])
assert check_result(result[0], query_id)
else:
assert not status.OK()
# ids, auto flush, delete, no flush, search table, non exist
def test_search_l2_index_params_id_6(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data_ids(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status = self.delete_data_no_flush(connect, table, [query_id])
assert status.OK()
query_id = non_exist_id
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
assert self.check_no_result(result[0])
else:
assert not status.OK()
# def test_search_test(self, connect, table):
# index_params = {'index_type':IndexType.IVF_SQ8, 'nlist':512}
# logging.getLogger().info(index_params)
# vectors, ids = self.init_data_ids(connect, table, nb=small_size)
# logging.getLogger().info(ids[:10])
# status = connect.create_index(table, index_params)
# query_id = ids[0]
# top_k = 10
# nprobe = 1
# # status = self.delete_data_no_flush(connect, table, [query_id])
# # # status = self.delete_data(connect, table, [query_id])
# # assert status.OK()
# logging.getLogger().info(table)
# status, new_ids = connect.add_vectors(table, vectors)
# logging.getLogger().info(status)
# status = connect.create_index(table, index_params)
# logging.getLogger().info(new_ids[:10])
# status, result = connect.search_by_id(table, top_k, nprobe, query_id)
# for r in result[0][:5]:
# logging.getLogger().info(r)
# # if top_k <= 1024:
# # assert status.OK()
# # assert len(result[0]) == min(len(vectors), top_k)
# # assert result[0][0].distance <= epsilon
# # assert check_result(result[0], ids[0])
# # else:
# # assert not status.OK()
# # ids, auto flush, delete, no flush, add again no id, auto flush, search table, exist
# def test_search_l2_index_params_id_7(self, connect, table, get_simple_index_params):
# index_params = {'index_type':IndexType.IVF_SQ8, 'nlist':512}
# logging.getLogger().info(index_params)
# vectors, ids = self.init_data_ids(connect, table, nb=small_size)
# logging.getLogger().info(ids[:10])
# status = connect.create_index(table, index_params)
# query_id = ids[0]
# top_k = 10
# nprobe = 1
# status = self.delete_data_no_flush(connect, table, [query_id])
# # status = self.delete_data(connect, table, [query_id])
# assert status.OK()
# logging.getLogger().info(table)
# vectors, new_ids = self.add_data(connect, table, vectors)
# status = connect.create_index(table, index_params)
# logging.getLogger().info(new_ids[:10])
# status, result = connect.search_by_id(table, top_k, nprobe, query_id)
# if top_k <= 1024:
# assert status.OK()
# assert len(result[0]) == min(len(vectors), top_k)
# assert result[0][0].distance <= epsilon
# assert check_result(result[0], ids[0])
# else:
# assert not status.OK()
# # ids, auto flush, delete, no flush, add again no id, auto flush, search table, exist
# def test_search_l2_index_params_id_8(self, connect, table, get_simple_index_params):
# index_params = get_simple_index_params
# logging.getLogger().info(index_params)
# vectors, ids = self.init_data_ids(connect, table, nb=small_size)
# status = connect.create_index(table, index_params)
# query_id = ids[0]
# top_k = 10
# nprobe = 1
# status = self.delete_data_no_flush(connect, table, [query_id])
# assert status.OK()
# vectors, new_ids = self.add_data_no_flush(connect, table, vectors)
# status = connect.create_index(table, index_params)
# status, result = connect.search_by_id(table, top_k, nprobe, query_id)
# if top_k <= 1024:
# assert status.OK()
# assert self.check_no_result(result[0])
# else:
# assert not status.OK()
# # add auto flush, delete no flush, add again auto flush
# # TODO: https://github.com/milvus-io/milvus/issues/1170
# def test_search_l2_index_params_id_9(self, connect, table, get_simple_index_params):
# index_params = get_simple_index_params
# logging.getLogger().info(index_params)
# vectors, ids = self.init_data_ids(connect, table, nb=small_size)
# status = connect.create_index(table, index_params)
# query_id = ids[0]
# top_k = 10
# nprobe = 1
# status = self.delete_data_no_flush(connect, table, [query_id])
# assert status.OK()
# vectors, new_ids = self.add_data(connect, table, vectors)
# status = connect.create_index(table, index_params)
# query_id = non_exist_id + len(ids)
# status, result = connect.search_by_id(table, top_k, nprobe, query_id)
# if top_k <= 1024:
# assert status.OK()
# assert self.check_no_result(result[0])
# else:
# assert not status.OK()
# # add auto flush, delete no flush, add again no flush
# # TODO: https://github.com/milvus-io/milvus/issues/1170
# def test_search_l2_index_params_id_10(self, connect, table, get_simple_index_params):
# index_params = get_simple_index_params
# logging.getLogger().info(index_params)
# vectors, ids = self.init_data_ids(connect, table, nb=small_size)
# status = connect.create_index(table, index_params)
# query_id = ids[0]
# top_k = 10
# nprobe = 1
# status = self.delete_data_no_flush(connect, table, [query_id])
# assert status.OK()
# vectors, new_ids = self.add_data_no_flush(connect, table, vectors)
# status = connect.create_index(table, index_params)
# query_id = non_exist_id + len(ids)
# status, result = connect.search_by_id(table, top_k, nprobe, query_id)
# if top_k <= 1024:
# assert status.OK()
# assert self.check_no_result(result[0])
# else:
# assert not status.OK()
# # add auto flush, delete no flush, add again auto flush, search new id, exist
# # TODO: https://github.com/milvus-io/milvus/issues/1170
# def test_search_l2_index_params_id_11(self, connect, table, get_simple_index_params):
# index_params = get_simple_index_params
# logging.getLogger().info(index_params)
# vectors, ids = self.init_data_ids(connect, table, nb=small_size)
# status = connect.create_index(table, index_params)
# query_id = ids[0]
# top_k = 10
# nprobe = 1
# status = self.delete_data_no_flush(connect, table, [query_id])
# assert status.OK()
# # vectors, new_ids = self.add_data(connect, table, vectors)
# vectors, new_ids = connect.add_vectors(table, vectors)
# status = connect.create_index(table, index_params)
# logging.getLogger().info(new_ids[:10])
# query_id = new_ids[0]
# status, result = connect.search_by_id(table, top_k, nprobe, query_id)
# if top_k <= 1024:
# assert status.OK()
# assert len(result[0]) == min(len(vectors), top_k)
# assert result[0][0].distance <= epsilon
# assert check_result(result[0], ids[0])
# else:
# assert not status.OK()
# # add auto flush, delete no flush, add again no flush
# # TODO: https://github.com/milvus-io/milvus/issues/1170
# def test_search_l2_index_params_id_12(self, connect, table, get_simple_index_params):
# index_params = get_simple_index_params
# logging.getLogger().info(index_params)
# vectors, ids = self.init_data_ids(connect, table, nb=small_size)
# status = connect.create_index(table, index_params)
# query_id = ids[0]
# top_k = 10
# nprobe = 1
# status = self.delete_data_no_flush(connect, table, [query_id])
# assert status.OK()
# vectors, new_ids = self.add_data_no_flush(connect, table, vectors)
# status = connect.create_index(table, index_params)
# query_id = new_ids[0]
# status, result = connect.search_by_id(table, top_k, nprobe, query_id)
# if top_k <= 1024:
# assert status.OK()
# # assert len(result[0]) == min(len(vectors), top_k)
# # assert result[0][0].distance <= epsilon
# assert self.check_no_result(result[0])
# else:
# assert not status.OK()
# add auto flush, delete no flush, add again auto flush
def test_search_l2_index_params_id_13(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data_ids(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status = self.delete_data_no_flush(connect, table, [query_id])
assert status.OK()
vectors, new_ids = self.add_data_ids(connect, table, vectors)
status = connect.create_index(table, index_params)
# query_id = new_ids[0]
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
assert len(result[0]) == min(len(vectors), top_k)
assert result[0][0].distance <= epsilon
assert check_result(result[0], query_id)
else:
assert not status.OK()
# add auto flush, delete no flush, add again no flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
def test_search_l2_index_params_id_14(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data_ids(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status = self.delete_data_no_flush(connect, table, [query_id])
assert status.OK()
vectors, new_ids = self.add_data_no_flush_ids(connect, table, vectors)
status = connect.create_index(table, index_params)
# query_id = new_ids[0]
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
assert len(result[0]) == min(len(vectors), top_k)
assert result[0][0].distance <= epsilon
assert check_result(result[0], query_id)
else:
assert not status.OK()
# add auto flush, delete no flush, add again auto flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
def test_search_l2_index_params_id_15(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data_ids(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status = self.delete_data_no_flush(connect, table, [query_id])
assert status.OK()
vectors, new_ids = self.add_data_ids(connect, table, vectors)
status = connect.create_index(table, index_params)
query_id = non_exist_id + len(ids)
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
# assert len(result[0]) == min(len(vectors), top_k)
# assert result[0][0].distance <= epsilon
# assert check_result(result[0], query_id)
assert self.check_no_result(result[0])
else:
assert not status.OK()
# add auto flush, delete no flush, add again no flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
def test_search_l2_index_params_id_16(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data_ids(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status = self.delete_data_no_flush(connect, table, [query_id])
assert status.OK()
vectors, new_ids = self.add_data_no_flush_ids(connect, table, vectors)
status = connect.create_index(table, index_params)
query_id = non_exist_id + len(ids)
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
# assert len(result[0]) == min(len(vectors), top_k)
# assert result[0][0].distance <= epsilon
# assert check_result(result[0], query_id)
assert self.check_no_result(result[0])
else:
assert not status.OK()
# ------------------------------------------------------------- l2, add auto flush, delete, search ------------------------------------------------------------- #
# ------------------------------------------------------------- l2, add auto flush, delete, search ------------------------------------------------------------- #
# auto flush
def test_search_l2_index_params_1(self, connect, table, get_simple_index_params):
'''
target: test basic search fuction, all the search params is corrent, test all index params, and build
method: search with the given vectors, check the result
expected: search status ok, and the length of the result is top_k
'''
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
# logging.getLogger().info(result)
if top_k <= 1024:
assert status.OK()
assert len(result[0]) == min(len(vectors), top_k)
assert result[0][0].distance <= epsilon
assert check_result(result[0], ids[0])
else:
assert not status.OK()
# auto flush
def test_search_l2_index_params_2(self, connect, table, get_simple_index_params):
'''
target: test basic search fuction, all the search params is corrent, test all index params, and build
method: search with the given vectors, check the result
expected: search status ok, and the length of the result is top_k
'''
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = non_exist_id
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
# logging.getLogger().info(result)
if top_k <= 1024:
assert status.OK()
assert len(result[0]) == min(len(vectors), top_k)
# assert len(result[0]) == 0
# assert result[0][0].distance <= epsilon
# assert check_result(result[0], ids[0])
else:
assert not status.OK()
# add auto flush, delete auto flush
def test_search_l2_index_params_3(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status = self.delete_data(connect, table, [query_id])
assert status.OK()
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
assert self.check_no_result(result[0])
else:
assert not status.OK()
# add auto flush, delete auto flush
def test_search_l2_index_params_4(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status = self.delete_data(connect, table, [query_id])
assert status.OK()
query_id = non_exist_id
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
assert self.check_no_result(result[0])
else:
assert not status.OK()
# add auto flush, delete manual flush
def test_search_l2_index_params_5(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status = self.delete_data_and_flush(connect, table, [query_id])
assert status.OK()
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
assert self.check_no_result(result[0])
else:
assert not status.OK()
# add auto flush, delete manual flush
def test_search_l2_index_params_6(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status = self.delete_data_and_flush(connect, table, [query_id])
assert status.OK()
query_id = non_exist_id
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
assert self.check_no_result(result[0])
else:
assert not status.OK()
# add auto flush, delete no flush
def test_search_l2_index_params_7(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status = self.delete_data_no_flush(connect, table, [query_id])
assert status.OK()
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
assert len(result[0]) == min(len(vectors), top_k)
assert result[0][0].distance <= epsilon
assert check_result(result[0], ids[0])
else:
assert not status.OK()
# add auto flush, delete no flush
def test_search_l2_index_params_8(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status = self.delete_data_no_flush(connect, table, [query_id])
assert status.OK()
query_id = non_exist_id
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
assert self.check_no_result(result[0])
else:
assert not status.OK()
# add auto flush, delete no flush, add again auto flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
def test_search_l2_index_params_9(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
for r in result[0][:5]:
logging.getLogger().info(r)
# status = self.delete_data_no_flush(connect, table, [query_id])
status = self.delete_data(connect, table, [query_id])
assert status.OK()
vectors, new_ids = self.add_data(connect, table, vectors)
status = connect.create_index(table, index_params)
# # logging.getLogger().info(new_ids)
logging.getLogger().info(connect.count_table(table))
# query_id = ids[0]
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
for r in result[0][:5]:
logging.getLogger().info(r)
# assert self.check_no_result(result[0])
assert check_result(result[0], query_id)
else:
assert not status.OK()
# add auto flush, delete no flush, add again no flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
def test_search_l2_index_params_10(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
# logging.getLogger().info(ids)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
for r in result[0][:5]:
logging.getLogger().info(r)
# status = self.delete_data_no_flush(connect, table, [query_id])
status = self.delete_data(connect, table, [query_id])
assert status.OK()
vectors, new_ids = self.add_data_no_flush(connect, table, vectors)
status = connect.create_index(table, index_params)
# # logging.getLogger().info(new_ids)
logging.getLogger().info(connect.count_table(table))
# query_id = ids[0]
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
for r in result[0][:5]:
logging.getLogger().info(r)
assert self.check_no_result(result[0])
else:
assert not status.OK()
def test_base_ops(self, connect, table):
vectors = gen_vectors(small_size, dim)
def search_and_show(table, top_k, nprobe, query_id):
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
for r in result[0][:5]:
logging.getLogger().info(r)
index_params = {'index_type':IndexType.FLAT, 'nlist':512}
logging.getLogger().info(index_params)
vectors, ids = connect.add_vectors(table, vectors)
# sleep(4)
status = connect.create_index(table, index_params)
query_id = ids[0]
logging.getLogger().info('query_id: ' + str(query_id))
top_k = 10
nprobe = 1
search_and_show(table, top_k, nprobe, query_id)
    def test_1170(self, connect, table):
        """Reproduction script for https://github.com/milvus-io/milvus/issues/1170.

        Sequence: add (auto flush) -> search -> delete WITHOUT flush ->
        search again -> re-add the same vectors -> explicit flush -> search a
        third time, logging the top hits at each step.  The exact call/flush
        ordering is the point of the repro; the commented-out lines are
        retained variants of the sequence.
        """
        def search_and_show(table, top_k, nprobe, query_id):
            # log the first few hits so the server state can be inspected
            status, result = connect.search_by_id(table, top_k, nprobe, query_id)
            for r in result[0][:5]:
                logging.getLogger().info(r)
        index_params = {'index_type':IndexType.FLAT, 'nlist':512}
        logging.getLogger().info(index_params)
        vectors, ids = self.init_data(connect, table, nb=small_size)
        status = connect.create_index(table, index_params)
        query_id = ids[0]
        logging.getLogger().info('query_id: ' + str(query_id))
        top_k = 10
        nprobe = 1
        # baseline: the freshly inserted id should be searchable
        search_and_show(table, top_k, nprobe, query_id)
        # delete the id but deliberately skip the flush
        status = self.delete_data_no_flush(connect, table, [query_id])
        search_and_show(table, top_k, nprobe, query_id)
        # connect.flush([table])
        # search_and_show(table, top_k, nprobe, query_id)
        # vectors, new_ids = self.add_data(connect, table, vectors)
        # re-add the same vectors (auto-generated ids), then flush explicitly
        status, new_ids = connect.add_vectors(table, vectors)
        logging.getLogger().info(status)
        connect.flush([table])
        # sleep(30)
        # status = connect.create_index(table, index_params)
        search_and_show(table, top_k, nprobe, query_id)
# add auto flush, delete no flush, add again auto flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
def test_search_l2_index_params_11(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
# logging.getLogger().info(ids)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
for r in result[0][:5]:
logging.getLogger().info(r)
# status = self.delete_data_no_flush(connect, table, [query_id])
status = self.delete_data(connect, table, [query_id])
assert status.OK()
vectors, new_ids = self.add_data(connect, table, vectors)
status = connect.create_index(table, index_params)
# # logging.getLogger().info(new_ids)
logging.getLogger().info(connect.count_table(table))
# query_id = ids[0]
status, result = connect.search_by_id(table, top_k, nprobe, non_exist_id)
if top_k <= 1024:
assert status.OK()
for r in result[0][:5]:
logging.getLogger().info(r)
assert self.check_no_result(result[0])
else:
assert not status.OK()
# add auto flush, delete no flush, add again no flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
def test_search_l2_index_params_12(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
# logging.getLogger().info(ids)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
for r in result[0][:5]:
logging.getLogger().info(r)
# status = self.delete_data_no_flush(connect, table, [query_id])
status = self.delete_data(connect, table, [query_id])
assert status.OK()
vectors, new_ids = self.add_data_no_flush(connect, table, vectors)
status = connect.create_index(table, index_params)
# # logging.getLogger().info(new_ids)
logging.getLogger().info(connect.count_table(table))
# query_id = ids[0]
status, result = connect.search_by_id(table, top_k, nprobe, non_exist_id)
if top_k <= 1024:
assert status.OK()
for r in result[0][:5]:
logging.getLogger().info(r)
assert self.check_no_result(result[0])
else:
assert not status.OK()
# add auto flush, delete no flush, add again auto flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
def test_search_l2_index_params_13(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
# logging.getLogger().info(ids)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
for r in result[0][:5]:
logging.getLogger().info(r)
# status = self.delete_data_no_flush(connect, table, [query_id])
status = self.delete_data(connect, table, [query_id])
assert status.OK()
vectors, new_ids = self.add_data(connect, table, vectors)
status = connect.create_index(table, index_params)
# # logging.getLogger().info(new_ids)
logging.getLogger().info(connect.count_table(table))
query_id = new_ids[0]
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
for r in result[0][:5]:
logging.getLogger().info(r)
# assert self.check_no_result(result[0])
assert check_result(result[0], query_id)
else:
assert not status.OK()
# add auto flush, delete no flush, add again no flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
def test_search_l2_index_params_14(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
# logging.getLogger().info(ids)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
for r in result[0][:5]:
logging.getLogger().info(r)
# status = self.delete_data_no_flush(connect, table, [query_id])
status = self.delete_data(connect, table, [query_id])
assert status.OK()
vectors, new_ids = self.add_data_no_flush(connect, table, vectors)
status = connect.create_index(table, index_params)
# # logging.getLogger().info(new_ids)
logging.getLogger().info(connect.count_table(table))
query_id = new_ids[0]
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
for r in result[0][:5]:
logging.getLogger().info(r)
assert check_result(result[0], query_id)
else:
assert not status.OK()
# ------------------------------------------------------------- l2, add auto flush, delete, search ------------------------------------------------------------- #
# add to table, auto flush, search table, search partition exist
def test_search_l2_index_params_partition_9(self, connect, table, get_simple_index_params):
'''
target: test basic search fuction, all the search params is corrent, test all index params, and build
method: add vectors into table, search with the given vectors, check the result
expected: search status ok, and the length of the result is top_k, search table with partition tag return empty
'''
index_params = get_simple_index_params
# index_params = {'nlist': 1024, 'index_type': IndexType.IVF_SQ8}
logging.getLogger().info(index_params)
partition_name = gen_unique_str()
status = connect.create_partition(table, partition_name, tag)
vectors, ids = self.init_data(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
assert status.OK()
assert len(result[0]) == min(len(vectors), top_k)
assert check_result(result[0], ids[0])
assert result[0][0].distance <= epsilon
status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array=[tag])
assert status.OK()
assert len(result) == 0
# add to table, auto flush, search partition exist
def test_search_l2_index_params_partition_1(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
partition_name = gen_unique_str()
status = connect.create_partition(table, partition_name, tag)
vectors, ids = self.init_data(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array=[tag])
assert status.OK()
assert len(result) == 0
# add to partition, auto flush, search partition exist
def test_search_l2_index_params_partition_2(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
partition_name = gen_unique_str()
status = connect.create_partition(table, partition_name, tag)
vectors, ids = self.init_data_partition(connect, table, tag, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array=[tag])
logging.getLogger().info(status)
logging.getLogger().info(result)
assert status.OK()
assert len(result[0]) == min(len(vectors), top_k)
assert check_result(result[0], query_id)
# add to table, auto flush, search partition non exist
def test_search_l2_index_params_partition_3(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
partition_name = gen_unique_str()
status = connect.create_partition(table, partition_name, tag)
vectors, ids = self.init_data(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = non_exist_id
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array=[tag])
assert status.OK()
assert len(result) == 0
# # add to partition, auto flush, search partition non exist
def test_search_l2_index_params_partition_4(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
partition_name = gen_unique_str()
status = connect.create_partition(table, partition_name, tag)
vectors, ids = self.init_data_partition(connect, table, tag, nb=small_size)
status = connect.create_index(table, index_params)
query_id = non_exist_id
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array=[tag])
assert status.OK()
assert self.check_no_result(result[0])
# # add to table, manual flush, search partition exist
def test_search_l2_index_params_partition_5(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
partition_name = gen_unique_str()
status = connect.create_partition(table, partition_name, tag)
vectors, ids = self.init_data_and_flush(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array=[tag])
assert status.OK()
assert len(result) == 0
# add to partition, manual flush, search partition exist
def test_search_l2_index_params_partition_6(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
partition_name = gen_unique_str()
status = connect.create_partition(table, partition_name, tag)
vectors, ids = self.init_data_partition_and_flush(connect, table, tag, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array=[tag])
assert status.OK()
assert len(result[0]) == min(len(vectors), top_k)
assert check_result(result[0], query_id)
# add to table, manual flush, search partition non exist
def test_search_l2_index_params_partition_7(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
partition_name = gen_unique_str()
status = connect.create_partition(table, partition_name, tag)
vectors, ids = self.init_data_and_flush(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = non_exist_id
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array=[tag])
assert status.OK()
assert len(result) == 0
# add to partition, manual flush, search partition non exist
def test_search_l2_index_params_partition_8(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
partition_name = gen_unique_str()
status = connect.create_partition(table, partition_name, tag)
vectors, ids = self.init_data_partition_and_flush(connect, table, tag, nb=small_size)
status = connect.create_index(table, index_params)
query_id = non_exist_id
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array=[tag])
assert status.OK()
assert self.check_no_result(result[0])
# add to table, manual flush, search non-existing partition non exist
def test_search_l2_index_params_partition_15(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
partition_name = gen_unique_str()
status = connect.create_partition(table, partition_name, tag)
vectors, ids = self.init_data_partition_and_flush(connect, table, tag, nb=small_size)
status = connect.create_index(table, index_params)
query_id = non_exist_id
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array=['non_existing_tag'])
assert status.OK()
assert len(result) == 0
# add to table, manual flush, search non-existing partition non exist
def test_search_l2_index_params_partition_14(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
partition_name = gen_unique_str()
status = connect.create_partition(table, partition_name, tag)
vectors, ids = self.init_data_partition_and_flush(connect, table, tag, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array=['non_existing_tag'])
assert status.OK()
assert len(result) == 0
# add to partition, auto flush, search partition, exist
def test_search_l2_index_params_partition_13(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
partition_name = gen_unique_str()
status = connect.create_partition(table, partition_name, tag)
vectors, ids = self.init_data(connect, partition_name, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array=[tag, "new_tag"])
logging.getLogger().info(result)
assert status.OK()
assert len(result[0]) == min(len(vectors), top_k)
assert check_result(result[0], ids[0])
assert result[0][0].distance <= epsilon
# add to partition, auto flush, search partition, exist
def test_search_l2_index_params_partition_10(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
partition_name = gen_unique_str()
status = connect.create_partition(table, partition_name, tag)
vectors, ids = self.init_data(connect, partition_name, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array=["new_tag"])
assert status.OK()
assert len(result) == 0
# add to partition, auto flush, search partition, exist
def test_search_l2_index_params_partition_11(self, connect, table, get_simple_index_params):
new_tag = "new_tag"
index_params = get_simple_index_params
logging.getLogger().info(index_params)
partition_name = gen_unique_str()
new_partition_name = gen_unique_str()
status = connect.create_partition(table, partition_name, tag)
status = connect.create_partition(table, new_partition_name, new_tag)
vectors, ids = self.init_data(connect, partition_name, nb=small_size)
new_vectors, new_ids = self.init_data(connect, new_partition_name, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array=[tag, new_tag])
assert status.OK()
assert len(result[0]) == min(len(vectors), top_k)
assert check_result(result[0], ids[0])
assert result[0][0].distance <= epsilon
query_id = new_ids[0]
status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array=[tag, new_tag])
assert status.OK()
assert len(result[0]) == min(len(vectors), top_k)
assert check_result(result[0], new_ids[0])
assert result[0][0].distance <= epsilon
# add to partition, auto flush, search partition, exist
def test_search_l2_index_params_partition_12(self, connect, table, get_simple_index_params):
tag = "atag"
new_tag = "new_tag"
index_params = get_simple_index_params
logging.getLogger().info(index_params)
partition_name = gen_unique_str()
new_partition_name = gen_unique_str()
status = connect.create_partition(table, partition_name, tag)
status = connect.create_partition(table, new_partition_name, new_tag)
vectors, ids = self.init_data(connect, partition_name, nb=small_size)
new_vectors, new_ids = self.init_data(connect, new_partition_name, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array=["(.*)tag"])
assert status.OK()
assert len(result[0]) == min(len(vectors), top_k)
assert check_result(result[0], ids[0])
assert result[0][0].distance <= epsilon
query_id = new_ids[0]
status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array=["new(.*)"])
assert len(result[0]) == min(len(new_vectors), top_k)
assert check_result(result[0], new_ids[0])
assert status.OK()
assert result[0][0].distance <= epsilon
# ------------------------------------------------------------- ip, add auto flush, delete, search ------------------------------------------------------------- #
# add auto flush
def test_search_ip_index_params_1(self, connect, ip_table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, ip_table, nb=small_size)
status = connect.create_index(ip_table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id)
logging.getLogger().info(result)
if top_k <= 1024:
assert status.OK()
assert len(result[0]) == min(len(vectors), top_k)
assert check_result(result[0], ids[0])
# assert abs(result[0][0].distance - numpy.inner(numpy.array(query_id[0]), numpy.array(query_id[0]))) <= gen_inaccuracy(result[0][0].distance)
else:
assert not status.OK()
# add auto flush
def test_search_ip_index_params_2(self, connect, ip_table, get_simple_index_params):
'''
target: test basic search fuction, all the search params is corrent, test all index params, and build
method: search with the given vectors, check the result
expected: search status ok, and the length of the result is top_k
'''
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, ip_table, nb=small_size)
status = connect.create_index(ip_table, index_params)
query_id = non_exist_id
top_k = 10
nprobe = 1
status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id)
logging.getLogger().info(result)
if top_k <= 1024:
assert status.OK()
assert len(result[0]) == min(len(vectors), top_k)
# assert check_result(result[0], ids[0])
assert self.check_no_result(result[0])
# assert abs(result[0][0].distance - numpy.inner(numpy.array(query_id[0]), numpy.array(query_id[0]))) <= gen_inaccuracy(result[0][0].distance)
else:
assert not status.OK()
# add auto flush, delete no flush
def test_search_ip_index_params_3(self, connect, ip_table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, ip_table, nb=small_size)
status = connect.create_index(ip_table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status = self.delete_data_no_flush(connect, ip_table, [query_id])
assert status.OK()
status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
assert check_result(result[0], ids[0])
else:
assert not status.OK()
# add auto flush, delete no flush, non exist
def test_search_ip_index_params_4(self, connect, ip_table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, ip_table, nb=small_size)
status = connect.create_index(ip_table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status = self.delete_data_no_flush(connect, ip_table, [query_id])
assert status.OK()
status, result = connect.search_by_id(ip_table, top_k, nprobe, non_exist_id)
if top_k <= 1024:
assert status.OK()
assert self.check_no_result(result[0])
else:
assert not status.OK()
# add auto flush, delete manual flush
def test_search_ip_index_params_5(self, connect, ip_table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, ip_table, nb=small_size)
status = connect.create_index(ip_table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status = self.delete_data_and_flush(connect, ip_table, [query_id])
assert status.OK()
status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
assert self.check_no_result(result[0])
else:
assert not status.OK()
# add auto flush, delete manual flush
def test_search_ip_index_params_6(self, connect, ip_table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, ip_table, nb=small_size)
status = connect.create_index(ip_table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status = self.delete_data_and_flush(connect, ip_table, [query_id])
assert status.OK()
query_id = non_exist_id
status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
assert self.check_no_result(result[0])
else:
assert not status.OK()
# add auto flush, delete auto flush
def test_search_ip_index_params_7(self, connect, ip_table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, ip_table, nb=small_size)
status = connect.create_index(ip_table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status = self.delete_data(connect, ip_table, [query_id])
assert status.OK()
status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
# assert len(result[0]) == min(len(vectors), top_k)
# assert result[0][0].distance <= epsilon
# assert check_result(result[0], ids[0])
assert self.check_no_result(result[0])
else:
assert not status.OK()
# add auto flush, delete no flush
def test_search_ip_index_params_8(self, connect, ip_table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, ip_table, nb=small_size)
status = connect.create_index(ip_table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status = self.delete_data(connect, ip_table, [query_id])
assert status.OK()
query_id = non_exist_id
status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
assert self.check_no_result(result[0])
else:
assert not status.OK()
# add auto flush, delete no flush, add again auto flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
def test_search_ip_index_params_9(self, connect, ip_table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, ip_table, nb=small_size)
status = connect.create_index(ip_table, index_params)
# logging.getLogger().info(ids)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id)
for r in result[0][:5]:
logging.getLogger().info(r)
# status = self.delete_data_no_flush(connect, ip_table, [query_id])
status = self.delete_data(connect, ip_table, [query_id])
assert status.OK()
vectors, new_ids = self.add_data(connect, ip_table, vectors)
status = connect.create_index(ip_table, index_params)
# # logging.getLogger().info(new_ids)
# logging.getLogger().info(connect.count_ip_table(ip_table))
# query_id = ids[0]
status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
for r in result[0][:5]:
logging.getLogger().info(r)
# assert self.check_no_result(result[0])
assert check_result(result[0], query_id)
else:
assert not status.OK()
# add auto flush, delete no flush, add again no flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
def test_search_ip_index_params_10(self, connect, ip_table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, ip_table, nb=small_size)
status = connect.create_index(ip_table, index_params)
# logging.getLogger().info(ids)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id)
for r in result[0][:5]:
logging.getLogger().info(r)
# status = self.delete_data_no_flush(connect, ip_table, [query_id])
status = self.delete_data(connect, ip_table, [query_id])
assert status.OK()
vectors, new_ids = self.add_data_no_flush(connect, ip_table, vectors)
status = connect.create_index(ip_table, index_params)
# # logging.getLogger().info(new_ids)
# logging.getLogger().info(connect.count_ip_table(ip_table))
# query_id = ids[0]
status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
for r in result[0][:5]:
logging.getLogger().info(r)
assert self.check_no_result(result[0])
else:
assert not status.OK()
# add auto flush, delete no flush, add again auto flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
def test_search_ip_index_params_11(self, connect, ip_table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, ip_table, nb=small_size)
status = connect.create_index(ip_table, index_params)
# logging.getLogger().info(ids)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id)
for r in result[0][:5]:
logging.getLogger().info(r)
# status = self.delete_data_no_flush(connect, ip_table, [query_id])
status = self.delete_data(connect, ip_table, [query_id])
assert status.OK()
vectors, new_ids = self.add_data(connect, ip_table, vectors)
status = connect.create_index(ip_table, index_params)
# # logging.getLogger().info(new_ids)
# logging.getLogger().info(connect.count_ip_table(ip_table))
# query_id = ids[0]
status, result = connect.search_by_id(ip_table, top_k, nprobe, non_exist_id)
if top_k <= 1024:
assert status.OK()
for r in result[0][:5]:
logging.getLogger().info(r)
assert self.check_no_result(result[0])
else:
assert not status.OK()
# add auto flush, delete no flush, add again no flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
def test_search_ip_index_params_12(self, connect, ip_table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, ip_table, nb=small_size)
status = connect.create_index(ip_table, index_params)
# logging.getLogger().info(ids)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id)
for r in result[0][:5]:
logging.getLogger().info(r)
# status = self.delete_data_no_flush(connect, ip_table, [query_id])
status = self.delete_data(connect, ip_table, [query_id])
assert status.OK()
vectors, new_ids = self.add_data_no_flush(connect, ip_table, vectors)
status = connect.create_index(ip_table, index_params)
# # logging.getLogger().info(new_ids)
# logging.getLogger().info(connect.count_ip_table(ip_table))
# query_id = ids[0]
status, result = connect.search_by_id(ip_table, top_k, nprobe, non_exist_id)
if top_k <= 1024:
assert status.OK()
for r in result[0][:5]:
logging.getLogger().info(r)
# assert check_result(result[0], query_id)
assert self.check_no_result(result[0])
else:
assert not status.OK()
# add auto flush, delete no flush, add again auto flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
def test_search_ip_index_params_13(self, connect, ip_table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, ip_table, nb=small_size)
status = connect.create_index(ip_table, index_params)
# logging.getLogger().info(ids)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id)
for r in result[0][:5]:
logging.getLogger().info(r)
# status = self.delete_data_no_flush(connect, ip_table, [query_id])
status = self.delete_data(connect, ip_table, [query_id])
assert status.OK()
vectors, new_ids = self.add_data(connect, ip_table, vectors)
status = connect.create_index(ip_table, index_params)
# # logging.getLogger().info(new_ids)
# logging.getLogger().info(connect.count_ip_table(ip_table))
query_id = new_ids[0]
status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
for r in result[0][:5]:
logging.getLogger().info(r)
# assert self.check_no_result(result[0])
assert check_result(result[0], query_id)
else:
assert not status.OK()
# add auto flush, delete no flush, add again no flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
def test_search_ip_index_params_14(self, connect, ip_table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data(connect, ip_table, nb=small_size)
status = connect.create_index(ip_table, index_params)
# logging.getLogger().info(ids)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id)
for r in result[0][:5]:
logging.getLogger().info(r)
# status = self.delete_data_no_flush(connect, ip_table, [query_id])
status = self.delete_data(connect, ip_table, [query_id])
assert status.OK()
vectors, new_ids = self.add_data_no_flush(connect, ip_table, vectors)
status = connect.create_index(ip_table, index_params)
# # logging.getLogger().info(new_ids)
# # logging.getLogger().info(connect.count_ip_table(ip_table))
query_id = new_ids[0]
status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
for r in result[0][:5]:
logging.getLogger().info(r)
# assert self.check_no_result(result[0])
assert check_result(result[0], query_id)
else:
assert not status.OK()
# ------------------------------------------------------------- ip, add auto flush, delete, search ------------------------------------------------------------- #
# ------------------------------------------------------------- l2, add manual flush, delete, search ------------------------------------------------------------- #
# manual flush
def test_search_m_l2_index_params_1(self, connect, table, get_simple_index_params):
'''
target: test basic search fuction, all the search params is corrent, test all index params, and build
method: search with the given vectors, check the result
expected: search status ok, and the length of the result is top_k
'''
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data_no_flush(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
assert len(result[0]) == min(len(vectors), top_k)
for r in result[0][:5]:
logging.getLogger().info(r)
# assert self.check_no_result(result[0])
else:
assert not status.OK()
# manual flush
def test_search_m_l2_index_params_2(self, connect, table, get_simple_index_params):
'''
target: test basic search fuction, all the search params is corrent, test all index params, and build
method: search with the given vectors, check the result
expected: search status ok, and the length of the result is top_k
'''
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data_no_flush(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = non_exist_id
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
# logging.getLogger().info(result)
if top_k <= 1024:
assert status.OK()
assert len(result[0]) == min(len(vectors), top_k)
# assert len(result[0]) == 0
assert self.check_no_result(result[0])
else:
assert not status.OK()
# add manual flush, delete manual flush
def test_search_m_l2_index_params_5(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data_and_flush(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status = self.delete_data_and_flush(connect, table, [query_id])
assert status.OK()
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
assert self.check_no_result(result[0])
else:
assert not status.OK()
# add manual flush, delete manual flush
def test_search_m_l2_index_params_6(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data_and_flush(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status = self.delete_data_and_flush(connect, table, [query_id])
assert status.OK()
query_id = non_exist_id
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
assert self.check_no_result(result[0])
else:
assert not status.OK()
# add manual flush, delete no flush
def test_search_m_l2_index_params_7(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data_and_flush(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status = self.delete_data_no_flush(connect, table, [query_id])
assert status.OK()
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
assert len(result[0]) == min(len(vectors), top_k)
assert result[0][0].distance <= epsilon
assert check_result(result[0], ids[0])
else:
assert not status.OK()
# add manual flush, delete no flush
def test_search_m_l2_index_params_8(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data_and_flush(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
query_id = ids[0]
top_k = 10
nprobe = 1
status = self.delete_data_no_flush(connect, table, [query_id])
assert status.OK()
query_id = non_exist_id
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
assert self.check_no_result(result[0])
else:
assert not status.OK()
# add manual flush, delete no flush, add again manual flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
def test_search_m_l2_index_params_9(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data_and_flush(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
# logging.getLogger().info(ids)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
for r in result[0][:5]:
logging.getLogger().info(r)
status = self.delete_data_no_flush(connect, table, [query_id])
assert status.OK()
vectors, new_ids = self.add_data(connect, table, vectors)
status = connect.create_index(table, index_params)
# # logging.getLogger().info(new_ids)
logging.getLogger().info(connect.count_table(table))
# query_id = ids[0]
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
for r in result[0][:5]:
logging.getLogger().info(r)
# assert self.check_no_result(result[0])
assert check_result(result[0], query_id)
else:
assert not status.OK()
# add manual flush, delete no flush, add again no flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
def test_search_m_l2_index_params_10(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data_and_flush(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
# logging.getLogger().info(ids)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
for r in result[0][:5]:
logging.getLogger().info(r)
# status = self.delete_data_no_flush(connect, table, [query_id])
status = self.delete_data(connect, table, [query_id])
assert status.OK()
vectors, new_ids = self.add_data_no_flush(connect, table, vectors)
status = connect.create_index(table, index_params)
# # logging.getLogger().info(new_ids)
logging.getLogger().info(connect.count_table(table))
# query_id = ids[0]
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
for r in result[0][:5]:
logging.getLogger().info(r)
assert self.check_no_result(result[0])
else:
assert not status.OK()
# add manual flush, delete no flush, add again manual flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
def test_search_m_l2_index_params_11(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data_and_flush(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
# logging.getLogger().info(ids)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
for r in result[0][:5]:
logging.getLogger().info(r)
# status = self.delete_data_no_flush(connect, table, [query_id])
status = self.delete_data(connect, table, [query_id])
assert status.OK()
vectors, new_ids = self.add_data(connect, table, vectors)
status = connect.create_index(table, index_params)
# # logging.getLogger().info(new_ids)
logging.getLogger().info(connect.count_table(table))
# query_id = ids[0]
status, result = connect.search_by_id(table, top_k, nprobe, non_exist_id)
if top_k <= 1024:
assert status.OK()
for r in result[0][:5]:
logging.getLogger().info(r)
assert self.check_no_result(result[0])
else:
assert not status.OK()
# add manual flush, delete no flush, add again no flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
def test_search_m_l2_index_params_12(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data_and_flush(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
# logging.getLogger().info(ids)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
for r in result[0][:5]:
logging.getLogger().info(r)
# status = self.delete_data_no_flush(connect, table, [query_id])
status = self.delete_data(connect, table, [query_id])
assert status.OK()
vectors, new_ids = self.add_data_no_flush(connect, table, vectors)
status = connect.create_index(table, index_params)
# # logging.getLogger().info(new_ids)
logging.getLogger().info(connect.count_table(table))
# query_id = ids[0]
status, result = connect.search_by_id(table, top_k, nprobe, non_exist_id)
if top_k <= 1024:
assert status.OK()
for r in result[0][:5]:
logging.getLogger().info(r)
assert self.check_no_result(result[0])
else:
assert not status.OK()
# add manual flush, delete no flush, add again manual flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
def test_search_m_l2_index_params_13(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data_and_flush(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
# logging.getLogger().info(ids)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
for r in result[0][:5]:
logging.getLogger().info(r)
# status = self.delete_data_no_flush(connect, table, [query_id])
status = self.delete_data(connect, table, [query_id])
assert status.OK()
vectors, new_ids = self.add_data(connect, table, vectors)
status = connect.create_index(table, index_params)
# # logging.getLogger().info(new_ids)
logging.getLogger().info(connect.count_table(table))
query_id = new_ids[0]
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
for r in result[0][:5]:
logging.getLogger().info(r)
# assert self.check_no_result(result[0])
assert check_result(result[0], query_id)
else:
assert not status.OK()
# add manual flush, delete no flush, add again no flush
# TODO: https://github.com/milvus-io/milvus/issues/1170
def test_search_m_l2_index_params_14(self, connect, table, get_simple_index_params):
index_params = get_simple_index_params
logging.getLogger().info(index_params)
vectors, ids = self.init_data_and_flush(connect, table, nb=small_size)
status = connect.create_index(table, index_params)
# logging.getLogger().info(ids)
query_id = ids[0]
top_k = 10
nprobe = 1
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
logging.getLogger().info(status)
assert status.OK()
for r in result[0][:5]:
logging.getLogger().info(r)
# status = self.delete_data_no_flush(connect, table, [query_id])
status = self.delete_data(connect, table, [query_id])
assert status.OK()
vectors, new_ids = self.add_data_no_flush(connect, table, vectors)
status = connect.create_index(table, index_params)
# # logging.getLogger().info(new_ids)
logging.getLogger().info(connect.count_table(table))
query_id = new_ids[0]
status, result = connect.search_by_id(table, top_k, nprobe, query_id)
if top_k <= 1024:
assert status.OK()
for r in result[0][:5]:
logging.getLogger().info(r)
# assert self.check_no_result(result[0])
assert check_result(result[0], query_id)
else:
assert not status.OK()
# ------------------------------------------------------------- l2, add manual flush, delete, search ------------------------------------------------------------- #
    def test_search_ip_index_params_partition(self, connect, ip_table, get_simple_index_params):
        """Search an IP table that also has an (empty) partition.

        Vectors are inserted into the table itself (no tag), so the
        unrestricted search finds the query id, while the same search
        restricted to the partition tag must come back empty.
        """
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        partition_name = gen_unique_str()
        status = connect.create_partition(ip_table, partition_name, tag)
        vectors, ids = self.init_data(connect, ip_table, nb=small_size)
        status = connect.create_index(ip_table, index_params)
        query_id = ids[0]
        top_k = 10
        nprobe = 1
        status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id)
        assert status.OK()
        assert len(result[0]) == min(len(vectors), top_k)
        assert check_result(result[0], ids[0])
        # Restrict the search to the tag: nothing was inserted with the tag,
        # so the result set must be empty.
        status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id, partition_tag_array=[tag])
        assert status.OK()
        assert len(result) == 0
    def test_search_ip_index_params_partition_1(self, connect, ip_table, get_simple_index_params):
        """Search a populated partition, both tag-filtered via the parent
        table and directly by the partition's own name.

        NOTE(review): init_data is called with `partition_name` (not
        `ip_table`) as the table argument — presumably inserting straight
        into the partition's backing table; confirm against the helpers.
        """
        index_params = get_simple_index_params
        logging.getLogger().info(index_params)
        partition_name = gen_unique_str()
        status = connect.create_partition(ip_table, partition_name, tag)
        vectors, ids = self.init_data(connect, partition_name, nb=small_size)
        status = connect.create_index(ip_table, index_params)
        query_id = ids[0]
        top_k = 10
        nprobe = 1
        status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id, partition_tag_array=[tag])
        assert status.OK()
        assert len(result[0]) == min(len(vectors), top_k)
        assert check_result(result[0], ids[0])
        # The partition can also be queried directly by its own name.
        status, result = connect.search_by_id(partition_name, top_k, nprobe, query_id)
        assert status.OK()
        assert len(result[0]) == min(len(vectors), top_k)
        assert check_result(result[0], ids[0])
    @pytest.mark.level(2)
    def test_search_by_id_without_connect(self, dis_connect, table):
        '''
        target: test search vectors without connection
        method: use dis connected instance, call search method and check if search successfully
        expected: raise exception
        '''
        # Any id value will do — the call must fail before the id is used.
        query_idtors = 123
        top_k = 1
        nprobe = 1
        with pytest.raises(Exception) as e:
            status, ids = dis_connect.search_by_id(table, top_k, nprobe, query_idtors)
def test_search_table_name_not_existed(self, connect, table):
'''
target: search table not existed
method: search with the random table_name, which is not in db
expected: status not ok
'''
table_name = gen_unique_str("not_existed_table")
top_k = 1
nprobe = 1
query_id = non_exist_id
status, result = connect.search_by_id(table_name, top_k, nprobe, query_id)
assert not status.OK()
def test_search_table_name_None(self, connect, table):
'''
target: search table that table name is None
method: search with the table_name: None
expected: status not ok
'''
table_name = None
top_k = 1
nprobe = 1
query_ids = non_exist_id
with pytest.raises(Exception) as e:
status, result = connect.search_by_id(table_name, top_k, nprobe, query_id)
# def test_search_distance_l2_flat_index(self, connect, table):
# nb = 2
# top_k = 1
# nprobe = 1
# vectors, ids = self.init_data(connect, table, nb=nb)
# vs = [[0.50 for i in range(dim)]]
# query_ids = ids
# distance_0 = numpy.linalg.norm(numpy.array(vs[0]) - numpy.array(vectors[0]))
# distance_1 = numpy.linalg.norm(numpy.array(vs[0]) - numpy.array(vectors[1]))
# status, result = connect.search_by_id(table, top_k, nprobe, ids[0])
# assert abs(numpy.sqrt(result[0][0].distance) - min(distance_0, distance_1)) <= gen_inaccuracy(result[0][0].distance)
# def test_search_distance_ip_flat_index(self, connect, ip_table):
# '''
# target: search ip_table, and check the result: distance
# method: compare the return distance value with value computed with Inner product
# expected: the return distance equals to the computed value
# '''
# nb = 2
# top_k = 1
# nprobe = 1
# vectors, ids = self.init_data(connect, ip_table, nb=nb)
# index_params = {
# "index_type": IndexType.FLAT,
# "nlist": 16384
# }
# connect.create_index(ip_table, index_params)
# logging.getLogger().info(connect.describe_index(ip_table))
# query_ids = [[0.50 for i in range(dim)]]
# distance_0 = numpy.inner(numpy.array(query_ids[0]), numpy.array(vectors[0]))
# distance_1 = numpy.inner(numpy.array(query_ids[0]), numpy.array(vectors[1]))
# status, result = connect.search_by_id(ip_table, top_k, nprobe, query_ids)
# assert abs(result[0][0].distance - max(distance_0, distance_1)) <= gen_inaccuracy(result[0][0].distance)
    def test_search_distance_jaccard_flat_index(self, connect, jac_table):
        """Smoke-test search_by_id on a binary (Jaccard metric) table with a
        FLAT index. Only exercises the call path and logs the output.
        """
        top_k = 10
        nprobe = 512
        int_vectors, vectors, ids = self.init_binary_data(connect, jac_table, nb=small_size)
        index_params = {
            "index_type": IndexType.FLAT,
            "nlist": 16384
        }
        connect.create_index(jac_table, index_params)
        logging.getLogger().info(ids[:10])
        query_id = ids[0]
        logging.getLogger().info(query_id)
        status, result = connect.search_by_id(jac_table, top_k, nprobe, query_id)
        logging.getLogger().info(status)
        logging.getLogger().info(result[0])
        # NOTE(review): no assertion on status/result — consider asserting
        # status.OK() and the distance once the disabled check is restored.
    def test_search_distance_hamming_flat_index(self, connect, ham_table):
        """Smoke-test search_by_id on a binary (Hamming metric) table with a
        FLAT index. Only exercises the call path and logs the output.
        """
        top_k = 10
        nprobe = 512
        int_vectors, vectors, ids = self.init_binary_data(connect, ham_table, nb=small_size)
        index_params = {
            "index_type": IndexType.FLAT,
            "nlist": 16384
        }
        connect.create_index(ham_table, index_params)
        logging.getLogger().info(connect.describe_table(ham_table))
        logging.getLogger().info(connect.describe_index(ham_table))
        query_id = ids[0]
        status, result = connect.search_by_id(ham_table, top_k, nprobe, query_id)
        logging.getLogger().info(status)
        logging.getLogger().info(result)
        # NOTE(review): no assertion on status/result — the hamming distance
        # comparison above is left disabled.
    def test_search_distance_tanimoto_flat_index(self, connect, tanimoto_table):
        """Smoke-test search_by_id on a binary (Tanimoto metric) table with a
        FLAT index. Only exercises the call path and logs the output.
        """
        top_k = 10
        nprobe = 512
        int_vectors, vectors, ids = self.init_binary_data(connect, tanimoto_table, nb=small_size)
        index_params = {
            "index_type": IndexType.FLAT,
            "nlist": 16384
        }
        connect.create_index(tanimoto_table, index_params)
        logging.getLogger().info(connect.describe_table(tanimoto_table))
        logging.getLogger().info(connect.describe_index(tanimoto_table))
        query_id = ids[0]
        status, result = connect.search_by_id(tanimoto_table, top_k, nprobe, query_id)
        logging.getLogger().info(status)
        logging.getLogger().info(result)
        # NOTE(review): no assertion on status/result — the tanimoto distance
        # comparison above is left disabled.
# def test_search_distance_ip_index_params(self, connect, ip_table, get_index_params):
# '''
# target: search table, and check the result: distance
# method: compare the return distance value with value computed with Inner product
# expected: the return distance equals to the computed value
# '''
# top_k = 2
# nprobe = 1
# vectors, ids = self.init_data(connect, ip_table, nb=2)
# index_params = get_index_params
# connect.create_index(ip_table, index_params)
# logging.getLogger().info(connect.describe_index(ip_table))
# query_ids = [[0.50 for i in range(dim)]]
# status, result = connect.search_by_id(ip_table, top_k, nprobe, query_ids)
# logging.getLogger().debug(status)
# logging.getLogger().debug(result)
# distance_0 = numpy.inner(numpy.array(query_ids[0]), numpy.array(vectors[0]))
# distance_1 = numpy.inner(numpy.array(query_ids[0]), numpy.array(vectors[1]))
# assert abs(result[0][0].distance - max(distance_0, distance_1)) <= gen_inaccuracy(result[0][0].distance)
# TODO: enable
# @pytest.mark.repeat(5)
@pytest.mark.timeout(30)
def test_search_concurrent(self, connect, table):
vectors, ids = self.init_data(connect, table, nb=small_size)
thread_num = 10
nb = 100
top_k = 10
threads = []
# query_ids = vectors[nb//2:nb]
def search(query_id):
status, result = connect.search_by_id(table, top_k, query_id)
ids = [x.id for x in result[0]]
assert query_id in ids
assert result[0][0].distance == 0.0
for i in range(thread_num):
query_id = random.choice(vectors)
x = threading.Thread(target=search, args=(query_id))
threads.append(x)
x.start()
for th in threads:
th.join()
# TODO: enable
@pytest.mark.timeout(30)
def test_search_concurrent_multiprocessing(self, args):
nb = 100
top_k = 10
process_num = 4
processes = []
table = gen_unique_str("test_search_concurrent_multiprocessing")
uri = "tcp://%s:%s" % (args["ip"], args["port"])
param = {'table_name': table,
'dimension': dim,
'index_type': IndexType.FLAT}
# create table
milvus = get_milvus()
milvus.connect(uri=uri)
milvus.create_table(param)
vectors, ids = self.init_data(milvus, table, nb=nb)
def search(milvus, query_id):
status, result = milvus.search_by_id(table, top_k, query_id)
ids = [x.id for x in result[0]]
assert query_id in ids
assert result[0][0].distance == 0.0
for i in range(process_num):
milvus = get_milvus()
milvus.connect(uri=uri)
query_id = random.choice(vectors)
p = Process(target=search, args=(milvus, query_id))
processes.append(p)
p.start()
time.sleep(0.2)
for p in processes:
p.join()
    def test_search_multi_table_L2(search, args):
        """Create 10 L2-metric tables, insert the module-level `vectors` into
        each, then search every table for one of its own ids.

        NOTE(review): declared without `self`; inside the class pytest passes
        the instance as the first positional argument, so `search` receives
        the instance rather than a fixture — verify this is intended.
        """
        num = 10
        top_k = 10
        nprobe = 1
        tables = []
        idx = []
        for i in range(num):
            table = gen_unique_str("test_add_multitable_%d" % i)
            uri = "tcp://%s:%s" % (args["ip"], args["port"])
            param = {'table_name': table,
                     'dimension': dim,
                     'index_file_size': 10,
                     'metric_type': MetricType.L2}
            # create table
            milvus = get_milvus()
            milvus.connect(uri=uri)
            milvus.create_table(param)
            status, ids = milvus.add_vectors(table, vectors)
            assert status.OK()
            assert len(ids) == len(vectors)
            tables.append(table)
            idx.append(random.choice(ids))
        # give the server time to make the inserts searchable
        time.sleep(6)
        # start query from random table
        for i in range(num):
            table = tables[i]
            status, result = milvus.search_by_id(table, top_k, nprobe, idx[i])
            assert status.OK()
            assert check_result(result[0], idx[i])
    def test_search_multi_table_IP(search, args):
        '''
        target: test search multi tables of IP
        method: add vectors into 10 tables, and search
        expected: search status ok, the length of result
        '''
        # NOTE(review): declared without `self`, like test_search_multi_table_L2
        # above — verify the first positional parameter is intended.
        num = 10
        top_k = 10
        nprobe = 1
        tables = []
        idx = []
        for i in range(num):
            table = gen_unique_str("test_add_multitable_%d" % i)
            uri = "tcp://%s:%s" % (args["ip"], args["port"])
            param = {'table_name': table,
                     'dimension': dim,
                     'index_file_size': 10,
                     'metric_type': MetricType.IP}
            # create table
            milvus = get_milvus()
            milvus.connect(uri=uri)
            milvus.create_table(param)
            status, ids = milvus.add_vectors(table, vectors)
            assert status.OK()
            assert len(ids) == len(vectors)
            tables.append(table)
            idx.append(random.choice(ids))
        # give the server time to make the inserts searchable
        time.sleep(6)
        # start query from random table
        for i in range(num):
            table = tables[i]
            status, result = milvus.search_by_id(table, top_k, nprobe, idx[i])
            assert status.OK()
            assert check_result(result[0], idx[i])
"""
******************************************************************
# The following cases are used to test `search_by_id` function
# with invalid table_name top-k / nprobe / query_range
******************************************************************
"""
class TestSearchInvalid(object):
    """Negative-path tests for `search_by_id`: invalid table names, top-k
    values, nprobe values and vector ids.
    """
    nlist = 16384
    index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
    logging.getLogger().info(index_param)

    def init_data(self, connect, table, nb=6000):
        '''
        Generate vectors and add it in table, before search vectors
        '''
        global vectors
        if nb == 6000:
            add_vectors = vectors
        else:
            add_vectors = gen_vectors(nb, dim)
        status, ids = connect.add_vectors(table, add_vectors)
        sleep(add_interval_time)
        return add_vectors, ids

    """
    Test search table with invalid table names
    """
    @pytest.fixture(
        scope="function",
        params=gen_invalid_table_names()
    )
    def get_table_name(self, request):
        yield request.param

    @pytest.mark.level(2)
    def test_search_with_invalid_tablename(self, connect, get_table_name):
        """A malformed table name must yield a not-OK status."""
        table_name = get_table_name
        logging.getLogger().info(table_name)
        top_k = 1
        nprobe = 1
        query_id = non_exist_id
        status, result = connect.search_by_id(table_name, top_k, nprobe, query_id)
        assert not status.OK()

    @pytest.mark.level(1)
    def test_search_with_invalid_tag_format(self, connect, table):
        """A non-list partition_tag_array must raise from the client."""
        top_k = 1
        nprobe = 1
        query_id = non_exist_id
        with pytest.raises(Exception) as e:
            # BUG FIX: this used the undefined name `table_name`, so the
            # "expected" exception was actually a NameError and the
            # tag-format validation was never exercised. Use the `table`
            # fixture instead.
            status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array="tag")

    """
    Test search table with invalid top-k
    """
    @pytest.fixture(
        scope="function",
        params=gen_invalid_top_ks()
    )
    def get_top_k(self, request):
        yield request.param

    @pytest.mark.level(1)
    def test_search_with_invalid_top_k(self, connect, table, get_top_k):
        """Invalid top_k: ints are rejected with not-OK status, other types raise."""
        top_k = get_top_k
        logging.getLogger().info(top_k)
        nprobe = 1
        query_id = non_exist_id
        if isinstance(top_k, int):
            status, result = connect.search_by_id(table, top_k, nprobe, query_id)
            assert not status.OK()
        else:
            with pytest.raises(Exception) as e:
                status, result = connect.search_by_id(table, top_k, nprobe, query_id)

    @pytest.mark.level(2)
    def test_search_with_invalid_top_k_ip(self, connect, ip_table, get_top_k):
        """Same as test_search_with_invalid_top_k, on an IP-metric table."""
        top_k = get_top_k
        logging.getLogger().info(top_k)
        nprobe = 1
        query_id = non_exist_id
        if isinstance(top_k, int):
            status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id)
            assert not status.OK()
        else:
            with pytest.raises(Exception) as e:
                status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id)

    """
    Test search table with invalid nprobe
    """
    @pytest.fixture(
        scope="function",
        params=gen_invalid_nprobes()
    )
    def get_nprobes(self, request):
        yield request.param

    @pytest.mark.level(1)
    def test_search_with_invalid_nprobe(self, connect, table, get_nprobes):
        """Invalid nprobe: ints are rejected with not-OK status, other types raise."""
        top_k = 1
        nprobe = get_nprobes
        logging.getLogger().info(nprobe)
        query_id = non_exist_id
        if isinstance(nprobe, int):
            status, result = connect.search_by_id(table, top_k, nprobe, query_id)
            assert not status.OK()
        else:
            with pytest.raises(Exception) as e:
                status, result = connect.search_by_id(table, top_k, nprobe, query_id)

    @pytest.mark.level(2)
    def test_search_with_invalid_nprobe_ip(self, connect, ip_table, get_nprobes):
        '''
        target: test search fuction, with the wrong top_k
        method: search with top_k
        expected: raise an error, and the connection is normal
        '''
        top_k = 1
        nprobe = get_nprobes
        logging.getLogger().info(nprobe)
        query_id = non_exist_id
        if isinstance(nprobe, int):
            status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id)
            assert not status.OK()
        else:
            with pytest.raises(Exception) as e:
                status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id)

    """
    Test search table with invalid ids
    """
    @pytest.fixture(
        scope="function",
        params=gen_invalid_vector_ids()
    )
    def get_vector_ids(self, request):
        yield request.param

    @pytest.mark.level(1)
    def test_search_flat_with_invalid_vector_id(self, connect, table, get_vector_ids):
        '''
        target: test search fuction, with the wrong query_range
        method: search with query_range
        expected: raise an error, and the connection is normal
        '''
        vectors, ids = self.init_data(connect, table, nb=small_size)
        top_k = 1
        nprobe = 1
        query_id = get_vector_ids
        logging.getLogger().info(query_id)
        with pytest.raises(Exception) as e:
            status, result = connect.search_by_id(table, 1, nprobe, query_id)

    @pytest.mark.level(2)
    def test_search_flat_with_invalid_vector_id_ip(self, connect, ip_table, get_vector_ids):
        """Same as test_search_flat_with_invalid_vector_id, on an IP table."""
        vectors, ids = self.init_data(connect, ip_table, nb=small_size)
        top_k = 1
        nprobe = 1
        query_id = get_vector_ids
        logging.getLogger().info(query_id)
        with pytest.raises(Exception) as e:
            status, result = connect.search_by_id(ip_table, 1, nprobe, query_id)
def check_result(result, id):
    """Return True if `id` appears among (at most) the first five hits of `result`."""
    # Slicing handles both cases: with fewer than five hits, result[:5] is
    # simply the whole list.
    top_hits = result[:5] if len(result) >= 5 else result
    return any(hit.id == id for hit in top_hits)
pivideostream.py | # import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
from threading import Thread
import cv2
class PiVideoStream:
    """Threaded Raspberry Pi camera reader.

    A background thread continuously pulls BGR frames from the PiCamera so
    that read() always returns the newest frame without blocking the caller.
    """

    def __init__(self, resolution=(640, 480), framerate=32):
        # Configure the camera and open an always-on capture stream.
        self.camera = PiCamera()
        self.camera.resolution = resolution
        self.camera.framerate = framerate
        self.rawCapture = PiRGBArray(self.camera, size=resolution)
        self.stream = self.camera.capture_continuous(
            self.rawCapture, format="bgr", use_video_port=True)
        # Latest frame plus the flag the worker thread polls for shutdown.
        self.frame = None
        self.stopped = False

    def start(self):
        """Spawn the capture thread; return self so calls can be chained."""
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        """Worker loop: keep only the newest frame; release hardware on stop."""
        for capture in self.stream:
            self.frame = capture.array
            # Reset the buffer so the next capture starts from offset 0.
            self.rawCapture.truncate(0)
            if self.stopped:
                self.stream.close()
                self.rawCapture.close()
                self.camera.close()
                return

    def read(self):
        """Return the most recently captured frame (None before the first one)."""
        return self.frame

    def stop(self):
        """Ask the worker thread to shut down and release the camera."""
        self.stopped = True
|
ROXXANE1_0.py | import datetime
import json
import math
import random
import threading
import tkinter as tk
import webbrowser
from datetime import *
from time import sleep
from tkinter import *
import pandas as pd
import psutil
import pyaudio
import pyautogui
import pyperclip
import pyttsx3
import qrcode
import requests
import speech_recognition as sr
import wikipedia
from plyer import notification
from tqdm import tqdm
from vosk import Model, KaldiRecognizer
# Text-to-speech engine used by engine_speak().
engine = pyttsx3.init()
# Offline speech recognition: Vosk model expected one directory up.
model = Model("../model")
rec = KaldiRecognizer(model, 16000)
# Open the microphone: 16 kHz mono 16-bit, matching the recognizer's rate.
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16, channels=1, rate=16000, input=True, frames_per_buffer=8000)
stream.start_stream()
# Keyword lists (Portuguese): goodbye phrases, Office apps, openable apps/sites.
despedidas = ['tchal', 'adeus', 'te vejo mais tarde', 'até', 'até logo']
pacoteOffice = ['word', 'excel', 'powerpoint', 'access', 'teams']
opcoes = ['word', 'excel', 'powerpoint', 'access', 'teams', 'genshin impact', 'chrome', 'microsoft edge']
class ROXXANE(threading.Thread):
    """Portuguese-language voice assistant, run as a thread.

    run() listens (online recognizer first, offline Vosk as fallback) and
    dispatches the transcript through respostas()/conversas(). Unknown
    phrases are stored in two text databases (databaseF = phrases,
    databaseR = answers) so the user can teach new responses.
    """

    def __init__(self, asist_name, person):
        threading.Thread.__init__(self)  # Roxxane's own thread
        self.pessoa = person              # user's name, interpolated into replies
        self.nome_assistente = asist_name
        self.voz_data = ''                # last recognized utterance
        self.aux = ''                     # scratch buffer (e.g. wikipedia text to paste)
        # Load the answer database as flat "<id> <answer_with_underscores>" tokens.
        # NOTE(review): the file handles are never closed; prefer `with open(...)`.
        self.arquivoR = open(r"../databaseR.txt", 'r', encoding='utf-8')
        self.arquivoR = self.arquivoR.read()
        self.arquivoR = self.arquivoR.split()
        # Load the phrase database in the same "<id> <phrase>" token layout.
        self.arquivoF = open(r"../databaseF.txt", 'r', encoding='utf-8')
        self.arquivoF = self.arquivoF.read()
        self.arquivoF = self.arquivoF.split()

    def existe(self, termos):
        """Return True if any of `termos` occurs in the last utterance (else None)."""
        for termo in termos:
            if termo in self.voz_data:
                return True

    def pegar_comandos_separados(self):
        """Block on the microphone until Vosk yields a final transcript; return it lowercased."""
        while True:
            rec.pause_threshold = 1
            data = stream.read(10000)
            if len(data) == 0:
                break
            if rec.AcceptWaveform(data):
                resultado = rec.FinalResult()
                resultado = json.loads(resultado)
                if resultado is not None:
                    return resultado['text'].lower()

    def conversas(self):
        """Small-talk handler; unknown phrases fall through to the learned database."""
        # greetings
        if self.existe(['hey', 'hi', 'hello', 'oi', 'holla']):
            greetigns = [f'Oi {self.pessoa}, o que você está fazendo hoje?',
                         f'Oi {self.pessoa}, como eu posso te ajudar?',
                         f'Oi {self.pessoa}, você precisa de algo?',
                         f'Oi {self.pessoa}, como vai você?']
            greet = greetigns[random.randint(0, len(greetigns) - 1)]
            engine_speak(greet)
            self.voz_data = ''
        elif self.existe(['tudo bem', 'como você está', 'está tudo bem', 'está tude bem com você']):
            frases = [f'eu estou bem, {self.pessoa}, obrigada',
                      f'estou bem, muito obrigada, {self.pessoa}',
                      f'eu estou bem {self.pessoa}, como vai você?']
            fala = frases[random.randint(0, len(frases) - 1)]
            engine_speak(fala)
            self.voz_data = ''
        # assistant name
        elif self.existe(['qual é seu nome', 'me dia seu nome', 'seu nome é', 'seu nome']):
            engine_speak(f'Meu nome é {self.nome_assistente}')
        elif self.existe(['como você está', 'está tudo bem com você', 'está feliz']):
            # NOTE(review): a comma is missing between the 2nd and 3rd string
            # literals, so they are concatenated into one entry — verify intent.
            falas = [f'eu estou bem {self.pessoa}, obrigada por se preocupar',
                     f'eu estou ótima, {self.pessoa}, obrigada'
                     f'eu estou muito feliz como estou hoje, {self.pessoa}']
            fala = falas[random.randint(0, len(falas) - 1)]
            engine_speak(fala)
            self.voz_data = ''
        # profanity
        elif self.existe(['cu', 'caralho', 'porra', 'tá surda', 'cú']):
            engine_speak('Olha a boca menino')
            engine_speak('tenha modas')
        # goodbyes terminate the process
        elif self.existe(despedidas):
            engine_speak("Tenha um bom dia! até logo!")
            exit(0)
        # time-of-day greetings
        elif self.existe(['bom dia', 'boa tarde', 'boa noite']):
            # NOTE(review): `from datetime import *` at module top rebinds the
            # name `datetime` to the class, so `datetime.datetime.now()` here
            # likely raises AttributeError — confirm and use datetime.now().
            hora = int(datetime.datetime.now().hour)
            if 0 <= hora < 12:
                engine_speak('Olá')
                engine_speak('Bom dia')
            elif 12 <= hora < 18:
                engine_speak('Agora não é mais de manhã')
                engine_speak('Já passou do meio dia')
                engine_speak('Estamos no período da tarde')
                engine_speak('boa tarde')
            elif 0 != hora >= 18:
                engine_speak('Agora não é de manhã')
                engine_speak('Já estamos no período noturno')
                engine_speak('Boa noite')
        elif self.existe(['funcionando bem', 'bem', 'como você tem estado', 'você tem estado', 'tem estado']):
            engine_speak(f'Eu estou funcionando bem e sem problemas, {self.pessoa}, obrigada por perguntar')
            engine_speak('como você está?')
            # wait for a "well"/"bad" style answer
            while True:
                voz = self.pegar_comandos_separados()
                if 'bem' in voz:
                    engine_speak('Que bom, espero que continue assim!')
                    break
                if 'mal' in voz or 'mau' in voz:
                    engine_speak('que pena, eu sei que logo logo vai passar')
                    break
        else:
            # Unknown phrase: answer from the learned database, or ask the
            # user to teach the answers and store them.
            if self.retornar_fala_de_txt_resposta(str(self.voz_data)) is None:
                self.escrever_em_txt_dados(str(self.voz_data))
                self.voz_data = ''
            else:
                engine_speak(self.retornar_fala_de_txt_resposta(str(self.voz_data)))
                self.voz_data = ''

    # command dispatch
    def respostas(self):
        """Dispatch the recognized utterance to a command handler; fall back to conversas()."""
        # google search
        if self.existe(['pesquise por', 'pesquisar por', 'pesquise no google por']):
            search_term = self.voz_data.split("por")[-1]
            url = "http://google.com/search?q=" + str(search_term)
            webbrowser.get().open(url)
            self.voz_data = ''
            engine_speak("Aqui está o que você pediu sobre " + search_term + 'no google')
        # youtube search
        elif self.existe(["pesquise no youtube por"]):
            search_term = self.voz_data.split("por")[-1]
            url = "http://www.youtube.com/results?search_query=" + str(search_term)
            webbrowser.get().open(url)
            self.voz_data = ''
            engine_speak("Aqui está o que você pediu sobre" + search_term + 'no youtube')
        # open an application by name
        elif self.existe(["abrir "]):
            search_term = self.voz_data.split()
            search_term.remove('abrir')
            search_term = str(search_term).strip('[]').replace("'", "").replace(',', '')
            abrir_algo(search_term)
            self.voz_data = ''
            engine_speak("O " + search_term + " está aberto, pronto.")
        # open google homepage
        elif self.existe(['abra o google']):
            webbrowser.open('www.google.com')
            engine_speak('pronto')
        # close the current browser tab (Ctrl+W)
        elif self.existe(["fechar aba", 'fechar a', 'aba']):
            pyautogui.PAUSE = 1
            pyautogui.hotkey('ctrl', 'w')
            engine_speak("Pronto.")
        # close the whole window (Alt+F4)
        elif self.existe(["fechar abas", 'fechar todas as abas', 'fechar tudo']):
            pyautogui.PAUSE = 1
            pyautogui.hotkey('alt', 'f4')
            engine_speak("Pronto.")
        # type dictated (or previously copied) text via the clipboard
        elif self.existe(['escreva para mim', 'escreva']):
            if self.existe(['assunto copiado', 'assunto', 'copiado', 'escrever dados copiado']):
                pyautogui.PAUSE = 1
                pyperclip.copy(self.aux)
                pyautogui.hotkey('ctrl', 'v')
                return
            escrever = self.voz_data.split()
            if 'escreva' in escrever:
                escrever.remove('escreva')
            elif 'escreva' in escrever and escrever.index('escreva') == 0:
                # NOTE(review): unreachable — the previous branch already
                # matches whenever 'escreva' is present; verify intent.
                escrever.remove('escreva')
                escrever.remove('para')
                escrever.remove('mim')
            escrever = str(escrever).strip('[]').replace("'", "").replace(',', '')
            pyperclip.copy(escrever)
            pyautogui.hotkey('ctrl', 'v')
        # speak current time
        elif self.existe(['me fale as horas', 'fale as horas', 'horas']):
            horario()
        # speak today's date
        elif self.existe(['me fale o dia de hoje', 'fale a data de hoje', 'que dia é hoje', 'data']):
            datahj()
        # pause until told to come back
        elif self.existe(['parar', 'descansar', 'pausar', 'dar um tempo']):
            engine_speak('Beleza')
            engine_speak('estou aqui te esperando')
            engine_speak('se precisar de algo, só dizer para voltar')
            # NOTE(review): self.run() re-enters the main loop recursively,
            # growing the call stack each pause/resume cycle.
            while True:
                voz = self.pegar_comandos_separados()
                if 'voltar' in voz:
                    engine_speak('Ok')
                    engine_speak('Voltando')
                    engine_speak('Não me deixe sozinha por muito tempo')
                    engine_speak('vamos fazer alguma coisa logo')
                    self.run()
                elif 'retornar' in voz:
                    engine_speak('Ok')
                    engine_speak('Retornando')
                    engine_speak('Ficar em silencio é chato')
                    engine_speak('Me fale algo para fazer')
                    self.run()
        # wikipedia lookup
        elif self.existe(['assunto', 'wikipedia', 'pesquise sobre']):
            try:
                engine_speak('Beleza, me fale qual o assunto que você quer que eu pesquise?')
                voz = engine_reconition_online()
                if voz is None:
                    voz = self.pegar_comandos_separados()
                wikipedia.set_lang("pt")
                resultadowik = wikipedia.summary(voz, sentences=2)
                engine_speak('Você deseja ouvir o assunto ou escrever em outro lugar:')
                while True:
                    voz = str(self.pegar_comandos_separados())
                    if 'ouvir' in voz:
                        engine_speak(resultadowik)
                        return
                    elif 'escrever' in voz:
                        # stash the text so "escreva ... copiado" can paste it
                        self.aux = resultadowik
                        break
                    elif 'escrever em' in voz:
                        self.aux = resultadowik
                        break
                    elif 'escrever em outro lugar' in voz:
                        self.aux = resultadowik
                        break
            except:
                # NOTE(review): bare except hides real errors (it is not only
                # network failures); narrow the exception type.
                engine_speak('Desculpe, não consegui me conectar a internet')
        # weather (HG Brasil API)
        elif self.existe(['me diga o clima', 'clima', 'me diga o tempo de hoje', 'tempo de hoje', 'tempo']):
            try:
                url = requests.get('https://api.hgbrasil.com/weather')
                url_json = url.json()
                # today's conditions
                cida = url_json['results']['city']
                temperatura = url_json['results']['temp']
                condicao = url_json['results']['description']
                humidade = url_json['results']['humidity']
                velocidade_vento = url_json['results']['wind_speedy']
                engine_speak("O tempo")
                engine_speak("na cidade de " + cida + ":")
                engine_speak("Temperatura igual a " + str(temperatura))
                engine_speak("A condição de hoje é: " + condicao)
                engine_speak("A humidade é de " + str(humidade) + '%')
                engine_speak("A velocidade do vento é de " + str(velocidade_vento))
                engine_speak('Você deseja ver um resumo dos próximos 10 dias?')
                while True:
                    voz = str(self.pegar_comandos_separados())
                    aux = ''
                    if 'sim' in voz:
                        # build a 10-day forecast summary and show it in a popup
                        for c in range(10):
                            data = url_json['results']['forecast'][c]['date']
                            dia = url_json['results']['forecast'][c]['weekday']
                            maxi = url_json['results']['forecast'][c]['max']
                            mini = url_json['results']['forecast'][c]['min']
                            descricao = url_json['results']['forecast'][c]['description']
                            aux += (
                                "Data: " + str(data) + ", Dia da semana: " + str(dia) + ", Temp. máxima: " + str(
                                    maxi) + ', Temp. mínima:' + str(mini) + ", Clima: " + str(descricao) + "\n")
                        pyautogui.alert(aux, title='Resumo dos próximos dias')
                        break
                    if 'não' in voz:
                        break
            except requests.exceptions:
                # NOTE(review): `requests.exceptions` is a module, not an
                # exception class — this clause raises TypeError if reached;
                # it should be requests.exceptions.RequestException.
                engine_speak('Desculpe, mas não consegui me conectar a a internet')
        elif self.existe(['faça uma conta para mim', 'calculadora', 'calcular', 'faça uma conta']):
            calculadora()
        elif self.existe(['qr code', 'code', 'faça um qr code', 'faça um code']):
            criar_qrcode()
        elif self.existe(['faz uma análise', 'análise de dados', 'dados', 'análise de', 'fazer uma análise']):
            nome = pyautogui.prompt('Digite aqui o nome do arquivo', title='Nome do arquivo')
            # NOTE(review): `tabela` is loaded but never used — unfinished feature?
            tabela = pd.read_excel(rf"C:\\Users\\User\\Desktop\\{nome}.xlsx")
        else:
            # known app names open directly
            if self.existe(opcoes):
                abrir_algo(self.voz_data.lower())
            # list the available apps and open the chosen one
            elif self.existe(['fale as opções', 'opções de aplicativos', 'opções']):
                engine_speak('beleza, vou te falar as opções, por favor diga apenas o número da opção que você quer '
                             'depois de fechar a janela.')
                engine_speak('As opções são: ')
                aux = ''
                cont = 1
                for op in opcoes:
                    aux += f'{cont}° opção é: {op}\n'
                    cont += 1
                pyautogui.alert(aux, title='Opções')
                engine_speak('qual opção você quer?')
                # NOTE(review): ajeitar_numero returns a 1-based value, so
                # opcoes[vozn] is off by one, and this loop has no break —
                # confirm intended control flow (original indentation was
                # lost in this dump).
                while True:
                    vozn = self.pegar_comandos_separados()
                    vozn = self.ajeitar_numero(vozn)
                    if type(vozn) == int:
                        abrir_algo(opcoes[vozn])
            self.conversas()

    def ajeitar_numero(self, numero):
        """Map a spoken Portuguese number word ('um'..'dez') to 1..10 (None if absent)."""
        nums = ['um', 'dois', 'três', 'quatro', 'cinco', 'seis', 'sete', 'oito', 'nove', 'dez']
        for n in nums:
            if n in numero:
                return nums.index(n) + 1

    def escrever_em_txt_dados(self, lista_palavras):
        """Append a new phrase to databaseF, then collect its answers by voice.

        NOTE(review): the next id comes from self.arquivoF loaded at
        __init__ time, so ids repeat if several phrases are taught in one
        session without reloading the file.
        """
        lista_frases = []
        # next phrase id = last id in the file + 1
        contR = int(self.arquivoF[-2]) + 1
        with open(r"../databaseF.txt", 'a', encoding='utf-8') as F:
            # spaces are stored as underscores so split() keeps phrase/id pairs
            lista_palavras = lista_palavras.replace(" ", "_")
            F.write('\n')
            F.write(f'{contR} ')
            F.write(f'{lista_palavras} ')
            engine_speak("Quais são as possíveis respostas para essa pergunta?")
            engine_speak("fale 'break'/'parar' para parar!!!")
            while True:
                aux = engine_reconition_online()
                if aux is None:
                    aux = virtual_assistente.pegar_comandos_separados()
                if aux is not None:
                    if 'break' == aux:
                        break
                    if 'parar' == aux:
                        break
                    lista_frases.append(aux)
            self.escrever_em_txt_resposta(lista_frases, contR)
            engine_speak("Pronto, frase salvo no banco de dados")

    def escrever_em_txt_resposta(self, lista_palavras, cont):
        """Append each answer to databaseR under phrase id `cont`."""
        with open(r"../databaseR.txt", 'a', encoding='utf-8') as R:
            for passw in lista_palavras:
                passw = passw.replace(" ", "_")
                R.write('\n')
                R.write(f'{cont} ')
                R.write(f'{passw} ')

    def retornar_fala_de_txt_resposta(self, fala):
        """Look up `fala` in databaseF and return one matching answer from
        databaseR (random pick if several), or None when the phrase is unknown.
        """
        try:
            # the token preceding the stored phrase is its id
            index = int(self.arquivoF[self.arquivoF.index(fala.replace(" ", "_")) - 1])
            resposta = []
            for c in range(0, len(self.arquivoR), 2):
                if int(self.arquivoR[c]) == int(index):
                    resposta.append(self.arquivoR[c + 1])
            # NOTE(review): if no answer rows exist for the id, resposta[0]
            # raises IndexError, which is NOT caught below — verify.
            if len(resposta) > 1:
                return resposta[random.randint(0, len(resposta) - 1)].replace("_", " ")
            return resposta[0].replace("_", " ")
        except ValueError:
            # phrase not present in databaseF
            return None

    def run(self):
        """Main loop: try online recognition first, fall back to offline Vosk,
        then dispatch through respostas().

        NOTE(review): uses the module-level `virtual_assistente` instead of
        `self`, so this only works for the single global instance.
        """
        while True:
            try:
                virtual_assistente.voz_data = engine_reconition_online()
                if virtual_assistente.voz_data is None:
                    # offline path: feed raw mic audio to Vosk
                    data = stream.read(8000)
                    if len(data) == 0:
                        break
                    if rec.AcceptWaveform(data):
                        resultado = rec.FinalResult()
                        resultado = json.loads(resultado)
                        if resultado is not None:
                            virtual_assistente.voz_data = resultado['text'].lower()
                            virtual_assistente.respostas()
                else:
                    virtual_assistente.respostas()
            except sr.WaitTimeoutError:
                engine_speak('Por favor, não fique muito tempo sem falar')
                continue
class tela(threading.Thread, tk.Label):
    """Tk status panel showing the current date/time and CPU usage.

    NOTE(review): inherits tk.Label but never calls Label.__init__, so the
    instance itself is not a usable widget — only the two child Labels are.
    """

    def __init__(self, nome, master):
        threading.Thread.__init__(self)
        self.nome = nome
        self.master_tela = master
        # clock label
        self.texto_resposta = Label(self.master_tela, font=('Arial', 12), fg='Black', bg='white', height=1, width=20)
        self.texto_resposta.grid(column=0, row=1, padx=20, pady=20)
        # CPU-usage label
        self.texto_cpu = Label(self.master_tela, font=('Arial', 12), fg='black', bg='white', height=1, width=5)
        self.texto_cpu.grid(column=0, row=2, ipadx=1, pady=190)
        # kick off the periodic refresh callbacks
        self.atualizar()
        self.atualizar_cpu()

    def atualizar(self):
        """Refresh the date/time label and reschedule itself via Tk's after().

        NOTE(review): `datetime.now()` only works because `from datetime
        import *` rebinds `datetime` to the class; and after(1, ...) re-runs
        every millisecond — 1000 ms would suffice for a clock.
        """
        dataatual = datetime.now().strftime(f'%d/%m/%Y - %H:%M:%S Hrs')
        self.texto_resposta['text'] = dataatual
        self.master_tela.after(1, self.atualizar)

    def atualizar_cpu(self):
        """Refresh the CPU-percent label once a second."""
        porcentagem = str(psutil.cpu_percent())
        self.texto_cpu['text'] = porcentagem
        self.master_tela.after(1000, self.atualizar_cpu)
def comecar():
    """Spoken start-up sequence: announce each phase, then tick for ~10 s
    with a tqdm bar, printing a dot per second.
    """
    fases = ('Estou iniciando, espra um pouco aí...',
             'preparando módulos...',
             'finalizando...')
    for fase in fases:
        engine_speak(fase)
        for _ in tqdm(range(10)):
            sleep(1)
            print('.', end='')
    engine_speak('Pronto, já vou aparecer...')
def criar_qrcode():
    """Interactively create one or several QR-code PNG files.

    Asks (by voice) whether the user wants a single code or a batch, then
    prompts for link(s) and file name(s) via dialog boxes and saves the
    generated PNGs.
    """
    import os  # local import: the module-level import block is outside this view

    engine_speak("Você deseja criar vários qr codes de vários links, ou apenas um?")
    try:
        # Get the user's choice: online recognition first, offline fallback.
        while True:
            voz = engine_reconition_online()
            if voz is None:
                voz = virtual_assistente.pegar_comandos_separados()
            if voz is not None:
                break
        if 'apenas um' in voz or 'um' in voz or 'apenas' in voz:
            # Single QR code, saved on the user's Desktop.
            engine_speak('Beleza, digita para mim o link que você deseja usar para criar o QR code')
            link = pyautogui.prompt("Digite o link: ", title='Link')
            engine_speak('Agora digite para mim o nome do arquivo: ')
            nome = pyautogui.prompt("Nome do arquivo: ", title="Nome")
            engine_speak('Certo, espera um pouco aí...')
            meu_qrcode = qrcode.make(link)
            # Resolve the current user's Desktop instead of the original
            # hard-coded C:\Users\User account path.
            destino = os.path.join(os.path.expanduser('~'), 'Desktop', f'qrcode_{nome}.png')
            meu_qrcode.save(destino)
            engine_speak("Pronto, já está na sua área de trabalho")
        else:
            # Batch mode: ask how many, collect name/link pairs, save all.
            engine_speak('Beleza, quantos você quer fazer?')
            while True:
                vozq = engine_reconition_online()
                if vozq is None:
                    vozq = virtual_assistente.pegar_comandos_separados()
                if vozq is not None:
                    break
            vozq = virtual_assistente.ajeitar_numero(vozq)
            links = {}
            for c in range(int(vozq)):
                nome = pyautogui.prompt("Nome: ", title="Nome do arquivo")
                link = pyautogui.prompt("Digite o link: ", title='Link')
                links[f'{nome}'] = str(link)
            for produto in links:
                meu_qrcode = qrcode.make(links[produto])
                meu_qrcode.save(f"qrcode_{produto}.png")
            engine_speak('Pronto, qr codes feitos')
    except sr.RequestError:
        engine_speak("Não foi possível conectar a internet")
    except Exception:
        # Was `except BaseException`, which also swallowed KeyboardInterrupt
        # and SystemExit; Exception keeps the best-effort apology without that.
        engine_speak('Desculpe, não pude realizar o processo, algo deu errado')
def calculadora():
    """Voice calculator: parses a spoken equation token by token.

    The spoken sentence is split into a token list `voz`; each pass of the
    main loop collapses exactly one operator (factorial, power, x/vezes,
    /, +, -) by splicing the partial result back into the list, until no
    operator remains. The user is then asked whether to hear the result
    or paste it somewhere.
    """
    engine_speak('Beleza, o que você deseja calcular?')
    engine_speak('Por favor, falar apenas equações simples, afinal estou em fase de desenvolvimento')
    try:
        # Capture the equation (online first, offline fallback).
        # NOTE(review): .lower() is called before the None check, so a
        # failed online recognition raises AttributeError instead of
        # reaching the offline branch — confirm intended behavior.
        while True:
            voz = str(engine_reconition_online().lower())
            if voz is None:
                voz = virtual_assistente.pegar_comandos_separados().lower()
                break
            if voz is not None:
                break
        voz = voz.split()
        while True:
            indexAux = ''
            # --- factorial: "fatorial de N" ---------------------------------
            if str(voz).count("fatorial"):
                nums = separar(voz, (voz.index("fatorial")))
                num = nums[1]
                valor_total = 1
                for c in range(1, num + 1):
                    valor_total *= c
                valor_total = int(valor_total)
                # Splice the result back where the operator stood, then
                # drop the operator and its operands from the token list.
                voz.insert(int(voz.index("fatorial")), str(valor_total))
                voz.remove("fatorial")
                voz.remove("de")
                voz.remove(str(nums[1]))
                continue
            # --- exponentiation: "A elevado a B" (or "A ^ B") ---------------
            if str(voz).count('elevado') != 0:
                if '^' in voz:
                    indexAux = '^'
                if 'elevado' in voz:
                    indexAux = 'elevado'
                nums = separar(voz, (voz.index(indexAux)))
                valor_total = math.pow(nums[0], nums[1])
                valor_total = int(valor_total)
                voz.insert(int(voz.index(indexAux)), str(valor_total))
                voz.remove(indexAux)
                if indexAux == 'elevado':
                    voz.remove('a')
                voz.remove(str(nums[0]))
                voz.remove(str(nums[1]))
                continue
            # --- multiplication: "A x B" / "A vezes B" ----------------------
            if str(voz).count('x') != 0 or str(voz).count('vezes') != 0:
                if 'x' in voz:
                    indexAux = 'x'
                if 'vezes' in voz:
                    indexAux = 'vezes'
                nums = separar(voz, voz.index(indexAux))
                valor_total = (nums[0] * nums[1])
                voz.insert(int(voz.index(indexAux)), str(valor_total))
                voz.remove(indexAux)
                voz.remove(str(nums[0]))
                voz.remove(str(nums[1]))
                continue
            # --- division: "A / B" / "A dividido B" / "A ÷ B" ---------------
            if str(voz).count('/') != 0 or str(voz).count('dividido') != 0 or str(voz).count("÷"):
                if '÷' in voz:
                    indexAux = '÷'
                if 'dividido' in voz:
                    indexAux = 'dividido'
                if '/' in voz:
                    indexAux = '/'
                nums = separar(voz, voz.index(indexAux))
                valor_total = (nums[0] / nums[1])
                voz.insert(int(voz.index(indexAux)), str(valor_total))
                voz.remove(indexAux)
                voz.remove(str(nums[0]))
                voz.remove(str(nums[1]))
                continue
            # --- addition: "A somado a B" / "A + B" -------------------------
            if str(voz).count('somado') != 0 or str(voz).count('+') != 0:
                if '+' in voz:
                    indexAux = '+'
                if 'somado' in voz:
                    indexAux = 'somado'
                nums = separar(voz, voz.index(indexAux))
                valor_total = (nums[0] + nums[1])
                voz.insert(int(voz.index(indexAux)), str(valor_total))
                voz.remove(indexAux)
                if indexAux == 'somado':
                    voz.remove('a')
                voz.remove(str(nums[0]))
                voz.remove(str(nums[1]))
                continue
            # --- subtraction: "A subtraido de B" / "A - B" ------------------
            if str(voz).count('subtraido') != 0 or str(voz).count('-') != 0:
                if '-' in voz:
                    indexAux = '-'
                if 'subtraido' in voz:
                    indexAux = 'subtraido'
                nums = separar(voz, voz.index(indexAux))
                valor_total = (nums[0] - nums[1])
                voz.insert(int(voz.index(indexAux)), str(valor_total))
                voz.remove(indexAux)
                if indexAux == 'subtraido':
                    voz.remove('de')
                voz.remove(str(nums[0]))
                voz.remove(str(nums[1]))
                continue
            # No operator matched: `voz` should now hold only the result.
            if not str(voz).isalnum():
                engine_speak('Deseja ouvir o resultado ou colar em algum lugar?')
                while True:
                    vozsair = engine_reconition_online()
                    if vozsair is None:
                        vozsair = virtual_assistente.pegar_comandos_separados().lower()
                        break
                    if vozsair is not None:
                        vozsair = vozsair.lower()
                        break
                if 'colar' in vozsair:
                    # Defer pasting to the assistant's main handler.
                    virtual_assistente.aux = vozsair
                    break
                if 'ouvir' in vozsair:
                    engine_speak('O resultado é:' + str(voz).strip('[]').replace("'", ""))
                    break
            if not str(voz).isalpha():
                engine_speak('Deseja fazer mais alguma conta?')
                while True:
                    vozsair = engine_reconition_online()
                    if vozsair is None:
                        vozsair = virtual_assistente.pegar_comandos_separados().lower()
                        break
                    if vozsair is not None:
                        vozsair = vozsair.lower()
                        break
                if 'sim' in vozsair:
                    continue
                else:
                    break
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # consider `except Exception`.
        engine_speak("Desculpe, algo deu errado")
        engine_speak("Pode ter sido a conexão, ou algum valor inválido")
def separar(voz_data, index=0):
    """Find the numeric tokens flanking position *index* in *voz_data*.

    Scans left (from *index* down to 0) and right (from *index* to the end)
    for the nearest token made of digits, returning both as a two-element
    list of ints. A missing operand defaults to 0.
    """
    esquerda = 0
    for token in reversed(voz_data[:index + 1]):
        if token.isnumeric():
            esquerda = token
            break
    direita = 0
    for token in voz_data[index:]:
        if token.isnumeric():
            direita = token
            break
    return [int(esquerda), int(direita)]
def engine_speak(text):
    """Speak *text* aloud via the global TTS `engine` (blocks until done)."""
    frase = str(text)
    # Re-select the first installed voice on every call, as before.
    primeira_voz = engine.getProperty('voices')[0]
    engine.setProperty('voice', primeira_voz.id)
    engine.say(frase)
    engine.runAndWait()
def engine_reconition_online():
    """Listen on the microphone and transcribe via Google's online API.

    Returns the lower-cased transcript (also stored on
    ``virtual_assistente.voz_data``). Returns None when the service is
    unreachable, or implicitly after prompting the user on an
    unintelligible utterance. A ``WaitTimeoutError`` raised by ``listen``
    is deliberately left to propagate to the caller.
    """
    reconhecedor = sr.Recognizer()
    with sr.Microphone() as fonte:
        # Up to 10 s waiting for speech, at most 10 s of phrase audio.
        audio = reconhecedor.listen(fonte, 10, 10)
    try:
        transcricao = reconhecedor.recognize_google(audio, language='pt')
    except sr.UnknownValueError:
        engine_speak('Por favor, fale novamente, eu não entendi o que você falou')
    except sr.RequestError:
        return None
    else:
        virtual_assistente.voz_data = transcricao
        return transcricao.lower()
def Boas_vindas():
    """Greet the user by time of day, then run a short small-talk exchange."""
    hora = datetime.now().hour
    # BUG FIX: the original used broken chained comparisons
    # (`0 < hour > 12` means hour > 12!), so "Bom dia" fired in the
    # afternoon and "Boa tarde" in the evening. Plain ranges instead.
    if 0 < hora < 12:
        engine_speak('Bom dia')
    elif 12 <= hora < 18:
        engine_speak('Boa tarde')
    else:
        engine_speak('Boa noite')
    engine_speak(f'Oi {virtual_assistente.pessoa}, como você está?')
    # Guard against a None transcript so the `in` checks cannot raise.
    voz = virtual_assistente.pegar_comandos_separados() or ''
    if 'estou' in voz or 'obrigado' in voz or 'bem' in voz:
        engine_speak('que bom, então, vamos fazer alguma coisa?')
        voz = virtual_assistente.pegar_comandos_separados() or ''
        # The three original branches all spoke the same reply — collapsed.
        if any(palavra in voz for palavra in ('bora', 'beleza', 'claro')):
            engine_speak('Beleza, bora')
def write(textow=''):
    """Paste *textow* at the current cursor via the clipboard (Ctrl+V)."""
    # Copy first, then synthesize the paste shortcut.
    pyperclip.copy(textow)
    pyautogui.hotkey('ctrl', 'v')
def notificar(text=''):
    """Show a 20-second desktop notification with *text* as its body.

    The original default was ``''''''`` (an empty triple-quoted string);
    plain ``''`` is equivalent and far clearer.
    """
    notification.notify(title="R.O.X.X.A.N.E", message=text, timeout=20)
def horario():
    """Speak the current time as "Agora São HH horas e MM minutos"."""
    agora = datetime.now()
    frase = agora.strftime('São %H horas e %M minutos')
    engine_speak('Agora ' + frase)
def datahj():
    """Speak today's weekday and full date in Portuguese."""
    data = date.today()
    semana = ('Segunda-feira', 'Terça-feira', 'Quarta-feira', 'Quinta-feira', 'Sexta-feira', 'Sábado', 'Domingo')
    meses = ('Janeiro', 'Fevereiro', 'Março', 'Abril', 'Maio', 'Junho', 'Julho', 'Agosto', 'Setembro', 'Outubro',
             'Novembro', 'Dezembro')
    # BUG FIX: date.month is 1-based, so `meses[data.month]` skipped January
    # entirely and raised IndexError every December.
    mesatual = meses[data.month - 1]
    ano = data.strftime(" de %Y")
    engine_speak("Hoje é " + semana[data.weekday()])
    # " de " keeps the day and month from running together when spoken.
    engine_speak('Dia ' + str(data.day) + ' de ' + mesatual + ano)
def abrir_algo(name):
    """Open an application by typing its name into the Windows Start menu.

    Office apps (listed in the global `pacoteOffice`) need an extra Enter
    press with a longer delay to get past their splash/landing screen.
    """
    pyautogui.PAUSE = 1.2  # global delay between consecutive pyautogui actions
    pyautogui.press('win')
    pyautogui.write(name)
    # NOTE(review): presumably deletes a stray trailing character left by
    # write() before confirming — verify why this backspace is needed.
    pyautogui.press('backspace')
    pyautogui.press('enter')
    for l in pacoteOffice:
        if l == name:
            # Office apps: slow down and press Enter once more.
            pyautogui.PAUSE = 1.8
            pyautogui.press('enter')
# --- script entry: wire the assistant up to the Tk window -------------------
# NOTE(review): this call appears BEFORE `virtual_assistente` is created a few
# lines below — in the original (indented) source it presumably sat inside a
# function or after the instantiation; confirm against the real file.
virtual_assistente.run()
# initializing (boot sequence, currently disabled)
# comecar()
# welcome greeting (currently disabled)
# Boas_vindas()
# class instances
virtual_assistente = ROXXANE('ROCSANE', "Jhonattan")
janela = Tk()
janela.title(f"Roxxane")
background_image = PhotoImage(file="../Nome.PNG")
background_label = Label(janela, image=background_image)
background_label.place(x=0, y=0, relwidth=1, relheight=1)
janela.geometry('500x300')
# Pane showing the clock and CPU usage inside the window.
janela_aparecer = tela('ROCSANE', janela)
virtual_assistente.start()
janela_aparecer.start()
janela.resizable(0, 0)  # fixed-size window
janela.mainloop()
|
demo6.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Topic: 线程锁
多线程和多进程最大的不同在于,多进程中,同一个变量,各自有一份拷贝存在于每个进程中,互不影响,
而多线程中,所有变量都由所有线程共享,所以,任何一个变量都可以被任何一个线程修改,
因此,线程之间共享数据最大的危险在于多个线程同时改一个变量,把内容给改乱了。
"""
import time, threading
# 假定这是你的银行存款:
balance = 0
lock = threading.Lock()
def change_it(n):
    """Deposit then withdraw *n* — the net effect on `balance` is zero."""
    global balance
    balance += n
    balance -= n
def run_thread(n):
    """Hammer change_it(n) 100k times, each call under the shared lock."""
    for _ in range(100000):
        # `with lock:` acquires and guarantees release — equivalent to the
        # original acquire/try/finally/release sequence.
        with lock:
            change_it(n)
# Two workers mutate the shared balance concurrently; with the lock in place
# the final balance must still be 0.
t1 = threading.Thread(target=run_thread, args=(5,))
t2 = threading.Thread(target=run_thread, args=(8,))
t1.start()
t2.start()
t1.join()
t2.join()
print(balance)
|
newevent.py | #---------------------------------------------------------------------------
# Name: newevent.py
# Purpose: Easy generation of new events classes and binder objects.
#
# Author: Miki Tebeka <miki.tebeka@gmail.com>
#
# Created: 18-Sept-2006
# Copyright: (c) 2006-2020 by Total Control Software
# Licence: wxWindows license
#
# Tags: phoenix-port, documented
#---------------------------------------------------------------------------
"""
Easy generation of new events classes and binder objects.
Description
===========
This module contains two functions which makes the generation of custom wxPython events
particularly easy.
Usage
=====
Sample usage::
import wx
import time
import threading
import wx.lib.newevent as NE
MooEvent, EVT_MOO = NE.NewEvent()
GooEvent, EVT_GOO = NE.NewCommandEvent()
DELAY = 0.7
def evt_thr(win):
time.sleep(DELAY)
wx.PostEvent(win, MooEvent(moo=1))
def cmd_thr(win, id):
time.sleep(DELAY)
wx.PostEvent(win, GooEvent(id, goo=id))
ID_CMD1 = wx.NewIdRef()
ID_CMD2 = wx.NewIdRef()
class Frame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, -1, "MOO")
sizer = wx.BoxSizer(wx.VERTICAL)
self.Bind(EVT_MOO, self.on_moo)
b = wx.Button(self, -1, "Generate MOO")
sizer.Add(b, 1, wx.EXPAND)
b.Bind(wx.EVT_BUTTON, self.on_evt_click)
b = wx.Button(self, ID_CMD1, "Generate GOO with %d" % ID_CMD1)
sizer.Add(b, 1, wx.EXPAND)
b.Bind(wx.EVT_BUTTON, self.on_cmd_click)
b = wx.Button(self, ID_CMD2, "Generate GOO with %d" % ID_CMD2)
sizer.Add(b, 1, wx.EXPAND)
b.Bind(wx.EVT_BUTTON, self.on_cmd_click)
self.Bind(EVT_GOO, self.on_cmd1, id=ID_CMD1)
self.Bind(EVT_GOO, self.on_cmd2, id=ID_CMD2)
self.SetSizer(sizer)
self.SetAutoLayout(True)
sizer.Fit(self)
def on_evt_click(self, e):
t = threading.Thread(target=evt_thr, args=(self, ))
            t.daemon = True
t.start()
def on_cmd_click(self, e):
t = threading.Thread(target=cmd_thr, args=(self, e.GetId()))
            t.daemon = True
t.start()
def show(self, msg, title):
dlg = wx.MessageDialog(self, msg, title, wx.OK)
dlg.ShowModal()
dlg.Destroy()
def on_moo(self, e):
self.show("MOO = %s" % e.moo, "Got Moo")
def on_cmd1(self, e):
self.show("goo = %s" % e.goo, "Got Goo (cmd1)")
def on_cmd2(self, e):
self.show("goo = %s" % e.goo, "Got Goo (cmd2)")
app = wx.App(0)
f = Frame()
f.Show(True)
app.MainLoop()
"""
__author__ = "Miki Tebeka <miki.tebeka@gmail.com>"
import wx
#---------------------------------------------------------------------------
def NewEvent():
    """
    Generates a new `(event, binder)` tuple.
    ::
        MooEvent, EVT_MOO = NewEvent()
    """
    new_type = wx.NewEventType()

    class _Event(wx.PyEvent):
        def __init__(self, **attrs):
            super().__init__()
            self.SetEventType(new_type)
            # Expose the keyword arguments as event attributes.
            self._getAttrDict().update(attrs)

    binder = wx.PyEventBinder(new_type)
    return _Event, binder
def NewCommandEvent():
    """
    Generates a new `(command_event, binder)` tuple.
    ::
        MooCmdEvent, EVT_MOO = NewCommandEvent()
    """
    new_type = wx.NewEventType()

    class _Event(wx.PyCommandEvent):
        def __init__(self, id, **attrs):
            super().__init__(new_type, id)
            # Expose the keyword arguments as event attributes.
            self._getAttrDict().update(attrs)

    # expectedIDs=1: this binder matches on a source widget id.
    binder = wx.PyEventBinder(new_type, 1)
    return _Event, binder
#---------------------------------------------------------------------------
def _test():
    """A little smoke test: a frame with buttons that post custom events."""
    import time
    import threading
    MooEvent, EVT_MOO = NewEvent()
    GooEvent, EVT_GOO = NewCommandEvent()
    DELAY = 0.7

    def evt_thr(win):
        time.sleep(DELAY)
        wx.PostEvent(win, MooEvent(moo=1))

    def cmd_thr(win, id):
        time.sleep(DELAY)
        wx.PostEvent(win, GooEvent(id, goo=id))

    ID_CMD1 = wx.NewIdRef()
    ID_CMD2 = wx.NewIdRef()

    class Frame(wx.Frame):
        def __init__(self):
            wx.Frame.__init__(self, None, -1, "MOO")
            sizer = wx.BoxSizer(wx.VERTICAL)
            self.Bind(EVT_MOO, self.on_moo)
            b = wx.Button(self, -1, "Generate MOO")
            sizer.Add(b, 1, wx.EXPAND)
            b.Bind(wx.EVT_BUTTON, self.on_evt_click)
            b = wx.Button(self, ID_CMD1, "Generate GOO with %d" % ID_CMD1)
            sizer.Add(b, 1, wx.EXPAND)
            b.Bind(wx.EVT_BUTTON, self.on_cmd_click)
            b = wx.Button(self, ID_CMD2, "Generate GOO with %d" % ID_CMD2)
            sizer.Add(b, 1, wx.EXPAND)
            b.Bind(wx.EVT_BUTTON, self.on_cmd_click)
            self.Bind(EVT_GOO, self.on_cmd1, id=ID_CMD1)
            self.Bind(EVT_GOO, self.on_cmd2, id=ID_CMD2)
            self.SetSizer(sizer)
            self.SetAutoLayout(True)
            sizer.Fit(self)

        def on_evt_click(self, e):
            t = threading.Thread(target=evt_thr, args=(self, ))
            # Thread.setDaemon() is deprecated since Python 3.10; assign the
            # `daemon` attribute instead.
            t.daemon = True
            t.start()

        def on_cmd_click(self, e):
            t = threading.Thread(target=cmd_thr, args=(self, e.GetId()))
            t.daemon = True
            t.start()

        def show(self, msg, title):
            dlg = wx.MessageDialog(self, msg, title, wx.OK)
            dlg.ShowModal()
            dlg.Destroy()

        def on_moo(self, e):
            self.show("MOO = %s" % e.moo, "Got Moo")

        def on_cmd1(self, e):
            self.show("goo = %s" % e.goo, "Got Goo (cmd1)")

        def on_cmd2(self, e):
            self.show("goo = %s" % e.goo, "Got Goo (cmd2)")

    app = wx.App(0)
    f = Frame()
    f.Show(True)
    app.MainLoop()
# Run the interactive smoke test when executed directly.
if __name__ == "__main__":
    _test()
|
launch_test.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing as mp
import os
import shutil
import subprocess
import tempfile
import unittest
import uuid
from unittest.mock import patch
import torchelastic.distributed.launch as launch
import torchelastic.rendezvous.etcd_rendezvous # noqa: F401
from test_utils import is_tsan
from torch.multiprocessing import start_processes
from torchelastic.rendezvous.etcd_server import EtcdServer
def path(script):
    """Resolve *script* relative to the directory containing this file."""
    here = os.path.dirname(__file__)
    return os.path.join(here, script)
def get_child_pids(pid):
    """Return the PIDs of *pid*'s direct children (empty list if none).

    Uses ``pgrep -P`` with an argument list (no shell) — the original ran
    an f-string through ``shell=True``, shadowed ``pid`` with its loop
    variable, and never closed the pipe.
    """
    # pgrep exits 1 when there are no matches, so don't check the returncode.
    proc = subprocess.run(
        ["pgrep", "-P", str(pid)], stdout=subprocess.PIPE
    )
    out = proc.stdout.decode("utf-8").rstrip().split("\n")
    return [int(token) for token in out if token]
def pid_exists(pid):
    """Return True if a process with *pid* currently exists.

    Sends signal 0 (no-op). BUG FIX: ``os.kill`` raises ``PermissionError``
    when the process exists but belongs to another user — the original
    lumped that with ``OSError`` and wrongly reported False.
    """
    try:
        os.kill(pid, 0)
        return True
    except PermissionError:
        # No permission to signal it, but it definitely exists.
        return True
    except OSError:
        # ESRCH: no such process.
        return False
class LaunchTest(unittest.TestCase):
    """Integration tests for torchelastic's ``launch`` CLI entry point.

    A single standalone etcd server is shared by all tests as the
    rendezvous backend. Worker scripts touch one file per global rank in
    a per-test scratch directory; each test asserts the expected set of
    rank-named files afterwards.
    """

    @classmethod
    def setUpClass(cls):
        # start a standalone, single process etcd server to use for all tests
        cls._etcd_server = EtcdServer()
        cls._etcd_server.start()
        cls._etcd_endpoint = cls._etcd_server.get_endpoint()

    @classmethod
    def tearDownClass(cls):
        # stop the standalone etcd server
        cls._etcd_server.stop()

    def setUp(self):
        # Fresh scratch dir per test; workers create rank-named files here.
        self.test_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.test_dir)

    @unittest.skipIf(is_tsan(), "test incompatible with tsan")
    def test_launch_user_script_python(self):
        """Launch a Python worker script: 1 node x 4 procs over etcd."""
        run_id = str(uuid.uuid4().int)
        nnodes = 1
        nproc_per_node = 4
        world_size = nnodes * nproc_per_node
        args = [
            f"--nnodes={nnodes}",
            f"--nproc_per_node={nproc_per_node}",
            f"--rdzv_backend=etcd",
            f"--rdzv_endpoint={self._etcd_endpoint}",
            f"--rdzv_id={run_id}",
            f"--monitor_interval=1",
            f"--start_method=fork",
            path("bin/test_script.py"),
            f"--touch_file_dir={self.test_dir}",
        ]
        launch.main(args)
        # make sure all the workers ran
        # each worker touches a file with its global rank as the name
        self.assertSetEqual(
            {str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
        )

    @unittest.skipIf(is_tsan(), "test incompatible with tsan")
    def test_launch_user_script_bash(self):
        """Launch a non-Python (bash) worker via --no_python."""
        run_id = str(uuid.uuid4().int)
        nnodes = 1
        nproc_per_node = 4
        world_size = nnodes * nproc_per_node
        args = [
            f"--nnodes={nnodes}",
            f"--nproc_per_node={nproc_per_node}",
            f"--rdzv_backend=etcd",
            f"--rdzv_endpoint={self._etcd_endpoint}",
            f"--rdzv_id={run_id}",
            f"--monitor_interval=1",
            f"--start_method=fork",
            f"--no_python",
        ]
        script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
        with self.assertRaises(ValueError):
            # --no_python cannot be used with --module
            launch.main(args + ["--module"] + script_args)
        launch.main(args + script_args)
        # make sure all the workers ran
        # each worker touches a file with its global rank as the name
        self.assertSetEqual(
            {str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
        )

    # @unittest.skipIf(is_tsan(), "test incompatible with tsan")
    def test_wrapper_fn_kill_script_process(self):
        """
        tests that the wrapper_fn properly terminates
        the script process (the script process is the sub_sub_process of
        the agent)
        """
        nprocs = 2
        sleep = 300

        # wraps wrapper_fn to be torch.multiprocessing compatible
        # which requires rank to be passed as first argument
        def wrap_wrap(rank, *args):
            launch.wrapper_fn(*args)

        context = start_processes(
            fn=wrap_wrap,
            args=(None, (path("bin/sleep_script.py"), "--sleep", f"{sleep}")),
            nprocs=nprocs,
            join=False,
            start_method="fork",
        )
        # quick check to see that the wrapper_fn started running
        # without this join() call we don't see an exception on typos
        # and other silly mistakes (silently fails)
        context.join(timeout=-1)
        script_pids = []
        for wrapper_fn_pid in context.pids():
            script_pid = get_child_pids(wrapper_fn_pid)
            # there should only be one child of wrapper_fn
            self.assertEqual(1, len(script_pid))
            script_pids.append(script_pid[0])
        for wrapper_fn_proc in context.processes:
            wrapper_fn_proc.terminate()
            wrapper_fn_proc.join()
        for script_pid in script_pids:
            # terminating the wrapper must have taken the script down too
            self.assertFalse(pid_exists(script_pid))

    def _test_nproc_launch_configuration(self, nproc_type, expected_number):
        """Helper: launch with --nproc_per_node=<nproc_type> and expect
        *expected_number* workers on the single node."""
        run_id = str(uuid.uuid4().int)
        nnodes = 1
        args = [
            f"--nnodes={nnodes}",
            f"--nproc_per_node={nproc_type}",
            f"--rdzv_backend=etcd",
            f"--rdzv_endpoint={self._etcd_endpoint}",
            f"--rdzv_id={run_id}",
            f"--monitor_interval=1",
            f"--start_method=fork",
            f"--no_python",
        ]
        script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
        launch.main(args + script_args)
        world_size = nnodes * expected_number
        # make sure all the workers ran
        # each worker touches a file with its global rank as the name
        self.assertSetEqual(
            {str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
        )

    @unittest.skipIf(is_tsan(), "test incompatible with tsan")
    def test_nproc_launch_auto_configurations(self):
        # "auto" without GPUs falls back to the CPU count.
        self._test_nproc_launch_configuration("auto", os.cpu_count())

    @unittest.skipIf(is_tsan(), "test incompatible with tsan")
    def test_nproc_launch_number_configurations(self):
        self._test_nproc_launch_configuration("4", 4)

    @unittest.skipIf(is_tsan(), "test incompatible with tsan")
    def test_nproc_launch_unknown_configurations(self):
        with self.assertRaises(ValueError):
            self._test_nproc_launch_configuration("unknown", 4)

    @unittest.skipIf(is_tsan(), "test incompatible with tsan")
    @patch("torch.cuda.is_available", return_value=True)
    @patch("torch.cuda.device_count", return_value=3)
    def test_nproc_gpu_launch_configurations(self, _mock1, _mock2):
        # With (mocked) GPUs present, "auto" and "gpu" both mean one
        # worker per visible device.
        self._test_nproc_launch_configuration("auto", 3)
        self._test_nproc_launch_configuration("gpu", 3)

    @unittest.skipIf(is_tsan(), "test incompatible with tsan")
    def test_launch_elastic(self):
        """Elastic range 1:2 nodes, but only one node actually joins."""
        run_id = str(uuid.uuid4().int)
        min_nodes = 1
        max_nodes = 2
        nproc_per_node = 4
        # we are only launching 1 node (even though max = 2)
        world_size = nproc_per_node
        args = [
            f"--nnodes={min_nodes}:{max_nodes}",
            f"--nproc_per_node={nproc_per_node}",
            f"--rdzv_backend=etcd",
            f"--rdzv_endpoint={self._etcd_endpoint}",
            f"--rdzv_id={run_id}",
            f"--monitor_interval=1",
            f"--start_method=fork",
            path("bin/test_script.py"),
            f"--touch_file_dir={self.test_dir}",
        ]
        launch.main(args)
        # make sure all the workers ran
        # each worker touches a file with its global rank as the name
        self.assertSetEqual(
            {str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
        )

    @unittest.skipIf(is_tsan(), "test incompatible with tsan")
    def test_launch_standalone(self):
        """--standalone spins up its own rendezvous, no etcd needed."""
        nnodes = 1
        nproc_per_node = 4
        world_size = nnodes * nproc_per_node
        args = [
            f"--nnodes={nnodes}",
            f"--nproc_per_node={nproc_per_node}",
            f"--standalone",
            f"--monitor_interval=1",
            f"--start_method=fork",
            path("bin/test_script.py"),
            f"--touch_file_dir={self.test_dir}",
        ]
        launch.main(args)
        # make sure all the workers ran
        # each worker touches a file with its global rank as the name
        self.assertSetEqual(
            {str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
        )

    @unittest.skipIf(is_tsan(), "test incompatible with tsan")
    def test_launch_elastic_multiple_agents(self):
        """Run several agents in parallel (extra ones in subprocesses)."""
        run_id = str(uuid.uuid4().int)
        min_nodes = 1
        max_nodes = 2
        nproc_per_node = 4
        nnodes = 2
        world_size = nnodes * nproc_per_node
        args = [
            f"--nnodes={min_nodes}:{max_nodes}",
            f"--nproc_per_node={nproc_per_node}",
            f"--rdzv_backend=etcd",
            f"--rdzv_endpoint={self._etcd_endpoint}",
            f"--rdzv_id={run_id}",
            f"--monitor_interval=1",
            f"--start_method=fork",
            path("bin/test_script.py"),
            f"--touch_file_dir={self.test_dir}",
        ]
        procs = []
        for _ in range(nnodes - 1):
            p = mp.Process(target=launch.main, args=[args])
            procs.append(p)
            p.start()
        # The last agent runs in this process.
        launch.main(args)
        for i in range(nnodes - 1):
            p = procs[i]
            p.join()
            self.assertEqual(0, p.exitcode)
        # make sure all the workers ran
        # each worker touches a file with its global rank as the name
        self.assertSetEqual(
            {str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
        )

    def test_min_max_nodes_parse(self):
        # NOTE(review): assertTrue(a, b) treats `b` as the failure message,
        # not as an expected value — these were presumably meant to be
        # assertEqual; confirm and fix upstream.
        min_nodes, max_nodes = launch.parse_min_max_nnodes("1")
        self.assertTrue(min_nodes, max_nodes)
        self.assertTrue(1, min_nodes)
        min_nodes, max_nodes = launch.parse_min_max_nnodes("2:20")
        self.assertTrue(2, min_nodes)
        self.assertTrue(20, max_nodes)
        with self.assertRaises(RuntimeError):
            launch.parse_min_max_nnodes("2:20:30")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.